/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright © 2006-2015, Intel Corporation.
 *
 * Authors: Ashok Raj <ashok.raj@intel.com>
 *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *          David Woodhouse <David.Woodhouse@intel.com>
 */

#ifndef _INTEL_IOMMU_H_
#define _INTEL_IOMMU_H_

#include <linux/types.h>
#include <linux/iova.h>
#include <linux/io.h>
#include <linux/idr.h>
#include <linux/mmu_notifier.h>
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmar.h>
#include <linux/ioasid.h>
#include <linux/bitfield.h>
#include <linux/xarray.h>

#include <asm/cacheflush.h>
#include <asm/iommu.h>

/*
 * VT-d hardware uses 4KiB page size regardless of host page size.
 */
#define VTD_PAGE_SHIFT		(12)
#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
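
/*
 * Illustrative example (added; not part of the original header):
 * VTD_PAGE_ALIGN() rounds an address up to the next 4KiB boundary, e.g.
 *
 *	VTD_PAGE_ALIGN(0x12345) == 0x13000
 *	VTD_PAGE_ALIGN(0x13000) == 0x13000
 */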

#define VTD_STRIDE_SHIFT	(9)
#define VTD_STRIDE_MASK		(((u64)-1) << VTD_STRIDE_SHIFT)

#define DMA_PTE_READ		BIT_ULL(0)
#define DMA_PTE_WRITE		BIT_ULL(1)
#define DMA_PTE_LARGE_PAGE	BIT_ULL(7)
#define DMA_PTE_SNP		BIT_ULL(11)

#define DMA_FL_PTE_PRESENT	BIT_ULL(0)
#define DMA_FL_PTE_US		BIT_ULL(2)
#define DMA_FL_PTE_ACCESS	BIT_ULL(5)
#define DMA_FL_PTE_DIRTY	BIT_ULL(6)
#define DMA_FL_PTE_XD		BIT_ULL(63)

#define ADDR_WIDTH_5LEVEL	(57)
#define ADDR_WIDTH_4LEVEL	(48)

#define CONTEXT_TT_MULTI_LEVEL	0
#define CONTEXT_TT_DEV_IOTLB	1
#define CONTEXT_TT_PASS_THROUGH	2
#define CONTEXT_PASIDE		BIT_ULL(3)

/*
 * Intel IOMMU register layout, per the version 1.0 public specification.
 */
#define DMAR_VER_REG		0x0	/* Arch version supported by this IOMMU */
#define DMAR_CAP_REG		0x8	/* Hardware supported capabilities */
#define DMAR_ECAP_REG		0x10	/* Extended capabilities supported */
#define DMAR_GCMD_REG		0x18	/* Global command register */
#define DMAR_GSTS_REG		0x1c	/* Global status register */
#define DMAR_RTADDR_REG		0x20	/* Root entry table */
#define DMAR_CCMD_REG		0x28	/* Context command reg */
#define DMAR_FSTS_REG		0x34	/* Fault Status register */
#define DMAR_FECTL_REG		0x38	/* Fault control register */
#define DMAR_FEDATA_REG		0x3c	/* Fault event interrupt data register */
#define DMAR_FEADDR_REG		0x40	/* Fault event interrupt addr register */
#define DMAR_FEUADDR_REG	0x44	/* Upper address register */
#define DMAR_AFLOG_REG		0x58	/* Advanced Fault control */
#define DMAR_PMEN_REG		0x64	/* Enable Protected Memory Region */
#define DMAR_PLMBASE_REG	0x68	/* PMRR low base addr */
#define DMAR_PLMLIMIT_REG	0x6c	/* PMRR low limit */
#define DMAR_PHMBASE_REG	0x70	/* PMRR high base addr */
#define DMAR_PHMLIMIT_REG	0x78	/* PMRR high limit */
#define DMAR_IQH_REG		0x80	/* Invalidation queue head register */
#define DMAR_IQT_REG		0x88	/* Invalidation queue tail register */
#define DMAR_IQ_SHIFT		4	/* Invalidation queue head/tail shift */
#define DMAR_IQA_REG		0x90	/* Invalidation queue addr register */
#define DMAR_ICS_REG		0x9c	/* Invalidation complete status register */
#define DMAR_IQER_REG		0xb0	/* Invalidation queue error record register */
#define DMAR_IRTA_REG		0xb8	/* Interrupt remapping table addr register */
#define DMAR_PQH_REG		0xc0	/* Page request queue head register */
#define DMAR_PQT_REG		0xc8	/* Page request queue tail register */
#define DMAR_PQA_REG		0xd0	/* Page request queue address register */
#define DMAR_PRS_REG		0xdc	/* Page request status register */
#define DMAR_PECTL_REG		0xe0	/* Page request event control register */
#define DMAR_PEDATA_REG		0xe4	/* Page request event interrupt data register */
#define DMAR_PEADDR_REG		0xe8	/* Page request event interrupt addr register */
#define DMAR_PEUADDR_REG	0xec	/* Page request event Upper address register */
#define DMAR_MTRRCAP_REG	0x100	/* MTRR capability register */
#define DMAR_MTRRDEF_REG	0x108	/* MTRR default type register */
#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
#define DMAR_MTRR_FIX16K_80000_REG 0x128
#define DMAR_MTRR_FIX16K_A0000_REG 0x130
#define DMAR_MTRR_FIX4K_C0000_REG 0x138
#define DMAR_MTRR_FIX4K_C8000_REG 0x140
#define DMAR_MTRR_FIX4K_D0000_REG 0x148
#define DMAR_MTRR_FIX4K_D8000_REG 0x150
#define DMAR_MTRR_FIX4K_E0000_REG 0x158
#define DMAR_MTRR_FIX4K_E8000_REG 0x160
#define DMAR_MTRR_FIX4K_F0000_REG 0x168
#define DMAR_MTRR_FIX4K_F8000_REG 0x170
#define DMAR_MTRR_PHYSBASE0_REG 0x180	/* MTRR Variable range registers */
#define DMAR_MTRR_PHYSMASK0_REG 0x188
#define DMAR_MTRR_PHYSBASE1_REG 0x190
#define DMAR_MTRR_PHYSMASK1_REG 0x198
#define DMAR_MTRR_PHYSBASE2_REG 0x1a0
#define DMAR_MTRR_PHYSMASK2_REG 0x1a8
#define DMAR_MTRR_PHYSBASE3_REG 0x1b0
#define DMAR_MTRR_PHYSMASK3_REG 0x1b8
#define DMAR_MTRR_PHYSBASE4_REG 0x1c0
#define DMAR_MTRR_PHYSMASK4_REG 0x1c8
#define DMAR_MTRR_PHYSBASE5_REG 0x1d0
#define DMAR_MTRR_PHYSMASK5_REG 0x1d8
#define DMAR_MTRR_PHYSBASE6_REG 0x1e0
#define DMAR_MTRR_PHYSMASK6_REG 0x1e8
#define DMAR_MTRR_PHYSBASE7_REG 0x1f0
#define DMAR_MTRR_PHYSMASK7_REG 0x1f8
#define DMAR_MTRR_PHYSBASE8_REG 0x200
#define DMAR_MTRR_PHYSMASK8_REG 0x208
#define DMAR_MTRR_PHYSBASE9_REG 0x210
#define DMAR_MTRR_PHYSMASK9_REG 0x218
#define DMAR_VCCAP_REG		0xe30	/* Virtual command capability register */
#define DMAR_VCMD_REG		0xe00	/* Virtual command register */
#define DMAR_VCRSP_REG		0xe10	/* Virtual command response register */

#define DMAR_IQER_REG_IQEI(reg)		FIELD_GET(GENMASK_ULL(3, 0), reg)
#define DMAR_IQER_REG_ITESID(reg)	FIELD_GET(GENMASK_ULL(47, 32), reg)
#define DMAR_IQER_REG_ICESID(reg)	FIELD_GET(GENMASK_ULL(63, 48), reg)

#define OFFSET_STRIDE		(9)

#define dmar_readq(a) readq(a)
#define dmar_writeq(a, v) writeq(v, a)
#define dmar_readl(a) readl(a)
#define dmar_writel(a, v) writel(v, a)

#define DMAR_VER_MAJOR(v)	(((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v)	((v) & 0x0f)
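
/*
 * Example (added for illustration): a DMAR_VER_REG value of 0x10 decodes
 * as DMAR_VER_MAJOR() == 1 and DMAR_VER_MINOR() == 0, i.e. a version 1.0
 * implementation.
 */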

/*
 * Decoding Capability Register
 */
#define cap_esrtps(c)		(((c) >> 63) & 1)
#define cap_esirtps(c)		(((c) >> 62) & 1)
#define cap_fl5lp_support(c)	(((c) >> 60) & 1)
#define cap_pi_support(c)	(((c) >> 59) & 1)
#define cap_fl1gp_support(c)	(((c) >> 56) & 1)
#define cap_read_drain(c)	(((c) >> 55) & 1)
#define cap_write_drain(c)	(((c) >> 54) & 1)
#define cap_max_amask_val(c)	(((c) >> 48) & 0x3f)
#define cap_num_fault_regs(c)	((((c) >> 40) & 0xff) + 1)
#define cap_pgsel_inv(c)	(((c) >> 39) & 1)

#define cap_super_page_val(c)	(((c) >> 34) & 0xf)
#define cap_super_offset(c)	(((find_first_bit(&cap_super_page_val(c), 4)) \
				  * OFFSET_STRIDE) + 21)

#define cap_fault_reg_offset(c)	((((c) >> 24) & 0x3ff) * 16)
#define cap_max_fault_reg_offset(c) \
	(cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)

#define cap_zlr(c)		(((c) >> 22) & 1)
#define cap_isoch(c)		(((c) >> 23) & 1)
#define cap_mgaw(c)		((((c) >> 16) & 0x3f) + 1)
#define cap_sagaw(c)		(((c) >> 8) & 0x1f)
#define cap_caching_mode(c)	(((c) >> 7) & 1)
#define cap_phmr(c)		(((c) >> 6) & 1)
#define cap_plmr(c)		(((c) >> 5) & 1)
#define cap_rwbf(c)		(((c) >> 4) & 1)
#define cap_afl(c)		(((c) >> 3) & 1)
#define cap_ndoms(c)		(((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
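
/*
 * Usage sketch (added for illustration; not part of the original header):
 * decoding a few capability fields from a raw DMAR_CAP_REG value.
 *
 *	u64 cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 *
 *	pr_info("mgaw=%d ndoms=%lu cm=%d\n",
 *		(int)cap_mgaw(cap), cap_ndoms(cap), (int)cap_caching_mode(cap));
 */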
/*
 * Extended Capability Register
 */

#define ecap_rps(e)		(((e) >> 49) & 0x1)
#define ecap_smpwc(e)		(((e) >> 48) & 0x1)
#define ecap_flts(e)		(((e) >> 47) & 0x1)
#define ecap_slts(e)		(((e) >> 46) & 0x1)
#define ecap_slads(e)		(((e) >> 45) & 0x1)
#define ecap_vcs(e)		(((e) >> 44) & 0x1)
#define ecap_smts(e)		(((e) >> 43) & 0x1)
#define ecap_dit(e)		(((e) >> 41) & 0x1)
#define ecap_pds(e)		(((e) >> 42) & 0x1)
#define ecap_pasid(e)		(((e) >> 40) & 0x1)
#define ecap_pss(e)		(((e) >> 35) & 0x1f)
#define ecap_eafs(e)		(((e) >> 34) & 0x1)
#define ecap_nwfs(e)		(((e) >> 33) & 0x1)
#define ecap_srs(e)		(((e) >> 31) & 0x1)
#define ecap_ers(e)		(((e) >> 30) & 0x1)
#define ecap_prs(e)		(((e) >> 29) & 0x1)
#define ecap_broken_pasid(e)	(((e) >> 28) & 0x1)
#define ecap_dis(e)		(((e) >> 27) & 0x1)
#define ecap_nest(e)		(((e) >> 26) & 0x1)
#define ecap_mts(e)		(((e) >> 25) & 0x1)
#define ecap_iotlb_offset(e)	((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e)	((e) & 0x1)
#define ecap_qis(e)		((e) & 0x2)
#define ecap_pass_through(e)	(((e) >> 6) & 0x1)
#define ecap_eim_support(e)	(((e) >> 4) & 0x1)
#define ecap_ir_support(e)	(((e) >> 3) & 0x1)
#define ecap_dev_iotlb_support(e)	(((e) >> 2) & 0x1)
#define ecap_max_handle_mask(e)	(((e) >> 20) & 0xf)
#define ecap_sc_support(e)	(((e) >> 7) & 0x1) /* Snooping Control */

/* Virtual command interface capability */
#define vccap_pasid(v)		(((v) & DMA_VCS_PAS)) /* PASID allocation */

/* IOTLB_REG */
#define DMA_TLB_FLUSH_GRANU_OFFSET	60
#define DMA_TLB_GLOBAL_FLUSH	(((u64)1) << 60)
#define DMA_TLB_DSI_FLUSH	(((u64)2) << 60)
#define DMA_TLB_PSI_FLUSH	(((u64)3) << 60)
#define DMA_TLB_IIRG(type)	((type >> 60) & 3)
#define DMA_TLB_IAIG(val)	(((val) >> 57) & 3)
#define DMA_TLB_READ_DRAIN	(((u64)1) << 49)
#define DMA_TLB_WRITE_DRAIN	(((u64)1) << 48)
#define DMA_TLB_DID(id)		(((u64)((id) & 0xffff)) << 32)
#define DMA_TLB_IVT		(((u64)1) << 63)
#define DMA_TLB_IH_NONLEAF	(((u64)1) << 6)
#define DMA_TLB_MAX_SIZE	(0x3f)

/* INVALID_DESC */
#define DMA_CCMD_INVL_GRANU_OFFSET	61
#define DMA_ID_TLB_GLOBAL_FLUSH	(((u64)1) << 4)
#define DMA_ID_TLB_DSI_FLUSH	(((u64)2) << 4)
#define DMA_ID_TLB_PSI_FLUSH	(((u64)3) << 4)
#define DMA_ID_TLB_READ_DRAIN	(((u64)1) << 7)
#define DMA_ID_TLB_WRITE_DRAIN	(((u64)1) << 6)
#define DMA_ID_TLB_DID(id)	(((u64)((id & 0xffff) << 16)))
#define DMA_ID_TLB_IH_NONLEAF	(((u64)1) << 6)
#define DMA_ID_TLB_ADDR(addr)	(addr)
#define DMA_ID_TLB_ADDR_MASK(mask)	(mask)

/* PMEN_REG */
#define DMA_PMEN_EPM		(((u32)1) << 31)
#define DMA_PMEN_PRS		(((u32)1) << 0)

/* GCMD_REG */
#define DMA_GCMD_TE		(((u32)1) << 31)
#define DMA_GCMD_SRTP		(((u32)1) << 30)
#define DMA_GCMD_SFL		(((u32)1) << 29)
#define DMA_GCMD_EAFL		(((u32)1) << 28)
#define DMA_GCMD_WBF		(((u32)1) << 27)
#define DMA_GCMD_QIE		(((u32)1) << 26)
#define DMA_GCMD_SIRTP		(((u32)1) << 24)
#define DMA_GCMD_IRE		(((u32)1) << 25)
#define DMA_GCMD_CFI		(((u32)1) << 23)

/* GSTS_REG */
#define DMA_GSTS_TES		(((u32)1) << 31)
#define DMA_GSTS_RTPS		(((u32)1) << 30)
#define DMA_GSTS_FLS		(((u32)1) << 29)
#define DMA_GSTS_AFLS		(((u32)1) << 28)
#define DMA_GSTS_WBFS		(((u32)1) << 27)
#define DMA_GSTS_QIES		(((u32)1) << 26)
#define DMA_GSTS_IRTPS		(((u32)1) << 24)
#define DMA_GSTS_IRES		(((u32)1) << 25)
#define DMA_GSTS_CFIS		(((u32)1) << 23)

/* DMA_RTADDR_REG */
#define DMA_RTADDR_SMT		(((u64)1) << 10)

/* CCMD_REG */
#define DMA_CCMD_ICC		(((u64)1) << 63)
#define DMA_CCMD_GLOBAL_INVL	(((u64)1) << 61)
#define DMA_CCMD_DOMAIN_INVL	(((u64)2) << 61)
#define DMA_CCMD_DEVICE_INVL	(((u64)3) << 61)
#define DMA_CCMD_FM(m)		(((u64)((m) & 0x3)) << 32)
#define DMA_CCMD_MASK_NOBIT	0
#define DMA_CCMD_MASK_1BIT	1
#define DMA_CCMD_MASK_2BIT	2
#define DMA_CCMD_MASK_3BIT	3
#define DMA_CCMD_SID(s)		(((u64)((s) & 0xffff)) << 16)
#define DMA_CCMD_DID(d)		((u64)((d) & 0xffff))

/* FECTL_REG */
#define DMA_FECTL_IM		(((u32)1) << 31)

/* FSTS_REG */
#define DMA_FSTS_PFO		(1 << 0) /* Primary Fault Overflow */
#define DMA_FSTS_PPF		(1 << 1) /* Primary Pending Fault */
#define DMA_FSTS_IQE		(1 << 4) /* Invalidation Queue Error */
#define DMA_FSTS_ICE		(1 << 5) /* Invalidation Completion Error */
#define DMA_FSTS_ITE		(1 << 6) /* Invalidation Time-out Error */
#define DMA_FSTS_PRO		(1 << 7) /* Page Request Overflow */
#define dma_fsts_fault_record_index(s)	(((s) >> 8) & 0xff)

/* FRCD_REG, 32-bit access */
#define DMA_FRCD_F		(((u32)1) << 31)
#define dma_frcd_type(d)	((d >> 30) & 1)
#define dma_frcd_fault_reason(c)	(c & 0xff)
#define dma_frcd_source_id(c)	(c & 0xffff)
#define dma_frcd_pasid_value(c)	(((c) >> 8) & 0xfffff)
#define dma_frcd_pasid_present(c)	(((c) >> 31) & 1)
/* low 64 bits */
#define dma_frcd_page_addr(d)	(d & (((u64)-1) << PAGE_SHIFT))

/* PRS_REG */
#define DMA_PRS_PPR	((u32)1)
#define DMA_PRS_PRO	((u32)2)

#define DMA_VCS_PAS	((u64)1)

#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)			\
do {									\
	cycles_t start_time = get_cycles();				\
	while (1) {							\
		sts = op(iommu->reg + offset);				\
		if (cond)						\
			break;						\
		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
			panic("DMAR hardware is malfunctioning\n");	\
		cpu_relax();						\
	}								\
} while (0)
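
/*
 * Usage sketch (added for illustration): callers poll a status register
 * with this macro after issuing a command, along the lines of
 *
 *	u32 sts;
 *
 *	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
 *		      (sts & DMA_GSTS_TES), sts);
 */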

#define QI_LENGTH	256	/* queue length */

enum {
	QI_FREE,
	QI_IN_USE,
	QI_DONE,
	QI_ABORT
};

#define QI_CC_TYPE		0x1
#define QI_IOTLB_TYPE		0x2
#define QI_DIOTLB_TYPE		0x3
#define QI_IEC_TYPE		0x4
#define QI_IWD_TYPE		0x5
#define QI_EIOTLB_TYPE		0x6
#define QI_PC_TYPE		0x7
#define QI_DEIOTLB_TYPE		0x8
#define QI_PGRP_RESP_TYPE	0x9
#define QI_PSTRM_RESP_TYPE	0xa

#define QI_IEC_SELECTIVE	(((u64)1) << 4)
#define QI_IEC_IIDEX(idx)	(((u64)(idx & 0xffff) << 32))
#define QI_IEC_IM(m)		(((u64)(m & 0x1f) << 27))

#define QI_IWD_STATUS_DATA(d)	(((u64)d) << 32)
#define QI_IWD_STATUS_WRITE	(((u64)1) << 5)
#define QI_IWD_FENCE		(((u64)1) << 6)
#define QI_IWD_PRQ_DRAIN	(((u64)1) << 7)

#define QI_IOTLB_DID(did)	(((u64)did) << 16)
#define QI_IOTLB_DR(dr)		(((u64)dr) << 7)
#define QI_IOTLB_DW(dw)		(((u64)dw) << 6)
#define QI_IOTLB_GRAN(gran)	(((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
#define QI_IOTLB_ADDR(addr)	(((u64)addr) & VTD_PAGE_MASK)
#define QI_IOTLB_IH(ih)		(((u64)ih) << 6)
#define QI_IOTLB_AM(am)		(((u8)am) & 0x3f)

#define QI_CC_FM(fm)		(((u64)fm) << 48)
#define QI_CC_SID(sid)		(((u64)sid) << 32)
#define QI_CC_DID(did)		(((u64)did) << 16)
#define QI_CC_GRAN(gran)	(((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))

#define QI_DEV_IOTLB_SID(sid)	((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep)	(((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
				   ((u64)((pfsid >> 4) & 0xfff) << 52))
#define QI_DEV_IOTLB_SIZE	1
#define QI_DEV_IOTLB_MAX_INVS	32

#define QI_PC_PASID(pasid)	(((u64)pasid) << 32)
#define QI_PC_DID(did)		(((u64)did) << 16)
#define QI_PC_GRAN(gran)	(((u64)gran) << 4)

/* PASID cache invalidation granu */
#define QI_PC_ALL_PASIDS	0
#define QI_PC_PASID_SEL		1
#define QI_PC_GLOBAL		3

#define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
#define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
#define QI_EIOTLB_AM(am)	(((u64)am) & 0x3f)
#define QI_EIOTLB_PASID(pasid)	(((u64)pasid) << 32)
#define QI_EIOTLB_DID(did)	(((u64)did) << 16)
#define QI_EIOTLB_GRAN(gran)	(((u64)gran) << 4)

/* QI Dev-IOTLB inv granu */
#define QI_DEV_IOTLB_GRAN_ALL		1
#define QI_DEV_IOTLB_GRAN_PASID_SEL	0

#define QI_DEV_EIOTLB_ADDR(a)	((u64)(a) & VTD_PAGE_MASK)
#define QI_DEV_EIOTLB_SIZE	(((u64)1) << 11)
#define QI_DEV_EIOTLB_PASID(p)	((u64)((p) & 0xfffff) << 32)
#define QI_DEV_EIOTLB_SID(sid)	((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd)	((u64)((qd) & 0x1f) << 4)
#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
				    ((u64)((pfsid >> 4) & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS	32

/* Page group response descriptor QW0 */
#define QI_PGRP_PASID_P(p)	(((u64)(p)) << 4)
#define QI_PGRP_PDP(p)		(((u64)(p)) << 5)
#define QI_PGRP_RESP_CODE(res)	(((u64)(res)) << 12)
#define QI_PGRP_DID(rid)	(((u64)(rid)) << 16)
#define QI_PGRP_PASID(pasid)	(((u64)(pasid)) << 32)

/* Page group response descriptor QW1 */
#define QI_PGRP_LPIG(x)		(((u64)(x)) << 2)
#define QI_PGRP_IDX(idx)	(((u64)(idx)) << 3)

#define QI_RESP_SUCCESS		0x0
#define QI_RESP_INVALID		0x1
#define QI_RESP_FAILURE		0xf

#define QI_GRAN_NONG_PASID	2
#define QI_GRAN_PSI_PASID	3

#define qi_shift(iommu)		(DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))
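
/*
 * Note (added): DMAR_IQ_SHIFT of 4 matches the legacy 128-bit (16-byte)
 * invalidation descriptors; scalable mode (ecap_smts) uses 256-bit
 * (32-byte) descriptors, hence the extra shift bit.
 */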

struct qi_desc {
	u64 qw0;
	u64 qw1;
	u64 qw2;
	u64 qw3;
};
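
/*
 * Example (added for illustration, roughly what qi_flush_iotlb() does in
 * the driver): composing a domain-selective IOTLB invalidation descriptor
 * from the QI_IOTLB_* fields above.
 *
 *	struct qi_desc desc = {};
 *
 *	desc.qw0 = QI_IOTLB_DID(did) | QI_IOTLB_GRAN(DMA_TLB_DSI_FLUSH) |
 *		   QI_IOTLB_TYPE;
 */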

struct q_inval {
	raw_spinlock_t	q_lock;
	void		*desc;		/* invalidation queue */
	int		*desc_status;	/* desc status */
	int		free_head;	/* first free entry */
	int		free_tail;	/* last free entry */
	int		free_cnt;
};

struct dmar_pci_notify_info;

#ifdef CONFIG_IRQ_REMAP
/* 1MB - maximum possible interrupt remapping table size */
#define INTR_REMAP_PAGE_ORDER	8
#define INTR_REMAP_TABLE_REG_SIZE	0xf
#define INTR_REMAP_TABLE_REG_SIZE_MASK	0xf

#define INTR_REMAP_TABLE_ENTRIES	65536

struct irq_domain;

struct ir_table {
	struct irte *base;
	unsigned long *bitmap;
};

void intel_irq_remap_add_device(struct dmar_pci_notify_info *info);
#else
static inline void
intel_irq_remap_add_device(struct dmar_pci_notify_info *info) { }
#endif

struct iommu_flush {
	void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
			      u8 fm, u64 type);
	void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
			    unsigned int size_order, u64 type);
};

enum {
	SR_DMAR_FECTL_REG,
	SR_DMAR_FEDATA_REG,
	SR_DMAR_FEADDR_REG,
	SR_DMAR_FEUADDR_REG,
	MAX_SR_DMAR_REGS
};

#define VTD_FLAG_TRANS_PRE_ENABLED	(1 << 0)
#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED	(1 << 1)
#define VTD_FLAG_SVM_CAPABLE		(1 << 2)

#define sm_supported(iommu)	(intel_iommu_sm && ecap_smts((iommu)->ecap))
#define pasid_supported(iommu)	(sm_supported(iommu) &&			\
				 ecap_pasid((iommu)->ecap))

struct pasid_entry;
struct pasid_state_entry;
struct page_req_dsc;

/*
 * 0: Present
 * 1-11: Reserved
 * 12-63: Context Ptr (12 - (haw-1))
 * 64-127: Reserved
 */
struct root_entry {
	u64 lo;
	u64 hi;
};

/*
 * low 64 bits:
 * 0: present
 * 1: fault processing disable
 * 2-3: translation type
 * 12-63: address space root
 * high 64 bits:
 * 0-2: address width
 * 3-6: aval
 * 8-23: domain id
 */
struct context_entry {
	u64 lo;
	u64 hi;
};

struct iommu_domain_info {
	struct intel_iommu *iommu;
	unsigned int refcnt;	/* Refcount of devices per iommu */
	u16 did;		/* Domain ids per IOMMU. Use u16 since
				 * domain ids are 16 bits wide per the
				 * VT-d spec, section 9.3 */
};

struct dmar_domain {
	int	nid;			/* node id */
	struct xarray iommu_array;	/* Attached IOMMU array */

	u8 has_iotlb_device: 1;
	u8 iommu_coherency: 1;		/* indicate coherency of iommu access */
	u8 force_snooping : 1;		/* Create IOPTEs with snoop control */
	u8 set_pte_snp:1;
	u8 use_first_level:1;		/* DMA translation for the domain goes
					 * through the first level page table,
					 * otherwise, goes through the second
					 * level.
					 */

	spinlock_t lock;		/* Protect device tracking lists */
	struct list_head devices;	/* all devices' list */

	struct dma_pte	*pgd;		/* virtual address */
	int		gaw;		/* max guest address width */

	/* adjusted guest address width, 0 is level 2 30-bit */
	int		agaw;
	int		iommu_superpage;/* Level of superpages supported:
					   0 == 4KiB (no superpages), 1 == 2MiB,
					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
	u64		max_addr;	/* maximum mapped address */

	struct iommu_domain domain;	/* generic domain data structure for
					   iommu core */
};

struct intel_iommu {
	void __iomem	*reg;	/* Pointer to hardware regs, virtual addr */
	u64		reg_phys; /* physical address of hw register set */
	u64		reg_size; /* size of hw register set */
	u64		cap;
	u64		ecap;
	u64		vccap;
	u32		gcmd;	/* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
	raw_spinlock_t	register_lock; /* protect register handling */
	int		seq_id;	/* sequence id of the iommu */
	int		agaw;	/* agaw of this iommu */
	int		msagaw;	/* max sagaw of this iommu */
	unsigned int	irq, pr_irq;
	u16		segment;	/* PCI segment# */
	unsigned char	name[13];	/* Device Name */

#ifdef CONFIG_INTEL_IOMMU
	unsigned long	*domain_ids;	/* bitmap of domains */
	unsigned long	*copied_tables;	/* bitmap of copied tables */
	spinlock_t	lock;	/* protect context, domain ids */
	struct root_entry *root_entry;	/* virtual address */

	struct iommu_flush flush;
#endif
#ifdef CONFIG_INTEL_IOMMU_SVM
	struct page_req_dsc *prq;
	unsigned char prq_name[16];	/* Name for PRQ interrupt */
	unsigned long prq_seq_number;
	struct completion prq_complete;
	struct ioasid_allocator_ops pasid_allocator; /* Custom allocator for PASIDs */
#endif
	struct iopf_queue *iopf_queue;
	unsigned char iopfq_name[16];
	struct q_inval *qi;	/* Queued invalidation info */
	u32 *iommu_state;	/* Store iommu states between suspend and resume. */

#ifdef CONFIG_IRQ_REMAP
	struct ir_table *ir_table;	/* Interrupt remapping info */
	struct irq_domain *ir_domain;
#endif
	struct iommu_device iommu;	/* IOMMU core code handle */
	int		node;
	u32		flags;		/* Software defined flags */

	struct dmar_drhd_unit *drhd;
	void *perf_statistic;
};

/* PCI domain-device relationship */
struct device_domain_info {
	struct list_head link;	/* link to domain siblings */
	u32 segment;		/* PCI segment number */
	u8 bus;			/* PCI bus number */
	u8 devfn;		/* PCI devfn number */
	u16 pfsid;		/* SRIOV physical function source ID */
	u8 pasid_supported:3;
	u8 pasid_enabled:1;
	u8 pri_supported:1;
	u8 pri_enabled:1;
	u8 ats_supported:1;
	u8 ats_enabled:1;
	u8 dtlb_extra_inval:1;	/* Quirk for devices that need an extra flush */
	u8 ats_qdep;
	struct device *dev;	/* NULL for a PCIe-to-PCI bridge */
	struct intel_iommu *iommu; /* IOMMU used by this device */
	struct dmar_domain *domain; /* pointer to domain */
	struct pasid_table *pasid_table; /* pasid table */
};

static inline void __iommu_flush_cache(
	struct intel_iommu *iommu, void *addr, int size)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(addr, size);
}

/* Convert generic struct iommu_domain to private struct dmar_domain */
static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct dmar_domain, domain);
}

/* Retrieve the domain ID allocated to the domain on @iommu */
static inline u16
domain_id_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{
	struct iommu_domain_info *info =
		xa_load(&domain->iommu_array, iommu->seq_id);

	return info->did;
}

/*
 * 0: readable
 * 1: writable
 * 2-6: reserved
 * 7: super page
 * 8-10: available
 * 11: snoop behavior
 * 12-63: Host physical address
 */
struct dma_pte {
	u64 val;
};

static inline void dma_clear_pte(struct dma_pte *pte)
{
	pte->val = 0;
}

static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
	return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
#else
	/* Must have a full atomic 64-bit read */
	return __cmpxchg64(&pte->val, 0ULL, 0ULL) &
			VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
#endif
}

static inline bool dma_pte_present(struct dma_pte *pte)
{
	return (pte->val & 3) != 0;
}

static inline bool dma_pte_superpage(struct dma_pte *pte)
{
	return (pte->val & DMA_PTE_LARGE_PAGE);
}

static inline bool first_pte_in_page(struct dma_pte *pte)
{
	return IS_ALIGNED((unsigned long)pte, VTD_PAGE_SIZE);
}

static inline int nr_pte_to_next_page(struct dma_pte *pte)
{
	return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) :
		(struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
}
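
/*
 * Example (added for illustration; "level_shift" and "next_table" are
 * hypothetical names): stepping one level down a page-table walk with the
 * helpers above.
 *
 *	struct dma_pte *pte = &pgd[(iova >> level_shift) &
 *				   (BIT(VTD_STRIDE_SHIFT) - 1)];
 *
 *	if (dma_pte_present(pte) && !dma_pte_superpage(pte))
 *		next_table = phys_to_virt(dma_pte_addr(pte));
 */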

static inline bool context_present(struct context_entry *context)
{
	return (context->lo & 1);
}

extern struct dmar_drhd_unit *dmar_find_matched_drhd_unit(struct pci_dev *dev);

extern int dmar_enable_qi(struct intel_iommu *iommu);
extern void dmar_disable_qi(struct intel_iommu *iommu);
extern int dmar_reenable_qi(struct intel_iommu *iommu);
extern void qi_global_iec(struct intel_iommu *iommu);

extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
			     u8 fm, u64 type);
extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
			   unsigned int size_order, u64 type);
extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			       u16 qdep, u64 addr, unsigned mask);

void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
		     unsigned long npages, bool ih);

void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
			      u32 pasid, u16 qdep, u64 addr,
			      unsigned int size_order);
void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
			       unsigned long address, unsigned long pages,
			       u32 pasid, u16 qdep);
void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
			  u32 pasid);

int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
		   unsigned int count, unsigned long options);
/*
 * Options used in qi_submit_sync:
 * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
 */
#define QI_OPT_WAIT_DRAIN	BIT(0)
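
/*
 * Usage sketch (added for illustration): callers fill one or more
 * descriptors and submit them synchronously; QI_OPT_WAIT_DRAIN additionally
 * waits for page requests to drain.
 *
 *	qi_submit_sync(iommu, &desc, 1, 0);
 */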

extern int dmar_ir_support(void);

void *alloc_pgtable_page(int node);
void free_pgtable_page(void *vaddr);
void iommu_flush_write_buffer(struct intel_iommu *iommu);
struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);

#ifdef CONFIG_INTEL_IOMMU_SVM
extern void intel_svm_check(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);
int intel_svm_page_response(struct device *dev, struct iommu_fault_event *evt,
			    struct iommu_page_response *msg);
struct iommu_domain *intel_svm_domain_alloc(void);
void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid);

struct intel_svm_dev {
	struct list_head list;
	struct rcu_head rcu;
	struct device *dev;
	struct intel_iommu *iommu;
	struct iommu_sva sva;
	u32 pasid;
	int users;
	u16 did;
	u16 dev_iotlb:1;
	u16 sid, qdep;
};

struct intel_svm {
	struct mmu_notifier notifier;
	struct mm_struct *mm;

	unsigned int flags;
	u32 pasid;
	struct list_head devs;
};
#else
static inline void intel_svm_check(struct intel_iommu *iommu) {}
static inline struct iommu_domain *intel_svm_domain_alloc(void)
{
	return NULL;
}

static inline void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid)
{
}
#endif

#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
void intel_iommu_debugfs_init(void);
#else
static inline void intel_iommu_debugfs_init(void) {}
#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */

extern const struct attribute_group *intel_iommu_groups[];
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
					 u8 devfn, int alloc);

extern const struct iommu_ops intel_iommu_ops;

#ifdef CONFIG_INTEL_IOMMU
extern int intel_iommu_sm;
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
extern int intel_iommu_enabled;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
	return 0;
}
static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
{
	return 0;
}
#define dmar_disabled		(1)
#define intel_iommu_enabled	(0)
#define intel_iommu_sm		(0)
#endif

static inline const char *decode_prq_descriptor(char *str, size_t size,
		u64 dw0, u64 dw1, u64 dw2, u64 dw3)
{
	char *buf = str;
	int bytes;

	bytes = snprintf(buf, size,
			 "rid=0x%llx addr=0x%llx %c%c%c%c%c pasid=0x%llx index=0x%llx",
			 FIELD_GET(GENMASK_ULL(31, 16), dw0),
			 FIELD_GET(GENMASK_ULL(63, 12), dw1),
			 dw1 & BIT_ULL(0) ? 'r' : '-',
			 dw1 & BIT_ULL(1) ? 'w' : '-',
			 dw0 & BIT_ULL(52) ? 'x' : '-',
			 dw0 & BIT_ULL(53) ? 'p' : '-',
			 dw1 & BIT_ULL(2) ? 'l' : '-',
			 FIELD_GET(GENMASK_ULL(51, 32), dw0),
			 FIELD_GET(GENMASK_ULL(11, 3), dw1));

	/* Private Data */
	if (dw0 & BIT_ULL(9)) {
		size -= bytes;
		buf += bytes;
		snprintf(buf, size, " private=0x%llx/0x%llx\n", dw2, dw3);
	}

	return str;
}

#endif /* _INTEL_IOMMU_H_ */