Linux v5.14.15: drivers/iommu/intel/debugfs.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2018 Intel Corporation.
 *
 * Authors: Gayatri Kammela <gayatri.kammela@intel.com>
 *	    Sohil Mehta <sohil.mehta@intel.com>
 *	    Jacob Pan <jacob.jun.pan@linux.intel.com>
 *	    Lu Baolu <baolu.lu@linux.intel.com>
 */

#include <linux/debugfs.h>
#include <linux/dmar.h>
#include <linux/intel-iommu.h>
#include <linux/pci.h>

#include <asm/irq_remapping.h>

#include "pasid.h"
#include "perf.h"

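/*
 * This file implements the debugfs interface of the Intel VT-d (DMAR)
 * driver. Each *_show() routine below backs one debugfs file that dumps
 * a piece of IOMMU state: raw registers, the root/context/PASID
 * translation structures, per-domain page tables, the invalidation
 * queue, and (with CONFIG_IRQ_REMAP) the interrupt remapping tables.
 */

/*
 * State carried through one walk of the translation structures for a
 * single device, so that print_tbl_walk() can emit the root entry,
 * context entry and (in scalable mode) PASID table entry on one line.
 */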
struct tbl_walk {
	u16 bus;
	u16 devfn;
	u32 pasid;
	struct root_entry *rt_entry;
	struct context_entry *ctx_entry;
	struct pasid_entry *pasid_tbl_entry;
};

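/*
 * One dumpable MMIO register: its offset from the register base and a
 * human-readable name for the debugfs output.
 */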
struct iommu_regset {
	int offset;
	const char *regs;
};

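/*
 * Scratch buffer that latency_show_one() snapshots the per-IOMMU
 * latency statistics into before copying them to the seq_file. Note
 * that it is a single static buffer shared by all readers.
 */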
#define DEBUG_BUFFER_SIZE	1024
static char debug_buf[DEBUG_BUFFER_SIZE];

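/*
 * Pair a DMAR register offset with its stringified name, so e.g.
 * IOMMU_REGSET_ENTRY(VER) expands to { DMAR_VER_REG, "VER" }.
 */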
#define IOMMU_REGSET_ENTRY(_reg_)					\
	{ DMAR_##_reg_##_REG, __stringify(_reg_) }

static const struct iommu_regset iommu_regs_32[] = {
	IOMMU_REGSET_ENTRY(VER),
	IOMMU_REGSET_ENTRY(GCMD),
	IOMMU_REGSET_ENTRY(GSTS),
	IOMMU_REGSET_ENTRY(FSTS),
	IOMMU_REGSET_ENTRY(FECTL),
	IOMMU_REGSET_ENTRY(FEDATA),
	IOMMU_REGSET_ENTRY(FEADDR),
	IOMMU_REGSET_ENTRY(FEUADDR),
	IOMMU_REGSET_ENTRY(PMEN),
	IOMMU_REGSET_ENTRY(PLMBASE),
	IOMMU_REGSET_ENTRY(PLMLIMIT),
	IOMMU_REGSET_ENTRY(ICS),
	IOMMU_REGSET_ENTRY(PRS),
	IOMMU_REGSET_ENTRY(PECTL),
	IOMMU_REGSET_ENTRY(PEDATA),
	IOMMU_REGSET_ENTRY(PEADDR),
	IOMMU_REGSET_ENTRY(PEUADDR),
};

static const struct iommu_regset iommu_regs_64[] = {
	IOMMU_REGSET_ENTRY(CAP),
	IOMMU_REGSET_ENTRY(ECAP),
	IOMMU_REGSET_ENTRY(RTADDR),
	IOMMU_REGSET_ENTRY(CCMD),
	IOMMU_REGSET_ENTRY(AFLOG),
	IOMMU_REGSET_ENTRY(PHMBASE),
	IOMMU_REGSET_ENTRY(PHMLIMIT),
	IOMMU_REGSET_ENTRY(IQH),
	IOMMU_REGSET_ENTRY(IQT),
	IOMMU_REGSET_ENTRY(IQA),
	IOMMU_REGSET_ENTRY(IRTA),
	IOMMU_REGSET_ENTRY(PQH),
	IOMMU_REGSET_ENTRY(PQT),
	IOMMU_REGSET_ENTRY(PQA),
	IOMMU_REGSET_ENTRY(MTRRCAP),
	IOMMU_REGSET_ENTRY(MTRRDEF),
	IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000),
	IOMMU_REGSET_ENTRY(MTRR_FIX16K_80000),
	IOMMU_REGSET_ENTRY(MTRR_FIX16K_A0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_C0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_C8000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_D0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_D8000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_E0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_E8000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_F0000),
	IOMMU_REGSET_ENTRY(MTRR_FIX4K_F8000),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE0),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK0),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE1),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK1),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE2),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK2),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE3),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK3),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE4),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK4),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE5),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK5),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE6),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK6),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE7),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK7),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE8),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK8),
	IOMMU_REGSET_ENTRY(MTRR_PHYSBASE9),
	IOMMU_REGSET_ENTRY(MTRR_PHYSMASK9),
	IOMMU_REGSET_ENTRY(VCCAP),
	IOMMU_REGSET_ENTRY(VCMD),
	IOMMU_REGSET_ENTRY(VCRSP),
};

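/*
 * Dump the raw 32-bit and 64-bit MMIO registers of every active IOMMU,
 * taking register_lock so the reads are not interleaved with writes
 * from the driver. Backs the "iommu_regset" debugfs file.
 */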
static int iommu_regset_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long flag;
	int i, ret = 0;
	u64 value;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!drhd->reg_base_addr) {
			seq_puts(m, "IOMMU: Invalid base address\n");
			ret = -EINVAL;
			goto out;
		}

		seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
			   iommu->name, drhd->reg_base_addr);
		seq_puts(m, "Name\t\t\tOffset\t\tContents\n");
		/*
		 * Publish the contents of the 32-bit and 64-bit hardware
		 * registers by adding each offset to the MMIO virtual
		 * address.
		 */
		raw_spin_lock_irqsave(&iommu->register_lock, flag);
		for (i = 0 ; i < ARRAY_SIZE(iommu_regs_32); i++) {
			value = dmar_readl(iommu->reg + iommu_regs_32[i].offset);
			seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
				   iommu_regs_32[i].regs, iommu_regs_32[i].offset,
				   value);
		}
		for (i = 0 ; i < ARRAY_SIZE(iommu_regs_64); i++) {
			value = dmar_readq(iommu->reg + iommu_regs_64[i].offset);
			seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
				   iommu_regs_64[i].regs, iommu_regs_64[i].offset,
				   value);
		}
		raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
		seq_putc(m, '\n');
	}
out:
	rcu_read_unlock();

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(iommu_regset);
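
/*
 * DEFINE_SHOW_ATTRIBUTE() comes from <linux/seq_file.h>; for a given
 * name it generates roughly the following boilerplate (a sketch, see
 * the header for the exact definition):
 *
 *	static int iommu_regset_open(struct inode *inode, struct file *file)
 *	{
 *		return single_open(file, iommu_regset_show, inode->i_private);
 *	}
 *
 *	static const struct file_operations iommu_regset_fops = {
 *		.owner   = THIS_MODULE,
 *		.open    = iommu_regset_open,
 *		.read    = seq_read,
 *		.llseek  = seq_lseek,
 *		.release = single_release,
 *	};
 */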

static inline void print_tbl_walk(struct seq_file *m)
{
	struct tbl_walk *tbl_wlk = m->private;

	seq_printf(m, "%02x:%02x.%x\t0x%016llx:0x%016llx\t0x%016llx:0x%016llx\t",
		   tbl_wlk->bus, PCI_SLOT(tbl_wlk->devfn),
		   PCI_FUNC(tbl_wlk->devfn), tbl_wlk->rt_entry->hi,
		   tbl_wlk->rt_entry->lo, tbl_wlk->ctx_entry->hi,
		   tbl_wlk->ctx_entry->lo);

	/*
	 * A legacy mode DMAR doesn't support PASID, hence default it to -1
	 * indicating that it's invalid. Also, default all PASID related fields
	 * to 0.
	 */
	if (!tbl_wlk->pasid_tbl_entry)
		seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n", -1,
			   (u64)0, (u64)0, (u64)0);
	else
		seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n",
			   tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[2],
			   tbl_wlk->pasid_tbl_entry->val[1],
			   tbl_wlk->pasid_tbl_entry->val[0]);
}

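/*
 * In scalable mode the PASID structures are two levels deep: a context
 * entry points to a PASID directory, whose entries each point to a
 * PASID table of PASID_TBL_ENTRIES entries. The PASID value is thus
 * (directory index << PASID_PDE_SHIFT) + table index.
 */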
static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry,
			   u16 dir_idx)
{
	struct tbl_walk *tbl_wlk = m->private;
	u8 tbl_idx;

	for (tbl_idx = 0; tbl_idx < PASID_TBL_ENTRIES; tbl_idx++) {
		if (pasid_pte_is_present(tbl_entry)) {
			tbl_wlk->pasid_tbl_entry = tbl_entry;
			tbl_wlk->pasid = (dir_idx << PASID_PDE_SHIFT) + tbl_idx;
			print_tbl_walk(m);
		}

		tbl_entry++;
	}
}

static void pasid_dir_walk(struct seq_file *m, u64 pasid_dir_ptr,
			   u16 pasid_dir_size)
{
	struct pasid_dir_entry *dir_entry = phys_to_virt(pasid_dir_ptr);
	struct pasid_entry *pasid_tbl;
	u16 dir_idx;

	for (dir_idx = 0; dir_idx < pasid_dir_size; dir_idx++) {
		pasid_tbl = get_pasid_table_from_pde(dir_entry);
		if (pasid_tbl)
			pasid_tbl_walk(m, pasid_tbl, dir_idx);

		dir_entry++;
	}
}

static void ctx_tbl_walk(struct seq_file *m, struct intel_iommu *iommu, u16 bus)
{
	struct context_entry *context;
	u16 devfn, pasid_dir_size;
	u64 pasid_dir_ptr;

	for (devfn = 0; devfn < 256; devfn++) {
		struct tbl_walk tbl_wlk = {0};

		/*
		 * A scalable mode root entry points to an upper and a lower
		 * scalable mode context table. Each scalable mode context
		 * table holds 128 context entries, whereas a legacy mode
		 * context table holds 256. So in scalable mode the context
		 * entries for the first 128 devfns live in the lower context
		 * table, while those for the remaining 128 live in the upper
		 * one. When devfn > 127 in scalable mode, iommu_context_addr()
		 * automatically refers to the upper context table, so the
		 * caller doesn't have to worry about the difference between
		 * scalable and non-scalable mode.
		 */
		context = iommu_context_addr(iommu, bus, devfn, 0);
		if (!context)
			return;

		if (!context_present(context))
			continue;

		tbl_wlk.bus = bus;
		tbl_wlk.devfn = devfn;
		tbl_wlk.rt_entry = &iommu->root_entry[bus];
		tbl_wlk.ctx_entry = context;
		m->private = &tbl_wlk;

		if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT) {
			pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
			pasid_dir_size = get_pasid_dir_size(context);
			pasid_dir_walk(m, pasid_dir_ptr, pasid_dir_size);
			continue;
		}

		print_tbl_walk(m);
	}
}

static void root_tbl_walk(struct seq_file *m, struct intel_iommu *iommu)
{
	unsigned long flags;
	u16 bus;

	spin_lock_irqsave(&iommu->lock, flags);
	seq_printf(m, "IOMMU %s: Root Table Address: 0x%llx\n", iommu->name,
		   (u64)virt_to_phys(iommu->root_entry));
	seq_puts(m, "B.D.F\tRoot_entry\t\t\t\tContext_entry\t\t\t\tPASID\tPASID_table_entry\n");

	/*
	 * No need to check whether the root entry is present:
	 * iommu_context_addr() performs that check before returning the
	 * context entry.
	 */
	for (bus = 0; bus < 256; bus++)
		ctx_tbl_walk(m, iommu, bus);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

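/*
 * Walk the root table of every IOMMU that has DMA remapping enabled
 * (GSTS.TES set) and dump all present root/context/PASID entries.
 * Backs the "dmar_translation_struct" debugfs file.
 */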
static int dmar_translation_struct_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	u32 sts;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
		if (!(sts & DMA_GSTS_TES)) {
			seq_printf(m, "DMA Remapping is not enabled on %s\n",
				   iommu->name);
			continue;
		}
		root_tbl_walk(m, iommu);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dmar_translation_struct);

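/*
 * IOVA range covered by a single entry at the given page-table level:
 * 4KiB << (9 * (level - 1)), e.g. 2MiB at level 2 and 1GiB at level 3.
 */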
static inline unsigned long level_to_directory_size(int level)
{
	return BIT_ULL(VTD_PAGE_SHIFT + VTD_STRIDE_SHIFT * (level - 1));
}

static inline void
dump_page_info(struct seq_file *m, unsigned long iova, u64 *path)
{
	seq_printf(m, "0x%013lx |\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\n",
		   iova >> VTD_PAGE_SHIFT, path[5], path[4],
		   path[3], path[2], path[1]);
}

static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
			       int level, unsigned long start,
			       u64 *path)
{
	int i;

	if (level > 5 || level < 1)
		return;

	for (i = 0; i < BIT_ULL(VTD_STRIDE_SHIFT);
			i++, pde++, start += level_to_directory_size(level)) {
		if (!dma_pte_present(pde))
			continue;

		path[level] = pde->val;
		if (dma_pte_superpage(pde) || level == 1)
			dump_page_info(m, start, path);
		else
			pgtable_walk_level(m, phys_to_virt(dma_pte_addr(pde)),
					   level - 1, start, path);
		path[level] = 0;
	}
}

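/*
 * Called for every PCI device; dumps the second-level page table of
 * the device's domain, if any. domain->agaw + 2 converts the adjusted
 * guest address width index into the number of page-table levels.
 */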
static int show_device_domain_translation(struct device *dev, void *data)
{
	struct dmar_domain *domain = find_domain(dev);
	struct seq_file *m = data;
	u64 path[6] = { 0 };

	if (!domain)
		return 0;

	seq_printf(m, "Device %s with pasid %d @0x%llx\n",
		   dev_name(dev), domain->default_pasid,
		   (u64)virt_to_phys(domain->pgd));
	seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n");

	pgtable_walk_level(m, domain->pgd, domain->agaw + 2, 0, path);
	seq_putc(m, '\n');

	return 0;
}

static int domain_translation_struct_show(struct seq_file *m, void *unused)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&device_domain_lock, flags);
	ret = bus_for_each_dev(&pci_bus_type, NULL, m,
			       show_device_domain_translation);
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return ret;
}
DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);

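/*
 * Invalidation queue descriptors are 128 bits in legacy mode and 256
 * bits in scalable mode (ECAP.SMTS); qi_shift() returns the matching
 * log2 descriptor size, so index << shift is the byte offset of a
 * descriptor within the queue.
 */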
static void invalidation_queue_entry_show(struct seq_file *m,
					  struct intel_iommu *iommu)
{
	int index, shift = qi_shift(iommu);
	struct qi_desc *desc;
	int offset;

	if (ecap_smts(iommu->ecap))
		seq_puts(m, "Index\t\tqw0\t\t\tqw1\t\t\tqw2\t\t\tqw3\t\t\tstatus\n");
	else
		seq_puts(m, "Index\t\tqw0\t\t\tqw1\t\t\tstatus\n");

	for (index = 0; index < QI_LENGTH; index++) {
		offset = index << shift;
		desc = iommu->qi->desc + offset;
		if (ecap_smts(iommu->ecap))
			seq_printf(m, "%5d\t%016llx\t%016llx\t%016llx\t%016llx\t%016x\n",
				   index, desc->qw0, desc->qw1,
				   desc->qw2, desc->qw3,
				   iommu->qi->desc_status[index]);
		else
			seq_printf(m, "%5d\t%016llx\t%016llx\t%016x\n",
				   index, desc->qw0, desc->qw1,
				   iommu->qi->desc_status[index]);
	}
}

static int invalidation_queue_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	unsigned long flags;
	struct q_inval *qi;
	int shift;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		qi = iommu->qi;
		shift = qi_shift(iommu);

		if (!qi || !ecap_qis(iommu->ecap))
			continue;

		seq_printf(m, "Invalidation queue on IOMMU: %s\n", iommu->name);

		raw_spin_lock_irqsave(&qi->q_lock, flags);
		seq_printf(m, " Base: 0x%llx\tHead: %lld\tTail: %lld\n",
			   (u64)virt_to_phys(qi->desc),
			   dmar_readq(iommu->reg + DMAR_IQH_REG) >> shift,
			   dmar_readq(iommu->reg + DMAR_IQT_REG) >> shift);
		invalidation_queue_entry_show(m, iommu);
		raw_spin_unlock_irqrestore(&qi->q_lock, flags);
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(invalidation_queue);

#ifdef CONFIG_IRQ_REMAP
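/*
 * Print the IRTEs that are present and in remapped (not posted)
 * format, one per line with the source ID, destination and vector.
 */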
static void ir_tbl_remap_entry_show(struct seq_file *m,
				    struct intel_iommu *iommu)
{
	struct irte *ri_entry;
	unsigned long flags;
	int idx;

	seq_puts(m, " Entry SrcID   DstID    Vct IRTE_high\t\tIRTE_low\n");

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
		ri_entry = &iommu->ir_table->base[idx];
		if (!ri_entry->present || ri_entry->p_pst)
			continue;

		seq_printf(m, " %-5d %02x:%02x.%01x %08x %02x  %016llx\t%016llx\n",
			   idx, PCI_BUS_NUM(ri_entry->sid),
			   PCI_SLOT(ri_entry->sid), PCI_FUNC(ri_entry->sid),
			   ri_entry->dest_id, ri_entry->vector,
			   ri_entry->high, ri_entry->low);
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
}

static void ir_tbl_posted_entry_show(struct seq_file *m,
				     struct intel_iommu *iommu)
{
	struct irte *pi_entry;
	unsigned long flags;
	int idx;

	seq_puts(m, " Entry SrcID   PDA_high PDA_low  Vct IRTE_high\t\tIRTE_low\n");

	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
	for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
		pi_entry = &iommu->ir_table->base[idx];
		if (!pi_entry->present || !pi_entry->p_pst)
			continue;

		seq_printf(m, " %-5d %02x:%02x.%01x %08x %08x %02x  %016llx\t%016llx\n",
			   idx, PCI_BUS_NUM(pi_entry->sid),
			   PCI_SLOT(pi_entry->sid), PCI_FUNC(pi_entry->sid),
			   pi_entry->pda_h, pi_entry->pda_l << 6,
			   pi_entry->vector, pi_entry->high,
			   pi_entry->low);
	}
	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
}

/*
 * For each active IOMMU, walk the interrupt remapping table and print
 * the valid entries, in table format, for remapped and posted
 * interrupts.
 */
static int ir_translation_struct_show(struct seq_file *m, void *unused)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	u64 irta;
	u32 sts;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd) {
		if (!ecap_ir_support(iommu->ecap))
			continue;

		seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n",
			   iommu->name);

		sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
		if (iommu->ir_table && (sts & DMA_GSTS_IRES)) {
			irta = virt_to_phys(iommu->ir_table->base);
			seq_printf(m, " IR table address:%llx\n", irta);
			ir_tbl_remap_entry_show(m, iommu);
		} else {
			seq_puts(m, "Interrupt Remapping is not enabled\n");
		}
		seq_putc(m, '\n');
	}

	seq_puts(m, "****\n\n");

	for_each_active_iommu(iommu, drhd) {
		if (!cap_pi_support(iommu->cap))
			continue;

		seq_printf(m, "Posted Interrupt supported on IOMMU: %s\n",
			   iommu->name);

		if (iommu->ir_table) {
			irta = virt_to_phys(iommu->ir_table->base);
			seq_printf(m, " IR table address:%llx\n", irta);
			ir_tbl_posted_entry_show(m, iommu);
		} else {
			seq_puts(m, "Interrupt Remapping is not enabled\n");
		}
		seq_putc(m, '\n');
	}
	rcu_read_unlock();

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(ir_translation_struct);
#endif

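/*
 * Latency statistics below: the dmar_latency_*() helpers are declared
 * in "perf.h" (and implemented in the driver's perf code); they sample
 * the latency of queued invalidations and page requests per IOMMU.
 */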
static void latency_show_one(struct seq_file *m, struct intel_iommu *iommu,
			     struct dmar_drhd_unit *drhd)
{
	int ret;

	seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
		   iommu->name, drhd->reg_base_addr);

	ret = dmar_latency_snapshot(iommu, debug_buf, DEBUG_BUFFER_SIZE);
	if (ret < 0)
		seq_puts(m, "Failed to get latency snapshot");
	else
		seq_puts(m, debug_buf);
	seq_puts(m, "\n");
}

static int latency_show(struct seq_file *m, void *v)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	rcu_read_lock();
	for_each_active_iommu(iommu, drhd)
		latency_show_one(m, iommu, drhd);
	rcu_read_unlock();

	return 0;
}

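/*
 * "dmar_perf_latency" is read-write: reading dumps the current latency
 * snapshot, while writing a single integer selects what is counted.
 * 0 disables all counters; 1, 2, 3 and 4 enable IOTLB invalidation,
 * device-TLB invalidation, IEC invalidation and page-request latency
 * counting, respectively.
 */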
static int dmar_perf_latency_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, latency_show, NULL);
}

static ssize_t dmar_perf_latency_write(struct file *filp,
				       const char __user *ubuf,
				       size_t cnt, loff_t *ppos)
{
	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;
	int counting;
	char buf[64];

	if (cnt > 63)
		cnt = 63;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	if (kstrtoint(buf, 0, &counting))
		return -EINVAL;

	switch (counting) {
	case 0:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd) {
			dmar_latency_disable(iommu, DMAR_LATENCY_INV_IOTLB);
			dmar_latency_disable(iommu, DMAR_LATENCY_INV_DEVTLB);
			dmar_latency_disable(iommu, DMAR_LATENCY_INV_IEC);
			dmar_latency_disable(iommu, DMAR_LATENCY_PRQ);
		}
		rcu_read_unlock();
		break;
	case 1:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_INV_IOTLB);
		rcu_read_unlock();
		break;
	case 2:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_INV_DEVTLB);
		rcu_read_unlock();
		break;
	case 3:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_INV_IEC);
		rcu_read_unlock();
		break;
	case 4:
		rcu_read_lock();
		for_each_active_iommu(iommu, drhd)
			dmar_latency_enable(iommu, DMAR_LATENCY_PRQ);
		rcu_read_unlock();
		break;
	default:
		return -EINVAL;
	}

	*ppos += cnt;
	return cnt;
}

static const struct file_operations dmar_perf_latency_fops = {
	.open		= dmar_perf_latency_open,
	.write		= dmar_perf_latency_write,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

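/*
 * Create the debugfs hierarchy. With debugfs mounted at the usual
 * /sys/kernel/debug and iommu_debugfs_dir being the core "iommu"
 * directory, the files created below appear as, e.g.,
 * /sys/kernel/debug/iommu/intel/iommu_regset. A usage sketch
 * (paths assume that standard mount point):
 *
 *	# cat /sys/kernel/debug/iommu/intel/dmar_translation_struct
 *	# echo 1 > /sys/kernel/debug/iommu/intel/dmar_perf_latency
 *	# cat /sys/kernel/debug/iommu/intel/dmar_perf_latency
 */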
void __init intel_iommu_debugfs_init(void)
{
	struct dentry *intel_iommu_debug = debugfs_create_dir("intel",
						iommu_debugfs_dir);

	debugfs_create_file("iommu_regset", 0444, intel_iommu_debug, NULL,
			    &iommu_regset_fops);
	debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug,
			    NULL, &dmar_translation_struct_fops);
	debugfs_create_file("domain_translation_struct", 0444,
			    intel_iommu_debug, NULL,
			    &domain_translation_struct_fops);
	debugfs_create_file("invalidation_queue", 0444, intel_iommu_debug,
			    NULL, &invalidation_queue_fops);
#ifdef CONFIG_IRQ_REMAP
	debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
			    NULL, &ir_translation_struct_fops);
#endif
	debugfs_create_file("dmar_perf_latency", 0644, intel_iommu_debug,
			    NULL, &dmar_perf_latency_fops);
}