// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Intel Corporation
 *
 * Originally split from drivers/iommu/intel/svm.c
 */

#include <linux/pci.h>
#include <linux/pci-ats.h>

#include "iommu.h"
#include "pasid.h"
#include "../iommu-pages.h"
#include "trace.h"

/* Page request queue descriptor */
struct page_req_dsc {
	union {
		struct {
			u64 type:8;
			u64 pasid_present:1;
			u64 rsvd:7;
			u64 rid:16;
			u64 pasid:20;
			u64 exe_req:1;
			u64 pm_req:1;
			u64 rsvd2:10;
		};
		u64 qw_0;
	};
	union {
		struct {
			u64 rd_req:1;
			u64 wr_req:1;
			u64 lpig:1;
			u64 prg_index:9;
			u64 addr:52;
		};
		u64 qw_1;
	};
	u64 qw_2;
	u64 qw_3;
};
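
/*
 * In the layout above, qw_0 identifies the requester (RID and, when
 * pasid_present is set, the PASID), the request type and the
 * execute/privileged flags; qw_1 carries the faulting page address, the
 * read/write flags, the page request group index and the
 * last-request-in-group (lpig) bit. qw_2 and qw_3 are not interpreted by
 * this driver and are only forwarded to the prq_report trace point.
 */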

/**
 * intel_iommu_drain_pasid_prq - Drain page requests and responses for a pasid
 * @dev: target device
 * @pasid: pasid for draining
 *
 * Drain all pending page requests and responses related to @pasid in both
 * software and hardware. It should only be called after the device driver
 * has stopped DMA, the pasid entry has been cleared, and both IOTLB and
 * DevTLB have been invalidated.
 *
 * It waits until all pending page requests for @pasid in the page fault
 * queue are completed by the prq handling thread. Then it follows the
 * steps described in VT-d spec CH7.10 to drain all page requests and
 * page responses pending in the hardware.
 */
void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	struct qi_desc desc[3];
	int head, tail;
	u16 sid, did;

	info = dev_iommu_priv_get(dev);
	if (!info->pri_enabled)
		return;

	iommu = info->iommu;
	domain = info->domain;
	sid = PCI_DEVID(info->bus, info->devfn);
	did = domain ? domain_id_iommu(domain, iommu) : FLPT_DEFAULT_DID;

	/*
	 * Check and wait until all pending page requests in the queue are
	 * handled by the prq handling thread.
	 */
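	/*
	 * Requests that belong to other devices or PASIDs are skipped; when
	 * a matching request is still queued, wait for the handling thread
	 * to signal prq_complete and then re-read head/tail from the start,
	 * since the ring may have moved on in the meantime.
	 */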
prq_retry:
	reinit_completion(&iommu->prq_complete);
	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct page_req_dsc *req;

		req = &iommu->prq[head / sizeof(*req)];
		if (req->rid != sid ||
		    (req->pasid_present && pasid != req->pasid) ||
		    (!req->pasid_present && pasid != IOMMU_NO_PASID)) {
			head = (head + sizeof(*req)) & PRQ_RING_MASK;
			continue;
		}

		wait_for_completion(&iommu->prq_complete);
		goto prq_retry;
	}

	iopf_queue_flush_dev(dev);

	/*
	 * Perform steps described in VT-d spec CH7.10 to drain page
	 * requests and responses in hardware.
	 */
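	/*
	 * Roughly, per that procedure: desc[0] is an invalidation wait with
	 * the fence flag set, so the invalidations that follow it are not
	 * processed until everything already queued (notably outstanding
	 * page group responses) has completed. desc[1]/desc[2] then
	 * invalidate the IOTLB and the device TLB, per PASID when a
	 * specific PASID is being drained, and QI_OPT_WAIT_DRAIN below asks
	 * qi_submit_sync() to drain pending page requests before its final
	 * wait completes.
	 */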
	memset(desc, 0, sizeof(desc));
	desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_FENCE |
			QI_IWD_TYPE;
	if (pasid == IOMMU_NO_PASID) {
		qi_desc_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, &desc[1]);
		qi_desc_dev_iotlb(sid, info->pfsid, info->ats_qdep, 0,
				  MAX_AGAW_PFN_WIDTH, &desc[2]);
	} else {
		qi_desc_piotlb(did, pasid, 0, -1, 0, &desc[1]);
		qi_desc_dev_iotlb_pasid(sid, info->pfsid, pasid, info->ats_qdep,
					0, MAX_AGAW_PFN_WIDTH, &desc[2]);
	}
qi_retry:
	reinit_completion(&iommu->prq_complete);
	qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		wait_for_completion(&iommu->prq_complete);
		goto qi_retry;
	}
}
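
/*
 * A rough caller sketch (illustrative only, assuming the PASID teardown
 * path in this driver): the PASID entry is torn down first, which also
 * invalidates the IOTLB and device TLB, and only then is the drain run:
 *
 *	intel_pasid_tear_down_entry(iommu, dev, pasid, false);
 *	intel_iommu_drain_pasid_prq(dev, pasid);
 */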

static bool is_canonical_address(u64 addr)
{
	int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	long saddr = (long)addr;

	return (((saddr << shift) >> shift) == saddr);
}

static void handle_bad_prq_event(struct intel_iommu *iommu,
				 struct page_req_dsc *req, int result)
{
	struct qi_desc desc = { };

	pr_err("%s: Invalid page request: %08llx %08llx\n",
	       iommu->name, ((unsigned long long *)req)[0],
	       ((unsigned long long *)req)[1]);

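	/*
	 * A response is only owed if the bad request closed a page request
	 * group (lpig set); anything else can simply be dropped.
	 */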
	if (!req->lpig)
		return;

	desc.qw0 = QI_PGRP_PASID(req->pasid) |
			QI_PGRP_DID(req->rid) |
			QI_PGRP_PASID_P(req->pasid_present) |
			QI_PGRP_RESP_CODE(result) |
			QI_PGRP_RESP_TYPE;
	desc.qw1 = QI_PGRP_IDX(req->prg_index) |
			QI_PGRP_LPIG(req->lpig);

	qi_submit_sync(iommu, &desc, 1, 0);
}

static int prq_to_iommu_prot(struct page_req_dsc *req)
{
	int prot = 0;

	if (req->rd_req)
		prot |= IOMMU_FAULT_PERM_READ;
	if (req->wr_req)
		prot |= IOMMU_FAULT_PERM_WRITE;
	if (req->exe_req)
		prot |= IOMMU_FAULT_PERM_EXEC;
	if (req->pm_req)
		prot |= IOMMU_FAULT_PERM_PRIV;

	return prot;
}

static void intel_prq_report(struct intel_iommu *iommu, struct device *dev,
			     struct page_req_dsc *desc)
{
	struct iopf_fault event = { };

	/* Fill in event data for device specific processing */
	event.fault.type = IOMMU_FAULT_PAGE_REQ;
	event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
	event.fault.prm.pasid = desc->pasid;
	event.fault.prm.grpid = desc->prg_index;
	event.fault.prm.perm = prq_to_iommu_prot(desc);

	if (desc->lpig)
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
	if (desc->pasid_present) {
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
	}

	iommu_report_device_fault(dev, &event);
}

static irqreturn_t prq_event_thread(int irq, void *d)
{
	struct intel_iommu *iommu = d;
	struct page_req_dsc *req;
	int head, tail, handled;
	struct device *dev;
	u64 address;

	/*
	 * Clear PPR bit before reading head/tail registers, to ensure that
	 * we get a new interrupt if needed.
	 */
	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
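	/*
	 * Head and tail are byte offsets into the page request ring, hence
	 * the division by the descriptor size when indexing below and the
	 * PRQ_RING_MASK wrap-around when advancing.
	 */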
	handled = (head != tail);
	while (head != tail) {
		req = &iommu->prq[head / sizeof(*req)];
		address = (u64)req->addr << VTD_PAGE_SHIFT;

		if (unlikely(!is_canonical_address(address))) {
			pr_err("IOMMU: %s: Address is not canonical\n",
			       iommu->name);
bad_req:
			handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
			goto prq_advance;
		}
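
		/*
		 * Privileged-mode and execute requests are not supported by
		 * this handler; respond to them as invalid.
		 */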
		if (unlikely(req->pm_req && (req->rd_req | req->wr_req))) {
			pr_err("IOMMU: %s: Page request in Privilege Mode\n",
			       iommu->name);
			goto bad_req;
		}

		if (unlikely(req->exe_req && req->rd_req)) {
			pr_err("IOMMU: %s: Execution request not supported\n",
			       iommu->name);
			goto bad_req;
		}

		/* Drop Stop Marker message. No need for a response. */
		if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
			goto prq_advance;

		/*
		 * If the page request is to be handled outside the iommu
		 * driver, by the receiver of the fault notification, skip
		 * the page response here.
		 */
		mutex_lock(&iommu->iopf_lock);
		dev = device_rbtree_find(iommu, req->rid);
		if (!dev) {
			mutex_unlock(&iommu->iopf_lock);
			goto bad_req;
		}

		intel_prq_report(iommu, dev, req);
		trace_prq_report(iommu, dev, req->qw_0, req->qw_1,
				 req->qw_2, req->qw_3,
				 iommu->prq_seq_number++);
		mutex_unlock(&iommu->iopf_lock);
prq_advance:
		head = (head + sizeof(*req)) & PRQ_RING_MASK;
	}

	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

	/*
	 * Clear the page request overflow bit and wake up all threads that
	 * are waiting for the completion of this handling.
	 */
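	/*
	 * The overflow indication is only cleared once software has caught
	 * up (head == tail). Partially reported fault groups queued in the
	 * iopf layer are discarded, as their remaining requests may have
	 * been dropped by hardware while the queue was overflowed.
	 */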
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n",
				    iommu->name);
		head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
		tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
		if (head == tail) {
			iopf_queue_discard_partial(iommu->iopf_queue);
			writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
			pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared\n",
					    iommu->name);
		}
	}

	if (!completion_done(&iommu->prq_complete))
		complete(&iommu->prq_complete);

	return IRQ_RETVAL(handled);
}

int intel_iommu_enable_prq(struct intel_iommu *iommu)
{
	struct iopf_queue *iopfq;
	int irq, ret;

	iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER);
	if (!iommu->prq) {
		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
			iommu->name);
		return -ENOMEM;
	}

	irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu);
	if (irq <= 0) {
		pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
		       iommu->name);
		ret = -EINVAL;
		goto free_prq;
	}
	iommu->pr_irq = irq;

	snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
		 "dmar%d-iopfq", iommu->seq_id);
	iopfq = iopf_queue_alloc(iommu->iopfq_name);
	if (!iopfq) {
		pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name);
		ret = -ENOMEM;
		goto free_hwirq;
	}
	iommu->iopf_queue = iopfq;

	snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);

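	/*
	 * The queue is serviced entirely from the threaded handler; with no
	 * primary handler, IRQF_ONESHOT is required so the interrupt stays
	 * masked until the thread has finished.
	 */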
	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
				   iommu->prq_name, iommu);
	if (ret) {
		pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
		       iommu->name);
		goto free_iopfq;
	}
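
	/*
	 * Hand the queue to hardware: head and tail start at zero, and the
	 * PQA register takes the physical base address of the queue with
	 * its size (PRQ_ORDER, a power-of-two number of pages) encoded in
	 * the low-order bits.
	 */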
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);

	init_completion(&iommu->prq_complete);

	return 0;

free_iopfq:
	iopf_queue_free(iommu->iopf_queue);
	iommu->iopf_queue = NULL;
free_hwirq:
	dmar_free_hwirq(irq);
	iommu->pr_irq = 0;
free_prq:
	iommu_free_pages(iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return ret;
}

int intel_iommu_finish_prq(struct intel_iommu *iommu)
{
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

	if (iommu->pr_irq) {
		free_irq(iommu->pr_irq, iommu);
		dmar_free_hwirq(iommu->pr_irq);
		iommu->pr_irq = 0;
	}

	if (iommu->iopf_queue) {
		iopf_queue_free(iommu->iopf_queue);
		iommu->iopf_queue = NULL;
	}

	iommu_free_pages(iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return 0;
}

void intel_iommu_page_response(struct device *dev, struct iopf_fault *evt,
			       struct iommu_page_response *msg)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct intel_iommu *iommu = info->iommu;
	u8 bus = info->bus, devfn = info->devfn;
	struct iommu_fault_page_request *prm;
	struct qi_desc desc;
	bool pasid_present;
	bool last_page;
	u16 sid;

	prm = &evt->fault.prm;
	sid = PCI_DEVID(bus, devfn);
	pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
	last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;

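	/*
	 * Build a page group response that mirrors the original request:
	 * same requester ID, PASID (when one was present) and group index,
	 * with the response code supplied by the fault consumer.
	 */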
	desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
			QI_PGRP_PASID_P(pasid_present) |
			QI_PGRP_RESP_CODE(msg->code) |
			QI_PGRP_RESP_TYPE;
	desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}