v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Handle device page faults
  4 *
  5 * Copyright (C) 2020 ARM Ltd.
  6 */
  7
  8#include <linux/iommu.h>
  9#include <linux/list.h>
 10#include <linux/sched/mm.h>
 11#include <linux/slab.h>
 12#include <linux/workqueue.h>
 13
 14#include "iommu-sva.h"
 15
 16/**
 17 * struct iopf_queue - IO Page Fault queue
 18 * @wq: the fault workqueue
 19 * @devices: devices attached to this queue
 20 * @lock: protects the device list
 21 */
 22struct iopf_queue {
 23	struct workqueue_struct		*wq;
 24	struct list_head		devices;
 25	struct mutex			lock;
 26};
 27
 28/**
 29 * struct iopf_device_param - IO Page Fault data attached to a device
 30 * @dev: the device that owns this param
 31 * @queue: IOPF queue
 32 * @queue_list: index into queue->devices
 33 * @partial: faults that are part of a Page Request Group for which the last
 34 *           request hasn't been submitted yet.
 35 */
 36struct iopf_device_param {
 37	struct device			*dev;
 38	struct iopf_queue		*queue;
 39	struct list_head		queue_list;
 40	struct list_head		partial;
 41};
 42
 43struct iopf_fault {
 44	struct iommu_fault		fault;
 45	struct list_head		list;
 46};
 47
 48struct iopf_group {
 49	struct iopf_fault		last_fault;
 50	struct list_head		faults;
 51	struct work_struct		work;
 52	struct device			*dev;
 53};
 54
 55static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
 56			       enum iommu_page_response_code status)
 57{
 58	struct iommu_page_response resp = {
 59		.version		= IOMMU_PAGE_RESP_VERSION_1,
 60		.pasid			= iopf->fault.prm.pasid,
 61		.grpid			= iopf->fault.prm.grpid,
 62		.code			= status,
 63	};
 64
 65	if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
 66	    (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
 67		resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
 68
 69	return iommu_page_response(dev, &resp);
 70}
 71
 72static void iopf_handler(struct work_struct *work)
 73{
 74	struct iopf_group *group;
 75	struct iommu_domain *domain;
 76	struct iopf_fault *iopf, *next;
 77	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
 78
 79	group = container_of(work, struct iopf_group, work);
 80	domain = iommu_get_domain_for_dev_pasid(group->dev,
 81				group->last_fault.fault.prm.pasid, 0);
 82	if (!domain || !domain->iopf_handler)
 83		status = IOMMU_PAGE_RESP_INVALID;
 84
 85	list_for_each_entry_safe(iopf, next, &group->faults, list) {
 86		/*
 87		 * For the moment, errors are sticky: don't handle subsequent
 88		 * faults in the group if there is an error.
 89		 */
 90		if (status == IOMMU_PAGE_RESP_SUCCESS)
 91			status = domain->iopf_handler(&iopf->fault,
 92						      domain->fault_data);
 93
 94		if (!(iopf->fault.prm.flags &
 95		      IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
 96			kfree(iopf);
 97	}
 98
 99	iopf_complete_group(group->dev, &group->last_fault, status);
100	kfree(group);
101}
102
103/**
104 * iommu_queue_iopf - IO Page Fault handler
105 * @fault: fault event
106 * @cookie: struct device, passed to iommu_register_device_fault_handler.
107 *
108 * Add a fault to the device workqueue, to be handled by mm.
109 *
110 * This module doesn't handle PCI PASID Stop Marker; IOMMU drivers must discard
111 * them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't
112 * expect a response. It may be generated when disabling a PASID (issuing a
113 * PASID stop request) by some PCI devices.
114 *
115 * The PASID stop request is issued by the device driver before unbind(). Once
116 * it completes, no page request is generated for this PASID anymore and
117 * outstanding ones have been pushed to the IOMMU (as per PCIe 4.0r1.0 - 6.20.1
118 * and 10.4.1.2 - Managing PASID TLP Prefix Usage). Some PCI devices will wait
119 * for all outstanding page requests to come back with a response before
120 * completing the PASID stop request. Others do not wait for page responses, and
121 * instead issue this Stop Marker that tells us when the PASID can be
122 * reallocated.
123 *
124 * It is safe to discard the Stop Marker because it is an optimization.
125 * a. Page requests, which are posted requests, have been flushed to the IOMMU
126 *    when the stop request completes.
127 * b. The IOMMU driver flushes all fault queues on unbind() before freeing the
128 *    PASID.
129 *
130 * So even though the Stop Marker might be issued by the device *after* the stop
131 * request completes, outstanding faults will have been dealt with by the time
132 * the PASID is freed.
133 *
134 * Any valid page fault will eventually be routed to an iommu domain and the
135 * page fault handler installed there will be called. The users of this
136 * handling framework must guarantee that the iommu domain can only be
137 * freed after the device has stopped generating page faults (or the iommu
138 * hardware has been set to block the page faults) and the pending page faults
139 * have been flushed.
140 *
141 * Return: 0 on success and <0 on error.
142 */
143int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
144{
145	int ret;
146	struct iopf_group *group;
147	struct iopf_fault *iopf, *next;
148	struct iopf_device_param *iopf_param;
149
150	struct device *dev = cookie;
151	struct dev_iommu *param = dev->iommu;
152
153	lockdep_assert_held(&param->lock);
154
155	if (fault->type != IOMMU_FAULT_PAGE_REQ)
156		/* Not a recoverable page fault */
157		return -EOPNOTSUPP;
158
159	/*
160	 * As long as we're holding param->lock, the queue can't be unlinked
161	 * from the device and therefore cannot disappear.
162	 */
163	iopf_param = param->iopf_param;
164	if (!iopf_param)
165		return -ENODEV;
166
167	if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
168		iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
169		if (!iopf)
170			return -ENOMEM;
171
172		iopf->fault = *fault;
173
174		/* Non-last request of a group. Postpone until the last one */
175		list_add(&iopf->list, &iopf_param->partial);
176
177		return 0;
178	}
179
180	group = kzalloc(sizeof(*group), GFP_KERNEL);
181	if (!group) {
182		/*
183		 * The caller will send a response to the hardware. But we do
184		 * need to clean up before leaving, otherwise partial faults
185		 * will be stuck.
186		 */
187		ret = -ENOMEM;
188		goto cleanup_partial;
189	}
190
191	group->dev = dev;
192	group->last_fault.fault = *fault;
193	INIT_LIST_HEAD(&group->faults);
194	list_add(&group->last_fault.list, &group->faults);
195	INIT_WORK(&group->work, iopf_handler);
196
197	/* See if we have partial faults for this group */
198	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
199		if (iopf->fault.prm.grpid == fault->prm.grpid)
200			/* Insert *before* the last fault */
201			list_move(&iopf->list, &group->faults);
202	}
203
204	queue_work(iopf_param->queue->wq, &group->work);
205	return 0;
206
207cleanup_partial:
208	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
209		if (iopf->fault.prm.grpid == fault->prm.grpid) {
210			list_del(&iopf->list);
211			kfree(iopf);
212		}
213	}
214	return ret;
215}
216EXPORT_SYMBOL_GPL(iommu_queue_iopf);
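
/*
 * Editorial sketch, not part of io-pgfault.c: how an IOMMU driver of this
 * kernel generation typically plugs iommu_queue_iopf() in as the per-device
 * fault handler and reports a recoverable page request from its PRI/event
 * IRQ thread. It builds only on headers this file already includes; the
 * field values below are illustrative placeholders.
 */
static int example_enable_iopf(struct device *dev)
{
	/* Route faults reported for @dev into the IOPF workqueue above. */
	return iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
}

static void example_report_page_request(struct device *dev, u32 pasid,
					u32 grpid, u64 addr, bool last)
{
	struct iommu_fault_event evt = {
		.fault = {
			.type = IOMMU_FAULT_PAGE_REQ,
			.prm = {
				.flags = IOMMU_FAULT_PAGE_REQUEST_PASID_VALID |
					 (last ? IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE : 0),
				.pasid = pasid,
				.grpid = grpid,
				.perm = IOMMU_FAULT_PERM_READ,
				.addr = addr,
			},
		},
	};

	/*
	 * Non-last requests sit on the partial list; the last request of the
	 * group schedules iopf_handler(), which ends with a call to
	 * iommu_page_response() back to the hardware.
	 */
	if (iommu_report_device_fault(dev, &evt))
		dev_warn(dev, "failed to report page request\n");
}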
217
218/**
219 * iopf_queue_flush_dev - Ensure that all queued faults have been processed
220 * @dev: the endpoint whose faults need to be flushed.
221 *
222 * The IOMMU driver calls this before releasing a PASID, to ensure that all
223 * pending faults for this PASID have been handled, and won't hit the address
224 * space of the next process that uses this PASID. The driver must make sure
225 * that no new fault is added to the queue. In particular it must flush its
226 * low-level queue before calling this function.
227 *
228 * Return: 0 on success and <0 on error.
229 */
230int iopf_queue_flush_dev(struct device *dev)
231{
232	int ret = 0;
233	struct iopf_device_param *iopf_param;
234	struct dev_iommu *param = dev->iommu;
235
236	if (!param)
237		return -ENODEV;
238
239	mutex_lock(&param->lock);
240	iopf_param = param->iopf_param;
241	if (iopf_param)
242		flush_workqueue(iopf_param->queue->wq);
243	else
244		ret = -ENODEV;
245	mutex_unlock(&param->lock);
246
247	return ret;
248}
249EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
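
/*
 * Editorial sketch, not part of io-pgfault.c: the order a driver would follow
 * before freeing or reallocating a PASID. The hardware-specific steps are
 * left as comments; only the flush call is the API documented above.
 */
static void example_release_pasid(struct device *dev, u32 pasid)
{
	/* Driver-specific: stop the device and IOMMU from generating new
	 * page requests for @pasid, then drain the hardware PRI queue so
	 * every outstanding request has been pushed into the IOPF queue. */

	/* Wait for the workqueue to finish every fault already queued. */
	if (iopf_queue_flush_dev(dev))
		dev_warn(dev, "could not flush pending page faults\n");

	/* Only now is it safe to free or reuse @pasid. */
}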
250
251/**
252 * iopf_queue_discard_partial - Remove all pending partial faults
253 * @queue: the queue whose partial faults need to be discarded
254 *
255 * When the hardware queue overflows, the last page faults in a group may have
256 * been lost and the IOMMU driver calls this to discard all partial faults. The
257 * driver shouldn't be adding new faults to this queue concurrently.
258 *
259 * Return: 0 on success and <0 on error.
260 */
261int iopf_queue_discard_partial(struct iopf_queue *queue)
262{
263	struct iopf_fault *iopf, *next;
264	struct iopf_device_param *iopf_param;
265
266	if (!queue)
267		return -EINVAL;
268
269	mutex_lock(&queue->lock);
270	list_for_each_entry(iopf_param, &queue->devices, queue_list) {
271		list_for_each_entry_safe(iopf, next, &iopf_param->partial,
272					 list) {
273			list_del(&iopf->list);
274			kfree(iopf);
275		}
276	}
277	mutex_unlock(&queue->lock);
278	return 0;
279}
280EXPORT_SYMBOL_GPL(iopf_queue_discard_partial);
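
/*
 * Editorial sketch, not part of io-pgfault.c: a hardware queue overflow path.
 * When the IOMMU signals that its page-request queue overflowed, the "last"
 * request of some groups may never arrive, so the partial faults accumulated
 * above have to be dropped; the endpoints will retry the lost requests once
 * the queue is resumed (a driver-specific register write).
 */
static void example_handle_prq_overflow(struct iopf_queue *queue)
{
	/* Drop partial faults whose group can no longer be completed. */
	iopf_queue_discard_partial(queue);

	/* Driver-specific: clear the overflow condition and resume the
	 * hardware page-request queue. */
}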
281
282/**
283 * iopf_queue_add_device - Add producer to the fault queue
284 * @queue: IOPF queue
285 * @dev: device to add
286 *
287 * Return: 0 on success and <0 on error.
288 */
289int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
290{
291	int ret = -EBUSY;
292	struct iopf_device_param *iopf_param;
293	struct dev_iommu *param = dev->iommu;
294
295	if (!param)
296		return -ENODEV;
297
298	iopf_param = kzalloc(sizeof(*iopf_param), GFP_KERNEL);
299	if (!iopf_param)
300		return -ENOMEM;
301
302	INIT_LIST_HEAD(&iopf_param->partial);
303	iopf_param->queue = queue;
304	iopf_param->dev = dev;
305
306	mutex_lock(&queue->lock);
307	mutex_lock(&param->lock);
308	if (!param->iopf_param) {
309		list_add(&iopf_param->queue_list, &queue->devices);
310		param->iopf_param = iopf_param;
311		ret = 0;
312	}
313	mutex_unlock(&param->lock);
314	mutex_unlock(&queue->lock);
315
316	if (ret)
317		kfree(iopf_param);
318
319	return ret;
320}
321EXPORT_SYMBOL_GPL(iopf_queue_add_device);
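
/*
 * Editorial sketch, not part of io-pgfault.c: typical producer setup. One
 * iopf_queue is usually allocated with iopf_queue_alloc() per IOMMU instance
 * at probe time; each PRI- or stall-capable endpoint is then added as a
 * producer and gets iommu_queue_iopf() registered as its fault handler.
 */
static int example_iopf_enable_dev(struct iopf_queue *queue, struct device *dev)
{
	int ret;

	ret = iopf_queue_add_device(queue, dev);
	if (ret)	/* -EBUSY if the device already has a queue */
		return ret;

	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
	if (ret)
		iopf_queue_remove_device(queue, dev);

	return ret;
}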
322
323/**
324 * iopf_queue_remove_device - Remove producer from fault queue
325 * @queue: IOPF queue
326 * @dev: device to remove
327 *
328 * The caller must make sure that no more faults are reported for this device.
329 *
330 * Return: 0 on success and <0 on error.
331 */
332int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
333{
334	int ret = -EINVAL;
335	struct iopf_fault *iopf, *next;
336	struct iopf_device_param *iopf_param;
337	struct dev_iommu *param = dev->iommu;
338
339	if (!param || !queue)
340		return -EINVAL;
341
342	mutex_lock(&queue->lock);
343	mutex_lock(&param->lock);
344	iopf_param = param->iopf_param;
345	if (iopf_param && iopf_param->queue == queue) {
346		list_del(&iopf_param->queue_list);
347		param->iopf_param = NULL;
348		ret = 0;
349	}
350	mutex_unlock(&param->lock);
351	mutex_unlock(&queue->lock);
352	if (ret)
353		return ret;
354
355	/* Just in case some faults are still stuck */
356	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list)
357		kfree(iopf);
358
359	kfree(iopf_param);
360
361	return 0;
362}
363EXPORT_SYMBOL_GPL(iopf_queue_remove_device);
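
/*
 * Editorial sketch, not part of io-pgfault.c: producer teardown, the reverse
 * of the setup above. The driver silences and drains its hardware queue
 * first (driver-specific, left as a comment), then unhooks the fault handler
 * and removes the device. The queue itself is freed with iopf_queue_free()
 * once no producer remains.
 */
static void example_iopf_disable_dev(struct iopf_queue *queue, struct device *dev)
{
	/* Driver-specific: disable PRI/stall reporting for @dev and drain
	 * the hardware queue so no new fault reaches iommu_queue_iopf(). */

	iopf_queue_flush_dev(dev);
	iommu_unregister_device_fault_handler(dev);
	iopf_queue_remove_device(queue, dev);	/* frees leftover partial faults */
}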
364
365/**
366 * iopf_queue_alloc - Allocate and initialize a fault queue
367 * @name: a unique string identifying the queue (for workqueue)
368 *
369 * Return: the queue on success and NULL on error.
370 */
371struct iopf_queue *iopf_queue_alloc(const char *name)
372{
373	struct iopf_queue *queue;
374
375	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
376	if (!queue)
377		return NULL;
378
379	/*
380	 * The WQ is unordered because the low-level handler enqueues faults by
381	 * group. PRI requests within a group have to be ordered, but once
382	 * that's dealt with, the high-level function can handle groups out of
383	 * order.
384	 */
385	queue->wq = alloc_workqueue("iopf_queue/%s", WQ_UNBOUND, 0, name);
386	if (!queue->wq) {
387		kfree(queue);
388		return NULL;
389	}
390
391	INIT_LIST_HEAD(&queue->devices);
392	mutex_init(&queue->lock);
393
394	return queue;
395}
396EXPORT_SYMBOL_GPL(iopf_queue_alloc);
397
398/**
399 * iopf_queue_free - Free IOPF queue
400 * @queue: queue to free
401 *
402 * Counterpart to iopf_queue_alloc(). The driver must not be queuing faults or
403 * adding/removing devices on this queue anymore.
404 */
405void iopf_queue_free(struct iopf_queue *queue)
406{
407	struct iopf_device_param *iopf_param, *next;
408
409	if (!queue)
410		return;
411
412	list_for_each_entry_safe(iopf_param, next, &queue->devices, queue_list)
413		iopf_queue_remove_device(queue, iopf_param->dev);
414
415	destroy_workqueue(queue->wq);
416	kfree(queue);
417}
418EXPORT_SYMBOL_GPL(iopf_queue_free);
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Handle device page faults
  4 *
  5 * Copyright (C) 2020 ARM Ltd.
  6 */
  7
  8#include <linux/iommu.h>
  9#include <linux/list.h>
 10#include <linux/sched/mm.h>
 11#include <linux/slab.h>
 12#include <linux/workqueue.h>
 13
 14#include "iommu-priv.h"
 15
 16/*
 17 * Return the fault parameter of a device if it exists. Otherwise, return NULL.
 18 * On a successful return, the caller takes a reference on this parameter and
 19 * should put it after use by calling iopf_put_dev_fault_param().
 20 */
 21static struct iommu_fault_param *iopf_get_dev_fault_param(struct device *dev)
 22{
 23	struct dev_iommu *param = dev->iommu;
 24	struct iommu_fault_param *fault_param;
 25
 26	rcu_read_lock();
 27	fault_param = rcu_dereference(param->fault_param);
 28	if (fault_param && !refcount_inc_not_zero(&fault_param->users))
 29		fault_param = NULL;
 30	rcu_read_unlock();
 31
 32	return fault_param;
 33}
 34
 35/* Caller must hold a reference of the fault parameter. */
 36static void iopf_put_dev_fault_param(struct iommu_fault_param *fault_param)
 37{
 38	if (refcount_dec_and_test(&fault_param->users))
 39		kfree_rcu(fault_param, rcu);
 40}
 41
 42static void __iopf_free_group(struct iopf_group *group)
 43{
 44	struct iopf_fault *iopf, *next;
 45
 46	list_for_each_entry_safe(iopf, next, &group->faults, list) {
 47		if (!(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
 48			kfree(iopf);
 49	}
 50
 51	/* Pair with iommu_report_device_fault(). */
 52	iopf_put_dev_fault_param(group->fault_param);
 53}
 54
 55void iopf_free_group(struct iopf_group *group)
 56{
 57	__iopf_free_group(group);
 58	kfree(group);
 59}
 60EXPORT_SYMBOL_GPL(iopf_free_group);
 61
 62/* Non-last request of a group. Postpone until the last one. */
 63static int report_partial_fault(struct iommu_fault_param *fault_param,
 64				struct iommu_fault *fault)
 65{
 66	struct iopf_fault *iopf;
 67
 68	iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
 69	if (!iopf)
 70		return -ENOMEM;
 71
 72	iopf->fault = *fault;
 73
 74	mutex_lock(&fault_param->lock);
 75	list_add(&iopf->list, &fault_param->partial);
 76	mutex_unlock(&fault_param->lock);
 77
 78	return 0;
 79}
 80
 81static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
 82					   struct iopf_fault *evt,
 83					   struct iopf_group *abort_group)
 84{
 85	struct iopf_fault *iopf, *next;
 86	struct iopf_group *group;
 87
 88	group = kzalloc(sizeof(*group), GFP_KERNEL);
 89	if (!group) {
 90		/*
 91		 * We always need to construct the group as we need it to abort
 92		 * the request at the driver if it can't be handled.
 93		 */
 94		group = abort_group;
 95	}
 96
 97	group->fault_param = iopf_param;
 98	group->last_fault.fault = evt->fault;
 99	INIT_LIST_HEAD(&group->faults);
100	INIT_LIST_HEAD(&group->pending_node);
101	list_add(&group->last_fault.list, &group->faults);
102
103	/* See if we have partial faults for this group */
104	mutex_lock(&iopf_param->lock);
105	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
106		if (iopf->fault.prm.grpid == evt->fault.prm.grpid)
107			/* Insert *before* the last fault */
108			list_move(&iopf->list, &group->faults);
109	}
110	list_add(&group->pending_node, &iopf_param->faults);
111	mutex_unlock(&iopf_param->lock);
112
113	group->fault_count = list_count_nodes(&group->faults);
114
115	return group;
116}
117
118static struct iommu_attach_handle *find_fault_handler(struct device *dev,
119						     struct iopf_fault *evt)
120{
121	struct iommu_fault *fault = &evt->fault;
122	struct iommu_attach_handle *attach_handle;
123
124	if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
125		attach_handle = iommu_attach_handle_get(dev->iommu_group,
126				fault->prm.pasid, 0);
127		if (IS_ERR(attach_handle)) {
128			const struct iommu_ops *ops = dev_iommu_ops(dev);
129
130			if (!ops->user_pasid_table)
131				return NULL;
132			/*
133			 * The iommu driver for this device supports user-
134			 * managed PASID table. Therefore page faults for
135			 * any PASID should go through the NESTING domain
136			 * attached to the device RID.
137			 */
138			attach_handle = iommu_attach_handle_get(
139					dev->iommu_group, IOMMU_NO_PASID,
140					IOMMU_DOMAIN_NESTED);
141			if (IS_ERR(attach_handle))
142				return NULL;
143		}
144	} else {
145		attach_handle = iommu_attach_handle_get(dev->iommu_group,
146				IOMMU_NO_PASID, 0);
147
148		if (IS_ERR(attach_handle))
149			return NULL;
150	}
151
152	if (!attach_handle->domain->iopf_handler)
153		return NULL;
154
155	return attach_handle;
156}
157
158static void iopf_error_response(struct device *dev, struct iopf_fault *evt)
159{
160	const struct iommu_ops *ops = dev_iommu_ops(dev);
161	struct iommu_fault *fault = &evt->fault;
162	struct iommu_page_response resp = {
163		.pasid = fault->prm.pasid,
164		.grpid = fault->prm.grpid,
165		.code = IOMMU_PAGE_RESP_INVALID
166	};
167
168	ops->page_response(dev, evt, &resp);
169}
170
171/**
172 * iommu_report_device_fault() - Report fault event to device driver
173 * @dev: the device
174 * @evt: fault event data
175 *
176 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
177 * handler. If this function fails then ops->page_response() was called to
178 * complete evt if required.
179 *
180 * This module doesn't handle PCI PASID Stop Marker; IOMMU drivers must discard
181 * them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't
182 * expect a response. It may be generated when disabling a PASID (issuing a
183 * PASID stop request) by some PCI devices.
184 *
185 * The PASID stop request is issued by the device driver before unbind(). Once
186 * it completes, no page request is generated for this PASID anymore and
187 * outstanding ones have been pushed to the IOMMU (as per PCIe 4.0r1.0 - 6.20.1
188 * and 10.4.1.2 - Managing PASID TLP Prefix Usage). Some PCI devices will wait
189 * for all outstanding page requests to come back with a response before
190 * completing the PASID stop request. Others do not wait for page responses, and
191 * instead issue this Stop Marker that tells us when the PASID can be
192 * reallocated.
193 *
194 * It is safe to discard the Stop Marker because it is an optimization.
195 * a. Page requests, which are posted requests, have been flushed to the IOMMU
196 *    when the stop request completes.
197 * b. The IOMMU driver flushes all fault queues on unbind() before freeing the
198 *    PASID.
199 *
200 * So even though the Stop Marker might be issued by the device *after* the stop
201 * request completes, outstanding faults will have been dealt with by the time
202 * the PASID is freed.
203 *
204 * Any valid page fault will eventually be routed to an iommu domain and the
205 * page fault handler installed there will be called. The users of this
206 * handling framework must guarantee that the iommu domain can only be
207 * freed after the device has stopped generating page faults (or the iommu
208 * hardware has been set to block the page faults) and the pending page faults
209 * have been flushed. If no page fault handler is attached or no iopf param
210 * is set up, ops->page_response() is called to complete the evt.
211 *
212 * Returns 0 on success, or an error in case of a bad/failed iopf setup.
213 */
214int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
215{
216	struct iommu_attach_handle *attach_handle;
217	struct iommu_fault *fault = &evt->fault;
218	struct iommu_fault_param *iopf_param;
219	struct iopf_group abort_group = {};
220	struct iopf_group *group;
221
222	attach_handle = find_fault_handler(dev, evt);
223	if (!attach_handle)
224		goto err_bad_iopf;
225
226	/*
227	 * Something has gone wrong if a fault-capable domain is attached but no
228	 * iopf_param is set up.
229	 */
230	iopf_param = iopf_get_dev_fault_param(dev);
231	if (WARN_ON(!iopf_param))
232		goto err_bad_iopf;
233
234	if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
235		int ret;
236
237		ret = report_partial_fault(iopf_param, fault);
238		iopf_put_dev_fault_param(iopf_param);
239		/* A request that is not the last does not need to be ack'd */
240
241		return ret;
242	}
243
244	/*
245	 * This is the last page fault of a group. Allocate an iopf group and
246	 * pass it to domain's page fault handler. The group holds a reference
247	 * count of the fault parameter. It will be released after response or
248	 * error path of this function. If an error is returned, the caller
249	 * will send a response to the hardware. We need to clean up before
250	 * leaving, otherwise partial faults will be stuck.
251	 */
252	group = iopf_group_alloc(iopf_param, evt, &abort_group);
253	if (group == &abort_group)
254		goto err_abort;
255
256	group->attach_handle = attach_handle;
257
258	/*
259	 * On success iopf_handler must call iopf_group_response() and
260	 * iopf_free_group()
261	 */
262	if (group->attach_handle->domain->iopf_handler(group))
263		goto err_abort;
264
265	return 0;
266
267err_abort:
268	dev_warn_ratelimited(dev, "iopf with pasid %d aborted\n",
269			     fault->prm.pasid);
270	iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE);
271	if (group == &abort_group)
272		__iopf_free_group(group);
273	else
274		iopf_free_group(group);
275
276	return 0;
277
278err_bad_iopf:
279	if (fault->type == IOMMU_FAULT_PAGE_REQ)
280		iopf_error_response(dev, evt);
281
282	return -EINVAL;
283}
284EXPORT_SYMBOL_GPL(iommu_report_device_fault);
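
/*
 * Editorial sketch, not part of io-pgfault.c: reporting a page request with
 * this kernel's API. Unlike the older version above, there is no per-device
 * fault handler registration; the framework routes the completed group to
 * the iopf_handler of the domain attached to the faulting PASID (or to the
 * NESTED/RID domain). The field values below are illustrative placeholders.
 */
static void example_report_page_request(struct device *dev, u32 pasid,
					u32 grpid, u64 addr, bool last)
{
	struct iopf_fault evt = {
		.fault = {
			.type = IOMMU_FAULT_PAGE_REQ,
			.prm = {
				.flags = IOMMU_FAULT_PAGE_REQUEST_PASID_VALID |
					 (last ? IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE : 0),
				.pasid = pasid,
				.grpid = grpid,
				.perm = IOMMU_FAULT_PERM_READ,
				.addr = addr,
			},
		},
	};

	/*
	 * On failure the framework has already sent an INVALID response for
	 * page requests that need one, so the caller only has to log it.
	 */
	if (iommu_report_device_fault(dev, &evt))
		dev_warn_ratelimited(dev, "page request not handled (pasid %u)\n",
				     pasid);
}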
285
286/**
287 * iopf_queue_flush_dev - Ensure that all queued faults have been processed
288 * @dev: the endpoint whose faults need to be flushed.
289 *
290 * The IOMMU driver calls this before releasing a PASID, to ensure that all
291 * pending faults for this PASID have been handled, and won't hit the address
292 * space of the next process that uses this PASID. The driver must make sure
293 * that no new fault is added to the queue. In particular it must flush its
294 * low-level queue before calling this function.
295 *
296 * Return: 0 on success and <0 on error.
297 */
298int iopf_queue_flush_dev(struct device *dev)
299{
300	struct iommu_fault_param *iopf_param;
301
302	/*
303	 * It's a driver bug to be here after iopf_queue_remove_device().
304	 * Therefore, it's safe to dereference the fault parameter without
305	 * holding the lock.
306	 */
307	iopf_param = rcu_dereference_check(dev->iommu->fault_param, true);
308	if (WARN_ON(!iopf_param))
309		return -ENODEV;
310
311	flush_workqueue(iopf_param->queue->wq);
312
313	return 0;
314}
315EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
316
317/**
318 * iopf_group_response - Respond to a group of page faults
319 * @group: the group of faults with the same group id
320 * @status: the response code
321 */
322void iopf_group_response(struct iopf_group *group,
323			 enum iommu_page_response_code status)
324{
325	struct iommu_fault_param *fault_param = group->fault_param;
326	struct iopf_fault *iopf = &group->last_fault;
327	struct device *dev = group->fault_param->dev;
328	const struct iommu_ops *ops = dev_iommu_ops(dev);
329	struct iommu_page_response resp = {
330		.pasid = iopf->fault.prm.pasid,
331		.grpid = iopf->fault.prm.grpid,
332		.code = status,
333	};
334
335	/* Only send response if there is a fault report pending */
336	mutex_lock(&fault_param->lock);
337	if (!list_empty(&group->pending_node)) {
338		ops->page_response(dev, &group->last_fault, &resp);
339		list_del_init(&group->pending_node);
340	}
341	mutex_unlock(&fault_param->lock);
342}
343EXPORT_SYMBOL_GPL(iopf_group_response);
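
/*
 * Editorial sketch, not part of io-pgfault.c: the consumer side. A
 * fault-capable domain sets ->iopf_handler; the handler owns the group and
 * must eventually call iopf_group_response() followed by iopf_free_group().
 * This one defers the work to the device's IOPF workqueue using the group's
 * embedded work_struct. example_resolve_fault() is a hypothetical
 * placeholder for the actual page-fault resolution.
 */
static enum iommu_page_response_code
example_resolve_fault(struct iommu_domain *domain, struct iommu_fault *fault);

static void example_handle_group(struct work_struct *work)
{
	struct iopf_group *group = container_of(work, struct iopf_group, work);
	struct iopf_fault *iopf;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

	/* Resolve each request; keep the first error as the group status. */
	list_for_each_entry(iopf, &group->faults, list) {
		if (status == IOMMU_PAGE_RESP_SUCCESS)
			status = example_resolve_fault(
					group->attach_handle->domain,
					&iopf->fault);
	}

	/* One response completes the whole group, then release it. */
	iopf_group_response(group, status);
	iopf_free_group(group);
}

static int example_domain_iopf_handler(struct iopf_group *group)
{
	/* Defer the handling so the reporting path stays short. */
	INIT_WORK(&group->work, example_handle_group);
	if (!queue_work(group->fault_param->queue->wq, &group->work))
		return -EBUSY;

	return 0;
}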
344
345/**
346 * iopf_queue_discard_partial - Remove all pending partial faults
347 * @queue: the queue whose partial faults need to be discarded
348 *
349 * When the hardware queue overflows, the last page faults in a group may have
350 * been lost and the IOMMU driver calls this to discard all partial faults. The
351 * driver shouldn't be adding new faults to this queue concurrently.
352 *
353 * Return: 0 on success and <0 on error.
354 */
355int iopf_queue_discard_partial(struct iopf_queue *queue)
356{
357	struct iopf_fault *iopf, *next;
358	struct iommu_fault_param *iopf_param;
359
360	if (!queue)
361		return -EINVAL;
362
363	mutex_lock(&queue->lock);
364	list_for_each_entry(iopf_param, &queue->devices, queue_list) {
365		mutex_lock(&iopf_param->lock);
366		list_for_each_entry_safe(iopf, next, &iopf_param->partial,
367					 list) {
368			list_del(&iopf->list);
369			kfree(iopf);
370		}
371		mutex_unlock(&iopf_param->lock);
372	}
373	mutex_unlock(&queue->lock);
374	return 0;
375}
376EXPORT_SYMBOL_GPL(iopf_queue_discard_partial);
377
378/**
379 * iopf_queue_add_device - Add producer to the fault queue
380 * @queue: IOPF queue
381 * @dev: device to add
382 *
383 * Return: 0 on success and <0 on error.
384 */
385int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
386{
387	int ret = 0;
388	struct dev_iommu *param = dev->iommu;
389	struct iommu_fault_param *fault_param;
390	const struct iommu_ops *ops = dev_iommu_ops(dev);
391
392	if (!ops->page_response)
393		return -ENODEV;
394
394
395	mutex_lock(&queue->lock);
396	mutex_lock(&param->lock);
397	if (rcu_dereference_check(param->fault_param,
398				  lockdep_is_held(&param->lock))) {
399		ret = -EBUSY;
400		goto done_unlock;
401	}
402
403	fault_param = kzalloc(sizeof(*fault_param), GFP_KERNEL);
404	if (!fault_param) {
405		ret = -ENOMEM;
406		goto done_unlock;
407	}
408
409	mutex_init(&fault_param->lock);
410	INIT_LIST_HEAD(&fault_param->faults);
411	INIT_LIST_HEAD(&fault_param->partial);
412	fault_param->dev = dev;
413	refcount_set(&fault_param->users, 1);
414	list_add(&fault_param->queue_list, &queue->devices);
415	fault_param->queue = queue;
416
417	rcu_assign_pointer(param->fault_param, fault_param);
418
419done_unlock:
420	mutex_unlock(&param->lock);
421	mutex_unlock(&queue->lock);
422
423	return ret;
424}
425EXPORT_SYMBOL_GPL(iopf_queue_add_device);
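
/*
 * Editorial sketch, not part of io-pgfault.c: producer setup with this
 * kernel's API. The only requirements are that the IOMMU driver implements
 * ops->page_response (checked above) and that the device is added to a queue
 * before any fault is reported for it; there is no separate fault handler
 * registration step any more.
 */
static int example_iopf_enable_dev(struct iopf_queue *queue, struct device *dev)
{
	int ret;

	ret = iopf_queue_add_device(queue, dev);
	if (ret)	/* -EBUSY if already added, -ENODEV without page_response */
		return ret;

	/* Driver-specific: enable PRI/stall reporting for @dev in hardware.
	 * From here on iommu_report_device_fault() may be called for it. */
	return 0;
}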
426
427/**
428 * iopf_queue_remove_device - Remove producer from fault queue
429 * @queue: IOPF queue
430 * @dev: device to remove
431 *
432 * Removing a device from an iopf_queue. It's recommended to follow these
433 * steps when removing a device:
434 *
435 * - Disable new PRI reception: Turn off PRI generation in the IOMMU hardware
436 *   and flush any hardware page request queues. This should be done before
437 *   calling into this helper.
438 * - Acknowledge all outstanding PRQs to the device: Respond to all outstanding
439 *   page requests with IOMMU_PAGE_RESP_INVALID, indicating the device should
440 *   not retry. This helper function handles this.
441 * - Disable PRI on the device: After calling this helper, the caller could
442 *   then disable PRI on the device.
443 *
444 * Calling iopf_queue_remove_device() essentially disassociates the device.
445 * The fault_param might still exist, but iommu_page_response() will do
446 * nothing. The device fault parameter reference count has been properly
447 * passed from iommu_report_device_fault() to the fault handling work, and
448 * will eventually be released after iommu_page_response().
449 */
450void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
451{
452	struct iopf_fault *partial_iopf;
453	struct iopf_fault *next;
454	struct iopf_group *group, *temp;
455	struct dev_iommu *param = dev->iommu;
456	struct iommu_fault_param *fault_param;
457	const struct iommu_ops *ops = dev_iommu_ops(dev);
458
459	mutex_lock(&queue->lock);
460	mutex_lock(&param->lock);
461	fault_param = rcu_dereference_check(param->fault_param,
462					    lockdep_is_held(&param->lock));
463
464	if (WARN_ON(!fault_param || fault_param->queue != queue))
465		goto unlock;
466
467	mutex_lock(&fault_param->lock);
468	list_for_each_entry_safe(partial_iopf, next, &fault_param->partial, list)
469		kfree(partial_iopf);
470
471	list_for_each_entry_safe(group, temp, &fault_param->faults, pending_node) {
472		struct iopf_fault *iopf = &group->last_fault;
473		struct iommu_page_response resp = {
474			.pasid = iopf->fault.prm.pasid,
475			.grpid = iopf->fault.prm.grpid,
476			.code = IOMMU_PAGE_RESP_INVALID
477		};
478
479		ops->page_response(dev, iopf, &resp);
480		list_del_init(&group->pending_node);
481		iopf_free_group(group);
482	}
483	mutex_unlock(&fault_param->lock);
484
485	list_del(&fault_param->queue_list);
486
487	/* dec the ref owned by iopf_queue_add_device() */
488	rcu_assign_pointer(param->fault_param, NULL);
489	iopf_put_dev_fault_param(fault_param);
490unlock:
491	mutex_unlock(&param->lock);
492	mutex_unlock(&queue->lock);
493}
494EXPORT_SYMBOL_GPL(iopf_queue_remove_device);
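
/*
 * Editorial sketch, not part of io-pgfault.c: the removal sequence that the
 * kernel-doc above describes. The hardware steps are driver-specific and
 * left as comments; the middle step (responding to outstanding groups with
 * IOMMU_PAGE_RESP_INVALID and dropping partial faults) is done by
 * iopf_queue_remove_device() itself.
 */
static void example_iopf_disable_dev(struct iopf_queue *queue, struct device *dev)
{
	/* 1. Driver-specific: stop PRI generation in the IOMMU and flush the
	 *    hardware page-request queue for @dev. */

	/* 2. Disassociate the device; outstanding groups are completed with
	 *    an INVALID response and partial faults are freed. */
	iopf_queue_remove_device(queue, dev);

	/* 3. Driver-specific: only now disable PRI on the endpoint itself. */
}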
495
496/**
497 * iopf_queue_alloc - Allocate and initialize a fault queue
498 * @name: a unique string identifying the queue (for workqueue)
499 *
500 * Return: the queue on success and NULL on error.
501 */
502struct iopf_queue *iopf_queue_alloc(const char *name)
503{
504	struct iopf_queue *queue;
505
506	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
507	if (!queue)
508		return NULL;
509
510	/*
511	 * The WQ is unordered because the low-level handler enqueues faults by
512	 * group. PRI requests within a group have to be ordered, but once
513	 * that's dealt with, the high-level function can handle groups out of
514	 * order.
515	 */
516	queue->wq = alloc_workqueue("iopf_queue/%s", WQ_UNBOUND, 0, name);
517	if (!queue->wq) {
518		kfree(queue);
519		return NULL;
520	}
521
522	INIT_LIST_HEAD(&queue->devices);
523	mutex_init(&queue->lock);
524
525	return queue;
526}
527EXPORT_SYMBOL_GPL(iopf_queue_alloc);
528
529/**
530 * iopf_queue_free - Free IOPF queue
531 * @queue: queue to free
532 *
533 * Counterpart to iopf_queue_alloc(). The driver must not be queuing faults or
534 * adding/removing devices on this queue anymore.
535 */
536void iopf_queue_free(struct iopf_queue *queue)
537{
538	struct iommu_fault_param *iopf_param, *next;
539
540	if (!queue)
541		return;
542
543	list_for_each_entry_safe(iopf_param, next, &queue->devices, queue_list)
544		iopf_queue_remove_device(queue, iopf_param->dev);
545
546	destroy_workqueue(queue->wq);
547	kfree(queue);
548}
549EXPORT_SYMBOL_GPL(iopf_queue_free);