v6.13.7 (drivers/iommu/io-pgfault.c)

// SPDX-License-Identifier: GPL-2.0
/*
 * Handle device page faults
 *
 * Copyright (C) 2020 ARM Ltd.
 */

#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "iommu-priv.h"

/*
 * Return the fault parameter of a device if it exists. Otherwise, return NULL.
 * On a successful return, the caller takes a reference to this parameter and
 * should put it after use by calling iopf_put_dev_fault_param().
 */
static struct iommu_fault_param *iopf_get_dev_fault_param(struct device *dev)
{
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_param *fault_param;

	rcu_read_lock();
	fault_param = rcu_dereference(param->fault_param);
	if (fault_param && !refcount_inc_not_zero(&fault_param->users))
		fault_param = NULL;
	rcu_read_unlock();

	return fault_param;
}

/* Caller must hold a reference to the fault parameter. */
static void iopf_put_dev_fault_param(struct iommu_fault_param *fault_param)
{
	if (refcount_dec_and_test(&fault_param->users))
		kfree_rcu(fault_param, rcu);
}
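
/*
 * Illustrative sketch of the lookup/refcount contract described above: a
 * caller takes a reference to the device's fault parameter, uses it, then
 * drops the reference so that a concurrent iopf_queue_remove_device() can
 * free it via kfree_rcu(). do_something_with() is a hypothetical placeholder.
 *
 *	struct iommu_fault_param *fault_param;
 *
 *	fault_param = iopf_get_dev_fault_param(dev);
 *	if (!fault_param)
 *		return -ENODEV;
 *	do_something_with(fault_param);
 *	iopf_put_dev_fault_param(fault_param);
 */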

static void __iopf_free_group(struct iopf_group *group)
{
	struct iopf_fault *iopf, *next;

	list_for_each_entry_safe(iopf, next, &group->faults, list) {
		if (!(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
			kfree(iopf);
	}

	/* Pair with iommu_report_device_fault(). */
	iopf_put_dev_fault_param(group->fault_param);
}

void iopf_free_group(struct iopf_group *group)
{
	__iopf_free_group(group);
	kfree(group);
}
EXPORT_SYMBOL_GPL(iopf_free_group);

/* Non-last request of a group. Postpone until the last one. */
static int report_partial_fault(struct iommu_fault_param *fault_param,
				struct iommu_fault *fault)
{
	struct iopf_fault *iopf;

	iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
	if (!iopf)
		return -ENOMEM;

	iopf->fault = *fault;

	mutex_lock(&fault_param->lock);
	list_add(&iopf->list, &fault_param->partial);
	mutex_unlock(&fault_param->lock);

	return 0;
}

static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
					   struct iopf_fault *evt,
					   struct iopf_group *abort_group)
{
	struct iopf_fault *iopf, *next;
	struct iopf_group *group;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group) {
		/*
		 * We always need to construct the group as we need it to abort
		 * the request at the driver if it can't be handled.
		 */
		group = abort_group;
	}

	group->fault_param = iopf_param;
	group->last_fault.fault = evt->fault;
	INIT_LIST_HEAD(&group->faults);
	INIT_LIST_HEAD(&group->pending_node);
	list_add(&group->last_fault.list, &group->faults);

	/* See if we have partial faults for this group */
	mutex_lock(&iopf_param->lock);
	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
		if (iopf->fault.prm.grpid == evt->fault.prm.grpid)
			/* Insert *before* the last fault */
			list_move(&iopf->list, &group->faults);
	}
	list_add(&group->pending_node, &iopf_param->faults);
	mutex_unlock(&iopf_param->lock);

	group->fault_count = list_count_nodes(&group->faults);

	return group;
}

static struct iommu_attach_handle *find_fault_handler(struct device *dev,
						     struct iopf_fault *evt)
{
	struct iommu_fault *fault = &evt->fault;
	struct iommu_attach_handle *attach_handle;

	if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
		attach_handle = iommu_attach_handle_get(dev->iommu_group,
				fault->prm.pasid, 0);
		if (IS_ERR(attach_handle)) {
			const struct iommu_ops *ops = dev_iommu_ops(dev);

			if (!ops->user_pasid_table)
				return NULL;
			/*
			 * The iommu driver for this device supports a user-
			 * managed PASID table. Therefore page faults for
			 * any PASID should go through the NESTING domain
			 * attached to the device RID.
			 */
			attach_handle = iommu_attach_handle_get(
					dev->iommu_group, IOMMU_NO_PASID,
					IOMMU_DOMAIN_NESTED);
			if (IS_ERR(attach_handle))
				return NULL;
		}
	} else {
		attach_handle = iommu_attach_handle_get(dev->iommu_group,
				IOMMU_NO_PASID, 0);

		if (IS_ERR(attach_handle))
			return NULL;
	}

	if (!attach_handle->domain->iopf_handler)
		return NULL;

	return attach_handle;
}

static void iopf_error_response(struct device *dev, struct iopf_fault *evt)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_fault *fault = &evt->fault;
	struct iommu_page_response resp = {
		.pasid = fault->prm.pasid,
		.grpid = fault->prm.grpid,
		.code = IOMMU_PAGE_RESP_INVALID
	};

	ops->page_response(dev, evt, &resp);
}

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. If this function fails then ops->page_response() was called to
 * complete evt if required.
 *
 * This module doesn't handle PCI PASID Stop Markers; IOMMU drivers must discard
 * them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't
 * expect a response. It may be generated when disabling a PASID (issuing a
 * PASID stop request) by some PCI devices.
 *
 * The PASID stop request is issued by the device driver before unbind(). Once
 * it completes, no page request is generated for this PASID anymore and
 * outstanding ones have been pushed to the IOMMU (as per PCIe 4.0r1.0 - 6.20.1
 * and 10.4.1.2 - Managing PASID TLP Prefix Usage). Some PCI devices will wait
 * for all outstanding page requests to come back with a response before
 * completing the PASID stop request. Others do not wait for page responses, and
 * instead issue this Stop Marker that tells us when the PASID can be
 * reallocated.
 *
 * It is safe to discard the Stop Marker because it is an optimization.
 * a. Page requests, which are posted requests, have been flushed to the IOMMU
 *    when the stop request completes.
 * b. The IOMMU driver flushes all fault queues on unbind() before freeing the
 *    PASID.
 *
 * So even though the Stop Marker might be issued by the device *after* the stop
 * request completes, outstanding faults will have been dealt with by the time
 * the PASID is freed.
 *
 * Any valid page fault will eventually be routed to an iommu domain, and the
 * page fault handler installed there will get called. The users of this
 * handling framework should guarantee that the iommu domain can only be
 * freed after the device has stopped generating page faults (or the iommu
 * hardware has been set to block the page faults) and the pending page faults
 * have been flushed. If no page fault handler is attached or no iopf params
 * are set up, then ops->page_response() is called to complete the evt.
 *
 * Returns 0 on success, or an error in case of a bad/failed iopf setup.
 */
int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
{
	struct iommu_attach_handle *attach_handle;
	struct iommu_fault *fault = &evt->fault;
	struct iommu_fault_param *iopf_param;
	struct iopf_group abort_group = {};
	struct iopf_group *group;

	attach_handle = find_fault_handler(dev, evt);
	if (!attach_handle)
		goto err_bad_iopf;

	/*
	 * Something has gone wrong if a fault-capable domain is attached but no
	 * iopf_param is set up.
	 */
	iopf_param = iopf_get_dev_fault_param(dev);
	if (WARN_ON(!iopf_param))
		goto err_bad_iopf;

	if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		int ret;

		ret = report_partial_fault(iopf_param, fault);
		iopf_put_dev_fault_param(iopf_param);
		/* A request that is not the last does not need to be ack'd */

		return ret;
	}

	/*
	 * This is the last page fault of a group. Allocate an iopf group and
	 * pass it to the domain's page fault handler. The group holds a
	 * reference count on the fault parameter, which is released on the
	 * response or error path of this function. If an error is returned,
	 * the caller will send a response to the hardware. We need to clean up
	 * before leaving, otherwise partial faults will be stuck.
	 */
	group = iopf_group_alloc(iopf_param, evt, &abort_group);
	if (group == &abort_group)
		goto err_abort;

	group->attach_handle = attach_handle;

	/*
	 * On success iopf_handler must call iopf_group_response() and
	 * iopf_free_group()
	 */
	if (group->attach_handle->domain->iopf_handler(group))
		goto err_abort;

	return 0;

err_abort:
	dev_warn_ratelimited(dev, "iopf with pasid %d aborted\n",
			     fault->prm.pasid);
	iopf_group_response(group, IOMMU_PAGE_RESP_FAILURE);
	if (group == &abort_group)
		__iopf_free_group(group);
	else
		iopf_free_group(group);

	return 0;

err_bad_iopf:
	if (fault->type == IOMMU_FAULT_PAGE_REQ)
		iopf_error_response(dev, evt);

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);
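
/*
 * Illustrative sketch of the calling pattern described above, from an IOMMU
 * driver's threaded IRQ handler. The my_prq_desc type and the my_hw_*()
 * helpers are hypothetical driver internals that pop a descriptor from the
 * hardware page request queue and translate it into a struct iommu_fault.
 *
 *	struct my_prq_desc desc;
 *
 *	while (my_hw_pop_prq(iommu, &desc)) {
 *		struct iopf_fault evt = {};
 *		struct device *dev = my_hw_desc_to_dev(iommu, &desc);
 *
 *		// PASID Stop Markers must be dropped by the driver.
 *		if (my_hw_desc_is_stop_marker(&desc))
 *			continue;
 *
 *		my_hw_desc_to_fault(&desc, &evt.fault);
 *
 *		// On failure the framework has already completed the event
 *		// (an INVALID page response was sent if one was required),
 *		// so the driver only needs to log it.
 *		if (iommu_report_device_fault(dev, &evt))
 *			dev_dbg(dev, "unhandled page request\n");
 *	}
 */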

/**
 * iopf_queue_flush_dev - Ensure that all queued faults have been processed
 * @dev: the endpoint whose faults need to be flushed.
 *
 * The IOMMU driver calls this before releasing a PASID, to ensure that all
 * pending faults for this PASID have been handled, and won't hit the address
 * space of the next process that uses this PASID. The driver must make sure
 * that no new fault is added to the queue. In particular it must flush its
 * low-level queue before calling this function.
 *
 * Return: 0 on success and <0 on error.
 */
int iopf_queue_flush_dev(struct device *dev)
{
	struct iommu_fault_param *iopf_param;

	/*
	 * It's a driver bug to be here after iopf_queue_remove_device().
	 * Therefore, it's safe to dereference the fault parameter without
	 * holding the lock.
	 */
	iopf_param = rcu_dereference_check(dev->iommu->fault_param, true);
	if (WARN_ON(!iopf_param))
		return -ENODEV;

	flush_workqueue(iopf_param->queue->wq);

	return 0;
}
EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
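
/*
 * Illustrative sketch of the ordering described above when an IOMMU driver
 * tears down a PASID (e.g. on SVA unbind). The my_hw_*() helpers are
 * hypothetical driver internals.
 *
 *	// Stop the hardware from queueing new faults for this PASID and
 *	// drain the low-level (hardware/driver) fault queue first.
 *	my_hw_block_pasid_faults(iommu, dev, pasid);
 *	my_hw_flush_prq(iommu);
 *
 *	// Then wait for faults already handed to the workqueue.
 *	iopf_queue_flush_dev(dev);
 *
 *	// Only now is it safe to free or reallocate the PASID.
 */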

/**
 * iopf_group_response - Respond to a group of page faults
 * @group: the group of faults with the same group id
 * @status: the response code
 */
void iopf_group_response(struct iopf_group *group,
			 enum iommu_page_response_code status)
{
	struct iommu_fault_param *fault_param = group->fault_param;
	struct iopf_fault *iopf = &group->last_fault;
	struct device *dev = group->fault_param->dev;
	const struct iommu_ops *ops = dev_iommu_ops(dev);
	struct iommu_page_response resp = {
		.pasid = iopf->fault.prm.pasid,
		.grpid = iopf->fault.prm.grpid,
		.code = status,
	};

	/* Only send response if there is a fault report pending */
	mutex_lock(&fault_param->lock);
	if (!list_empty(&group->pending_node)) {
		ops->page_response(dev, &group->last_fault, &resp);
		list_del_init(&group->pending_node);
	}
	mutex_unlock(&fault_param->lock);
}
EXPORT_SYMBOL_GPL(iopf_group_response);
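
/*
 * Illustrative sketch of the shape of a domain's ->iopf_handler(), which on
 * success must eventually call iopf_group_response() and iopf_free_group()
 * as noted in iommu_report_device_fault(). handle_one_fault() is a
 * hypothetical helper that resolves a single faulting address; real handlers
 * typically defer this work to a workqueue rather than running it inline.
 *
 *	static int my_domain_iopf_handler(struct iopf_group *group)
 *	{
 *		struct iopf_fault *iopf;
 *		enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
 *
 *		list_for_each_entry(iopf, &group->faults, list) {
 *			if (handle_one_fault(group->attach_handle, &iopf->fault))
 *				status = IOMMU_PAGE_RESP_INVALID;
 *		}
 *
 *		iopf_group_response(group, status);
 *		iopf_free_group(group);
 *		return 0;
 *	}
 */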

/**
 * iopf_queue_discard_partial - Remove all pending partial faults
 * @queue: the queue whose partial faults need to be discarded
 *
 * When the hardware queue overflows, last page faults in a group may have been
 * lost and the IOMMU driver calls this to discard all partial faults. The
 * driver shouldn't be adding new faults to this queue concurrently.
 *
 * Return: 0 on success and <0 on error.
 */
int iopf_queue_discard_partial(struct iopf_queue *queue)
{
	struct iopf_fault *iopf, *next;
	struct iommu_fault_param *iopf_param;

	if (!queue)
		return -EINVAL;

	mutex_lock(&queue->lock);
	list_for_each_entry(iopf_param, &queue->devices, queue_list) {
		mutex_lock(&iopf_param->lock);
		list_for_each_entry_safe(iopf, next, &iopf_param->partial,
					 list) {
			list_del(&iopf->list);
			kfree(iopf);
		}
		mutex_unlock(&iopf_param->lock);
	}
	mutex_unlock(&queue->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(iopf_queue_discard_partial);
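
/*
 * Illustrative sketch of typical use when the hardware page request queue
 * overflows, as described above. The my_hw_*() helpers are hypothetical
 * driver internals.
 *
 *	static void my_driver_handle_prq_overflow(struct my_iommu *iommu)
 *	{
 *		my_hw_pause_prq(iommu);
 *		iopf_queue_discard_partial(iommu->iopf_queue);
 *		my_hw_resume_prq(iommu);
 *	}
 */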

/**
 * iopf_queue_add_device - Add producer to the fault queue
 * @queue: IOPF queue
 * @dev: device to add
 *
 * Return: 0 on success and <0 on error.
 */
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
	int ret = 0;
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_param *fault_param;
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	if (!ops->page_response)
		return -ENODEV;

	mutex_lock(&queue->lock);
	mutex_lock(&param->lock);
	if (rcu_dereference_check(param->fault_param,
				  lockdep_is_held(&param->lock))) {
		ret = -EBUSY;
		goto done_unlock;
	}

	fault_param = kzalloc(sizeof(*fault_param), GFP_KERNEL);
	if (!fault_param) {
		ret = -ENOMEM;
		goto done_unlock;
	}

	mutex_init(&fault_param->lock);
	INIT_LIST_HEAD(&fault_param->faults);
	INIT_LIST_HEAD(&fault_param->partial);
	fault_param->dev = dev;
	refcount_set(&fault_param->users, 1);
	list_add(&fault_param->queue_list, &queue->devices);
	fault_param->queue = queue;

	rcu_assign_pointer(param->fault_param, fault_param);

done_unlock:
	mutex_unlock(&param->lock);
	mutex_unlock(&queue->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iopf_queue_add_device);
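
/*
 * Illustrative sketch of how an IOMMU driver might wire a device into IOPF
 * handling, assuming a per-IOMMU queue stored in hypothetical fields
 * iommu->iopf_queue and iommu->dev. dev_name() is used only to build a
 * unique workqueue name.
 *
 *	// At IOMMU probe time:
 *	iommu->iopf_queue = iopf_queue_alloc(dev_name(iommu->dev));
 *	if (!iommu->iopf_queue)
 *		return -ENOMEM;
 *
 *	// When enabling faults (e.g. PRI or stall) for an endpoint:
 *	ret = iopf_queue_add_device(iommu->iopf_queue, dev);
 *	if (ret)
 *		return ret;
 *	// ... enable PRI/stall in hardware afterwards ...
 */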

/**
 * iopf_queue_remove_device - Remove producer from fault queue
 * @queue: IOPF queue
 * @dev: device to remove
 *
 * Removes a device from an iopf_queue. It's recommended to follow these
 * steps when removing a device:
 *
 * - Disable new PRI reception: Turn off PRI generation in the IOMMU hardware
 *   and flush any hardware page request queues. This should be done before
 *   calling into this helper.
 * - Acknowledge all outstanding PRQs to the device: Respond to all outstanding
 *   page requests with IOMMU_PAGE_RESP_INVALID, indicating the device should
 *   not retry. This helper function handles this.
 * - Disable PRI on the device: After calling this helper, the caller could
 *   then disable PRI on the device (see the teardown sketch after this
 *   function).
 *
 * Calling iopf_queue_remove_device() essentially disassociates the device
 * from the queue. The fault_param might still exist, but iommu_page_response()
 * will do nothing. The device fault parameter reference count has been properly
 * passed from iommu_report_device_fault() to the fault handling work, and
 * will eventually be released after iommu_page_response().
 */
void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
	struct iopf_fault *partial_iopf;
	struct iopf_fault *next;
	struct iopf_group *group, *temp;
	struct dev_iommu *param = dev->iommu;
	struct iommu_fault_param *fault_param;
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	mutex_lock(&queue->lock);
	mutex_lock(&param->lock);
	fault_param = rcu_dereference_check(param->fault_param,
					    lockdep_is_held(&param->lock));

	if (WARN_ON(!fault_param || fault_param->queue != queue))
		goto unlock;

	mutex_lock(&fault_param->lock);
	list_for_each_entry_safe(partial_iopf, next, &fault_param->partial, list)
		kfree(partial_iopf);

	list_for_each_entry_safe(group, temp, &fault_param->faults, pending_node) {
		struct iopf_fault *iopf = &group->last_fault;
		struct iommu_page_response resp = {
			.pasid = iopf->fault.prm.pasid,
			.grpid = iopf->fault.prm.grpid,
			.code = IOMMU_PAGE_RESP_INVALID
		};

		ops->page_response(dev, iopf, &resp);
		list_del_init(&group->pending_node);
		iopf_free_group(group);
	}
	mutex_unlock(&fault_param->lock);

	list_del(&fault_param->queue_list);

	/* dec the ref owned by iopf_queue_add_device() */
	rcu_assign_pointer(param->fault_param, NULL);
	iopf_put_dev_fault_param(fault_param);
unlock:
	mutex_unlock(&param->lock);
	mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_GPL(iopf_queue_remove_device);
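
/*
 * Illustrative sketch of the teardown ordering recommended above, for a PCI
 * endpoint using PRI. The my_hw_*() helper and the iommu->iopf_queue field
 * are hypothetical driver internals; pci_disable_pri() is the usual PCI
 * helper for the final step.
 *
 *	// 1. Stop the IOMMU from accepting new page requests for this device
 *	//    and drain its hardware PRQ.
 *	my_hw_disable_iopf(iommu, dev);
 *
 *	// 2. Respond to everything still pending and detach from the queue.
 *	iopf_queue_remove_device(iommu->iopf_queue, dev);
 *
 *	// 3. Only now turn PRI off on the endpoint itself.
 *	pci_disable_pri(to_pci_dev(dev));
 */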

/**
 * iopf_queue_alloc - Allocate and initialize a fault queue
 * @name: a unique string identifying the queue (for workqueue)
 *
 * Return: the queue on success and NULL on error.
 */
struct iopf_queue *iopf_queue_alloc(const char *name)
{
	struct iopf_queue *queue;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;

	/*
	 * The WQ is unordered because the low-level handler enqueues faults by
	 * group. PRI requests within a group have to be ordered, but once
	 * that's dealt with, the high-level function can handle groups out of
	 * order.
	 */
	queue->wq = alloc_workqueue("iopf_queue/%s", WQ_UNBOUND, 0, name);
	if (!queue->wq) {
		kfree(queue);
		return NULL;
	}

	INIT_LIST_HEAD(&queue->devices);
	mutex_init(&queue->lock);

	return queue;
}
EXPORT_SYMBOL_GPL(iopf_queue_alloc);

/**
 * iopf_queue_free - Free IOPF queue
 * @queue: queue to free
 *
 * Counterpart to iopf_queue_alloc(). The driver must not be queuing faults or
 * adding/removing devices on this queue anymore.
 */
void iopf_queue_free(struct iopf_queue *queue)
{
	struct iommu_fault_param *iopf_param, *next;

	if (!queue)
		return;

	list_for_each_entry_safe(iopf_param, next, &queue->devices, queue_list)
		iopf_queue_remove_device(queue, iopf_param->dev);

	destroy_workqueue(queue->wq);
	kfree(queue);
}
EXPORT_SYMBOL_GPL(iopf_queue_free);

v5.14.15 (drivers/iommu/io-pgfault.c)

// SPDX-License-Identifier: GPL-2.0
/*
 * Handle device page faults
 *
 * Copyright (C) 2020 ARM Ltd.
 */

#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "iommu-sva-lib.h"

/**
 * struct iopf_queue - IO Page Fault queue
 * @wq: the fault workqueue
 * @devices: devices attached to this queue
 * @lock: protects the device list
 */
struct iopf_queue {
	struct workqueue_struct		*wq;
	struct list_head		devices;
	struct mutex			lock;
};

/**
 * struct iopf_device_param - IO Page Fault data attached to a device
 * @dev: the device that owns this param
 * @queue: IOPF queue
 * @queue_list: index into queue->devices
 * @partial: faults that are part of a Page Request Group for which the last
 *           request hasn't been submitted yet.
 */
struct iopf_device_param {
	struct device			*dev;
	struct iopf_queue		*queue;
	struct list_head		queue_list;
	struct list_head		partial;
};

struct iopf_fault {
	struct iommu_fault		fault;
	struct list_head		list;
};

struct iopf_group {
	struct iopf_fault		last_fault;
	struct list_head		faults;
	struct work_struct		work;
	struct device			*dev;
};

static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
			       enum iommu_page_response_code status)
{
	struct iommu_page_response resp = {
		.version		= IOMMU_PAGE_RESP_VERSION_1,
		.pasid			= iopf->fault.prm.pasid,
		.grpid			= iopf->fault.prm.grpid,
		.code			= status,
	};

	if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
	    (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
		resp.flags = IOMMU_PAGE_RESP_PASID_VALID;

	return iommu_page_response(dev, &resp);
}

static enum iommu_page_response_code
iopf_handle_single(struct iopf_fault *iopf)
{
	vm_fault_t ret;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned int access_flags = 0;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;
	struct iommu_fault_page_request *prm = &iopf->fault.prm;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_INVALID;

	if (!(prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID))
		return status;

	mm = iommu_sva_find(prm->pasid);
	if (IS_ERR_OR_NULL(mm))
		return status;

	mmap_read_lock(mm);

	vma = find_extend_vma(mm, prm->addr);
	if (!vma)
		/* Unmapped area */
		goto out_put_mm;

	if (prm->perm & IOMMU_FAULT_PERM_READ)
		access_flags |= VM_READ;

	if (prm->perm & IOMMU_FAULT_PERM_WRITE) {
		access_flags |= VM_WRITE;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	if (prm->perm & IOMMU_FAULT_PERM_EXEC) {
		access_flags |= VM_EXEC;
		fault_flags |= FAULT_FLAG_INSTRUCTION;
	}

	if (!(prm->perm & IOMMU_FAULT_PERM_PRIV))
		fault_flags |= FAULT_FLAG_USER;

	if (access_flags & ~vma->vm_flags)
		/* Access fault */
		goto out_put_mm;

	ret = handle_mm_fault(vma, prm->addr, fault_flags, NULL);
	status = ret & VM_FAULT_ERROR ? IOMMU_PAGE_RESP_INVALID :
		IOMMU_PAGE_RESP_SUCCESS;

out_put_mm:
	mmap_read_unlock(mm);
	mmput(mm);

	return status;
}

static void iopf_handle_group(struct work_struct *work)
{
	struct iopf_group *group;
	struct iopf_fault *iopf, *next;
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

	group = container_of(work, struct iopf_group, work);

	list_for_each_entry_safe(iopf, next, &group->faults, list) {
		/*
		 * For the moment, errors are sticky: don't handle subsequent
		 * faults in the group if there is an error.
		 */
		if (status == IOMMU_PAGE_RESP_SUCCESS)
			status = iopf_handle_single(iopf);

		if (!(iopf->fault.prm.flags &
		      IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
			kfree(iopf);
	}

	iopf_complete_group(group->dev, &group->last_fault, status);
	kfree(group);
}

/**
 * iommu_queue_iopf - IO Page Fault handler
 * @fault: fault event
 * @cookie: struct device, passed to iommu_register_device_fault_handler.
 *
 * Add a fault to the device workqueue, to be handled by mm.
 *
 * This module doesn't handle PCI PASID Stop Markers; IOMMU drivers must discard
 * them before reporting faults. A PASID Stop Marker (LRW = 0b100) doesn't
 * expect a response. It may be generated when disabling a PASID (issuing a
 * PASID stop request) by some PCI devices.
 *
 * The PASID stop request is issued by the device driver before unbind(). Once
 * it completes, no page request is generated for this PASID anymore and
 * outstanding ones have been pushed to the IOMMU (as per PCIe 4.0r1.0 - 6.20.1
 * and 10.4.1.2 - Managing PASID TLP Prefix Usage). Some PCI devices will wait
 * for all outstanding page requests to come back with a response before
 * completing the PASID stop request. Others do not wait for page responses, and
 * instead issue this Stop Marker that tells us when the PASID can be
 * reallocated.
 *
 * It is safe to discard the Stop Marker because it is an optimization.
 * a. Page requests, which are posted requests, have been flushed to the IOMMU
 *    when the stop request completes.
 * b. The IOMMU driver flushes all fault queues on unbind() before freeing the
 *    PASID.
 *
 * So even though the Stop Marker might be issued by the device *after* the stop
 * request completes, outstanding faults will have been dealt with by the time
 * the PASID is freed.
 *
 * Return: 0 on success and <0 on error.
 */
int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
{
	int ret;
	struct iopf_group *group;
	struct iopf_fault *iopf, *next;
	struct iopf_device_param *iopf_param;

	struct device *dev = cookie;
	struct dev_iommu *param = dev->iommu;

	lockdep_assert_held(&param->lock);

	if (fault->type != IOMMU_FAULT_PAGE_REQ)
		/* Not a recoverable page fault */
		return -EOPNOTSUPP;

	/*
	 * As long as we're holding param->lock, the queue can't be unlinked
	 * from the device and therefore cannot disappear.
	 */
	iopf_param = param->iopf_param;
	if (!iopf_param)
		return -ENODEV;

	if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
		if (!iopf)
			return -ENOMEM;

		iopf->fault = *fault;

		/* Non-last request of a group. Postpone until the last one */
		list_add(&iopf->list, &iopf_param->partial);

		return 0;
	}

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group) {
		/*
		 * The caller will send a response to the hardware. But we do
		 * need to clean up before leaving, otherwise partial faults
		 * will be stuck.
		 */
		ret = -ENOMEM;
		goto cleanup_partial;
	}

	group->dev = dev;
	group->last_fault.fault = *fault;
	INIT_LIST_HEAD(&group->faults);
	list_add(&group->last_fault.list, &group->faults);
	INIT_WORK(&group->work, iopf_handle_group);

	/* See if we have partial faults for this group */
	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
		if (iopf->fault.prm.grpid == fault->prm.grpid)
			/* Insert *before* the last fault */
			list_move(&iopf->list, &group->faults);
	}

	queue_work(iopf_param->queue->wq, &group->work);
	return 0;

cleanup_partial:
	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
		if (iopf->fault.prm.grpid == fault->prm.grpid) {
			list_del(&iopf->list);
			kfree(iopf);
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_queue_iopf);
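
/*
 * Illustrative sketch: in this kernel version an IOMMU driver typically
 * registers iommu_queue_iopf() as the device fault handler so that
 * recoverable page requests end up here. The iommu->iopf_queue field is an
 * assumed per-IOMMU member.
 *
 *	ret = iopf_queue_add_device(iommu->iopf_queue, dev);
 *	if (ret)
 *		return ret;
 *
 *	ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
 *	if (ret) {
 *		iopf_queue_remove_device(iommu->iopf_queue, dev);
 *		return ret;
 *	}
 *	// ... then enable PRI or stall in hardware ...
 */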

/**
 * iopf_queue_flush_dev - Ensure that all queued faults have been processed
 * @dev: the endpoint whose faults need to be flushed.
 *
 * The IOMMU driver calls this before releasing a PASID, to ensure that all
 * pending faults for this PASID have been handled, and won't hit the address
 * space of the next process that uses this PASID. The driver must make sure
 * that no new fault is added to the queue. In particular it must flush its
 * low-level queue before calling this function.
 *
 * Return: 0 on success and <0 on error.
 */
int iopf_queue_flush_dev(struct device *dev)
{
	int ret = 0;
	struct iopf_device_param *iopf_param;
	struct dev_iommu *param = dev->iommu;

	if (!param)
		return -ENODEV;

	mutex_lock(&param->lock);
	iopf_param = param->iopf_param;
	if (iopf_param)
		flush_workqueue(iopf_param->queue->wq);
	else
		ret = -ENODEV;
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);

/**
 * iopf_queue_discard_partial - Remove all pending partial faults
 * @queue: the queue whose partial faults need to be discarded
 *
 * When the hardware queue overflows, last page faults in a group may have been
 * lost and the IOMMU driver calls this to discard all partial faults. The
 * driver shouldn't be adding new faults to this queue concurrently.
 *
 * Return: 0 on success and <0 on error.
 */
int iopf_queue_discard_partial(struct iopf_queue *queue)
{
	struct iopf_fault *iopf, *next;
	struct iopf_device_param *iopf_param;

	if (!queue)
		return -EINVAL;

	mutex_lock(&queue->lock);
	list_for_each_entry(iopf_param, &queue->devices, queue_list) {
		list_for_each_entry_safe(iopf, next, &iopf_param->partial,
					 list) {
			list_del(&iopf->list);
			kfree(iopf);
		}
	}
	mutex_unlock(&queue->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(iopf_queue_discard_partial);

/**
 * iopf_queue_add_device - Add producer to the fault queue
 * @queue: IOPF queue
 * @dev: device to add
 *
 * Return: 0 on success and <0 on error.
 */
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
	int ret = -EBUSY;
	struct iopf_device_param *iopf_param;
	struct dev_iommu *param = dev->iommu;

	if (!param)
		return -ENODEV;

	iopf_param = kzalloc(sizeof(*iopf_param), GFP_KERNEL);
	if (!iopf_param)
		return -ENOMEM;

	INIT_LIST_HEAD(&iopf_param->partial);
	iopf_param->queue = queue;
	iopf_param->dev = dev;

	mutex_lock(&queue->lock);
	mutex_lock(&param->lock);
	if (!param->iopf_param) {
		list_add(&iopf_param->queue_list, &queue->devices);
		param->iopf_param = iopf_param;
		ret = 0;
	}
	mutex_unlock(&param->lock);
	mutex_unlock(&queue->lock);

	if (ret)
		kfree(iopf_param);

	return ret;
}
EXPORT_SYMBOL_GPL(iopf_queue_add_device);

/**
 * iopf_queue_remove_device - Remove producer from fault queue
 * @queue: IOPF queue
 * @dev: device to remove
 *
 * Caller makes sure that no more faults are reported for this device.
 *
 * Return: 0 on success and <0 on error.
 */
int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
	int ret = -EINVAL;
	struct iopf_fault *iopf, *next;
	struct iopf_device_param *iopf_param;
	struct dev_iommu *param = dev->iommu;

	if (!param || !queue)
		return -EINVAL;

	mutex_lock(&queue->lock);
	mutex_lock(&param->lock);
	iopf_param = param->iopf_param;
	if (iopf_param && iopf_param->queue == queue) {
		list_del(&iopf_param->queue_list);
		param->iopf_param = NULL;
		ret = 0;
	}
	mutex_unlock(&param->lock);
	mutex_unlock(&queue->lock);
	if (ret)
		return ret;

	/* Just in case some faults are still stuck */
	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list)
		kfree(iopf);

	kfree(iopf_param);

	return 0;
}
EXPORT_SYMBOL_GPL(iopf_queue_remove_device);
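
/*
 * Illustrative sketch of the matching teardown in this kernel version: the
 * driver stops hardware fault generation first, then unregisters the fault
 * handler and detaches the device from the queue. The my_hw_*() helper and
 * the iommu->iopf_queue field are hypothetical driver internals.
 *
 *	// Quiesce PRI/stall in hardware so no new faults are reported.
 *	my_hw_disable_iopf(iommu, dev);
 *
 *	iommu_unregister_device_fault_handler(dev);
 *	iopf_queue_remove_device(iommu->iopf_queue, dev);
 */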

/**
 * iopf_queue_alloc - Allocate and initialize a fault queue
 * @name: a unique string identifying the queue (for workqueue)
 *
 * Return: the queue on success and NULL on error.
 */
struct iopf_queue *iopf_queue_alloc(const char *name)
{
	struct iopf_queue *queue;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;

	/*
	 * The WQ is unordered because the low-level handler enqueues faults by
	 * group. PRI requests within a group have to be ordered, but once
	 * that's dealt with, the high-level function can handle groups out of
	 * order.
	 */
	queue->wq = alloc_workqueue("iopf_queue/%s", WQ_UNBOUND, 0, name);
	if (!queue->wq) {
		kfree(queue);
		return NULL;
	}

	INIT_LIST_HEAD(&queue->devices);
	mutex_init(&queue->lock);

	return queue;
}
EXPORT_SYMBOL_GPL(iopf_queue_alloc);

/**
 * iopf_queue_free - Free IOPF queue
 * @queue: queue to free
 *
 * Counterpart to iopf_queue_alloc(). The driver must not be queuing faults or
 * adding/removing devices on this queue anymore.
 */
void iopf_queue_free(struct iopf_queue *queue)
{
	struct iopf_device_param *iopf_param, *next;

	if (!queue)
		return;

	list_for_each_entry_safe(iopf_param, next, &queue->devices, queue_list)
		iopf_queue_remove_device(queue, iopf_param->dev);

	destroy_workqueue(queue->wq);
	kfree(queue);
}
EXPORT_SYMBOL_GPL(iopf_queue_free);