// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_preempt_fence.h"

#include <linux/slab.h>

#include "xe_exec_queue.h"
#include "xe_vm.h"

static void preempt_fence_work_func(struct work_struct *w)
{
	bool cookie = dma_fence_begin_signalling();
	struct xe_preempt_fence *pfence =
		container_of(w, typeof(*pfence), preempt_work);
	struct xe_exec_queue *q = pfence->q;

	if (pfence->error) {
		dma_fence_set_error(&pfence->base, pfence->error);
	} else if (!q->ops->reset_status(q)) {
		int err = q->ops->suspend_wait(q);

		if (err)
			dma_fence_set_error(&pfence->base, err);
	} else {
		dma_fence_set_error(&pfence->base, -ENOENT);
	}

	dma_fence_signal(&pfence->base);
	/*
	 * Opt to keep everything in the fence critical section. This looks
	 * strange since we have just signalled the fence, but the preempt
	 * fences are all signalled via a single global ordered-wq, so anything
	 * that happens in this callback can easily block progress on the
	 * entire wq, which in turn may prevent other published preempt fences
	 * from ever signalling. Keeping everything in the critical section
	 * means that if, for example, something below grabs a scary lock like
	 * vm->lock, lockdep should complain, since we also hold that lock
	 * while waiting on preempt fences to complete.
	 */
	xe_vm_queue_rebind_worker(q->vm);
	xe_exec_queue_put(q);
	dma_fence_end_signalling(cookie);
}
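
/*
 * Illustration only, not part of the original file: a minimal sketch of the
 * dma-fence signalling annotation used above. The cookie returned by
 * dma_fence_begin_signalling() opens a lockdep-tracked critical section; any
 * lock taken before the matching dma_fence_end_signalling() that is also held
 * elsewhere while waiting on a fence triggers a lockdep splat. The function
 * name is hypothetical.
 */
#if 0
static void example_annotated_signalling(struct dma_fence *fence)
{
	bool cookie = dma_fence_begin_signalling();

	/* Anything here must be able to make progress without the fence. */
	dma_fence_signal(fence);

	dma_fence_end_signalling(cookie);
}
#endif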

static const char *
preempt_fence_get_driver_name(struct dma_fence *fence)
{
	return "xe";
}

static const char *
preempt_fence_get_timeline_name(struct dma_fence *fence)
{
	return "preempt";
}

static bool preempt_fence_enable_signaling(struct dma_fence *fence)
{
	struct xe_preempt_fence *pfence =
		container_of(fence, typeof(*pfence), base);
	struct xe_exec_queue *q = pfence->q;

	pfence->error = q->ops->suspend(q);
	queue_work(q->vm->xe->preempt_fence_wq, &pfence->preempt_work);
	return true;
}

static const struct dma_fence_ops preempt_fence_ops = {
	.get_driver_name = preempt_fence_get_driver_name,
	.get_timeline_name = preempt_fence_get_timeline_name,
	.enable_signaling = preempt_fence_enable_signaling,
};
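
/*
 * Illustration only, with a hypothetical waiter, not part of the original
 * file: the dma-fence core invokes .enable_signaling at most once, the first
 * time a waiter actually needs the fence to make progress, e.g. via
 * dma_fence_add_callback() or dma_fence_enable_sw_signaling(). For a preempt
 * fence that is the point where the queue suspend is kicked off.
 */
#if 0
static void example_wait_on_preempt_fence(struct dma_fence *fence)
{
	/* Triggers preempt_fence_enable_signaling() if not yet enabled. */
	dma_fence_enable_sw_signaling(fence);

	/* Blocks until preempt_fence_work_func() signals the fence. */
	dma_fence_wait(fence, false);
}
#endif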

/**
 * xe_preempt_fence_alloc() - Allocate a preempt fence with minimal
 * initialization
 *
 * Allocate a preempt fence and initialize its list head.
 * If the allocated preempt fence has been armed with
 * xe_preempt_fence_arm(), it must be freed using dma_fence_put(). If not,
 * it must be freed using xe_preempt_fence_free().
 *
 * Return: A struct xe_preempt_fence pointer used for calling into
 * xe_preempt_fence_arm() or xe_preempt_fence_free().
 * An error pointer on error.
 */
struct xe_preempt_fence *xe_preempt_fence_alloc(void)
{
	struct xe_preempt_fence *pfence;

	pfence = kmalloc(sizeof(*pfence), GFP_KERNEL);
	if (!pfence)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&pfence->link);
	INIT_WORK(&pfence->preempt_work, preempt_fence_work_func);

	return pfence;
}
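
/*
 * Illustration only, with a hypothetical caller, not part of the original
 * file: fences are typically allocated up front so that the later arming
 * step cannot fail, and an unarmed fence may be parked on a list via its
 * @link member.
 */
#if 0
static int example_prealloc_fence(struct list_head *unarmed_list)
{
	struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();

	if (IS_ERR(pfence))
		return PTR_ERR(pfence);

	/* Park the unarmed fence; arming or freeing takes it off again. */
	list_add_tail(&pfence->link, unarmed_list);
	return 0;
}
#endif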

/**
 * xe_preempt_fence_free() - Free a preempt fence allocated using
 * xe_preempt_fence_alloc().
 * @pfence: pointer obtained from xe_preempt_fence_alloc().
 *
 * Free a preempt fence that has not yet been armed.
 */
void xe_preempt_fence_free(struct xe_preempt_fence *pfence)
{
	list_del(&pfence->link);
	kfree(pfence);
}
/**
 * xe_preempt_fence_arm() - Arm a preempt fence allocated using
 * xe_preempt_fence_alloc().
 * @pfence: The struct xe_preempt_fence pointer returned from
 *          xe_preempt_fence_alloc().
 * @q: The struct xe_exec_queue used for arming.
 * @context: The dma-fence context used for arming.
 * @seqno: The dma-fence seqno used for arming.
 *
 * Inserts the preempt fence into @context's timeline, takes @link off any
 * list, and registers @q as the struct xe_exec_queue to be preempted.
 *
 * Return: A pointer to a struct dma_fence embedded into the preempt fence.
 * This function never fails.
 */
struct dma_fence *
xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
		     u64 context, u32 seqno)
{
	list_del_init(&pfence->link);
	pfence->q = xe_exec_queue_get(q);
	spin_lock_init(&pfence->lock);
	dma_fence_init(&pfence->base, &preempt_fence_ops,
		       &pfence->lock, context, seqno);

	return &pfence->base;
}
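
/*
 * Illustration only, with a hypothetical caller, not part of the original
 * file: once armed, the fence is owned by the dma-fence machinery and must
 * be released with dma_fence_put() rather than xe_preempt_fence_free().
 */
#if 0
static void example_arm_and_release(struct xe_preempt_fence *pfence,
				    struct xe_exec_queue *q,
				    u64 context, u32 seqno)
{
	struct dma_fence *fence;

	fence = xe_preempt_fence_arm(pfence, q, context, seqno);

	/* ... publish the fence, wait on it, etc. ... */

	dma_fence_put(fence);
}
#endif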

/**
 * xe_preempt_fence_create() - Helper to create and arm a preempt fence.
 * @q: The struct xe_exec_queue used for arming.
 * @context: The dma-fence context used for arming.
 * @seqno: The dma-fence seqno used for arming.
 *
 * Allocates and inserts the preempt fence into @context's timeline,
 * and registers @q as the struct xe_exec_queue to be preempted.
 *
 * Return: A pointer to the resulting struct dma_fence on success. An error
 * pointer on error. In particular, if allocation fails it returns
 * ERR_PTR(-ENOMEM).
 */
struct dma_fence *
xe_preempt_fence_create(struct xe_exec_queue *q,
			u64 context, u32 seqno)
{
	struct xe_preempt_fence *pfence;

	pfence = xe_preempt_fence_alloc();
	if (IS_ERR(pfence))
		return ERR_CAST(pfence);

	return xe_preempt_fence_arm(pfence, q, context, seqno);
}
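
/*
 * Illustration only, with a hypothetical caller, not part of the original
 * file: xe_preempt_fence_create() collapses the alloc + arm pair into one
 * call, so the only failure mode the caller must handle is the allocation
 * error.
 */
#if 0
static int example_create_fence(struct xe_exec_queue *q, u64 context,
				u32 seqno, struct dma_fence **out)
{
	struct dma_fence *fence = xe_preempt_fence_create(q, context, seqno);

	if (IS_ERR(fence))
		return PTR_ERR(fence); /* e.g. -ENOMEM */

	*out = fence;
	return 0;
}
#endif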

/**
 * xe_fence_is_xe_preempt() - Check whether a dma-fence is an xe preempt fence.
 * @fence: The dma-fence to check.
 *
 * Return: True if @fence uses this driver's preempt fence ops, false otherwise.
 */
bool xe_fence_is_xe_preempt(const struct dma_fence *fence)
{
	return fence->ops == &preempt_fence_ops;
}
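
/*
 * Illustration only, with a hypothetical caller, not part of the original
 * file: code that walks mixed fence collections can use
 * xe_fence_is_xe_preempt() to single out preempt fences, which track
 * preemption rather than work completion.
 */
#if 0
static bool example_is_work_fence(const struct dma_fence *fence)
{
	return !xe_fence_is_xe_preempt(fence);
}
#endif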