/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef _I915_ACTIVE_H_
#define _I915_ACTIVE_H_

#include <linux/lockdep.h>

#include "i915_active_types.h"
#include "i915_request.h"

struct i915_request;
struct intel_engine_cs;
struct intel_timeline;

/*
 * We treat requests as fences. This is not to be confused with our
 * "fence registers"; these are pipeline synchronisation objects à la
 * GL_ARB_sync. We use the fences to synchronize access from the CPU with
 * activity on the GPU, for example, we should not rewrite an object's PTEs
 * whilst the GPU is reading them. We also track fences at a higher level to
 * provide implicit synchronisation around GEM objects, e.g. set-domain will
 * wait for outstanding GPU rendering before marking the object ready for CPU
 * access, or a pageflip will wait until the GPU is complete before showing
 * the frame on the scanout.
 *
 * In order to use a fence, the object must track the fence it needs to
 * serialise with. For example, GEM objects want to track both read and
 * write access so that we can perform concurrent read operations between
 * the CPU and GPU engines, as well as waiting for all rendering to
 * complete, or waiting for the last GPU user of a "fence register". The
 * object then embeds a #i915_active_fence to track the most recent (in
 * retirement order) request relevant for the desired mode of access.
 * The #i915_active_fence is updated with i915_active_fence_set() to
 * track the most recent fence request; typically this is done as part of
 * i915_vma_move_to_active().
 *
 * When the #i915_active_fence completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
 * itself as idle (i915_active_fence.fence == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb);

/**
 * __i915_active_fence_init - prepares the activity tracker for use
 * @active: the active tracker
 * @fence: initial fence to track, can be NULL
 * @fn: a callback invoked when the tracker is retired (becomes idle),
 *	can be NULL
 *
 * __i915_active_fence_init() prepares the embedded @active struct for use as
 * an activity tracker, that is for tracking the last known active fence
 * associated with it. When the last fence becomes idle (is retired after
 * completion), the optional callback @fn is invoked.
 */
static inline void
__i915_active_fence_init(struct i915_active_fence *active,
			 void *fence,
			 dma_fence_func_t fn)
{
	RCU_INIT_POINTER(active->fence, fence);
	active->cb.func = fn ?: i915_active_noop;
}

#define INIT_ACTIVE_FENCE(A) \
	__i915_active_fence_init((A), NULL, NULL)
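/*
 * Example: a minimal sketch of how an owner might embed an activity
 * tracker and learn when it idles. The names hypothetical_owner and
 * hypothetical_retire are made up for illustration and are not part of
 * this interface; error handling is elided.
 *
 *	struct hypothetical_owner {
 *		struct i915_active_fence write;
 *	};
 *
 *	static void hypothetical_retire(struct dma_fence *fence,
 *					struct dma_fence_cb *cb)
 *	{
 *		struct i915_active_fence *active =
 *			container_of(cb, struct i915_active_fence, cb);
 *		struct hypothetical_owner *owner =
 *			container_of(active, struct hypothetical_owner, write);
 *
 *		... @owner->write is now idle, release resources here ...
 *	}
 *
 *	__i915_active_fence_init(&owner->write, NULL, hypothetical_retire);
 */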
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence);

/**
 * i915_active_fence_set - updates the tracker to watch the current fence
 * @active: the active tracker
 * @rq: the request to watch
 *
 * i915_active_fence_set() watches the given @rq for completion. While
 * that @rq is busy, the @active reports busy. When that @rq is signaled
 * (or else retired), the @active tracker is updated to report idle.
 */
int __must_check
i915_active_fence_set(struct i915_active_fence *active,
		      struct i915_request *rq);

/**
 * i915_active_fence_get - return a reference to the active fence
 * @active: the active tracker
 *
 * i915_active_fence_get() returns a reference to the active fence,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
 * The reference should be freed with dma_fence_put().
 */
static inline struct dma_fence *
i915_active_fence_get(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(&active->fence);
	rcu_read_unlock();

	return fence;
}

/**
 * i915_active_fence_isset - report whether the active tracker is assigned
 * @active: the active tracker
 *
 * i915_active_fence_isset() returns true if the active tracker is currently
 * assigned to a fence. Due to the lazy retiring, that fence may be idle
 * and this may report stale information.
 */
static inline bool
i915_active_fence_isset(const struct i915_active_fence *active)
{
	return rcu_access_pointer(active->fence);
}

/*
 * GPU activity tracking
 *
 * Each set of commands submitted to the GPU comprises a single request that
 * signals a fence upon completion. struct i915_request combines the
 * command submission, scheduling and fence signaling roles. If we want to see
 * if a particular task is complete, we need to grab the fence (struct
 * i915_request) for that task and check or wait for it to be signaled. More
 * often though we want to track the status of a bunch of tasks, for example
 * to wait for the GPU to finish accessing some memory across a variety of
 * different command pipelines from different clients. We could choose to
 * track every single request associated with the task, but knowing that
 * each request belongs to an ordered timeline (later requests within a
 * timeline must wait for earlier requests), we need only track the
 * latest request in each timeline to determine the overall status of the
 * task.
 *
 * struct i915_active provides this tracking across timelines. It builds a
 * composite shared-fence, and is updated as new work is submitted to the task,
 * forming a snapshot of the current status. It should be embedded into the
 * different resources that need to track their associated GPU activity to
 * provide a callback when that GPU activity has ceased, or otherwise to
 * provide a serialisation point either for request submission or for CPU
 * synchronisation.
 */

void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			unsigned long flags,
			struct lock_class_key *mkey,
			struct lock_class_key *wkey);

/* Specialise each class of i915_active to avoid impossible lockdep cycles. */
#define i915_active_init(ref, active, retire, flags) do {		\
	static struct lock_class_key __mkey;				\
	static struct lock_class_key __wkey;				\
									\
	__i915_active_init(ref, active, retire, flags, &__mkey, &__wkey); \
} while (0)
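/*
 * Example: a minimal sketch of tracking GPU activity on an embedded
 * i915_active. The names hypothetical_resource, hypothetical_active and
 * hypothetical_retire are made up for illustration and are not part of
 * this interface.
 *
 *	struct hypothetical_resource {
 *		struct i915_active active;
 *	};
 *
 *	static int hypothetical_active(struct i915_active *ref)
 *	{
 *		... first acquire, idle -> busy: take whatever references
 *		the resource needs while it is in use by the GPU ...
 *		return 0;
 *	}
 *
 *	static void hypothetical_retire(struct i915_active *ref)
 *	{
 *		... last request retired, busy -> idle: drop those
 *		references again ...
 *	}
 *
 *	i915_active_init(&res->active, hypothetical_active,
 *			 hypothetical_retire, 0);
 *
 *	err = i915_active_acquire(&res->active);
 *	if (err == 0) {
 *		err = i915_active_add_request(&res->active, rq);
 *		i915_active_release(&res->active);
 *	}
 */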
int i915_active_add_request(struct i915_active *ref, struct i915_request *rq);

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);

int __i915_active_wait(struct i915_active *ref, int state);
static inline int i915_active_wait(struct i915_active *ref)
{
	return __i915_active_wait(ref, TASK_INTERRUPTIBLE);
}

int i915_sw_fence_await_active(struct i915_sw_fence *fence,
			       struct i915_active *ref,
			       unsigned int flags);
int i915_request_await_active(struct i915_request *rq, struct i915_active *ref,
			      unsigned int flags);
#define I915_ACTIVE_AWAIT_EXCL BIT(0)
#define I915_ACTIVE_AWAIT_ACTIVE BIT(1)
#define I915_ACTIVE_AWAIT_BARRIER BIT(2)

int i915_active_acquire(struct i915_active *ref);
int i915_active_acquire_for_context(struct i915_active *ref, u64 idx);
bool i915_active_acquire_if_busy(struct i915_active *ref);

void i915_active_release(struct i915_active *ref);

/* Bump the activity count; only valid while the tracker is already active. */
static inline void __i915_active_acquire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	atomic_inc(&ref->count);
}

static inline bool
i915_active_is_idle(const struct i915_active *ref)
{
	return !atomic_read(&ref->count);
}

void i915_active_fini(struct i915_active *ref);

int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine);
void i915_active_acquire_barrier(struct i915_active *ref);
void i915_request_add_active_barriers(struct i915_request *rq);

void i915_active_print(struct i915_active *ref, struct drm_printer *m);
void i915_active_unlock_wait(struct i915_active *ref);

struct i915_active *i915_active_create(void);
struct i915_active *i915_active_get(struct i915_active *ref);
void i915_active_put(struct i915_active *ref);

/* Make @rq wait for the exclusive fence tracked by @active, if any. */
static inline int __i915_request_await_exclusive(struct i915_request *rq,
						 struct i915_active *active)
{
	struct dma_fence *fence;
	int err = 0;

	fence = i915_active_fence_get(&active->excl);
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_module_exit(void);
int i915_active_module_init(void);

#endif /* _I915_ACTIVE_H_ */