/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job) \
        container_of((sched_job), struct drm_sched_job, queue_node)
/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting to the HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *              entity can be submitted
 * @num_sched_list: number of drm sched in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note that the &sched_list must have at least one element to schedule the
 * entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
                          enum drm_sched_priority priority,
                          struct drm_gpu_scheduler **sched_list,
                          unsigned int num_sched_list,
                          atomic_t *guilty)
{
        if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
                return -EINVAL;

        memset(entity, 0, sizeof(struct drm_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = NULL;
        entity->guilty = guilty;
        entity->num_sched_list = num_sched_list;
        entity->priority = priority;
        /*
         * It's perfectly valid to initialize an entity without having a valid
         * scheduler attached. It's just not valid to use the scheduler before
         * it is initialized itself.
         */
        entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
        RCU_INIT_POINTER(entity->last_scheduled, NULL);
        RB_CLEAR_NODE(&entity->rb_tree_node);

        if (num_sched_list && !sched_list[0]->sched_rq) {
                /* Every entry covered by num_sched_list should be non-NULL,
                 * so warn drivers not to do this and to fix their DRM
                 * calling order.
                 */
                pr_warn("%s: called with uninitialized scheduler\n", __func__);
        } else if (num_sched_list) {
                /* The "priority" of an entity cannot exceed the number of
                 * run-queues of a scheduler. Protect against num_rqs being 0
                 * by converting to signed. Choose the lowest priority
                 * available.
                 */
                if (entity->priority >= sched_list[0]->num_rqs) {
                        drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
                                entity->priority, sched_list[0]->num_rqs);
                        entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
                                                 (s32) DRM_SCHED_PRIORITY_KERNEL);
                }
                entity->rq = sched_list[0]->sched_rq[entity->priority];
        }

        init_completion(&entity->entity_idle);

        /* We start in an idle state. */
        complete_all(&entity->entity_idle);

        spin_lock_init(&entity->lock);
        spsc_queue_init(&entity->job_queue);

        atomic_set(&entity->fence_seq, 0);
        entity->fence_context = dma_fence_context_alloc(2);

        return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);
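
/*
 * Usage sketch (illustrative, not part of this file): a driver would
 * typically initialize an entity against a single scheduler roughly like
 * this; "my_sched" and "ctx" are hypothetical driver-side names.
 *
 *      struct drm_gpu_scheduler *sched_list[] = { &my_sched };
 *      int r;
 *
 *      r = drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *                                sched_list, ARRAY_SIZE(sched_list), NULL);
 *      if (r)
 *              return r;
 */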

/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *              existing entity->sched_list
 * @num_sched_list: number of drm sched in sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
                                   struct drm_gpu_scheduler **sched_list,
                                   unsigned int num_sched_list)
{
        WARN_ON(!num_sched_list || !sched_list);

        spin_lock(&entity->lock);
        entity->sched_list = sched_list;
        entity->num_sched_list = num_sched_list;
        spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
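
/*
 * Usage sketch (illustrative): switching an entity to a new scheduler set.
 * Per the comment above, the caller must hold the same lock that serializes
 * drm_sched_job_arm()/drm_sched_entity_push_job() for this entity;
 * "ctx->submit_lock" is a hypothetical driver lock.
 *
 *      mutex_lock(&ctx->submit_lock);
 *      drm_sched_entity_modify_sched(&ctx->entity, new_list, num_new);
 *      mutex_unlock(&ctx->submit_lock);
 */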

static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
        rmb(); /* for list_empty to work without lock */

        if (list_empty(&entity->list) ||
            spsc_queue_count(&entity->job_queue) == 0 ||
            entity->stopped)
                return true;

        return false;
}

/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
        if (spsc_queue_peek(&entity->job_queue) == NULL)
                return false;

        if (READ_ONCE(entity->dependency))
                return false;

        return true;
}

/**
 * drm_sched_entity_error - return error of last scheduled job
 * @entity: scheduler entity to check
 *
 * Opportunistically return the error of the last scheduled job. The result
 * can change at any time when new jobs are pushed to the hw.
 */
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
        struct dma_fence *fence;
        int r;

        rcu_read_lock();
        fence = rcu_dereference(entity->last_scheduled);
        r = fence ? fence->error : 0;
        rcu_read_unlock();

        return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);
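
/*
 * Usage sketch (illustrative): a driver might bail out of new submissions on
 * a context whose last scheduled job failed; "ctx" is hypothetical.
 *
 *      r = drm_sched_entity_error(&ctx->entity);
 *      if (r)
 *              return r;
 */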

static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
        struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

        drm_sched_fence_finished(job->s_fence, -ESRCH);
        WARN_ON(job->s_fence->parent);
        job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
                                          struct dma_fence_cb *cb)
{
        struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
                                                 finish_cb);
        unsigned long index;

        dma_fence_put(f);

        /* Wait for all dependencies to avoid data corruption */
        xa_for_each(&job->dependencies, index, f) {
                struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

                if (s_fence && f == &s_fence->scheduled) {
                        /* The dependencies array had a reference on the
                         * scheduled fence, and the finished fence refcount
                         * might have dropped to zero. Use dma_fence_get_rcu()
                         * so we get a NULL fence in that case.
                         */
                        f = dma_fence_get_rcu(&s_fence->finished);

                        /* Now that we have a reference on the finished fence,
                         * we can release the reference the dependencies array
                         * had on the scheduled fence.
                         */
                        dma_fence_put(&s_fence->scheduled);
                }

                xa_erase(&job->dependencies, index);
                if (f && !dma_fence_add_callback(f, &job->finish_cb,
                                                 drm_sched_entity_kill_jobs_cb))
                        return;

                dma_fence_put(f);
        }

        INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
        schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
        struct drm_sched_job *job;
        struct dma_fence *prev;

        if (!entity->rq)
                return;

        spin_lock(&entity->lock);
        entity->stopped = true;
        drm_sched_rq_remove_entity(entity->rq, entity);
        spin_unlock(&entity->lock);

        /* Make sure this entity is not used by the scheduler at the moment */
        wait_for_completion(&entity->entity_idle);

        /* The entity is guaranteed to not be used by the scheduler */
        prev = rcu_dereference_check(entity->last_scheduled, true);
        dma_fence_get(prev);
        while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
                struct drm_sched_fence *s_fence = job->s_fence;

                dma_fence_get(&s_fence->finished);
                if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
                                                    drm_sched_entity_kill_jobs_cb))
                        drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);

                prev = &s_fence->finished;
        }
        dma_fence_put(prev);
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait in jiffies for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions. This first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout.
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
        struct drm_gpu_scheduler *sched;
        struct task_struct *last_user;
        long ret = timeout;

        if (!entity->rq)
                return 0;

        sched = entity->rq->sched;
        /*
         * The client will not queue more IBs during this fini; consume the
         * existing queued IBs or discard them on SIGKILL.
         */
        if (current->flags & PF_EXITING) {
                if (timeout)
                        ret = wait_event_timeout(
                                        sched->job_scheduled,
                                        drm_sched_entity_is_idle(entity),
                                        timeout);
        } else {
                wait_event_killable(sched->job_scheduled,
                                    drm_sched_entity_is_idle(entity));
        }

        /* For a killed process disable any more IBs enqueue right now */
        last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
        if ((!last_user || last_user == current->group_leader) &&
            (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
                drm_sched_entity_kill(entity);

        return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If there are potentially jobs still in flight or getting newly queued,
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was
 * killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
        /*
         * If consumption of existing IBs wasn't completed, forcefully remove
         * them here. This also makes sure that the scheduler won't touch this
         * entity any more.
         */
        drm_sched_entity_kill(entity);

        if (entity->dependency) {
                dma_fence_remove_callback(entity->dependency, &entity->cb);
                dma_fence_put(entity->dependency);
                entity->dependency = NULL;
        }

        dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
        RCU_INIT_POINTER(entity->last_scheduled, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
        drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
        drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
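
/*
 * Usage sketch (illustrative): typical teardown when a driver context is
 * destroyed; "ctx" is hypothetical. For finer control a driver can instead
 * call drm_sched_entity_flush() with its own timeout and then
 * drm_sched_entity_fini() separately.
 *
 *      drm_sched_entity_destroy(&ctx->entity);
 */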

/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
                                       struct dma_fence_cb *cb)
{
        struct drm_sched_entity *entity =
                container_of(cb, struct drm_sched_entity, cb);

        entity->dependency = NULL;
        dma_fence_put(f);
}

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
                                    struct dma_fence_cb *cb)
{
        struct drm_sched_entity *entity =
                container_of(cb, struct drm_sched_entity, cb);

        drm_sched_entity_clear_dep(f, cb);
        drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
                                   enum drm_sched_priority priority)
{
        spin_lock(&entity->lock);
        entity->priority = priority;
        spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
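
/*
 * Usage sketch (illustrative): bumping a hypothetical context's entity
 * priority at runtime, e.g. in response to a userspace request:
 *
 *      drm_sched_entity_set_priority(&ctx->entity, DRM_SCHED_PRIORITY_HIGH);
 */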

/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
        struct drm_gpu_scheduler *sched = entity->rq->sched;
        struct dma_fence *fence = entity->dependency;
        struct drm_sched_fence *s_fence;

        if (fence->context == entity->fence_context ||
            fence->context == entity->fence_context + 1) {
                /*
                 * Fence is a scheduled/finished fence from a job
                 * which belongs to the same entity, we can ignore
                 * fences from ourselves
                 */
                dma_fence_put(entity->dependency);
                return false;
        }

        s_fence = to_drm_sched_fence(fence);
        if (!fence->error && s_fence && s_fence->sched == sched &&
            !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

                /*
                 * Fence is from the same scheduler, only need to wait for
                 * it to be scheduled
                 */
                fence = dma_fence_get(&s_fence->scheduled);
                dma_fence_put(entity->dependency);
                entity->dependency = fence;
                if (!dma_fence_add_callback(fence, &entity->cb,
                                            drm_sched_entity_clear_dep))
                        return true;

                /* Ignore it when it is already scheduled */
                dma_fence_put(fence);
                return false;
        }

        if (!dma_fence_add_callback(entity->dependency, &entity->cb,
                                    drm_sched_entity_wakeup))
                return true;

        dma_fence_put(entity->dependency);
        return false;
}

static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
                         struct drm_sched_entity *entity)
{
        struct dma_fence *f;

        /* We keep the fence around, so we can iterate over all dependencies
         * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
         * before killing the job.
         */
        f = xa_load(&job->dependencies, job->last_dependency);
        if (f) {
                job->last_dependency++;
                return dma_fence_get(f);
        }

        if (job->sched->ops->prepare_job)
                return job->sched->ops->prepare_job(job, entity);

        return NULL;
}

struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
        struct drm_sched_job *sched_job;

        sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
        if (!sched_job)
                return NULL;

        while ((entity->dependency =
                        drm_sched_job_dependency(sched_job, entity))) {
                trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

                if (drm_sched_entity_add_dependency_cb(entity))
                        return NULL;
        }

        /* skip jobs from entity that marked guilty */
        if (entity->guilty && atomic_read(entity->guilty))
                dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

        dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
        rcu_assign_pointer(entity->last_scheduled,
                           dma_fence_get(&sched_job->s_fence->finished));

        /*
         * If the queue is empty we allow drm_sched_entity_select_rq() to
         * locklessly access ->last_scheduled. This only works if we set the
         * pointer before we dequeue and if we add a write barrier here.
         */
        smp_wmb();

        spsc_queue_pop(&entity->job_queue);

        /*
         * Update the entity's location in the min heap according to
         * the timestamp of the next job, if any.
         */
        if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
                struct drm_sched_job *next;

                next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
                if (next) {
                        struct drm_sched_rq *rq;

                        spin_lock(&entity->lock);
                        rq = entity->rq;
                        spin_lock(&rq->lock);
                        drm_sched_rq_update_fifo_locked(entity, rq,
                                                        next->submit_ts);
                        spin_unlock(&rq->lock);
                        spin_unlock(&entity->lock);
                }
        }

        /* Jobs and entities might have different lifecycles. Since we're
         * removing the job from the entity's queue, set the job's entity
         * pointer to NULL to prevent any future access of the entity through
         * this job.
         */
        sched_job->entity = NULL;

        return sched_job;
}

void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
        struct dma_fence *fence;
        struct drm_gpu_scheduler *sched;
        struct drm_sched_rq *rq;

        /* single possible engine and already selected */
        if (!entity->sched_list)
                return;

        /* queue non-empty, stay on the same engine */
        if (spsc_queue_count(&entity->job_queue))
                return;

        /*
         * Only when the queue is empty are we guaranteed that the scheduler
         * thread cannot change ->last_scheduled. To enforce ordering we need
         * a read barrier here. See drm_sched_entity_pop_job() for the other
         * side.
         */
        smp_rmb();

        fence = rcu_dereference_check(entity->last_scheduled, true);

        /* stay on the same engine if the previous job hasn't finished */
        if (fence && !dma_fence_is_signaled(fence))
                return;

        spin_lock(&entity->lock);
        sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
        rq = sched ? sched->sched_rq[entity->priority] : NULL;
        if (rq != entity->rq) {
                drm_sched_rq_remove_entity(entity->rq, entity);
                entity->rq = rq;
        }
        spin_unlock(&entity->lock);

        if (entity->num_sched_list == 1)
                entity->sched_list = NULL;
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion to the queue matches the
 * job's fence sequence number, this function should be called with
 * drm_sched_job_arm() under a common lock for the struct drm_sched_entity
 * that was set up for @sched_job in drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
        struct drm_sched_entity *entity = sched_job->entity;
        bool first;
        ktime_t submit_ts;

        trace_drm_sched_job(sched_job, entity);
        atomic_inc(entity->rq->sched->score);
        WRITE_ONCE(entity->last_user, current->group_leader);

        /*
         * After the sched_job is pushed into the entity queue, it may be
         * completed and freed up at any time. We can no longer access it.
         * Make sure to set the submit_ts first, to avoid a race.
         */
        sched_job->submit_ts = submit_ts = ktime_get();
        first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

        /* first job wakes up scheduler */
        if (first) {
                struct drm_gpu_scheduler *sched;
                struct drm_sched_rq *rq;

                /* Add the entity to the run queue */
                spin_lock(&entity->lock);
                if (entity->stopped) {
                        spin_unlock(&entity->lock);

                        DRM_ERROR("Trying to push to a killed entity\n");
                        return;
                }

                rq = entity->rq;
                sched = rq->sched;

                spin_lock(&rq->lock);
                drm_sched_rq_add_entity(rq, entity);

                if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
                        drm_sched_rq_update_fifo_locked(entity, rq, submit_ts);

                spin_unlock(&rq->lock);
                spin_unlock(&entity->lock);

                drm_sched_wakeup(sched);
        }
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
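
/*
 * Usage sketch (illustrative): the usual submission sequence a driver
 * follows with this API. "job", "ctx" and "owner" are hypothetical names,
 * and the exact drm_sched_job_init() parameters vary across kernel versions.
 *
 *      r = drm_sched_job_init(&job->base, &ctx->entity, 1, owner);
 *      if (r)
 *              return r;
 *
 *      drm_sched_job_arm(&job->base);
 *      fence = dma_fence_get(&job->base.s_fence->finished);
 *      drm_sched_entity_push_job(&job->base);
 */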