/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/**
 * DOC: Overview
 *
 * The GPU scheduler provides entities which allow userspace to push jobs
 * into software queues which are then scheduled on a hardware run queue.
 * The software queues have a priority among them. The scheduler selects
 * entities from the run queue using a FIFO. The scheduler provides
 * dependency handling features among jobs. The driver is supposed to
 * provide callback functions to the scheduler for backend operations,
 * like submitting a job to the hardware run queue or returning the
 * dependencies of a job.
 *
 * The organisation of the scheduler is the following:
 *
 * 1. Each hw run queue has one scheduler
 * 2. Each scheduler has multiple run queues with different priorities
 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
 * 3. Each scheduler run queue has a queue of entities to schedule
 * 4. Entities themselves maintain a queue of jobs that will be scheduled
 *    on the hardware.
 *
 * The jobs in an entity are always scheduled in the order in which they
 * were pushed. A sketch of how a driver typically wires this up follows
 * below.
 */
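
/*
 * As a rough illustration of that organisation, a driver wires things up
 * along these lines (a minimal sketch; the my_* names are hypothetical
 * driver code, not part of this file):
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job	= my_run_job,		// hand the job to the hw ring
 *		.timedout_job	= my_timedout_job,	// recover from a hang
 *		.free_job	= my_free_job,		// release job resources
 *	};
 *
 * The driver then creates one scheduler per hardware run queue with
 * drm_sched_init(), one entity per userspace context with
 * drm_sched_entity_init(), and submits work with drm_sched_job_init(),
 * drm_sched_job_arm() and drm_sched_entity_push_job().
 */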

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/dma-resv.h>
#include <uapi/linux/sched/types.h>

#include <drm/drm_print.h>
#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

int drm_sched_policy = DRM_SCHED_POLICY_FIFO;

/**
 * DOC: sched_policy (int)
 * Used to override the default scheduling policy for entities in a run queue.
 */
MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
module_param_named(sched_policy, drm_sched_policy, int, 0444);
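
/*
 * For example, round robin scheduling can be selected at module load time
 * (a sketch; this assumes the scheduler is built as the gpu-sched module
 * and that DRM_SCHED_POLICY_RR expands to 0 in this tree):
 *
 *	modprobe gpu-sched sched_policy=0
 *
 * With the 0444 permissions above the value is read-only at runtime,
 * visible under /sys/module/gpu_sched/parameters/sched_policy.
 */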

static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
							    const struct rb_node *b)
{
	struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
	struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);

	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
}

static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = entity->rq;

	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
		RB_CLEAR_NODE(&entity->rb_tree_node);
	}
}

void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
{
	/*
	 * Both locks need to be grabbed: one to protect against entity->rq
	 * changing from within a concurrent drm_sched_entity_select_rq(), and
	 * the other to protect the rb tree while it is updated.
	 */
	spin_lock(&entity->rq_lock);
	spin_lock(&entity->rq->lock);

	drm_sched_rq_remove_fifo_locked(entity);

	entity->oldest_job_waiting = ts;

	rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
		      drm_sched_entity_compare_before);

	spin_unlock(&entity->rq->lock);
	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_rq_init - initialize a given run queue struct
 *
 * @sched: scheduler instance to associate with this run queue
 * @rq: scheduler run queue
 *
 * Initializes a scheduler runqueue.
 */
static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
			      struct drm_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->rb_tree_root = RB_ROOT_CACHED;
	rq->current_entity = NULL;
	rq->sched = sched;
}

/**
 * drm_sched_rq_add_entity - add an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Adds a scheduler entity to the run queue.
 */
void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_inc(rq->sched->score);
	list_add_tail(&entity->list, &rq->entities);

	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_remove_entity - remove an entity
 *
 * @rq: scheduler run queue
 * @entity: scheduler entity
 *
 * Removes a scheduler entity from the run queue.
 */
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;

	spin_lock(&rq->lock);

	atomic_dec(rq->sched->score);
	list_del_init(&entity->list);

	if (rq->current_entity == entity)
		rq->current_entity = NULL;

	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
		drm_sched_rq_remove_fifo_locked(entity);

	spin_unlock(&rq->lock);
}

/**
 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Try to find a ready entity; returns NULL if none is found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (drm_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				reinit_completion(&entity->entity_idle);
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	list_for_each_entry(entity, &rq->entities, list) {
		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
 *
 * @rq: scheduler run queue to check.
 *
 * Find the oldest waiting ready entity; returns NULL if none is found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
{
	struct rb_node *rb;

	spin_lock(&rq->lock);
	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
		struct drm_sched_entity *entity;

		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
		if (drm_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			reinit_completion(&entity->entity_idle);
			break;
		}
	}
	spin_unlock(&rq->lock);

	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
}

/**
 * drm_sched_job_done - complete a job
 * @s_job: pointer to the job which is done
 *
 * Finish the job's fence and wake up the worker thread.
 */
static void drm_sched_job_done(struct drm_sched_job *s_job)
{
	struct drm_sched_fence *s_fence = s_job->s_fence;
	struct drm_gpu_scheduler *sched = s_fence->sched;

	atomic_dec(&sched->hw_rq_count);
	atomic_dec(sched->score);

	trace_drm_sched_process_job(s_fence);

	dma_fence_get(&s_fence->finished);
	drm_sched_fence_finished(s_fence);
	dma_fence_put(&s_fence->finished);
	wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_job_done_cb - the callback for a done job
 * @f: fence
 * @cb: fence callbacks
 */
static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);

	drm_sched_job_done(s_job);
}

/**
 * drm_sched_start_timeout - start timeout for reset worker
 *
 * @sched: scheduler instance to start the worker for
 *
 * Start the timeout for the given scheduler.
 */
static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
{
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
	    !list_empty(&sched->pending_list))
		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
}

/**
 * drm_sched_fault - immediately start timeout handler
 *
 * @sched: scheduler where the timeout handling should be started.
 *
 * Start timeout handling immediately when the driver detects a hardware fault.
 */
void drm_sched_fault(struct drm_gpu_scheduler *sched)
{
	mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
}
EXPORT_SYMBOL(drm_sched_fault);
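
/*
 * A driver would typically call drm_sched_fault() from its fault interrupt
 * handler, for example (a hypothetical sketch; the my_* names are not part
 * of this file):
 *
 *	static irqreturn_t my_fault_irq(int irq, void *arg)
 *	{
 *		struct my_device *mdev = arg;
 *
 *		// Don't wait for the full timeout; start TDR right away.
 *		drm_sched_fault(&mdev->sched);
 *		return IRQ_HANDLED;
 *	}
 */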

/**
 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 *
 * @sched: scheduler instance for which to suspend the timeout
 *
 * Suspend the delayed work timeout for the scheduler. This is done by
 * modifying the delayed work timeout to an arbitrarily large value,
 * MAX_SCHEDULE_TIMEOUT in this case.
 *
 * Returns the timeout remaining.
 */
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
{
	unsigned long sched_timeout, now = jiffies;

	sched_timeout = sched->work_tdr.timer.expires;

	/*
	 * Modify the timeout to an arbitrarily large value. This also prevents
	 * the timeout from being restarted when new submissions arrive.
	 */
	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
			&& time_after(sched_timeout, now))
		return sched_timeout - now;
	else
		return sched->timeout;
}
EXPORT_SYMBOL(drm_sched_suspend_timeout);
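
/*
 * drm_sched_suspend_timeout() and drm_sched_resume_timeout() are intended
 * to be used as a pair around a window in which jobs make no forward
 * progress, e.g. a hardware queue preemption (a hypothetical sketch):
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(&mdev->sched);
 *	my_hw_queue_preempt(mdev);	// jobs are off the hw here
 *	drm_sched_resume_timeout(&mdev->sched, remaining);
 */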

/**
 * drm_sched_resume_timeout - Resume scheduler job timeout
 *
 * @sched: scheduler instance for which to resume the timeout
 * @remaining: remaining timeout
 *
 * Resume the delayed work timeout for the scheduler.
 */
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining)
{
	spin_lock(&sched->job_list_lock);

	if (list_empty(&sched->pending_list))
		cancel_delayed_work(&sched->work_tdr);
	else
		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);

	spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);

static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
	struct drm_gpu_scheduler *sched = s_job->sched;

	spin_lock(&sched->job_list_lock);
	list_add_tail(&s_job->list, &sched->pending_list);
	drm_sched_start_timeout(sched);
	spin_unlock(&sched->job_list_lock);
}

static void drm_sched_job_timedout(struct work_struct *work)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_job *job;
	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;

	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);

	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
	spin_lock(&sched->job_list_lock);
	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job) {
		/*
		 * Remove the bad job so it cannot be freed by a concurrent
		 * drm_sched_cleanup_jobs. It will be reinserted after
		 * sched->thread is parked, at which point it's safe.
		 */
		list_del_init(&job->list);
		spin_unlock(&sched->job_list_lock);

		status = job->sched->ops->timedout_job(job);

		/*
		 * The guilty job did complete and hence needs to be manually
		 * removed. See the drm_sched_stop doc.
		 */
		if (sched->free_guilty) {
			job->sched->ops->free_job(job);
			sched->free_guilty = false;
		}
	} else {
		spin_unlock(&sched->job_list_lock);
	}

	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}
}

/**
 * drm_sched_stop - stop the scheduler
 *
 * @sched: scheduler instance
 * @bad: job which caused the time out
 *
 * Stop the scheduler and also remove and free all completed jobs.
 * Note: the bad job will not be freed as it might be used later, so it is
 * the caller's responsibility to release it manually if it is no longer
 * part of the pending list.
 */
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
	struct drm_sched_job *s_job, *tmp;

	kthread_park(sched->thread);

	/*
	 * Reinsert the bad job here - now it's safe as
	 * drm_sched_get_cleanup_job cannot race against us and release the
	 * bad job at this point - we parked (waited for) any in progress
	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
	 * now until the scheduler thread is unparked.
	 */
	if (bad && bad->sched == sched)
		/*
		 * Add at the head of the queue to reflect it was the earliest
		 * job extracted.
		 */
		list_add(&bad->list, &sched->pending_list);

	/*
	 * Iterate the job list from later to earlier one and either deactivate
	 * their HW callbacks or remove them from the pending list if they have
	 * already signaled.
	 * This iteration is thread safe as the sched thread is stopped.
	 */
	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
					 list) {
		if (s_job->s_fence->parent &&
		    dma_fence_remove_callback(s_job->s_fence->parent,
					      &s_job->cb)) {
			dma_fence_put(s_job->s_fence->parent);
			s_job->s_fence->parent = NULL;
			atomic_dec(&sched->hw_rq_count);
		} else {
			/*
			 * Remove the job from the pending_list.
			 * Locking here is for a concurrent resume timeout.
			 */
			spin_lock(&sched->job_list_lock);
			list_del_init(&s_job->list);
			spin_unlock(&sched->job_list_lock);

			/*
			 * Wait for the job's HW fence callback to finish using
			 * s_job before releasing it.
			 *
			 * The job is still alive, so the fence refcount is at
			 * least 1.
			 */
			dma_fence_wait(&s_job->s_fence->finished, false);

			/*
			 * We must keep the bad job alive for later use during
			 * recovery by some of the drivers but leave a hint
			 * that the guilty job must be released.
			 */
			if (bad != s_job)
				sched->ops->free_job(s_job);
			else
				sched->free_guilty = true;
		}
	}

	/*
	 * Stop the pending timer in flight as we rearm it in drm_sched_start.
	 * This avoids the pending timeout work in progress firing right after
	 * this TDR finished and before the newly restarted jobs had a chance
	 * to complete.
	 */
	cancel_delayed_work(&sched->work_tdr);
}
EXPORT_SYMBOL(drm_sched_stop);

/**
 * drm_sched_start - recover jobs after a reset
 *
 * @sched: scheduler instance
 * @full_recovery: proceed with complete sched restart
 */
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
	struct drm_sched_job *s_job, *tmp;
	int r;

	/*
	 * Locking the list is not required here as the sched thread is parked
	 * so no new jobs are being inserted or removed. Also concurrent
	 * GPU recoveries can't run in parallel.
	 */
	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct dma_fence *fence = s_job->s_fence->parent;

		atomic_inc(&sched->hw_rq_count);

		if (!full_recovery)
			continue;

		if (fence) {
			r = dma_fence_add_callback(fence, &s_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(s_job);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else
			drm_sched_job_done(s_job);
	}

	if (full_recovery) {
		spin_lock(&sched->job_list_lock);
		drm_sched_start_timeout(sched);
		spin_unlock(&sched->job_list_lock);
	}

	kthread_unpark(sched->thread);
}
EXPORT_SYMBOL(drm_sched_start);

/**
 * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
 *
 * @sched: scheduler instance
 */
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *s_job, *tmp;
	uint64_t guilty_context;
	bool found_guilty = false;
	struct dma_fence *fence;

	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
		struct drm_sched_fence *s_fence = s_job->s_fence;

		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
			found_guilty = true;
			guilty_context = s_job->s_fence->scheduled.context;
		}

		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
			dma_fence_set_error(&s_fence->finished, -ECANCELED);

		fence = sched->ops->run_job(s_job);

		if (IS_ERR_OR_NULL(fence)) {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			s_job->s_fence->parent = NULL;
		} else {
			s_job->s_fence->parent = dma_fence_get(fence);

			/* Drop for the original kref_init */
			dma_fence_put(fence);
		}
	}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
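
/*
 * Together with drm_sched_stop() and drm_sched_start(), this forms the
 * usual device reset sequence, typically driven from a driver's
 * &drm_sched_backend_ops.timedout_job callback. A sketch modelled on how
 * existing drivers use these helpers (the my_* names are hypothetical):
 *
 *	static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *job)
 *	{
 *		struct drm_gpu_scheduler *sched = job->sched;
 *
 *		drm_sched_stop(sched, job);	// park the thread, detach callbacks
 *		drm_sched_increase_karma(job);	// mark the guilty entity
 *		my_hw_reset(sched);		// driver-specific hardware reset
 *		drm_sched_resubmit_jobs(sched);	// re-run the pending jobs
 *		drm_sched_start(sched, true);	// unpark and rearm the timeout
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 */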

/**
 * drm_sched_job_init - init a scheduler job
 * @job: scheduler job to init
 * @entity: scheduler entity to use
 * @owner: job owner for debugging
 *
 * Refer to drm_sched_entity_push_job() documentation
 * for locking considerations.
 *
 * Drivers must make sure to call drm_sched_job_cleanup() if this function
 * returns successfully, even when @job is aborted before drm_sched_job_arm()
 * is called.
 *
 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
 * has died, which can mean that there's no valid runqueue for an @entity.
 * This function returns -ENOENT in this case (which probably should be -EIO
 * as a more meaningful return value).
 *
 * Returns 0 for success, negative error code otherwise.
 */
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner)
{
	if (!entity->rq)
		return -ENOENT;

	job->entity = entity;
	job->s_fence = drm_sched_fence_alloc(entity, owner);
	if (!job->s_fence)
		return -ENOMEM;

	INIT_LIST_HEAD(&job->list);

	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);

	return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);
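
/*
 * The expected submission flow is: init the job, add its dependencies, arm
 * it and push it to the entity, unwinding with drm_sched_job_cleanup() on
 * any failure before the arm. A hedged sketch (my_job and the surrounding
 * driver code are hypothetical; the implicit dependencies must be added
 * with the object's reservation lock held):
 *
 *	ret = drm_sched_job_init(&job->base, &ctx->entity, ctx);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_sched_job_add_implicit_dependencies(&job->base, obj, true);
 *	if (ret) {
 *		drm_sched_job_cleanup(&job->base);
 *		return ret;
 *	}
 *
 *	drm_sched_job_arm(&job->base);
 *	drm_sched_entity_push_job(&job->base);
 */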

/**
 * drm_sched_job_arm - arm a scheduler job for execution
 * @job: scheduler job to arm
 *
 * This arms a scheduler job for execution. Specifically it initializes the
 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
 * or other places that need to track the completion of this job.
 *
 * Refer to drm_sched_entity_push_job() documentation for locking
 * considerations.
 *
 * This can only be called if drm_sched_job_init() succeeded.
 */
void drm_sched_job_arm(struct drm_sched_job *job)
{
	struct drm_gpu_scheduler *sched;
	struct drm_sched_entity *entity = job->entity;

	BUG_ON(!entity);
	drm_sched_entity_select_rq(entity);
	sched = entity->rq->sched;

	job->sched = sched;
	job->s_priority = entity->rq - sched->sched_rq;
	job->id = atomic64_inc_return(&sched->job_id_count);

	drm_sched_fence_init(job->s_fence, job->entity);
}
EXPORT_SYMBOL(drm_sched_job_arm);

/**
 * drm_sched_job_add_dependency - adds the fence as a job dependency
 * @job: scheduler job to add the dependencies to
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Note that @fence is consumed in both the success and error cases.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_dependency(struct drm_sched_job *job,
				 struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(&job->dependencies, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_sched_job_add_dependency);

/**
 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
 * @job: scheduler job to add the dependencies to
 * @resv: the dma_resv object to get the fences from
 * @usage: the dma_resv_usage to use to filter the fences
 *
 * This adds all fences matching the given usage from @resv to @job.
 * Must be called with the @resv lock held.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
					struct dma_resv *resv,
					enum dma_resv_usage usage)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	dma_resv_assert_held(resv);

	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
		/* Make sure to grab an additional ref on the added fence */
		dma_fence_get(fence);
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			dma_fence_put(fence);
			return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
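
/*
 * For example, to make a job wait for all outstanding writers on a buffer
 * (a sketch; assumes @job was already initialized and @obj is a GEM object
 * from the driver's submit path):
 *
 *	dma_resv_lock(obj->resv, NULL);
 *	ret = drm_sched_job_add_resv_dependencies(job, obj->resv,
 *						  DMA_RESV_USAGE_WRITE);
 *	dma_resv_unlock(obj->resv);
 */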

/**
 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
 *   dependencies
 * @job: scheduler job to add the dependencies to
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write)
{
	return drm_sched_job_add_resv_dependencies(job, obj->resv,
						   dma_resv_usage_rw(write));
}
EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);

/**
 * drm_sched_job_cleanup - clean up scheduler job resources
 * @job: scheduler job to clean up
 *
 * Cleans up the resources allocated with drm_sched_job_init().
 *
 * Drivers should call this from their error unwind code if @job is aborted
 * before drm_sched_job_arm() is called.
 *
 * After that point of no return @job is committed to be executed by the
 * scheduler, and this function should be called from the
 * &drm_sched_backend_ops.free_job callback.
 */
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
	struct dma_fence *fence;
	unsigned long index;

	if (kref_read(&job->s_fence->finished.refcount)) {
		/* drm_sched_job_arm() has been called */
		dma_fence_put(&job->s_fence->finished);
	} else {
		/* aborted job before committing to run it */
		drm_sched_fence_free(job->s_fence);
	}

	job->s_fence = NULL;

	xa_for_each(&job->dependencies, index, fence) {
		dma_fence_put(fence);
	}
	xa_destroy(&job->dependencies);
}
EXPORT_SYMBOL(drm_sched_job_cleanup);
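
/*
 * In the normal (non-aborted) case this is called from the driver's
 * &drm_sched_backend_ops.free_job callback, for example (a hypothetical
 * sketch):
 *
 *	static void my_free_job(struct drm_sched_job *sched_job)
 *	{
 *		struct my_job *job = to_my_job(sched_job);
 *
 *		drm_sched_job_cleanup(sched_job);
 *		my_release_hw_resources(job);
 *		kfree(job);
 *	}
 */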

/**
 * drm_sched_ready - is the scheduler ready
 *
 * @sched: scheduler instance
 *
 * Return true if we can push more jobs to the hw, otherwise false.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * drm_sched_wakeup - Wake up the scheduler when it is ready
 *
 * @sched: scheduler instance
 */
void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
	if (drm_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * drm_sched_select_entity - Select next entity to process
 *
 * @sched: scheduler instance
 *
 * Returns the entity to process or NULL if none are found.
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *entity;
	int i;

	if (!drm_sched_ready(sched))
		return NULL;

	/* The kernel run queue has higher priority than normal run queues */
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
			drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
			drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

/**
 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 *
 * @sched: scheduler instance
 *
 * Returns the next finished job from the pending list (if there is one)
 * ready for it to be destroyed.
 */
static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_job *job, *next;

	spin_lock(&sched->job_list_lock);

	job = list_first_entry_or_null(&sched->pending_list,
				       struct drm_sched_job, list);

	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
		/* remove the job from the pending_list */
		list_del_init(&job->list);

		/* cancel this job's TO timer */
		cancel_delayed_work(&sched->work_tdr);
		/* make the scheduled timestamp more accurate */
		next = list_first_entry_or_null(&sched->pending_list,
						typeof(*next), list);

		if (next) {
			next->s_fence->scheduled.timestamp =
				job->s_fence->finished.timestamp;
			/* start the TO timer for the next job */
			drm_sched_start_timeout(sched);
		}
	} else {
		job = NULL;
	}

	spin_unlock(&sched->job_list_lock);

	return job;
}

/**
 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 * @sched_list: list of drm_gpu_schedulers
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 *
 * Returns a pointer to the sched with the least load, or NULL if none of the
 * drm_gpu_schedulers are ready.
 */
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
		    unsigned int num_sched_list)
{
	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
	int i;
	unsigned int min_score = UINT_MAX, num_score;

	for (i = 0; i < num_sched_list; ++i) {
		sched = sched_list[i];

		if (!sched->ready) {
			DRM_WARN("scheduler %s is not ready, skipping",
				 sched->name);
			continue;
		}

		num_score = atomic_read(sched->score);
		if (num_score < min_score) {
			min_score = num_score;
			picked_sched = sched;
		}
	}

	return picked_sched;
}
EXPORT_SYMBOL(drm_sched_pick_best);
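
/*
 * drm_sched_pick_best() is typically used for load balancing when an
 * entity could run on any of several rings, e.g. (a sketch; ring_scheds
 * and num_rings are hypothetical driver state):
 *
 *	struct drm_gpu_scheduler *sched;
 *
 *	sched = drm_sched_pick_best(ring_scheds, num_rings);
 *	if (sched)
 *		drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
 *				      &sched, 1, NULL);
 */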

/**
 * drm_sched_blocked - check if the scheduler is blocked
 *
 * @sched: scheduler instance
 *
 * Returns true if blocked, otherwise false.
 */
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
	if (kthread_should_park()) {
		kthread_parkme();
		return true;
	}

	return false;
}

/**
 * drm_sched_main - main scheduler thread
 *
 * @param: scheduler instance
 *
 * Returns 0.
 */
static int drm_sched_main(void *param)
{
	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
	int r;

	sched_set_fifo_low(current);

	while (!kthread_should_stop()) {
		struct drm_sched_entity *entity = NULL;
		struct drm_sched_fence *s_fence;
		struct drm_sched_job *sched_job;
		struct dma_fence *fence;
		struct drm_sched_job *cleanup_job = NULL;

		wait_event_interruptible(sched->wake_up_worker,
					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
					 (!drm_sched_blocked(sched) &&
					  (entity = drm_sched_select_entity(sched))) ||
					 kthread_should_stop());

		if (cleanup_job)
			sched->ops->free_job(cleanup_job);

		if (!entity)
			continue;

		sched_job = drm_sched_entity_pop_job(entity);

		if (!sched_job) {
			complete_all(&entity->entity_idle);
			continue;
		}

		s_fence = sched_job->s_fence;

		atomic_inc(&sched->hw_rq_count);
		drm_sched_job_begin(sched_job);

		trace_drm_run_job(sched_job, entity);
		fence = sched->ops->run_job(sched_job);
		complete_all(&entity->entity_idle);
		drm_sched_fence_scheduled(s_fence);

		if (!IS_ERR_OR_NULL(fence)) {
			s_fence->parent = dma_fence_get(fence);
			/* Drop for the original kref_init of the fence */
			dma_fence_put(fence);

			r = dma_fence_add_callback(fence, &sched_job->cb,
						   drm_sched_job_done_cb);
			if (r == -ENOENT)
				drm_sched_job_done(sched_job);
			else if (r)
				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
					      r);
		} else {
			if (IS_ERR(fence))
				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));

			drm_sched_job_done(sched_job);
		}

		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * drm_sched_init - Init a gpu scheduler instance
 *
 * @sched: scheduler instance
 * @ops: backend operations for this scheduler
 * @hw_submission: number of hw submissions that can be in flight
 * @hang_limit: number of times to allow a job to hang before dropping it
 * @timeout: timeout value in jiffies for the scheduler
 * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
 *		used
 * @score: optional score atomic shared with other schedulers
 * @name: name used for debugging
 * @dev: target &struct device
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   unsigned hw_submission, unsigned hang_limit,
		   long timeout, struct workqueue_struct *timeout_wq,
		   atomic_t *score, const char *name, struct device *dev)
{
	int i, ret;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	sched->timeout_wq = timeout_wq ? : system_wq;
	sched->hang_limit = hang_limit;
	sched->score = score ? score : &sched->_score;
	sched->dev = dev;
	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
		drm_sched_rq_init(sched, &sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	INIT_LIST_HEAD(&sched->pending_list);
	spin_lock_init(&sched->job_list_lock);
	atomic_set(&sched->hw_rq_count, 0);
	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
	atomic_set(&sched->_score, 0);
	atomic64_set(&sched->job_id_count, 0);

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		ret = PTR_ERR(sched->thread);
		sched->thread = NULL;
		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
		return ret;
	}

	sched->ready = true;
	return 0;
}
EXPORT_SYMBOL(drm_sched_init);
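
/*
 * A minimal init/teardown pairing in a driver might look like this (a
 * sketch; the two NULLs fall back to the system workqueue for timeout
 * work and to the scheduler's own score counter):
 *
 *	ret = drm_sched_init(&ring->sched, &my_sched_ops,
 *			     64, 3, msecs_to_jiffies(500),
 *			     NULL, NULL, ring->name, dev);
 *	if (ret)
 *		return ret;
 *	...
 *	// on teardown, after the entities have been flushed
 *	drm_sched_fini(&ring->sched);
 */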

/**
 * drm_sched_fini - Destroy a gpu scheduler
 *
 * @sched: scheduler instance
 *
 * Tears down and cleans up the scheduler.
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
	struct drm_sched_entity *s_entity;
	int i;

	if (sched->thread)
		kthread_stop(sched->thread);

	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
		struct drm_sched_rq *rq = &sched->sched_rq[i];

		if (!rq)
			continue;

		spin_lock(&rq->lock);
		list_for_each_entry(s_entity, &rq->entities, list)
			/*
			 * Prevents reinsertion and marks job_queue as idle;
			 * the entity will be removed from the rq in
			 * drm_sched_entity_fini() eventually.
			 */
			s_entity->stopped = true;
		spin_unlock(&rq->lock);
	}

	/* Wake up everyone stuck in drm_sched_entity_flush for this scheduler */
	wake_up_all(&sched->job_scheduled);

	/* Confirm no work left behind accessing device structures */
	cancel_delayed_work_sync(&sched->work_tdr);

	sched->ready = false;
}
EXPORT_SYMBOL(drm_sched_fini);

/**
 * drm_sched_increase_karma - Update sched_entity guilty flag
 *
 * @bad: The job guilty of time out
 *
 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 * limit of the scheduler then the respective sched entity is marked guilty
 * and jobs from it will not be scheduled further.
 */
void drm_sched_increase_karma(struct drm_sched_job *bad)
{
	int i;
	struct drm_sched_entity *tmp;
	struct drm_sched_entity *entity;
	struct drm_gpu_scheduler *sched = bad->sched;

	/* Don't change @bad's karma if it's from the KERNEL RQ, because a GPU
	 * hang can corrupt kernel jobs (like VM updating jobs) too, but keep
	 * in mind that kernel jobs are always considered good.
	 */
	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
		atomic_inc(&bad->karma);

		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
		     i++) {
			struct drm_sched_rq *rq = &sched->sched_rq[i];

			spin_lock(&rq->lock);
			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
				if (bad->s_fence->scheduled.context ==
				    entity->fence_context) {
					if (entity->guilty)
						atomic_set(entity->guilty, 1);
					break;
				}
			}
			spin_unlock(&rq->lock);
			if (&entity->list != &rq->entities)
				break;
		}
	}
}
EXPORT_SYMBOL(drm_sched_increase_karma);