v6.13.7
   1/*
   2 * Copyright 2015 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24/**
  25 * DOC: Overview
  26 *
  27 * The GPU scheduler provides entities which allow userspace to push jobs
  28 * into software queues which are then scheduled on a hardware run queue.
  29 * The software queues have a priority among them. The scheduler selects the entities
  30 * from the run queue using a FIFO. The scheduler provides dependency handling
  31 * features among jobs. The driver is supposed to provide callback functions for
   32 * backend operations to the scheduler, like submitting a job to the hardware
   33 * run queue, returning the dependencies of a job, etc.
  34 *
  35 * The organisation of the scheduler is the following:
  36 *
  37 * 1. Each hw run queue has one scheduler
  38 * 2. Each scheduler has multiple run queues with different priorities
   39 *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
  40 * 3. Each scheduler run queue has a queue of entities to schedule
  41 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
  42 *    the hardware.
  43 *
  44 * The jobs in an entity are always scheduled in the order in which they were pushed.
  45 *
   46 * Note that once a job has been taken from the entity's queue and pushed to the
   47 * hardware, i.e. the pending queue, the entity must not be referenced anymore
   48 * through the job's entity pointer.
  49 */
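/*
 * As a rough illustration of the flow described above: a driver typically
 * creates an entity once per context and then, for every submission,
 * initializes, arms and pushes a job through that entity. A minimal sketch,
 * assuming a hypothetical driver job struct ("my_job") embedding
 * &struct drm_sched_job, with all error handling trimmed:
 *
 *	struct my_job {
 *		struct drm_sched_job base;
 *		// driver specific payload ...
 *	};
 *
 *	// once per context
 *	drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
 *			      &sched, 1, NULL);
 *
 *	// for every submission
 *	drm_sched_job_init(&job->base, &entity, 1, file_priv);
 *	// add dependencies here, see drm_sched_job_add_dependency() and friends
 *	drm_sched_job_arm(&job->base);
 *	drm_sched_entity_push_job(&job->base);
 */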
  50
  51/**
  52 * DOC: Flow Control
  53 *
  54 * The DRM GPU scheduler provides a flow control mechanism to regulate the rate
   55 * at which the jobs fetched from scheduler entities are executed.
  56 *
  57 * In this context the &drm_gpu_scheduler keeps track of a driver specified
  58 * credit limit representing the capacity of this scheduler and a credit count;
  59 * every &drm_sched_job carries a driver specified number of credits.
  60 *
  61 * Once a job is executed (but not yet finished), the job's credits contribute
  62 * to the scheduler's credit count until the job is finished. If by executing
  63 * one more job the scheduler's credit count would exceed the scheduler's
  64 * credit limit, the job won't be executed. Instead, the scheduler will wait
  65 * until the credit count has decreased enough to not overflow its credit limit.
  66 * This implies waiting for previously executed jobs.
  67 *
  68 * Optionally, drivers may register a callback (update_job_credits) provided by
  69 * struct drm_sched_backend_ops to update the job's credits dynamically. The
  70 * scheduler executes this callback every time the scheduler considers a job for
  71 * execution and subsequently checks whether the job fits the scheduler's credit
  72 * limit.
  73 */
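/*
 * A minimal sketch of how the optional callback could be wired up, assuming a
 * hypothetical driver that re-derives a job's credit count from the ring space
 * it still needs ("my_job" and my_job_ring_dwords() are placeholders, not a
 * real driver API):
 *
 *	static u32 my_update_job_credits(struct drm_sched_job *sched_job)
 *	{
 *		struct my_job *job = container_of(sched_job, struct my_job, base);
 *
 *		// must stay non-zero, see drm_sched_can_queue()
 *		return max_t(u32, my_job_ring_dwords(job), 1);
 *	}
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		// run_job, timedout_job, free_job, ...
 *		.update_job_credits = my_update_job_credits,
 *	};
 */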
  74
  75#include <linux/wait.h>
  76#include <linux/sched.h>
  77#include <linux/completion.h>
  78#include <linux/dma-resv.h>
  79#include <uapi/linux/sched/types.h>
  80
  81#include <drm/drm_print.h>
  82#include <drm/drm_gem.h>
  83#include <drm/drm_syncobj.h>
  84#include <drm/gpu_scheduler.h>
  85#include <drm/spsc_queue.h>
  86
  87#define CREATE_TRACE_POINTS
  88#include "gpu_scheduler_trace.h"
  89
  90#ifdef CONFIG_LOCKDEP
  91static struct lockdep_map drm_sched_lockdep_map = {
  92	.name = "drm_sched_lockdep_map"
  93};
  94#endif
  95
  96#define to_drm_sched_job(sched_job)		\
  97		container_of((sched_job), struct drm_sched_job, queue_node)
  98
  99int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
 100
 101/**
 102 * DOC: sched_policy (int)
  103 * Used to override the default scheduling policy for entities in a run queue.
 104 */
 105MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
 106module_param_named(sched_policy, drm_sched_policy, int, 0444);
 107
 108static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched)
 109{
 110	u32 credits;
 111
 112	drm_WARN_ON(sched, check_sub_overflow(sched->credit_limit,
 113					      atomic_read(&sched->credit_count),
 114					      &credits));
 115
 116	return credits;
 117}
 118
 119/**
 120 * drm_sched_can_queue -- Can we queue more to the hardware?
 121 * @sched: scheduler instance
 122 * @entity: the scheduler entity
 123 *
 124 * Return true if we can push at least one more job from @entity, false
 125 * otherwise.
 126 */
 127static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
 128				struct drm_sched_entity *entity)
 129{
 130	struct drm_sched_job *s_job;
 131
 132	s_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
 133	if (!s_job)
 134		return false;
 135
 136	if (sched->ops->update_job_credits) {
 137		s_job->credits = sched->ops->update_job_credits(s_job);
 138
 139		drm_WARN(sched, !s_job->credits,
 140			 "Jobs with zero credits bypass job-flow control.\n");
 141	}
 142
 143	/* If a job exceeds the credit limit, truncate it to the credit limit
 144	 * itself to guarantee forward progress.
 145	 */
 146	if (drm_WARN(sched, s_job->credits > sched->credit_limit,
 147		     "Jobs may not exceed the credit limit, truncate.\n"))
 148		s_job->credits = sched->credit_limit;
 149
 150	return drm_sched_available_credits(sched) >= s_job->credits;
 151}
 152
 153static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
 154							    const struct rb_node *b)
 155{
 156	struct drm_sched_entity *ent_a =  rb_entry((a), struct drm_sched_entity, rb_tree_node);
 157	struct drm_sched_entity *ent_b =  rb_entry((b), struct drm_sched_entity, rb_tree_node);
 158
 159	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
 160}
 161
 162static void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity,
 163					    struct drm_sched_rq *rq)
 164{
 165	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
 166		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
 167		RB_CLEAR_NODE(&entity->rb_tree_node);
 168	}
 169}
 170
 171void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity,
 172				     struct drm_sched_rq *rq,
 173				     ktime_t ts)
 174{
 175	/*
 176	 * Both locks need to be grabbed, one to protect from entity->rq change
 177	 * for entity from within concurrent drm_sched_entity_select_rq and the
 178	 * other to update the rb tree structure.
 179	 */
 180	lockdep_assert_held(&entity->lock);
 181	lockdep_assert_held(&rq->lock);
 182
 183	drm_sched_rq_remove_fifo_locked(entity, rq);
 184
 185	entity->oldest_job_waiting = ts;
 186
 187	rb_add_cached(&entity->rb_tree_node, &rq->rb_tree_root,
 188		      drm_sched_entity_compare_before);
 189}
 190
 191/**
 192 * drm_sched_rq_init - initialize a given run queue struct
 193 *
 194 * @sched: scheduler instance to associate with this run queue
 195 * @rq: scheduler run queue
 196 *
 197 * Initializes a scheduler runqueue.
 198 */
 199static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
 200			      struct drm_sched_rq *rq)
 201{
 202	spin_lock_init(&rq->lock);
 203	INIT_LIST_HEAD(&rq->entities);
 204	rq->rb_tree_root = RB_ROOT_CACHED;
 205	rq->current_entity = NULL;
 206	rq->sched = sched;
 207}
 208
 209/**
 210 * drm_sched_rq_add_entity - add an entity
 211 *
 212 * @rq: scheduler run queue
 213 * @entity: scheduler entity
 214 *
 215 * Adds a scheduler entity to the run queue.
 216 */
 217void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
 218			     struct drm_sched_entity *entity)
 219{
 220	lockdep_assert_held(&entity->lock);
 221	lockdep_assert_held(&rq->lock);
 222
 223	if (!list_empty(&entity->list))
 224		return;
 225
 226	atomic_inc(rq->sched->score);
 227	list_add_tail(&entity->list, &rq->entities);
 228}
 229
 230/**
 231 * drm_sched_rq_remove_entity - remove an entity
 232 *
 233 * @rq: scheduler run queue
 234 * @entity: scheduler entity
 235 *
 236 * Removes a scheduler entity from the run queue.
 237 */
 238void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 239				struct drm_sched_entity *entity)
 240{
 241	lockdep_assert_held(&entity->lock);
 242
 243	if (list_empty(&entity->list))
 244		return;
 245
 246	spin_lock(&rq->lock);
 247
 248	atomic_dec(rq->sched->score);
 249	list_del_init(&entity->list);
 250
 251	if (rq->current_entity == entity)
 252		rq->current_entity = NULL;
 253
 254	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
 255		drm_sched_rq_remove_fifo_locked(entity, rq);
 256
 257	spin_unlock(&rq->lock);
 258}
 259
 260/**
 261 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
 262 *
 263 * @sched: the gpu scheduler
 264 * @rq: scheduler run queue to check.
 265 *
 266 * Try to find the next ready entity.
 267 *
 268 * Return an entity if one is found; return an error-pointer (!NULL) if an
 269 * entity was ready, but the scheduler had insufficient credits to accommodate
 270 * its job; return NULL, if no ready entity was found.
 271 */
 272static struct drm_sched_entity *
 273drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched,
 274			      struct drm_sched_rq *rq)
 275{
 276	struct drm_sched_entity *entity;
 277
 278	spin_lock(&rq->lock);
 279
 280	entity = rq->current_entity;
 281	if (entity) {
 282		list_for_each_entry_continue(entity, &rq->entities, list) {
 283			if (drm_sched_entity_is_ready(entity)) {
 284				/* If we can't queue yet, preserve the current
 285				 * entity in terms of fairness.
 286				 */
 287				if (!drm_sched_can_queue(sched, entity)) {
 288					spin_unlock(&rq->lock);
 289					return ERR_PTR(-ENOSPC);
 290				}
 291
 292				rq->current_entity = entity;
 293				reinit_completion(&entity->entity_idle);
 294				spin_unlock(&rq->lock);
 295				return entity;
 296			}
 297		}
 298	}
 299
 300	list_for_each_entry(entity, &rq->entities, list) {
 301		if (drm_sched_entity_is_ready(entity)) {
 302			/* If we can't queue yet, preserve the current entity in
 303			 * terms of fairness.
 304			 */
 305			if (!drm_sched_can_queue(sched, entity)) {
 306				spin_unlock(&rq->lock);
 307				return ERR_PTR(-ENOSPC);
 308			}
 309
 310			rq->current_entity = entity;
 311			reinit_completion(&entity->entity_idle);
 312			spin_unlock(&rq->lock);
 313			return entity;
 314		}
 315
 316		if (entity == rq->current_entity)
 317			break;
 318	}
 319
 320	spin_unlock(&rq->lock);
 321
 322	return NULL;
 323}
 324
 325/**
 326 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
 327 *
 328 * @sched: the gpu scheduler
 329 * @rq: scheduler run queue to check.
 330 *
 331 * Find oldest waiting ready entity.
 332 *
 333 * Return an entity if one is found; return an error-pointer (!NULL) if an
 334 * entity was ready, but the scheduler had insufficient credits to accommodate
 335 * its job; return NULL, if no ready entity was found.
 336 */
 337static struct drm_sched_entity *
 338drm_sched_rq_select_entity_fifo(struct drm_gpu_scheduler *sched,
 339				struct drm_sched_rq *rq)
 340{
 341	struct rb_node *rb;
 342
 343	spin_lock(&rq->lock);
 344	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
 345		struct drm_sched_entity *entity;
 346
 347		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
 348		if (drm_sched_entity_is_ready(entity)) {
 349			/* If we can't queue yet, preserve the current entity in
 350			 * terms of fairness.
 351			 */
 352			if (!drm_sched_can_queue(sched, entity)) {
 353				spin_unlock(&rq->lock);
 354				return ERR_PTR(-ENOSPC);
 355			}
 356
 357			reinit_completion(&entity->entity_idle);
 358			break;
 359		}
 360	}
 361	spin_unlock(&rq->lock);
 362
 363	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
 364}
 365
 366/**
 367 * drm_sched_run_job_queue - enqueue run-job work
 368 * @sched: scheduler instance
 369 */
 370static void drm_sched_run_job_queue(struct drm_gpu_scheduler *sched)
 371{
 372	if (!READ_ONCE(sched->pause_submit))
 373		queue_work(sched->submit_wq, &sched->work_run_job);
 374}
 375
 376/**
 377 * __drm_sched_run_free_queue - enqueue free-job work
 378 * @sched: scheduler instance
 379 */
 380static void __drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
 381{
 382	if (!READ_ONCE(sched->pause_submit))
 383		queue_work(sched->submit_wq, &sched->work_free_job);
 384}
 385
 386/**
 387 * drm_sched_run_free_queue - enqueue free-job work if ready
 388 * @sched: scheduler instance
 389 */
 390static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
 391{
 392	struct drm_sched_job *job;
 393
 394	spin_lock(&sched->job_list_lock);
 395	job = list_first_entry_or_null(&sched->pending_list,
 396				       struct drm_sched_job, list);
 397	if (job && dma_fence_is_signaled(&job->s_fence->finished))
 398		__drm_sched_run_free_queue(sched);
 399	spin_unlock(&sched->job_list_lock);
 400}
 401
 402/**
 403 * drm_sched_job_done - complete a job
 404 * @s_job: pointer to the job which is done
 405 *
 406 * Finish the job's fence and wake up the worker thread.
 407 */
 408static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
 409{
 410	struct drm_sched_fence *s_fence = s_job->s_fence;
 411	struct drm_gpu_scheduler *sched = s_fence->sched;
 412
 413	atomic_sub(s_job->credits, &sched->credit_count);
 414	atomic_dec(sched->score);
 415
 416	trace_drm_sched_process_job(s_fence);
 417
 418	dma_fence_get(&s_fence->finished);
 419	drm_sched_fence_finished(s_fence, result);
 420	dma_fence_put(&s_fence->finished);
 421	__drm_sched_run_free_queue(sched);
 422}
 423
 424/**
 425 * drm_sched_job_done_cb - the callback for a done job
 426 * @f: fence
 427 * @cb: fence callbacks
 428 */
 429static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
 430{
 431	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
 432
 433	drm_sched_job_done(s_job, f->error);
 434}
 435
 436/**
 437 * drm_sched_start_timeout - start timeout for reset worker
 438 *
 439 * @sched: scheduler instance to start the worker for
 440 *
 441 * Start the timeout for the given scheduler.
 442 */
 443static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
 444{
 445	lockdep_assert_held(&sched->job_list_lock);
 446
 447	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
 448	    !list_empty(&sched->pending_list))
 449		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
 450}
 451
 452static void drm_sched_start_timeout_unlocked(struct drm_gpu_scheduler *sched)
 453{
 454	spin_lock(&sched->job_list_lock);
 455	drm_sched_start_timeout(sched);
 456	spin_unlock(&sched->job_list_lock);
 457}
 458
 459/**
 460 * drm_sched_tdr_queue_imm: - immediately start job timeout handler
 461 *
 462 * @sched: scheduler for which the timeout handling should be started.
 463 *
 464 * Start timeout handling immediately for the named scheduler.
 465 */
 466void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched)
 467{
 468	spin_lock(&sched->job_list_lock);
 469	sched->timeout = 0;
 470	drm_sched_start_timeout(sched);
 471	spin_unlock(&sched->job_list_lock);
 472}
 473EXPORT_SYMBOL(drm_sched_tdr_queue_imm);
 474
 475/**
 476 * drm_sched_fault - immediately start timeout handler
 477 *
 478 * @sched: scheduler where the timeout handling should be started.
 479 *
 480 * Start timeout handling immediately when the driver detects a hardware fault.
 481 */
 482void drm_sched_fault(struct drm_gpu_scheduler *sched)
 483{
 484	if (sched->timeout_wq)
 485		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
 486}
 487EXPORT_SYMBOL(drm_sched_fault);
 488
 489/**
 490 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 491 *
 492 * @sched: scheduler instance for which to suspend the timeout
 493 *
 494 * Suspend the delayed work timeout for the scheduler. This is done by
  495 * modifying the delayed work timeout to an arbitrarily large value,
  496 * MAX_SCHEDULE_TIMEOUT in this case.
  497 *
  498 * Returns the remaining timeout.
 499 *
 500 */
 501unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
 502{
 503	unsigned long sched_timeout, now = jiffies;
 504
 505	sched_timeout = sched->work_tdr.timer.expires;
 506
 507	/*
 508	 * Modify the timeout to an arbitrarily large value. This also prevents
  509	 * the timeout from being restarted when new submissions arrive.
 510	 */
 511	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
 512			&& time_after(sched_timeout, now))
 513		return sched_timeout - now;
 514	else
 515		return sched->timeout;
 516}
 517EXPORT_SYMBOL(drm_sched_suspend_timeout);
 518
 519/**
 520 * drm_sched_resume_timeout - Resume scheduler job timeout
 521 *
 522 * @sched: scheduler instance for which to resume the timeout
 523 * @remaining: remaining timeout
 524 *
 525 * Resume the delayed work timeout for the scheduler.
 526 */
 527void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
 528		unsigned long remaining)
 529{
 530	spin_lock(&sched->job_list_lock);
 531
 532	if (list_empty(&sched->pending_list))
 533		cancel_delayed_work(&sched->work_tdr);
 534	else
 535		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);
 536
 537	spin_unlock(&sched->job_list_lock);
 538}
 539EXPORT_SYMBOL(drm_sched_resume_timeout);
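/*
 * A sketch of how the suspend/resume pair above is typically used: bracket a
 * driver operation during which the job timeout must not fire (the ring
 * structure is a placeholder for the driver's own context):
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(&ring->sched);
 *	// ... driver work during which the TDR must not trigger ...
 *	drm_sched_resume_timeout(&ring->sched, remaining);
 */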
 540
 541static void drm_sched_job_begin(struct drm_sched_job *s_job)
 542{
 543	struct drm_gpu_scheduler *sched = s_job->sched;
 544
 545	spin_lock(&sched->job_list_lock);
 546	list_add_tail(&s_job->list, &sched->pending_list);
 547	drm_sched_start_timeout(sched);
 548	spin_unlock(&sched->job_list_lock);
 549}
 550
 551static void drm_sched_job_timedout(struct work_struct *work)
 552{
 553	struct drm_gpu_scheduler *sched;
 554	struct drm_sched_job *job;
 555	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;
 556
 557	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
 558
 559	/* Protects against concurrent deletion in drm_sched_get_finished_job */
 560	spin_lock(&sched->job_list_lock);
 561	job = list_first_entry_or_null(&sched->pending_list,
 562				       struct drm_sched_job, list);
 563
 564	if (job) {
 565		/*
  566		 * Remove the bad job so it cannot be freed by concurrent
  567		 * drm_sched_cleanup_jobs. It will be reinserted after sched->thread
  568		 * is parked, at which point it is safe.
 569		 */
 570		list_del_init(&job->list);
 571		spin_unlock(&sched->job_list_lock);
 572
 573		status = job->sched->ops->timedout_job(job);
 574
 575		/*
 576		 * Guilty job did complete and hence needs to be manually removed
 577		 * See drm_sched_stop doc.
 578		 */
 579		if (sched->free_guilty) {
 580			job->sched->ops->free_job(job);
 581			sched->free_guilty = false;
 582		}
 583	} else {
 584		spin_unlock(&sched->job_list_lock);
 585	}
 586
 587	if (status != DRM_GPU_SCHED_STAT_ENODEV)
 588		drm_sched_start_timeout_unlocked(sched);
 589}
 590
 591/**
 592 * drm_sched_stop - stop the scheduler
 593 *
 594 * @sched: scheduler instance
 595 * @bad: job which caused the time out
 596 *
  597 * Stops the scheduler and also removes and frees all completed jobs.
  598 * Note: the bad job will not be freed, as it might be used later, and so it is
  599 * the caller's responsibility to release it manually if it is not part of the
  600 * pending list any more.
 601 *
 602 * This function is typically used for reset recovery (see the docu of
 603 * drm_sched_backend_ops.timedout_job() for details). Do not call it for
 604 * scheduler teardown, i.e., before calling drm_sched_fini().
 605 */
 606void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 607{
 608	struct drm_sched_job *s_job, *tmp;
 609
 610	drm_sched_wqueue_stop(sched);
 611
 612	/*
  613	 * Reinsert the bad job here - now it's safe as
 614	 * drm_sched_get_finished_job cannot race against us and release the
 615	 * bad job at this point - we parked (waited for) any in progress
 616	 * (earlier) cleanups and drm_sched_get_finished_job will not be called
 617	 * now until the scheduler thread is unparked.
 618	 */
 619	if (bad && bad->sched == sched)
 620		/*
 621		 * Add at the head of the queue to reflect it was the earliest
 622		 * job extracted.
 623		 */
 624		list_add(&bad->list, &sched->pending_list);
 625
 626	/*
  627	 * Iterate the job list from the later to the earlier ones and either deactivate
  628	 * their HW callbacks or remove them from the pending list if they have already
  629	 * signaled.
  630	 * This iteration is thread safe as the sched thread is stopped.
 631	 */
 632	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
 633					 list) {
 634		if (s_job->s_fence->parent &&
 635		    dma_fence_remove_callback(s_job->s_fence->parent,
 636					      &s_job->cb)) {
 637			dma_fence_put(s_job->s_fence->parent);
 638			s_job->s_fence->parent = NULL;
 639			atomic_sub(s_job->credits, &sched->credit_count);
 640		} else {
 641			/*
 642			 * remove job from pending_list.
 643			 * Locking here is for concurrent resume timeout
 644			 */
 645			spin_lock(&sched->job_list_lock);
 646			list_del_init(&s_job->list);
 647			spin_unlock(&sched->job_list_lock);
 648
 649			/*
 650			 * Wait for job's HW fence callback to finish using s_job
 651			 * before releasing it.
 652			 *
  653			 * The job is still alive, so the fence refcount is at least 1.
 654			 */
 655			dma_fence_wait(&s_job->s_fence->finished, false);
 656
 657			/*
 658			 * We must keep bad job alive for later use during
 659			 * recovery by some of the drivers but leave a hint
 660			 * that the guilty job must be released.
 661			 */
 662			if (bad != s_job)
 663				sched->ops->free_job(s_job);
 664			else
 665				sched->free_guilty = true;
 666		}
 667	}
 668
 669	/*
  670	 * Stop the pending timer in flight, as we re-arm it in drm_sched_start. This
  671	 * avoids the pending timeout work in progress firing right away after
  672	 * this TDR finishes and before the newly restarted jobs have had a
  673	 * chance to complete.
 674	 */
 675	cancel_delayed_work(&sched->work_tdr);
 676}
 677EXPORT_SYMBOL(drm_sched_stop);
 678
 679/**
 680 * drm_sched_start - recover jobs after a reset
 681 *
 682 * @sched: scheduler instance
 683 * @errno: error to set on the pending fences
 684 *
 685 * This function is typically used for reset recovery (see the docu of
 686 * drm_sched_backend_ops.timedout_job() for details). Do not call it for
 687 * scheduler startup. The scheduler itself is fully operational after
 688 * drm_sched_init() succeeded.
 689 */
 690void drm_sched_start(struct drm_gpu_scheduler *sched, int errno)
 691{
 692	struct drm_sched_job *s_job, *tmp;
 693
 694	/*
 695	 * Locking the list is not required here as the sched thread is parked
  696	 * so no new jobs are being inserted or removed. Also, concurrent
  697	 * GPU recoveries can't run in parallel.
 698	 */
 699	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
 700		struct dma_fence *fence = s_job->s_fence->parent;
 701
 702		atomic_add(s_job->credits, &sched->credit_count);
 703
 704		if (!fence) {
 705			drm_sched_job_done(s_job, errno ?: -ECANCELED);
 706			continue;
 707		}
 708
 709		if (dma_fence_add_callback(fence, &s_job->cb,
 710					   drm_sched_job_done_cb))
 711			drm_sched_job_done(s_job, fence->error ?: errno);
 712	}
 713
 714	drm_sched_start_timeout_unlocked(sched);
 715	drm_sched_wqueue_start(sched);
 716}
 717EXPORT_SYMBOL(drm_sched_start);
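/*
 * A rough sketch of the reset-recovery sequence referred to above, as it might
 * look inside a driver's &drm_sched_backend_ops.timedout_job callback.
 * my_device_reset() stands in for the driver's own hardware reset; whether and
 * how jobs are re-submitted afterwards is driver policy:
 *
 *	static enum drm_gpu_sched_stat
 *	my_timedout_job(struct drm_sched_job *sched_job)
 *	{
 *		struct drm_gpu_scheduler *sched = sched_job->sched;
 *
 *		drm_sched_stop(sched, sched_job);
 *		my_device_reset(sched_job);		// driver specific
 *		drm_sched_start(sched, 0);
 *
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 */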
 718
 719/**
 720 * drm_sched_resubmit_jobs - Deprecated, don't use in new code!
 721 *
 722 * @sched: scheduler instance
 723 *
  724 * Re-submitting jobs was a concept AMD came up with as a cheap way to implement
  725 * recovery after a job timeout.
  726 *
  727 * This turned out not to work very well. First of all, there are many
  728 * problems with the dma_fence implementation and requirements. Either the
  729 * implementation risks deadlocks with core memory management or violates
  730 * documented implementation details of the dma_fence object.
 731 *
 732 * Drivers can still save and restore their state for recovery operations, but
 733 * we shouldn't make this a general scheduler feature around the dma_fence
 734 * interface.
 735 */
 736void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
 737{
 738	struct drm_sched_job *s_job, *tmp;
 739	uint64_t guilty_context;
 740	bool found_guilty = false;
 741	struct dma_fence *fence;
 742
 743	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
 744		struct drm_sched_fence *s_fence = s_job->s_fence;
 745
 746		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
 747			found_guilty = true;
 748			guilty_context = s_job->s_fence->scheduled.context;
 749		}
 750
 751		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
 752			dma_fence_set_error(&s_fence->finished, -ECANCELED);
 753
 754		fence = sched->ops->run_job(s_job);
 755
 756		if (IS_ERR_OR_NULL(fence)) {
 757			if (IS_ERR(fence))
 758				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
 759
 760			s_job->s_fence->parent = NULL;
 761		} else {
 762
 763			s_job->s_fence->parent = dma_fence_get(fence);
 764
  765			/* Drop for original kref_init */
 766			dma_fence_put(fence);
 767		}
 768	}
 769}
 770EXPORT_SYMBOL(drm_sched_resubmit_jobs);
 771
 772/**
 773 * drm_sched_job_init - init a scheduler job
 774 * @job: scheduler job to init
 775 * @entity: scheduler entity to use
  776 * @credits: the number of credits this job contributes to the scheduler's
  777 * credit limit
 778 * @owner: job owner for debugging
 779 *
 780 * Refer to drm_sched_entity_push_job() documentation
 781 * for locking considerations.
 782 *
  783 * Drivers must make sure to call drm_sched_job_cleanup() if this function returns
  784 * successfully, even when @job is aborted before drm_sched_job_arm() is called.
 785 *
 786 * Note that this function does not assign a valid value to each struct member
 787 * of struct drm_sched_job. Take a look at that struct's documentation to see
 788 * who sets which struct member with what lifetime.
 789 *
 790 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
  791 * has died, which can mean that there's no valid runqueue for an @entity.
  792 * This function returns -ENOENT in this case (which probably should be -EIO as
  793 * a more meaningful return value).
 794 *
 795 * Returns 0 for success, negative error code otherwise.
 796 */
 797int drm_sched_job_init(struct drm_sched_job *job,
 798		       struct drm_sched_entity *entity,
 799		       u32 credits, void *owner)
 800{
 801	if (!entity->rq) {
  802		/* This will most likely be followed by missing frames
  803		 * or worse--a blank screen--so leave a trail in the
  804		 * logs to make this easier to debug.
  805		 */
 806		drm_err(job->sched, "%s: entity has no rq!\n", __func__);
 807		return -ENOENT;
 808	}
 809
 810	if (unlikely(!credits)) {
 811		pr_err("*ERROR* %s: credits cannot be 0!\n", __func__);
 812		return -EINVAL;
 813	}
 814
 815	/*
 816	 * We don't know for sure how the user has allocated. Thus, zero the
 817	 * struct so that unallowed (i.e., too early) usage of pointers that
 818	 * this function does not set is guaranteed to lead to a NULL pointer
 819	 * exception instead of UB.
 820	 */
 821	memset(job, 0, sizeof(*job));
 822
 823	job->entity = entity;
 824	job->credits = credits;
 825	job->s_fence = drm_sched_fence_alloc(entity, owner);
 826	if (!job->s_fence)
 827		return -ENOMEM;
 828
 829	INIT_LIST_HEAD(&job->list);
 830
 831	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
 832
 833	return 0;
 834}
 835EXPORT_SYMBOL(drm_sched_job_init);
 836
 837/**
 838 * drm_sched_job_arm - arm a scheduler job for execution
 839 * @job: scheduler job to arm
 840 *
 841 * This arms a scheduler job for execution. Specifically it initializes the
 842 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
 843 * or other places that need to track the completion of this job.
 844 *
 845 * Refer to drm_sched_entity_push_job() documentation for locking
 846 * considerations.
 847 *
 848 * This can only be called if drm_sched_job_init() succeeded.
 849 */
 850void drm_sched_job_arm(struct drm_sched_job *job)
 851{
 852	struct drm_gpu_scheduler *sched;
 853	struct drm_sched_entity *entity = job->entity;
 854
 855	BUG_ON(!entity);
 856	drm_sched_entity_select_rq(entity);
 857	sched = entity->rq->sched;
 858
 859	job->sched = sched;
 860	job->s_priority = entity->priority;
 861	job->id = atomic64_inc_return(&sched->job_id_count);
 862
 863	drm_sched_fence_init(job->s_fence, job->entity);
 864}
 865EXPORT_SYMBOL(drm_sched_job_arm);
 866
 867/**
 868 * drm_sched_job_add_dependency - adds the fence as a job dependency
 869 * @job: scheduler job to add the dependencies to
 870 * @fence: the dma_fence to add to the list of dependencies.
 871 *
 872 * Note that @fence is consumed in both the success and error cases.
 873 *
 874 * Returns:
 875 * 0 on success, or an error on failing to expand the array.
 876 */
 877int drm_sched_job_add_dependency(struct drm_sched_job *job,
 878				 struct dma_fence *fence)
 879{
 880	struct dma_fence *entry;
 881	unsigned long index;
 882	u32 id = 0;
 883	int ret;
 884
 885	if (!fence)
 886		return 0;
 887
 888	/* Deduplicate if we already depend on a fence from the same context.
 889	 * This lets the size of the array of deps scale with the number of
 890	 * engines involved, rather than the number of BOs.
 891	 */
 892	xa_for_each(&job->dependencies, index, entry) {
 893		if (entry->context != fence->context)
 894			continue;
 895
 896		if (dma_fence_is_later(fence, entry)) {
 897			dma_fence_put(entry);
 898			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
 899		} else {
 900			dma_fence_put(fence);
 901		}
 902		return 0;
 903	}
 904
 905	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
 906	if (ret != 0)
 907		dma_fence_put(fence);
 908
 909	return ret;
 910}
 911EXPORT_SYMBOL(drm_sched_job_add_dependency);
 912
 913/**
 914 * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
 915 * @job: scheduler job to add the dependencies to
 916 * @file: drm file private pointer
 917 * @handle: syncobj handle to lookup
 918 * @point: timeline point
 919 *
 920 * This adds the fence matching the given syncobj to @job.
 921 *
 922 * Returns:
 923 * 0 on success, or an error on failing to expand the array.
 924 */
 925int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
 926					 struct drm_file *file,
 927					 u32 handle,
 928					 u32 point)
 929{
 930	struct dma_fence *fence;
 931	int ret;
 932
 933	ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
 934	if (ret)
 935		return ret;
 936
 937	return drm_sched_job_add_dependency(job, fence);
 938}
 939EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);
 940
 941/**
 942 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
 943 * @job: scheduler job to add the dependencies to
 944 * @resv: the dma_resv object to get the fences from
 945 * @usage: the dma_resv_usage to use to filter the fences
 946 *
 947 * This adds all fences matching the given usage from @resv to @job.
 948 * Must be called with the @resv lock held.
 949 *
 950 * Returns:
 951 * 0 on success, or an error on failing to expand the array.
 952 */
 953int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
 954					struct dma_resv *resv,
 955					enum dma_resv_usage usage)
 956{
 957	struct dma_resv_iter cursor;
 958	struct dma_fence *fence;
 959	int ret;
 960
 961	dma_resv_assert_held(resv);
 962
 963	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
 964		/* Make sure to grab an additional ref on the added fence */
 965		dma_fence_get(fence);
 966		ret = drm_sched_job_add_dependency(job, fence);
 967		if (ret) {
 968			dma_fence_put(fence);
 969			return ret;
 970		}
 971	}
 972	return 0;
 973}
 974EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
 975
 976/**
 977 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
 978 *   dependencies
 979 * @job: scheduler job to add the dependencies to
 980 * @obj: the gem object to add new dependencies from.
 981 * @write: whether the job might write the object (so we need to depend on
 982 * shared fences in the reservation object).
 983 *
 984 * This should be called after drm_gem_lock_reservations() on your array of
 985 * GEM objects used in the job but before updating the reservations with your
 986 * own fences.
 987 *
 988 * Returns:
 989 * 0 on success, or an error on failing to expand the array.
 990 */
 991int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
 992					    struct drm_gem_object *obj,
 993					    bool write)
 994{
 995	return drm_sched_job_add_resv_dependencies(job, obj->resv,
 996						   dma_resv_usage_rw(write));
 997}
 998EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
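/*
 * A minimal sketch of how the dependency helpers above are typically combined
 * during submission, between drm_sched_job_init() and drm_sched_job_arm() and
 * with the reservations locked. The BO array, syncobj handle and "write" flag
 * are placeholders for whatever the driver's submit ioctl carries:
 *
 *	for (i = 0; i < num_bos; i++) {
 *		ret = drm_sched_job_add_implicit_dependencies(&job->base,
 *							      bos[i], write);
 *		if (ret)
 *			goto err_cleanup;	// drm_sched_job_cleanup()
 *	}
 *
 *	ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv,
 *						   in_syncobj, in_point);
 *	if (ret)
 *		goto err_cleanup;
 */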
 999
1000/**
1001 * drm_sched_job_cleanup - clean up scheduler job resources
1002 * @job: scheduler job to clean up
1003 *
1004 * Cleans up the resources allocated with drm_sched_job_init().
1005 *
1006 * Drivers should call this from their error unwind code if @job is aborted
1007 * before drm_sched_job_arm() is called.
1008 *
1009 * After that point of no return @job is committed to be executed by the
1010 * scheduler, and this function should be called from the
1011 * &drm_sched_backend_ops.free_job callback.
1012 */
1013void drm_sched_job_cleanup(struct drm_sched_job *job)
1014{
1015	struct dma_fence *fence;
1016	unsigned long index;
1017
1018	if (kref_read(&job->s_fence->finished.refcount)) {
1019		/* drm_sched_job_arm() has been called */
1020		dma_fence_put(&job->s_fence->finished);
1021	} else {
1022		/* aborted job before committing to run it */
1023		drm_sched_fence_free(job->s_fence);
1024	}
1025
1026	job->s_fence = NULL;
1027
1028	xa_for_each(&job->dependencies, index, fence) {
1029		dma_fence_put(fence);
1030	}
1031	xa_destroy(&job->dependencies);
1032
1033}
1034EXPORT_SYMBOL(drm_sched_job_cleanup);
1035
1036/**
1037 * drm_sched_wakeup - Wake up the scheduler if it is ready to queue
1038 * @sched: scheduler instance
1039 *
1040 * Wake up the scheduler if we can queue jobs.
1041 */
1042void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
1043{
1044	drm_sched_run_job_queue(sched);
1045}
1046
1047/**
1048 * drm_sched_select_entity - Select next entity to process
1049 *
1050 * @sched: scheduler instance
1051 *
1052 * Return an entity to process or NULL if none are found.
1053 *
 1054 * Note that we break out of the for-loop when "entity" is non-null, which can
 1055 * also be an error-pointer--this ensures we don't process lower priority
 1056 * run-queues. See the comments in the respective functions called above.
1057 */
1058static struct drm_sched_entity *
1059drm_sched_select_entity(struct drm_gpu_scheduler *sched)
1060{
1061	struct drm_sched_entity *entity;
1062	int i;
1063
1064	/* Start with the highest priority.
1065	 */
1066	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1067		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
1068			drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
1069			drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
1070		if (entity)
1071			break;
1072	}
1073
1074	return IS_ERR(entity) ? NULL : entity;
1075}
1076
1077/**
1078 * drm_sched_get_finished_job - fetch the next finished job to be destroyed
1079 *
1080 * @sched: scheduler instance
1081 *
 1082 * Returns the next finished job from the pending list (if there is one)
 1083 * that is ready to be destroyed.
1084 */
1085static struct drm_sched_job *
1086drm_sched_get_finished_job(struct drm_gpu_scheduler *sched)
1087{
1088	struct drm_sched_job *job, *next;
1089
1090	spin_lock(&sched->job_list_lock);
1091
1092	job = list_first_entry_or_null(&sched->pending_list,
1093				       struct drm_sched_job, list);
1094
1095	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
1096		/* remove job from pending_list */
1097		list_del_init(&job->list);
1098
1099		/* cancel this job's TO timer */
1100		cancel_delayed_work(&sched->work_tdr);
1101		/* make the scheduled timestamp more accurate */
1102		next = list_first_entry_or_null(&sched->pending_list,
1103						typeof(*next), list);
1104
1105		if (next) {
1106			if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
1107				     &next->s_fence->scheduled.flags))
1108				next->s_fence->scheduled.timestamp =
1109					dma_fence_timestamp(&job->s_fence->finished);
1110			/* start TO timer for next job */
1111			drm_sched_start_timeout(sched);
1112		}
1113	} else {
1114		job = NULL;
1115	}
1116
1117	spin_unlock(&sched->job_list_lock);
1118
1119	return job;
1120}
1121
1122/**
1123 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
1124 * @sched_list: list of drm_gpu_schedulers
1125 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
1126 *
 1127 * Returns a pointer to the sched with the least load, or NULL if none of the
 1128 * drm_gpu_schedulers are ready.
1129 */
1130struct drm_gpu_scheduler *
1131drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
1132		     unsigned int num_sched_list)
1133{
1134	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
1135	int i;
1136	unsigned int min_score = UINT_MAX, num_score;
1137
1138	for (i = 0; i < num_sched_list; ++i) {
1139		sched = sched_list[i];
1140
1141		if (!sched->ready) {
1142			DRM_WARN("scheduler %s is not ready, skipping",
1143				 sched->name);
1144			continue;
1145		}
1146
1147		num_score = atomic_read(sched->score);
1148		if (num_score < min_score) {
1149			min_score = num_score;
1150			picked_sched = sched;
1151		}
1152	}
1153
1154	return picked_sched;
1155}
1156EXPORT_SYMBOL(drm_sched_pick_best);
1157
1158/**
1159 * drm_sched_free_job_work - worker to call free_job
1160 *
1161 * @w: free job work
1162 */
1163static void drm_sched_free_job_work(struct work_struct *w)
1164{
1165	struct drm_gpu_scheduler *sched =
1166		container_of(w, struct drm_gpu_scheduler, work_free_job);
1167	struct drm_sched_job *job;
1168
1169	if (READ_ONCE(sched->pause_submit))
1170		return;
1171
1172	job = drm_sched_get_finished_job(sched);
1173	if (job)
1174		sched->ops->free_job(job);
1175
1176	drm_sched_run_free_queue(sched);
1177	drm_sched_run_job_queue(sched);
1178}
1179
1180/**
1181 * drm_sched_run_job_work - worker to call run_job
1182 *
1183 * @w: run job work
1184 */
1185static void drm_sched_run_job_work(struct work_struct *w)
1186{
1187	struct drm_gpu_scheduler *sched =
1188		container_of(w, struct drm_gpu_scheduler, work_run_job);
1189	struct drm_sched_entity *entity;
1190	struct dma_fence *fence;
1191	struct drm_sched_fence *s_fence;
1192	struct drm_sched_job *sched_job;
1193	int r;
1194
1195	if (READ_ONCE(sched->pause_submit))
1196		return;
1197
1198	/* Find entity with a ready job */
1199	entity = drm_sched_select_entity(sched);
1200	if (!entity)
1201		return;	/* No more work */
1202
1203	sched_job = drm_sched_entity_pop_job(entity);
1204	if (!sched_job) {
1205		complete_all(&entity->entity_idle);
1206		drm_sched_run_job_queue(sched);
1207		return;
1208	}
1209
1210	s_fence = sched_job->s_fence;
1211
1212	atomic_add(sched_job->credits, &sched->credit_count);
1213	drm_sched_job_begin(sched_job);
1214
1215	trace_drm_run_job(sched_job, entity);
1216	fence = sched->ops->run_job(sched_job);
1217	complete_all(&entity->entity_idle);
1218	drm_sched_fence_scheduled(s_fence, fence);
1219
1220	if (!IS_ERR_OR_NULL(fence)) {
1221		/* Drop for original kref_init of the fence */
1222		dma_fence_put(fence);
1223
1224		r = dma_fence_add_callback(fence, &sched_job->cb,
1225					   drm_sched_job_done_cb);
1226		if (r == -ENOENT)
1227			drm_sched_job_done(sched_job, fence->error);
1228		else if (r)
1229			DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r);
1230	} else {
1231		drm_sched_job_done(sched_job, IS_ERR(fence) ?
1232				   PTR_ERR(fence) : 0);
1233	}
1234
1235	wake_up(&sched->job_scheduled);
1236	drm_sched_run_job_queue(sched);
1237}
1238
1239/**
1240 * drm_sched_init - Init a gpu scheduler instance
1241 *
1242 * @sched: scheduler instance
1243 * @ops: backend operations for this scheduler
1244 * @submit_wq: workqueue to use for submission. If NULL, an ordered wq is
1245 *	       allocated and used
1246 * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT
1247 * @credit_limit: the number of credits this scheduler can hold from all jobs
1248 * @hang_limit: number of times to allow a job to hang before dropping it
1249 * @timeout: timeout value in jiffies for the scheduler
1250 * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
1251 *		used
1252 * @score: optional score atomic shared with other schedulers
1253 * @name: name used for debugging
1254 * @dev: target &struct device
1255 *
1256 * Return 0 on success, otherwise error code.
1257 */
1258int drm_sched_init(struct drm_gpu_scheduler *sched,
1259		   const struct drm_sched_backend_ops *ops,
1260		   struct workqueue_struct *submit_wq,
1261		   u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
1262		   long timeout, struct workqueue_struct *timeout_wq,
1263		   atomic_t *score, const char *name, struct device *dev)
1264{
1265	int i;
1266
1267	sched->ops = ops;
1268	sched->credit_limit = credit_limit;
1269	sched->name = name;
1270	sched->timeout = timeout;
1271	sched->timeout_wq = timeout_wq ? : system_wq;
1272	sched->hang_limit = hang_limit;
1273	sched->score = score ? score : &sched->_score;
1274	sched->dev = dev;
1275
1276	if (num_rqs > DRM_SCHED_PRIORITY_COUNT) {
 1277		/* This is a gross violation--tell drivers what the problem is.
1278		 */
1279		drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
1280			__func__);
1281		return -EINVAL;
1282	} else if (sched->sched_rq) {
1283		/* Not an error, but warn anyway so drivers can
 1284		 * fine-tune their DRM calling order, and return since all
 1285		 * is good.
1286		 */
1287		drm_warn(sched, "%s: scheduler already initialized!\n", __func__);
1288		return 0;
1289	}
1290
1291	if (submit_wq) {
1292		sched->submit_wq = submit_wq;
1293		sched->own_submit_wq = false;
1294	} else {
1295#ifdef CONFIG_LOCKDEP
1296		sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name,
1297								       WQ_MEM_RECLAIM,
1298								       &drm_sched_lockdep_map);
1299#else
1300		sched->submit_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
1301#endif
1302		if (!sched->submit_wq)
1303			return -ENOMEM;
1304
1305		sched->own_submit_wq = true;
1306	}
1307
1308	sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
1309					GFP_KERNEL | __GFP_ZERO);
1310	if (!sched->sched_rq)
1311		goto Out_check_own;
1312	sched->num_rqs = num_rqs;
1313	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1314		sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
1315		if (!sched->sched_rq[i])
1316			goto Out_unroll;
1317		drm_sched_rq_init(sched, sched->sched_rq[i]);
1318	}
1319
1320	init_waitqueue_head(&sched->job_scheduled);
1321	INIT_LIST_HEAD(&sched->pending_list);
1322	spin_lock_init(&sched->job_list_lock);
1323	atomic_set(&sched->credit_count, 0);
1324	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
1325	INIT_WORK(&sched->work_run_job, drm_sched_run_job_work);
1326	INIT_WORK(&sched->work_free_job, drm_sched_free_job_work);
1327	atomic_set(&sched->_score, 0);
1328	atomic64_set(&sched->job_id_count, 0);
1329	sched->pause_submit = false;
1330
1331	sched->ready = true;
1332	return 0;
1333Out_unroll:
1334	for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
1335		kfree(sched->sched_rq[i]);
1336
1337	kfree(sched->sched_rq);
1338	sched->sched_rq = NULL;
1339Out_check_own:
1340	if (sched->own_submit_wq)
1341		destroy_workqueue(sched->submit_wq);
1342	drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
1343	return -ENOMEM;
1344}
1345EXPORT_SYMBOL(drm_sched_init);
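/*
 * A minimal sketch of a driver instantiating one scheduler per hardware ring,
 * assuming hypothetical my_run_job/my_timedout_job/my_free_job callbacks and a
 * 10 second timeout (all values are illustrative, not recommendations):
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 *
 *	ret = drm_sched_init(&ring->sched, &my_sched_ops,
 *			     NULL,			// allocate an ordered submit_wq
 *			     DRM_SCHED_PRIORITY_COUNT,	// num_rqs
 *			     64,			// credit_limit
 *			     3,				// hang_limit
 *			     msecs_to_jiffies(10000),	// timeout
 *			     NULL,			// timeout_wq: use system_wq
 *			     NULL,			// score
 *			     "my-ring", ring->dev);
 */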
1346
1347/**
1348 * drm_sched_fini - Destroy a gpu scheduler
1349 *
1350 * @sched: scheduler instance
1351 *
1352 * Tears down and cleans up the scheduler.
1353 *
1354 * This stops submission of new jobs to the hardware through
1355 * drm_sched_backend_ops.run_job(). Consequently, drm_sched_backend_ops.free_job()
1356 * will not be called for all jobs still in drm_gpu_scheduler.pending_list.
1357 * There is no solution for this currently. Thus, it is up to the driver to make
1358 * sure that:
1359 *
 1360 *  a) drm_sched_fini() is only called after drm_sched_backend_ops.free_job()
 1361 *     has been called for all submitted jobs, or
1362 *  b) the jobs for which drm_sched_backend_ops.free_job() has not been called
1363 *     after drm_sched_fini() ran are freed manually.
1364 *
1365 * FIXME: Take care of the above problem and prevent this function from leaking
1366 * the jobs in drm_gpu_scheduler.pending_list under any circumstances.
1367 */
1368void drm_sched_fini(struct drm_gpu_scheduler *sched)
1369{
1370	struct drm_sched_entity *s_entity;
1371	int i;
1372
1373	drm_sched_wqueue_stop(sched);
1374
1375	for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
1376		struct drm_sched_rq *rq = sched->sched_rq[i];
1377
1378		spin_lock(&rq->lock);
1379		list_for_each_entry(s_entity, &rq->entities, list)
1380			/*
 1381			 * Prevents reinsertion and marks job_queue as idle;
 1382			 * it will be removed from the rq in drm_sched_entity_fini()
 1383			 * eventually.
1384			 */
1385			s_entity->stopped = true;
1386		spin_unlock(&rq->lock);
1387		kfree(sched->sched_rq[i]);
1388	}
1389
1390	/* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */
1391	wake_up_all(&sched->job_scheduled);
1392
1393	/* Confirm no work left behind accessing device structures */
1394	cancel_delayed_work_sync(&sched->work_tdr);
1395
1396	if (sched->own_submit_wq)
1397		destroy_workqueue(sched->submit_wq);
1398	sched->ready = false;
1399	kfree(sched->sched_rq);
1400	sched->sched_rq = NULL;
1401}
1402EXPORT_SYMBOL(drm_sched_fini);
1403
1404/**
1405 * drm_sched_increase_karma - Update sched_entity guilty flag
1406 *
1407 * @bad: The job guilty of time out
1408 *
 1409 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
 1410 * limit of the scheduler, then the respective sched entity is marked guilty and
 1411 * jobs from it will not be scheduled any further.
1412 */
1413void drm_sched_increase_karma(struct drm_sched_job *bad)
1414{
1415	int i;
1416	struct drm_sched_entity *tmp;
1417	struct drm_sched_entity *entity;
1418	struct drm_gpu_scheduler *sched = bad->sched;
1419
 1420	/* don't change @bad's karma if it's from the KERNEL RQ,
 1421	 * because a GPU hang can sometimes corrupt kernel jobs (like VM updating
 1422	 * jobs), but keep in mind that kernel jobs are always considered good.
 1423	 */
1424	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
1425		atomic_inc(&bad->karma);
1426
1427		for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
1428			struct drm_sched_rq *rq = sched->sched_rq[i];
1429
1430			spin_lock(&rq->lock);
1431			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
1432				if (bad->s_fence->scheduled.context ==
1433				    entity->fence_context) {
1434					if (entity->guilty)
1435						atomic_set(entity->guilty, 1);
1436					break;
1437				}
1438			}
1439			spin_unlock(&rq->lock);
1440			if (&entity->list != &rq->entities)
1441				break;
1442		}
1443	}
1444}
1445EXPORT_SYMBOL(drm_sched_increase_karma);
1446
1447/**
1448 * drm_sched_wqueue_ready - Is the scheduler ready for submission
1449 *
1450 * @sched: scheduler instance
1451 *
1452 * Returns true if submission is ready
1453 */
1454bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched)
1455{
1456	return sched->ready;
1457}
1458EXPORT_SYMBOL(drm_sched_wqueue_ready);
1459
1460/**
1461 * drm_sched_wqueue_stop - stop scheduler submission
1462 * @sched: scheduler instance
1463 *
1464 * Stops the scheduler from pulling new jobs from entities. It also stops
1465 * freeing jobs automatically through drm_sched_backend_ops.free_job().
1466 */
1467void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched)
1468{
1469	WRITE_ONCE(sched->pause_submit, true);
1470	cancel_work_sync(&sched->work_run_job);
1471	cancel_work_sync(&sched->work_free_job);
1472}
1473EXPORT_SYMBOL(drm_sched_wqueue_stop);
1474
1475/**
1476 * drm_sched_wqueue_start - start scheduler submission
1477 * @sched: scheduler instance
1478 *
1479 * Restarts the scheduler after drm_sched_wqueue_stop() has stopped it.
1480 *
1481 * This function is not necessary for 'conventional' startup. The scheduler is
1482 * fully operational after drm_sched_init() succeeded.
1483 */
1484void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched)
1485{
1486	WRITE_ONCE(sched->pause_submit, false);
1487	queue_work(sched->submit_wq, &sched->work_run_job);
1488	queue_work(sched->submit_wq, &sched->work_free_job);
1489}
1490EXPORT_SYMBOL(drm_sched_wqueue_start);
v6.2
   1/*
   2 * Copyright 2015 Advanced Micro Devices, Inc.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice shall be included in
  12 * all copies or substantial portions of the Software.
  13 *
  14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20 * OTHER DEALINGS IN THE SOFTWARE.
  21 *
  22 */
  23
  24/**
  25 * DOC: Overview
  26 *
  27 * The GPU scheduler provides entities which allow userspace to push jobs
  28 * into software queues which are then scheduled on a hardware run queue.
  29 * The software queues have a priority among them. The scheduler selects the entities
  30 * from the run queue using a FIFO. The scheduler provides dependency handling
  31 * features among jobs. The driver is supposed to provide callback functions for
  32 * backend operations to the scheduler like submitting a job to hardware run queue,
  33 * returning the dependencies of a job etc.
  34 *
  35 * The organisation of the scheduler is the following:
  36 *
  37 * 1. Each hw run queue has one scheduler
  38 * 2. Each scheduler has multiple run queues with different priorities
  39 *    (e.g., HIGH_HW,HIGH_SW, KERNEL, NORMAL)
  40 * 3. Each scheduler run queue has a queue of entities to schedule
  41 * 4. Entities themselves maintain a queue of jobs that will be scheduled on
  42 *    the hardware.
  43 *
  44 * The jobs in a entity are always scheduled in the order that they were pushed.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  45 */
  46
  47#include <linux/kthread.h>
  48#include <linux/wait.h>
  49#include <linux/sched.h>
  50#include <linux/completion.h>
  51#include <linux/dma-resv.h>
  52#include <uapi/linux/sched/types.h>
  53
  54#include <drm/drm_print.h>
  55#include <drm/drm_gem.h>
 
  56#include <drm/gpu_scheduler.h>
  57#include <drm/spsc_queue.h>
  58
  59#define CREATE_TRACE_POINTS
  60#include "gpu_scheduler_trace.h"
  61
 
 
 
 
 
 
  62#define to_drm_sched_job(sched_job)		\
  63		container_of((sched_job), struct drm_sched_job, queue_node)
  64
  65int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
  66
  67/**
  68 * DOC: sched_policy (int)
  69 * Used to override default entities scheduling policy in a run queue.
  70 */
  71MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
  72module_param_named(sched_policy, drm_sched_policy, int, 0444);
  73
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  74static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
  75							    const struct rb_node *b)
  76{
  77	struct drm_sched_entity *ent_a =  rb_entry((a), struct drm_sched_entity, rb_tree_node);
  78	struct drm_sched_entity *ent_b =  rb_entry((b), struct drm_sched_entity, rb_tree_node);
  79
  80	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
  81}
  82
  83static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
 
  84{
  85	struct drm_sched_rq *rq = entity->rq;
  86
  87	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
  88		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
  89		RB_CLEAR_NODE(&entity->rb_tree_node);
  90	}
  91}
  92
  93void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
 
 
  94{
  95	/*
  96	 * Both locks need to be grabbed, one to protect from entity->rq change
  97	 * for entity from within concurrent drm_sched_entity_select_rq and the
  98	 * other to update the rb tree structure.
  99	 */
 100	spin_lock(&entity->rq_lock);
 101	spin_lock(&entity->rq->lock);
 102
 103	drm_sched_rq_remove_fifo_locked(entity);
 104
 105	entity->oldest_job_waiting = ts;
 106
 107	rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
 108		      drm_sched_entity_compare_before);
 109
 110	spin_unlock(&entity->rq->lock);
 111	spin_unlock(&entity->rq_lock);
 112}
 113
 114/**
 115 * drm_sched_rq_init - initialize a given run queue struct
 116 *
 117 * @sched: scheduler instance to associate with this run queue
 118 * @rq: scheduler run queue
 119 *
 120 * Initializes a scheduler runqueue.
 121 */
 122static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
 123			      struct drm_sched_rq *rq)
 124{
 125	spin_lock_init(&rq->lock);
 126	INIT_LIST_HEAD(&rq->entities);
 127	rq->rb_tree_root = RB_ROOT_CACHED;
 128	rq->current_entity = NULL;
 129	rq->sched = sched;
 130}
 131
 132/**
 133 * drm_sched_rq_add_entity - add an entity
 134 *
 135 * @rq: scheduler run queue
 136 * @entity: scheduler entity
 137 *
 138 * Adds a scheduler entity to the run queue.
 139 */
 140void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
 141			     struct drm_sched_entity *entity)
 142{
 
 
 
 143	if (!list_empty(&entity->list))
 144		return;
 145
 146	spin_lock(&rq->lock);
 147
 148	atomic_inc(rq->sched->score);
 149	list_add_tail(&entity->list, &rq->entities);
 150
 151	spin_unlock(&rq->lock);
 152}
 153
 154/**
 155 * drm_sched_rq_remove_entity - remove an entity
 156 *
 157 * @rq: scheduler run queue
 158 * @entity: scheduler entity
 159 *
 160 * Removes a scheduler entity from the run queue.
 161 */
 162void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 163				struct drm_sched_entity *entity)
 164{
 
 
 165	if (list_empty(&entity->list))
 166		return;
 167
 168	spin_lock(&rq->lock);
 169
 170	atomic_dec(rq->sched->score);
 171	list_del_init(&entity->list);
 172
 173	if (rq->current_entity == entity)
 174		rq->current_entity = NULL;
 175
 176	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
 177		drm_sched_rq_remove_fifo_locked(entity);
 178
 179	spin_unlock(&rq->lock);
 180}
 181
 182/**
 183 * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
 184 *
 
 185 * @rq: scheduler run queue to check.
 186 *
 187 * Try to find a ready entity, returns NULL if none found.
 188 */
 189static struct drm_sched_entity *
 190drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
 191{
 192	struct drm_sched_entity *entity;
 193
 194	spin_lock(&rq->lock);
 195
 196	entity = rq->current_entity;
 197	if (entity) {
 198		list_for_each_entry_continue(entity, &rq->entities, list) {
 199			if (drm_sched_entity_is_ready(entity)) {
 200				rq->current_entity = entity;
 201				reinit_completion(&entity->entity_idle);
 202				spin_unlock(&rq->lock);
 203				return entity;
 204			}
 205		}
 206	}
 207
 208	list_for_each_entry(entity, &rq->entities, list) {
 209
 210		if (drm_sched_entity_is_ready(entity)) {
 211			rq->current_entity = entity;
 212			reinit_completion(&entity->entity_idle);
 213			spin_unlock(&rq->lock);
 214			return entity;
 215		}
 216
 217		if (entity == rq->current_entity)
 218			break;
 219	}
 220
 221	spin_unlock(&rq->lock);
 222
 223	return NULL;
 224}
 225
 226/**
 227 * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
 228 *
 229 * @rq: scheduler run queue to check.
 230 *
 231 * Find oldest waiting ready entity, returns NULL if none found.
 232 */
 233static struct drm_sched_entity *
 234drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
 235{
 236	struct rb_node *rb;
 237
 238	spin_lock(&rq->lock);
 239	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
 240		struct drm_sched_entity *entity;
 241
 242		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
 243		if (drm_sched_entity_is_ready(entity)) {
 244			rq->current_entity = entity;
 245			reinit_completion(&entity->entity_idle);
 246			break;
 247		}
 248	}
 249	spin_unlock(&rq->lock);
 250
 251	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
 252}
 253
 254/**
 255 * drm_sched_job_done - complete a job
 256 * @s_job: pointer to the job which is done
 257 *
 258 * Finish the job's fence and wake up the worker thread.
 259 */
 260static void drm_sched_job_done(struct drm_sched_job *s_job)
 261{
 262	struct drm_sched_fence *s_fence = s_job->s_fence;
 263	struct drm_gpu_scheduler *sched = s_fence->sched;
 264
 265	atomic_dec(&sched->hw_rq_count);
 266	atomic_dec(sched->score);
 267
 268	trace_drm_sched_process_job(s_fence);
 269
 270	dma_fence_get(&s_fence->finished);
 271	drm_sched_fence_finished(s_fence);
 272	dma_fence_put(&s_fence->finished);
 273	wake_up_interruptible(&sched->wake_up_worker);
 274}
 275
 276/**
 277 * drm_sched_job_done_cb - the callback for a done job
 278 * @f: fence
 279 * @cb: fence callbacks
 280 */
 281static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
 282{
 283	struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
 284
 285	drm_sched_job_done(s_job);
 286}
 287
 288/**
 289 * drm_sched_start_timeout - start timeout for reset worker
 290 *
 291 * @sched: scheduler instance to start the worker for
 292 *
 293 * Start the timeout for the given scheduler.
 294 */
 295static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
 296{
 297	if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
 298	    !list_empty(&sched->pending_list))
 299		queue_delayed_work(sched->timeout_wq, &sched->work_tdr, sched->timeout);
 300}
 301
 302/**
 303 * drm_sched_fault - immediately start timeout handler
 304 *
 305 * @sched: scheduler where the timeout handling should be started.
 306 *
 307 * Start timeout handling immediately when the driver detects a hardware fault.
 308 */
 309void drm_sched_fault(struct drm_gpu_scheduler *sched)
 310{
 311	mod_delayed_work(sched->timeout_wq, &sched->work_tdr, 0);
 312}
 313EXPORT_SYMBOL(drm_sched_fault);
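/*
 * Illustrative sketch (not part of this file): a driver that detects a
 * hardware fault from its interrupt handler could kick the timeout handler
 * right away instead of waiting for the TDR timer to expire. Names like
 * my_device and my_fault_irq() are hypothetical.
 *
 *	static irqreturn_t my_fault_irq(int irq, void *arg)
 *	{
 *		struct my_device *mdev = arg;
 *
 *		// Let the scheduler run its timeout/reset path immediately.
 *		drm_sched_fault(&mdev->sched);
 *		return IRQ_HANDLED;
 *	}
 */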
 314
 315/**
 316 * drm_sched_suspend_timeout - Suspend scheduler job timeout
 317 *
 318 * @sched: scheduler instance for which to suspend the timeout
 319 *
 320 * Suspend the delayed work timeout for the scheduler. This is done by
  321 * modifying the delayed work timeout to an arbitrarily large value,
 322 * MAX_SCHEDULE_TIMEOUT in this case.
 323 *
 324 * Returns the timeout remaining
 325 *
 326 */
 327unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
 328{
 329	unsigned long sched_timeout, now = jiffies;
 330
 331	sched_timeout = sched->work_tdr.timer.expires;
 332
 333	/*
  334	 * Modify the timeout to an arbitrarily large value. This also prevents
  335	 * the timeout from being restarted when new submissions arrive
 336	 */
 337	if (mod_delayed_work(sched->timeout_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
 338			&& time_after(sched_timeout, now))
 339		return sched_timeout - now;
 340	else
 341		return sched->timeout;
 342}
 343EXPORT_SYMBOL(drm_sched_suspend_timeout);
 344
 345/**
 346 * drm_sched_resume_timeout - Resume scheduler job timeout
 347 *
 348 * @sched: scheduler instance for which to resume the timeout
 349 * @remaining: remaining timeout
 350 *
 351 * Resume the delayed work timeout for the scheduler.
 352 */
 353void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
 354		unsigned long remaining)
 355{
 356	spin_lock(&sched->job_list_lock);
 357
 358	if (list_empty(&sched->pending_list))
 359		cancel_delayed_work(&sched->work_tdr);
 360	else
 361		mod_delayed_work(sched->timeout_wq, &sched->work_tdr, remaining);
 362
 363	spin_unlock(&sched->job_list_lock);
 364}
 365EXPORT_SYMBOL(drm_sched_resume_timeout);
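/*
 * Illustrative sketch (hypothetical driver code): suspend/resume are meant to
 * bracket a window in which the hardware legitimately cannot make progress
 * (e.g. a preemption or power-gating sequence), so that the TDR timer does
 * not fire spuriously. The remaining time returned by the suspend call is
 * handed back to the resume call; my_hw_do_slow_thing() is an assumed helper.
 *
 *	unsigned long remaining;
 *
 *	remaining = drm_sched_suspend_timeout(&mdev->sched);
 *	my_hw_do_slow_thing(mdev);
 *	drm_sched_resume_timeout(&mdev->sched, remaining);
 */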
 366
 367static void drm_sched_job_begin(struct drm_sched_job *s_job)
 368{
 369	struct drm_gpu_scheduler *sched = s_job->sched;
 370
 371	spin_lock(&sched->job_list_lock);
 372	list_add_tail(&s_job->list, &sched->pending_list);
 373	drm_sched_start_timeout(sched);
 374	spin_unlock(&sched->job_list_lock);
 375}
 376
 377static void drm_sched_job_timedout(struct work_struct *work)
 378{
 379	struct drm_gpu_scheduler *sched;
 380	struct drm_sched_job *job;
 381	enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL;
 382
 383	sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
 384
 385	/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
 386	spin_lock(&sched->job_list_lock);
 387	job = list_first_entry_or_null(&sched->pending_list,
 388				       struct drm_sched_job, list);
 389
 390	if (job) {
 391		/*
 392		 * Remove the bad job so it cannot be freed by concurrent
  393		 * drm_sched_cleanup_jobs. It will be reinserted after sched->thread
  394		 * is parked, at which point it is safe.
 395		 */
 396		list_del_init(&job->list);
 397		spin_unlock(&sched->job_list_lock);
 398
 399		status = job->sched->ops->timedout_job(job);
 400
 401		/*
  402		 * The guilty job did complete and hence needs to be manually removed.
  403		 * See the drm_sched_stop() documentation.
 404		 */
 405		if (sched->free_guilty) {
 406			job->sched->ops->free_job(job);
 407			sched->free_guilty = false;
 408		}
 409	} else {
 410		spin_unlock(&sched->job_list_lock);
 411	}
 412
 413	if (status != DRM_GPU_SCHED_STAT_ENODEV) {
 414		spin_lock(&sched->job_list_lock);
 415		drm_sched_start_timeout(sched);
 416		spin_unlock(&sched->job_list_lock);
 417	}
 418}
 419
 420/**
 421 * drm_sched_stop - stop the scheduler
 422 *
 423 * @sched: scheduler instance
 424 * @bad: job which caused the time out
 425 *
  426 * Stop the scheduler and also remove and free all completed jobs.
  427 * Note: the bad job will not be freed as it might be used later, so it is the
  428 * caller's responsibility to release it manually if it is not part of the
  429 * pending list any more.
 430 *
 431 */
 432void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 433{
 434	struct drm_sched_job *s_job, *tmp;
 435
 436	kthread_park(sched->thread);
 437
 438	/*
  439	 * Reinsert the bad job here - now it's safe as
 440	 * drm_sched_get_cleanup_job cannot race against us and release the
 441	 * bad job at this point - we parked (waited for) any in progress
 442	 * (earlier) cleanups and drm_sched_get_cleanup_job will not be called
 443	 * now until the scheduler thread is unparked.
 444	 */
 445	if (bad && bad->sched == sched)
 446		/*
 447		 * Add at the head of the queue to reflect it was the earliest
 448		 * job extracted.
 449		 */
 450		list_add(&bad->list, &sched->pending_list);
 451
 452	/*
  453	 * Iterate the job list from the last to the first one and either deactivate
  454	 * their HW callbacks or remove them from the pending list if they have
  455	 * already signaled.
 456	 * This iteration is thread safe as sched thread is stopped.
 457	 */
 458	list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
 459					 list) {
 460		if (s_job->s_fence->parent &&
 461		    dma_fence_remove_callback(s_job->s_fence->parent,
 462					      &s_job->cb)) {
 463			dma_fence_put(s_job->s_fence->parent);
 464			s_job->s_fence->parent = NULL;
 465			atomic_dec(&sched->hw_rq_count);
 466		} else {
 467			/*
 468			 * remove job from pending_list.
 469			 * Locking here is for concurrent resume timeout
 470			 */
 471			spin_lock(&sched->job_list_lock);
 472			list_del_init(&s_job->list);
 473			spin_unlock(&sched->job_list_lock);
 474
 475			/*
 476			 * Wait for job's HW fence callback to finish using s_job
 477			 * before releasing it.
 478			 *
 479			 * Job is still alive so fence refcount at least 1
 480			 */
 481			dma_fence_wait(&s_job->s_fence->finished, false);
 482
 483			/*
 484			 * We must keep bad job alive for later use during
 485			 * recovery by some of the drivers but leave a hint
 486			 * that the guilty job must be released.
 487			 */
 488			if (bad != s_job)
 489				sched->ops->free_job(s_job);
 490			else
 491				sched->free_guilty = true;
 492		}
 493	}
 494
 495	/*
  496	 * Stop the pending timer in flight as we rearm it in drm_sched_start. This
  497	 * avoids the pending timeout work in progress firing right away after
  498	 * this TDR finished and before the newly restarted jobs had a
  499	 * chance to complete.
 500	 */
 501	cancel_delayed_work(&sched->work_tdr);
 502}
 503
 504EXPORT_SYMBOL(drm_sched_stop);
 505
 506/**
 507 * drm_sched_start - recover jobs after a reset
 508 *
 509 * @sched: scheduler instance
 510 * @full_recovery: proceed with complete sched restart
 511 *
 512 */
 513void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 514{
 515	struct drm_sched_job *s_job, *tmp;
 516	int r;
 517
 518	/*
 519	 * Locking the list is not required here as the sched thread is parked
 520	 * so no new jobs are being inserted or removed. Also concurrent
  521	 * GPU recoveries can't run in parallel.
 522	 */
 523	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
 524		struct dma_fence *fence = s_job->s_fence->parent;
 525
 526		atomic_inc(&sched->hw_rq_count);
 527
 528		if (!full_recovery)
 529			continue;
 530
 531		if (fence) {
 532			r = dma_fence_add_callback(fence, &s_job->cb,
 533						   drm_sched_job_done_cb);
 534			if (r == -ENOENT)
 535				drm_sched_job_done(s_job);
 536			else if (r)
 537				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
 538					  r);
 539		} else
 540			drm_sched_job_done(s_job);
 541	}
 542
 543	if (full_recovery) {
 544		spin_lock(&sched->job_list_lock);
 545		drm_sched_start_timeout(sched);
 546		spin_unlock(&sched->job_list_lock);
 547	}
 548
 549	kthread_unpark(sched->thread);
 550}
 551EXPORT_SYMBOL(drm_sched_start);
 552
 553/**
 554 * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
 555 *
 556 * @sched: scheduler instance
 557 *
 558 */
 559void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
 560{
 561	struct drm_sched_job *s_job, *tmp;
 562	uint64_t guilty_context;
 563	bool found_guilty = false;
 564	struct dma_fence *fence;
 565
 566	list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
 567		struct drm_sched_fence *s_fence = s_job->s_fence;
 568
 569		if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
 570			found_guilty = true;
 571			guilty_context = s_job->s_fence->scheduled.context;
 572		}
 573
 574		if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
 575			dma_fence_set_error(&s_fence->finished, -ECANCELED);
 576
 577		fence = sched->ops->run_job(s_job);
 578
 579		if (IS_ERR_OR_NULL(fence)) {
 580			if (IS_ERR(fence))
 581				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
 582
 583			s_job->s_fence->parent = NULL;
 584		} else {
 585
 586			s_job->s_fence->parent = dma_fence_get(fence);
 587
  588			/* Drop for original kref_init */
 589			dma_fence_put(fence);
 590		}
 591	}
 592}
 593EXPORT_SYMBOL(drm_sched_resubmit_jobs);
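/*
 * Illustrative sketch of a typical device-reset sequence built from the
 * helpers above, roughly as a &drm_sched_backend_ops.timedout_job callback
 * might use them. my_device, to_my_device() and my_hw_reset() are
 * hypothetical; the order (stop, bump karma, reset, resubmit, start) follows
 * the documentation of drm_sched_stop() and drm_sched_start().
 *
 *	static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *bad)
 *	{
 *		struct my_device *mdev = to_my_device(bad->sched);
 *
 *		drm_sched_stop(&mdev->sched, bad);	// park thread, prune done jobs
 *		drm_sched_increase_karma(bad);		// mark the guilty entity
 *		my_hw_reset(mdev);			// hypothetical hardware reset
 *		drm_sched_resubmit_jobs(&mdev->sched);	// re-run surviving jobs
 *		drm_sched_start(&mdev->sched, true);	// unpark and rearm timeout
 *
 *		return DRM_GPU_SCHED_STAT_NOMINAL;
 *	}
 */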
 594
 595/**
 596 * drm_sched_job_init - init a scheduler job
 597 * @job: scheduler job to init
 598 * @entity: scheduler entity to use
 599 * @owner: job owner for debugging
 600 *
 601 * Refer to drm_sched_entity_push_job() documentation
 602 * for locking considerations.
 603 *
  604 * Drivers must make sure to call drm_sched_job_cleanup() if this function returns
 605 * successfully, even when @job is aborted before drm_sched_job_arm() is called.
 606 *
 607 * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
  608 * has died, which can mean that there's no valid runqueue for an @entity.
  609 * This function returns -ENOENT in this case (which probably should be -EIO as
  610 * a more meaningful return value).
 611 *
 612 * Returns 0 for success, negative error code otherwise.
 613 */
 614int drm_sched_job_init(struct drm_sched_job *job,
 615		       struct drm_sched_entity *entity,
 616		       void *owner)
 617{
 618	if (!entity->rq)
 619		return -ENOENT;
 620
 621	job->entity = entity;
 622	job->s_fence = drm_sched_fence_alloc(entity, owner);
 623	if (!job->s_fence)
 624		return -ENOMEM;
 625
 626	INIT_LIST_HEAD(&job->list);
 627
 628	xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
 629
 630	return 0;
 631}
 632EXPORT_SYMBOL(drm_sched_job_init);
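/*
 * Illustrative sketch of the first half of a submission path. The job is
 * initialized against an entity and cleaned up again if anything between
 * init and arm fails, per the drm_sched_job_cleanup() documentation.
 * my_job, ctx and my_setup_deps() are hypothetical driver names.
 *
 *	ret = drm_sched_job_init(&job->base, &ctx->entity, ctx);
 *	if (ret)
 *		return ret;
 *
 *	ret = my_setup_deps(job);		// e.g. add resv/syncobj fences
 *	if (ret) {
 *		drm_sched_job_cleanup(&job->base);	// aborted before arm
 *		return ret;
 *	}
 */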
 633
 634/**
 635 * drm_sched_job_arm - arm a scheduler job for execution
 636 * @job: scheduler job to arm
 637 *
 638 * This arms a scheduler job for execution. Specifically it initializes the
 639 * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
 640 * or other places that need to track the completion of this job.
 641 *
 642 * Refer to drm_sched_entity_push_job() documentation for locking
 643 * considerations.
 644 *
 645 * This can only be called if drm_sched_job_init() succeeded.
 646 */
 647void drm_sched_job_arm(struct drm_sched_job *job)
 648{
 649	struct drm_gpu_scheduler *sched;
 650	struct drm_sched_entity *entity = job->entity;
 651
 652	BUG_ON(!entity);
 653	drm_sched_entity_select_rq(entity);
 654	sched = entity->rq->sched;
 655
 656	job->sched = sched;
 657	job->s_priority = entity->rq - sched->sched_rq;
 658	job->id = atomic64_inc_return(&sched->job_id_count);
 659
 660	drm_sched_fence_init(job->s_fence, job->entity);
 661}
 662EXPORT_SYMBOL(drm_sched_job_arm);
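/*
 * Illustrative continuation of the submission sketch above: once all
 * dependencies are in place and the point of no return is reached, the job is
 * armed and pushed. drm_sched_entity_push_job() and its locking rules are
 * documented with the entity code; the names used here remain hypothetical.
 *
 *	drm_sched_job_arm(&job->base);
 *	// &job->base.s_fence->finished can now be installed into dma_resv, etc.
 *	drm_sched_entity_push_job(&job->base);
 */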
 663
 664/**
 665 * drm_sched_job_add_dependency - adds the fence as a job dependency
 666 * @job: scheduler job to add the dependencies to
 667 * @fence: the dma_fence to add to the list of dependencies.
 668 *
 669 * Note that @fence is consumed in both the success and error cases.
 670 *
 671 * Returns:
 672 * 0 on success, or an error on failing to expand the array.
 673 */
 674int drm_sched_job_add_dependency(struct drm_sched_job *job,
 675				 struct dma_fence *fence)
 676{
 677	struct dma_fence *entry;
 678	unsigned long index;
 679	u32 id = 0;
 680	int ret;
 681
 682	if (!fence)
 683		return 0;
 684
 685	/* Deduplicate if we already depend on a fence from the same context.
 686	 * This lets the size of the array of deps scale with the number of
 687	 * engines involved, rather than the number of BOs.
 688	 */
 689	xa_for_each(&job->dependencies, index, entry) {
 690		if (entry->context != fence->context)
 691			continue;
 692
 693		if (dma_fence_is_later(fence, entry)) {
 694			dma_fence_put(entry);
 695			xa_store(&job->dependencies, index, fence, GFP_KERNEL);
 696		} else {
 697			dma_fence_put(fence);
 698		}
 699		return 0;
 700	}
 701
 702	ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
 703	if (ret != 0)
 704		dma_fence_put(fence);
 705
 706	return ret;
 707}
 708EXPORT_SYMBOL(drm_sched_job_add_dependency);
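/*
 * Illustrative sketch: an explicit fence (for example one looked up from a
 * syncobj handed in by userspace) can be added as a dependency before the job
 * is armed. The reference is consumed, so no dma_fence_put() is needed on
 * either success or failure. The syncobj lookup is an assumption about how a
 * driver might obtain the fence, not something this file requires.
 *
 *	struct dma_fence *in_fence;
 *
 *	ret = drm_syncobj_find_fence(file, args->in_syncobj, 0, 0, &in_fence);
 *	if (ret)
 *		return ret;
 *	ret = drm_sched_job_add_dependency(&job->base, in_fence);
 *	if (ret)
 *		return ret;
 */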
 709
 710/**
 711 * drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
 712 * @job: scheduler job to add the dependencies to
 713 * @resv: the dma_resv object to get the fences from
 714 * @usage: the dma_resv_usage to use to filter the fences
 715 *
 716 * This adds all fences matching the given usage from @resv to @job.
 717 * Must be called with the @resv lock held.
 718 *
 719 * Returns:
 720 * 0 on success, or an error on failing to expand the array.
 721 */
 722int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
 723					struct dma_resv *resv,
 724					enum dma_resv_usage usage)
 725{
 726	struct dma_resv_iter cursor;
 727	struct dma_fence *fence;
 728	int ret;
 729
 730	dma_resv_assert_held(resv);
 731
 732	dma_resv_for_each_fence(&cursor, resv, usage, fence) {
 733		/* Make sure to grab an additional ref on the added fence */
 734		dma_fence_get(fence);
 735		ret = drm_sched_job_add_dependency(job, fence);
 736		if (ret) {
 737			dma_fence_put(fence);
 738			return ret;
 739		}
 740	}
 741	return 0;
 742}
 743EXPORT_SYMBOL(drm_sched_job_add_resv_dependencies);
 744
 745/**
 746 * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
 747 *   dependencies
 748 * @job: scheduler job to add the dependencies to
 749 * @obj: the gem object to add new dependencies from.
 750 * @write: whether the job might write the object (so we need to depend on
 751 * shared fences in the reservation object).
 752 *
 753 * This should be called after drm_gem_lock_reservations() on your array of
 754 * GEM objects used in the job but before updating the reservations with your
 755 * own fences.
 756 *
 757 * Returns:
 758 * 0 on success, or an error on failing to expand the array.
 759 */
 760int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
 761					    struct drm_gem_object *obj,
 762					    bool write)
 763{
 764	return drm_sched_job_add_resv_dependencies(job, obj->resv,
 765						   dma_resv_usage_rw(write));
 766}
 767EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
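/*
 * Illustrative sketch: in an exec ioctl, implicit dependencies are usually
 * gathered per GEM object after the reservations have been locked, e.g. with
 * drm_gem_lock_reservations() as mentioned above. The bos[] array and the
 * per-object write flag are hypothetical driver state.
 *
 *	for (i = 0; i < num_bos; i++) {
 *		ret = drm_sched_job_add_implicit_dependencies(&job->base,
 *							      bos[i], writes[i]);
 *		if (ret)
 *			goto err_unlock;
 *	}
 */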
 768
 769/**
 770 * drm_sched_job_cleanup - clean up scheduler job resources
 771 * @job: scheduler job to clean up
 772 *
 773 * Cleans up the resources allocated with drm_sched_job_init().
 774 *
 775 * Drivers should call this from their error unwind code if @job is aborted
 776 * before drm_sched_job_arm() is called.
 777 *
 778 * After that point of no return @job is committed to be executed by the
 779 * scheduler, and this function should be called from the
 780 * &drm_sched_backend_ops.free_job callback.
 781 */
 782void drm_sched_job_cleanup(struct drm_sched_job *job)
 783{
 784	struct dma_fence *fence;
 785	unsigned long index;
 786
 787	if (kref_read(&job->s_fence->finished.refcount)) {
 788		/* drm_sched_job_arm() has been called */
 789		dma_fence_put(&job->s_fence->finished);
 790	} else {
 791		/* aborted job before committing to run it */
 792		drm_sched_fence_free(job->s_fence);
 793	}
 794
 795	job->s_fence = NULL;
 796
 797	xa_for_each(&job->dependencies, index, fence) {
 798		dma_fence_put(fence);
 799	}
 800	xa_destroy(&job->dependencies);
 801
 802}
 803EXPORT_SYMBOL(drm_sched_job_cleanup);
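/*
 * Illustrative sketch of the free_job side: once the scheduler is done with a
 * job, the driver's &drm_sched_backend_ops.free_job callback is expected to
 * call drm_sched_job_cleanup() before releasing its own job structure.
 * to_my_job() and the surrounding struct are hypothetical.
 *
 *	static void my_free_job(struct drm_sched_job *sched_job)
 *	{
 *		struct my_job *job = to_my_job(sched_job);
 *
 *		drm_sched_job_cleanup(sched_job);
 *		kfree(job);
 *	}
 */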
 804
 805/**
 806 * drm_sched_ready - is the scheduler ready
 807 *
 808 * @sched: scheduler instance
 809 *
 810 * Return true if we can push more jobs to the hw, otherwise false.
 811 */
 812static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
 813{
 814	return atomic_read(&sched->hw_rq_count) <
 815		sched->hw_submission_limit;
 816}
 817
 818/**
 819 * drm_sched_wakeup - Wake up the scheduler when it is ready
 820 *
 821 * @sched: scheduler instance
 822 *
 823 */
 824void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
 825{
 826	if (drm_sched_ready(sched))
 827		wake_up_interruptible(&sched->wake_up_worker);
 828}
 829
 830/**
 831 * drm_sched_select_entity - Select next entity to process
 832 *
 833 * @sched: scheduler instance
 834 *
 835 * Returns the entity to process or NULL if none are found.
 836 */
 837static struct drm_sched_entity *
 838drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 839{
 840	struct drm_sched_entity *entity;
 841	int i;
 842
 843	if (!drm_sched_ready(sched))
 844		return NULL;
 845
  846	/* Kernel run queue has higher priority than normal run queue */
 847	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
 848		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
 849			drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
 850			drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
 851		if (entity)
 852			break;
 853	}
 854
 855	return entity;
 856}
 857
 858/**
 859 * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 860 *
 861 * @sched: scheduler instance
 862 *
 863 * Returns the next finished job from the pending list (if there is one)
  864 * ready to be destroyed.
 865 */
 866static struct drm_sched_job *
 867drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 868{
 869	struct drm_sched_job *job, *next;
 870
 871	spin_lock(&sched->job_list_lock);
 872
 873	job = list_first_entry_or_null(&sched->pending_list,
 874				       struct drm_sched_job, list);
 875
 876	if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
 877		/* remove job from pending_list */
 878		list_del_init(&job->list);
 879
 880		/* cancel this job's TO timer */
 881		cancel_delayed_work(&sched->work_tdr);
 882		/* make the scheduled timestamp more accurate */
 883		next = list_first_entry_or_null(&sched->pending_list,
 884						typeof(*next), list);
 885
 886		if (next) {
 887			next->s_fence->scheduled.timestamp =
 888				job->s_fence->finished.timestamp;
 889			/* start TO timer for next job */
 890			drm_sched_start_timeout(sched);
 891		}
 892	} else {
 893		job = NULL;
 894	}
 895
 896	spin_unlock(&sched->job_list_lock);
 897
 898	return job;
 899}
 900
 901/**
 902 * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
 903 * @sched_list: list of drm_gpu_schedulers
 904 * @num_sched_list: number of drm_gpu_schedulers in the sched_list
 905 *
  906 * Returns a pointer to the sched with the least load, or NULL if none of the
  907 * drm_gpu_schedulers are ready.
 908 */
 909struct drm_gpu_scheduler *
 910drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
 911		     unsigned int num_sched_list)
 912{
 913	struct drm_gpu_scheduler *sched, *picked_sched = NULL;
 914	int i;
 915	unsigned int min_score = UINT_MAX, num_score;
 916
 917	for (i = 0; i < num_sched_list; ++i) {
 918		sched = sched_list[i];
 919
 920		if (!sched->ready) {
 921			DRM_WARN("scheduler %s is not ready, skipping",
 922				 sched->name);
 923			continue;
 924		}
 925
 926		num_score = atomic_read(sched->score);
 927		if (num_score < min_score) {
 928			min_score = num_score;
 929			picked_sched = sched;
 930		}
 931	}
 932
 933	return picked_sched;
 934}
 935EXPORT_SYMBOL(drm_sched_pick_best);
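/*
 * Illustrative sketch: a driver with one scheduler per hardware ring can pick
 * the least loaded one up front and hand a single-element list to the entity,
 * similar to what amdgpu does for load balancing. The fields on mdev and the
 * priority chosen here are only an assumed usage pattern.
 *
 *	struct drm_gpu_scheduler *sched;
 *
 *	sched = drm_sched_pick_best(mdev->ring_scheds, mdev->num_rings);
 *	if (!sched)
 *		return -ENODEV;
 *	ret = drm_sched_entity_init(&ctx->entity, DRM_SCHED_PRIORITY_NORMAL,
 *				    &sched, 1, NULL);
 */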
 936
 937/**
 938 * drm_sched_blocked - check if the scheduler is blocked
 939 *
 940 * @sched: scheduler instance
 941 *
 942 * Returns true if blocked, otherwise false.
 943 */
 944static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
 945{
 946	if (kthread_should_park()) {
 947		kthread_parkme();
 948		return true;
 949	}
 950
 951	return false;
 952}
 953
 954/**
 955 * drm_sched_main - main scheduler thread
 956 *
 957 * @param: scheduler instance
 958 *
 959 * Returns 0.
 960 */
 961static int drm_sched_main(void *param)
 962{
 963	struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
 964	int r;
 965
 966	sched_set_fifo_low(current);
 967
 968	while (!kthread_should_stop()) {
 969		struct drm_sched_entity *entity = NULL;
 970		struct drm_sched_fence *s_fence;
 971		struct drm_sched_job *sched_job;
 972		struct dma_fence *fence;
 973		struct drm_sched_job *cleanup_job = NULL;
 974
 975		wait_event_interruptible(sched->wake_up_worker,
 976					 (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
 977					 (!drm_sched_blocked(sched) &&
 978					  (entity = drm_sched_select_entity(sched))) ||
 979					 kthread_should_stop());
 980
 981		if (cleanup_job)
 982			sched->ops->free_job(cleanup_job);
 983
 984		if (!entity)
 985			continue;
 986
 987		sched_job = drm_sched_entity_pop_job(entity);
 988
 989		if (!sched_job) {
 990			complete_all(&entity->entity_idle);
 991			continue;
 992		}
 993
 994		s_fence = sched_job->s_fence;
 995
 996		atomic_inc(&sched->hw_rq_count);
 997		drm_sched_job_begin(sched_job);
 998
 999		trace_drm_run_job(sched_job, entity);
1000		fence = sched->ops->run_job(sched_job);
1001		complete_all(&entity->entity_idle);
1002		drm_sched_fence_scheduled(s_fence);
1003
1004		if (!IS_ERR_OR_NULL(fence)) {
1005			s_fence->parent = dma_fence_get(fence);
1006			/* Drop for original kref_init of the fence */
1007			dma_fence_put(fence);
1008
1009			r = dma_fence_add_callback(fence, &sched_job->cb,
1010						   drm_sched_job_done_cb);
1011			if (r == -ENOENT)
1012				drm_sched_job_done(sched_job);
1013			else if (r)
1014				DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n",
1015					  r);
1016		} else {
1017			if (IS_ERR(fence))
1018				dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
1019
1020			drm_sched_job_done(sched_job);
1021		}
1022
1023		wake_up(&sched->job_scheduled);
1024	}
1025	return 0;
1026}
1027
1028/**
1029 * drm_sched_init - Init a gpu scheduler instance
1030 *
1031 * @sched: scheduler instance
1032 * @ops: backend operations for this scheduler
1033 * @hw_submission: number of hw submissions that can be in flight
1034 * @hang_limit: number of times to allow a job to hang before dropping it
1035 * @timeout: timeout value in jiffies for the scheduler
1036 * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is
1037 *		used
1038 * @score: optional score atomic shared with other schedulers
1039 * @name: name used for debugging
1040 * @dev: target &struct device
1041 *
1042 * Return 0 on success, otherwise error code.
1043 */
1044int drm_sched_init(struct drm_gpu_scheduler *sched,
1045		   const struct drm_sched_backend_ops *ops,
1046		   unsigned hw_submission, unsigned hang_limit,
1047		   long timeout, struct workqueue_struct *timeout_wq,
1048		   atomic_t *score, const char *name, struct device *dev)
1049{
1050	int i, ret;
1051	sched->ops = ops;
1052	sched->hw_submission_limit = hw_submission;
1053	sched->name = name;
1054	sched->timeout = timeout;
1055	sched->timeout_wq = timeout_wq ? : system_wq;
1056	sched->hang_limit = hang_limit;
1057	sched->score = score ? score : &sched->_score;
1058	sched->dev = dev;
1059	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
1060		drm_sched_rq_init(sched, &sched->sched_rq[i]);
1061
1062	init_waitqueue_head(&sched->wake_up_worker);
1063	init_waitqueue_head(&sched->job_scheduled);
1064	INIT_LIST_HEAD(&sched->pending_list);
1065	spin_lock_init(&sched->job_list_lock);
1066	atomic_set(&sched->hw_rq_count, 0);
1067	INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
1068	atomic_set(&sched->_score, 0);
1069	atomic64_set(&sched->job_id_count, 0);
1070
 1071	/* Each scheduler will run on a separate kernel thread */
1072	sched->thread = kthread_run(drm_sched_main, sched, sched->name);
1073	if (IS_ERR(sched->thread)) {
1074		ret = PTR_ERR(sched->thread);
1075		sched->thread = NULL;
1076		DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
1077		return ret;
1078	}
1079
1080	sched->ready = true;
1081	return 0;
1082}
1083EXPORT_SYMBOL(drm_sched_init);
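/*
 * Illustrative sketch of a driver bringing up one scheduler instance per ring.
 * The ops table, submission depth, hang limit and timeout are hypothetical
 * values; msecs_to_jiffies() converts the timeout into the jiffies this
 * function expects, and drm_sched_fini() undoes this on teardown.
 *
 *	static const struct drm_sched_backend_ops my_sched_ops = {
 *		.run_job	= my_run_job,
 *		.timedout_job	= my_timedout_job,
 *		.free_job	= my_free_job,
 *	};
 *
 *	ret = drm_sched_init(&ring->sched, &my_sched_ops,
 *			     64,			// hw_submission depth
 *			     2,				// hang_limit
 *			     msecs_to_jiffies(500),	// timeout
 *			     NULL,			// timeout_wq: use system_wq
 *			     NULL,			// score: use private counter
 *			     ring->name, mdev->dev);
 */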
1084
1085/**
1086 * drm_sched_fini - Destroy a gpu scheduler
1087 *
1088 * @sched: scheduler instance
1089 *
1090 * Tears down and cleans up the scheduler.
1091 */
1092void drm_sched_fini(struct drm_gpu_scheduler *sched)
1093{
1094	struct drm_sched_entity *s_entity;
1095	int i;
1096
1097	if (sched->thread)
1098		kthread_stop(sched->thread);
1099
1100	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
1101		struct drm_sched_rq *rq = &sched->sched_rq[i];
1102
1103		if (!rq)
1104			continue;
1105
1106		spin_lock(&rq->lock);
1107		list_for_each_entry(s_entity, &rq->entities, list)
1108			/*
 1109			 * Prevents reinsertion and marks job_queue as idle,
 1110			 * it will be removed from the rq in drm_sched_entity_fini()
 1111			 * eventually.
1112			 */
1113			s_entity->stopped = true;
1114		spin_unlock(&rq->lock);
1115
1116	}
1117
 1118	/* Wake up everyone stuck in drm_sched_entity_flush for this scheduler */
1119	wake_up_all(&sched->job_scheduled);
1120
1121	/* Confirm no work left behind accessing device structures */
1122	cancel_delayed_work_sync(&sched->work_tdr);
1123
1124	sched->ready = false;
1125}
1126EXPORT_SYMBOL(drm_sched_fini);
1127
1128/**
1129 * drm_sched_increase_karma - Update sched_entity guilty flag
1130 *
1131 * @bad: The job guilty of time out
1132 *
1133 * Increment on every hang caused by the 'bad' job. If this exceeds the hang
1134 * limit of the scheduler then the respective sched entity is marked guilty and
1135 * jobs from it will not be scheduled further
1136 */
1137void drm_sched_increase_karma(struct drm_sched_job *bad)
1138{
1139	int i;
1140	struct drm_sched_entity *tmp;
1141	struct drm_sched_entity *entity;
1142	struct drm_gpu_scheduler *sched = bad->sched;
1143
 1144	/* don't change @bad's karma if it's from the KERNEL RQ,
 1145	 * because a GPU hang can sometimes corrupt kernel jobs (like VM updating
 1146	 * jobs), but keep in mind that kernel jobs are always considered good.
1147	 */
1148	if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
1149		atomic_inc(&bad->karma);
1150
1151		for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
1152		     i++) {
1153			struct drm_sched_rq *rq = &sched->sched_rq[i];
1154
1155			spin_lock(&rq->lock);
1156			list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
1157				if (bad->s_fence->scheduled.context ==
1158				    entity->fence_context) {
1159					if (entity->guilty)
1160						atomic_set(entity->guilty, 1);
1161					break;
1162				}
1163			}
1164			spin_unlock(&rq->lock);
1165			if (&entity->list != &rq->entities)
1166				break;
1167		}
1168	}
1169}
1170EXPORT_SYMBOL(drm_sched_increase_karma);