/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <drm/drmP.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>

#define CREATE_TRACE_POINTS
#include <drm/gpu_scheduler_trace.h>

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);
static void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);

/* Initialize a given run queue struct */
static void drm_sched_rq_init(struct drm_sched_rq *rq)
{
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
        rq->current_entity = NULL;
}

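/* Add an entity to the run queue, unless it is already queued. */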
static void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
                                    struct drm_sched_entity *entity)
{
        if (!list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
}

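/*
 * Remove an entity from the run queue, resetting rq->current_entity if it
 * pointed at this entity.
 */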
static void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
                                       struct drm_sched_entity *entity)
{
        if (list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
        spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
{
        struct drm_sched_entity *entity;

        spin_lock(&rq->lock);

        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
                        if (drm_sched_entity_is_ready(entity)) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
                                return entity;
                        }
                }
        }

        list_for_each_entry(entity, &rq->entities, list) {

                if (drm_sched_entity_is_ready(entity)) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
                        return entity;
                }

                if (entity == rq->current_entity)
                        break;
        }

        spin_unlock(&rq->lock);

        return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid drm_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 * @guilty	Atomic flag that is set when this entity is marked guilty of
 *		causing a GPU hang
 *
 * Returns 0 on success, a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
                          struct drm_sched_entity *entity,
                          struct drm_sched_rq *rq,
                          uint32_t jobs, atomic_t *guilty)
{
        if (!(sched && entity && rq))
                return -EINVAL;

        memset(entity, 0, sizeof(struct drm_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = rq;
        entity->sched = sched;
        entity->guilty = guilty;

        spin_lock_init(&entity->rq_lock);
        spin_lock_init(&entity->queue_lock);
        spsc_queue_init(&entity->job_queue);

        atomic_set(&entity->fence_seq, 0);
        entity->fence_context = dma_fence_context_alloc(2);

        return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);

/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Returns true if the entity is initialized, false otherwise.
 */
static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
                                            struct drm_sched_entity *entity)
{
        return entity->sched == sched &&
                entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Returns true if the entity has no unscheduled jobs left.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
        rmb();
        if (spsc_queue_peek(&entity->job_queue) == NULL)
                return true;

        return false;
}

/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Returns true if the entity could provide a job.
 */
static bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
        if (spsc_queue_peek(&entity->job_queue) == NULL)
                return false;

        if (READ_ONCE(entity->dependency))
                return false;

        return true;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
                           struct drm_sched_entity *entity)
{
        int r;

        if (!drm_sched_entity_is_initialized(sched, entity))
                return;
        /*
         * The client will not queue more IBs during this fini: consume the
         * existing queued IBs, or discard them on SIGKILL.
         */
        if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL)
                r = -ERESTARTSYS;
        else
                r = wait_event_killable(sched->job_scheduled,
                                        drm_sched_entity_is_idle(entity));
        drm_sched_entity_set_rq(entity, NULL);
        if (r) {
                struct drm_sched_job *job;

                /* Park the scheduler thread for a moment to make sure it
                 * isn't processing our entity.
                 */
                kthread_park(sched->thread);
                kthread_unpark(sched->thread);
                if (entity->dependency) {
                        dma_fence_remove_callback(entity->dependency,
                                                  &entity->cb);
                        dma_fence_put(entity->dependency);
                        entity->dependency = NULL;
                }

                while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
                        struct drm_sched_fence *s_fence = job->s_fence;
                        drm_sched_fence_scheduled(s_fence);
                        dma_fence_set_error(&s_fence->finished, -ESRCH);
                        drm_sched_fence_finished(s_fence);
                        WARN_ON(s_fence->parent);
                        dma_fence_put(&s_fence->finished);
                        sched->ops->free_job(job);
                }
        }
}
EXPORT_SYMBOL(drm_sched_entity_fini);

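/* Dependency fence callback: clear the dependency and wake up the scheduler. */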
static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct drm_sched_entity *entity =
                container_of(cb, struct drm_sched_entity, cb);
        entity->dependency = NULL;
        dma_fence_put(f);
        drm_sched_wakeup(entity->sched);
}

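/* Dependency fence callback: only clear the entity's dependency pointer. */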
static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct drm_sched_entity *entity =
                container_of(cb, struct drm_sched_entity, cb);
        entity->dependency = NULL;
        dma_fence_put(f);
}

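/* Move an entity to a different run queue, or remove it when @rq is NULL. */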
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
                             struct drm_sched_rq *rq)
{
        if (entity->rq == rq)
                return;

        spin_lock(&entity->rq_lock);

        if (entity->rq)
                drm_sched_rq_remove_entity(entity->rq, entity);

        entity->rq = rq;
        if (rq)
                drm_sched_rq_add_entity(rq, entity);

        spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_rq);

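/*
 * Return true if an unsignaled @fence comes from @entity's own fence context
 * or from the same scheduler, so that waiting for it can be optimized.
 */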
bool drm_sched_dependency_optimized(struct dma_fence *fence,
                                    struct drm_sched_entity *entity)
{
        struct drm_gpu_scheduler *sched = entity->sched;
        struct drm_sched_fence *s_fence;

        if (!fence || dma_fence_is_signaled(fence))
                return false;
        if (fence->context == entity->fence_context)
                return true;
        s_fence = to_drm_sched_fence(fence);
        if (s_fence && s_fence->sched == sched)
                return true;

        return false;
}
EXPORT_SYMBOL(drm_sched_dependency_optimized);

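/*
 * Install a callback on the entity's current dependency fence.  Returns true
 * if the callback was added and the entity must wait, false if the dependency
 * is already resolved or can be ignored.
 */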
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
        struct drm_gpu_scheduler *sched = entity->sched;
        struct dma_fence *fence = entity->dependency;
        struct drm_sched_fence *s_fence;

        if (fence->context == entity->fence_context) {
                /* We can ignore fences from ourself */
                dma_fence_put(entity->dependency);
                return false;
        }

        s_fence = to_drm_sched_fence(fence);
        if (s_fence && s_fence->sched == sched) {

                /*
                 * Fence is from the same scheduler, only need to wait for
                 * it to be scheduled
                 */
                fence = dma_fence_get(&s_fence->scheduled);
                dma_fence_put(entity->dependency);
                entity->dependency = fence;
                if (!dma_fence_add_callback(fence, &entity->cb,
                                            drm_sched_entity_clear_dep))
                        return true;

                /* Ignore it when it is already scheduled */
                dma_fence_put(fence);
                return false;
        }

        if (!dma_fence_add_callback(entity->dependency, &entity->cb,
                                    drm_sched_entity_wakeup))
                return true;

        dma_fence_put(entity->dependency);
        return false;
}

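/*
 * Peek at the first job in the entity's queue.  Returns NULL if the queue is
 * empty or an unresolved dependency remains; otherwise pops and returns the job.
 */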
static struct drm_sched_job *
drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
        struct drm_gpu_scheduler *sched = entity->sched;
        struct drm_sched_job *sched_job = to_drm_sched_job(
                                                spsc_queue_peek(&entity->job_queue));

        if (!sched_job)
                return NULL;

        while ((entity->dependency = sched->ops->dependency(sched_job, entity)))
                if (drm_sched_entity_add_dependency_cb(entity))
                        return NULL;

        /* skip jobs from an entity that is marked guilty */
        if (entity->guilty && atomic_read(entity->guilty))
                dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

        spsc_queue_pop(&entity->job_queue);
        return sched_job;
}

/**
 * Submit a job to the entity's job queue
 *
 * @sched_job	The pointer to the job to submit
 * @entity	The entity the job is pushed to
 *
 * If this is the first job in the queue, the entity is added to the run queue
 * and the scheduler is woken up.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
                               struct drm_sched_entity *entity)
{
        struct drm_gpu_scheduler *sched = sched_job->sched;
        bool first = false;

        trace_drm_sched_job(sched_job, entity);

        spin_lock(&entity->queue_lock);
        first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

        spin_unlock(&entity->queue_lock);

        /* first job wakes up scheduler */
        if (first) {
                /* Add the entity to the run queue */
                spin_lock(&entity->rq_lock);
                drm_sched_rq_add_entity(entity->rq, entity);
                spin_unlock(&entity->rq_lock);
                drm_sched_wakeup(sched);
        }
}
EXPORT_SYMBOL(drm_sched_entity_push_job);

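/*
 * A minimal usage sketch of this API from a driver's point of view.  The
 * names my_ops, my_sched, my_entity, my_job and my_owner are illustrative
 * placeholders (as is the choice of the NORMAL priority run queue), not part
 * of this file:
 *
 *	drm_sched_init(&my_sched, &my_ops, 16, 2,
 *		       msecs_to_jiffies(1000), "my_ring");
 *	drm_sched_entity_init(&my_sched, &my_entity,
 *			      &my_sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
 *			      16, NULL);
 *
 *	drm_sched_job_init(&my_job, &my_sched, &my_entity, my_owner);
 *	drm_sched_entity_push_job(&my_job, &my_entity);
 */
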
/* drm_sched_job_finish is called after the hardware fence has signaled.
 */
static void drm_sched_job_finish(struct work_struct *work)
{
        struct drm_sched_job *s_job = container_of(work, struct drm_sched_job,
                                                   finish_work);
        struct drm_gpu_scheduler *sched = s_job->sched;

        /* remove job from ring_mirror_list */
        spin_lock(&sched->job_list_lock);
        list_del_init(&s_job->node);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
                struct drm_sched_job *next;

                spin_unlock(&sched->job_list_lock);
                cancel_delayed_work_sync(&s_job->work_tdr);
                spin_lock(&sched->job_list_lock);

                /* queue TDR for next job */
                next = list_first_entry_or_null(&sched->ring_mirror_list,
                                                struct drm_sched_job, node);

                if (next)
                        schedule_delayed_work(&next->work_tdr, sched->timeout);
        }
        spin_unlock(&sched->job_list_lock);
        dma_fence_put(&s_job->s_fence->finished);
        sched->ops->free_job(s_job);
}

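/* Finished-fence callback: defer the actual cleanup to finish_work. */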
static void drm_sched_job_finish_cb(struct dma_fence *f,
                                    struct dma_fence_cb *cb)
{
        struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
                                                 finish_cb);
        schedule_work(&job->finish_work);
}

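/*
 * Register the finish callback, add the job to the ring mirror list and arm
 * the timeout handler if this is the first pending job.
 */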
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
        struct drm_gpu_scheduler *sched = s_job->sched;

        dma_fence_add_callback(&s_job->s_fence->finished, &s_job->finish_cb,
                               drm_sched_job_finish_cb);

        spin_lock(&sched->job_list_lock);
        list_add_tail(&s_job->node, &sched->ring_mirror_list);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
            list_first_entry_or_null(&sched->ring_mirror_list,
                                     struct drm_sched_job, node) == s_job)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);
        spin_unlock(&sched->job_list_lock);
}

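/* Timeout work: tell the driver that the job has taken too long. */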
static void drm_sched_job_timedout(struct work_struct *work)
{
        struct drm_sched_job *job = container_of(work, struct drm_sched_job,
                                                 work_tdr.work);

        job->sched->ops->timedout_job(job);
}

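/*
 * Unhook all pending jobs from their hardware fences after a hang and, if
 * @bad exceeded the hang limit, mark its entity as guilty.
 */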
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
        struct drm_sched_job *s_job;
        struct drm_sched_entity *entity, *tmp;
        int i;

        spin_lock(&sched->job_list_lock);
        list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
                if (s_job->s_fence->parent &&
                    dma_fence_remove_callback(s_job->s_fence->parent,
                                              &s_job->s_fence->cb)) {
                        dma_fence_put(s_job->s_fence->parent);
                        s_job->s_fence->parent = NULL;
                        atomic_dec(&sched->hw_rq_count);
                }
        }
        spin_unlock(&sched->job_list_lock);

        if (bad && bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
                atomic_inc(&bad->karma);
                /* Don't increase @bad's karma if it comes from the KERNEL run
                 * queue, because a GPU hang can corrupt kernel jobs (like VM
                 * updates); kernel jobs are always considered good.
                 */
                for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL; i++) {
                        struct drm_sched_rq *rq = &sched->sched_rq[i];

                        spin_lock(&rq->lock);
                        list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
                                if (bad->s_fence->scheduled.context == entity->fence_context) {
                                        if (atomic_read(&bad->karma) > bad->sched->hang_limit)
                                                if (entity->guilty)
                                                        atomic_set(entity->guilty, 1);
                                        break;
                                }
                        }
                        spin_unlock(&rq->lock);
                        if (&entity->list != &rq->entities)
                                break;
                }
        }
}
EXPORT_SYMBOL(drm_sched_hw_job_reset);

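/*
 * Resubmit the jobs on the ring mirror list after a reset, cancelling those
 * that belong to the guilty context.
 */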
void drm_sched_job_recovery(struct drm_gpu_scheduler *sched)
{
        struct drm_sched_job *s_job, *tmp;
        bool found_guilty = false;
        int r;

        spin_lock(&sched->job_list_lock);
        s_job = list_first_entry_or_null(&sched->ring_mirror_list,
                                         struct drm_sched_job, node);
        if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
                schedule_delayed_work(&s_job->work_tdr, sched->timeout);

        list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
                struct drm_sched_fence *s_fence = s_job->s_fence;
                struct dma_fence *fence;
                uint64_t guilty_context;

                if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
                        found_guilty = true;
                        guilty_context = s_job->s_fence->scheduled.context;
                }

                if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
                        dma_fence_set_error(&s_fence->finished, -ECANCELED);

                spin_unlock(&sched->job_list_lock);
                fence = sched->ops->run_job(s_job);
                atomic_inc(&sched->hw_rq_count);
                if (fence) {
                        s_fence->parent = dma_fence_get(fence);
                        r = dma_fence_add_callback(fence, &s_fence->cb,
                                                   drm_sched_process_job);
                        if (r == -ENOENT)
                                drm_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        dma_fence_put(fence);
                } else {
                        drm_sched_process_job(NULL, &s_fence->cb);
                }
                spin_lock(&sched->job_list_lock);
        }
        spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_job_recovery);

/* Initialize a sched_job with its basic fields */
int drm_sched_job_init(struct drm_sched_job *job,
                       struct drm_gpu_scheduler *sched,
                       struct drm_sched_entity *entity,
                       void *owner)
{
        job->sched = sched;
        job->s_priority = entity->rq - sched->sched_rq;
        job->s_fence = drm_sched_fence_create(entity, owner);
        if (!job->s_fence)
                return -ENOMEM;
        job->id = atomic64_inc_return(&sched->job_id_count);

        INIT_WORK(&job->finish_work, drm_sched_job_finish);
        INIT_LIST_HEAD(&job->node);
        INIT_DELAYED_WORK(&job->work_tdr, drm_sched_job_timedout);

        return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);

/**
 * Return true if we can push more jobs to the hw.
 */
static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
{
        return atomic_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
{
        if (drm_sched_ready(sched))
                wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select the next entity to process
 */
static struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
        struct drm_sched_entity *entity;
        int i;

        if (!drm_sched_ready(sched))
                return NULL;

        /* Kernel run queue has higher priority than normal run queue */
        for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
                entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
                if (entity)
                        break;
        }

        return entity;
}

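/* Hardware fence callback: mark the job as finished and wake up the worker. */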
static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
{
        struct drm_sched_fence *s_fence =
                container_of(cb, struct drm_sched_fence, cb);
        struct drm_gpu_scheduler *sched = s_fence->sched;

        dma_fence_get(&s_fence->finished);
        atomic_dec(&sched->hw_rq_count);
        drm_sched_fence_finished(s_fence);

        trace_drm_sched_process_job(s_fence);
        dma_fence_put(&s_fence->finished);
        wake_up_interruptible(&sched->wake_up_worker);
}

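/* Park the scheduler thread if requested; returns true if it was parked. */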
static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
{
        if (kthread_should_park()) {
                kthread_parkme();
                return true;
        }

        return false;
}

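/* Main scheduler thread: pick ready entities and run their jobs on the hardware. */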
static int drm_sched_main(void *param)
{
        struct sched_param sparam = {.sched_priority = 1};
        struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
        int r;

        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                struct drm_sched_entity *entity = NULL;
                struct drm_sched_fence *s_fence;
                struct drm_sched_job *sched_job;
                struct dma_fence *fence;

                wait_event_interruptible(sched->wake_up_worker,
                                         (!drm_sched_blocked(sched) &&
                                          (entity = drm_sched_select_entity(sched))) ||
                                         kthread_should_stop());

                if (!entity)
                        continue;

                sched_job = drm_sched_entity_pop_job(entity);
                if (!sched_job)
                        continue;

                s_fence = sched_job->s_fence;

                atomic_inc(&sched->hw_rq_count);
                drm_sched_job_begin(sched_job);

                fence = sched->ops->run_job(sched_job);
                drm_sched_fence_scheduled(s_fence);

                if (fence) {
                        s_fence->parent = dma_fence_get(fence);
                        r = dma_fence_add_callback(fence, &s_fence->cb,
                                                   drm_sched_process_job);
                        if (r == -ENOENT)
                                drm_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        dma_fence_put(fence);
                } else {
                        drm_sched_process_job(NULL, &s_fence->cb);
                }

                wake_up(&sched->job_scheduled);
        }
        return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler
 * @hw_submission	Number of hw submissions that can be in flight
 * @hang_limit		Number of hangs allowed before a job's entity is
 *			marked guilty
 * @timeout		Timeout for a single job, in jiffies
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int drm_sched_init(struct drm_gpu_scheduler *sched,
                   const struct drm_sched_backend_ops *ops,
                   unsigned hw_submission,
                   unsigned hang_limit,
                   long timeout,
                   const char *name)
{
        int i;

        sched->ops = ops;
        sched->hw_submission_limit = hw_submission;
        sched->name = name;
        sched->timeout = timeout;
        sched->hang_limit = hang_limit;
        for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
                drm_sched_rq_init(&sched->sched_rq[i]);

        init_waitqueue_head(&sched->wake_up_worker);
        init_waitqueue_head(&sched->job_scheduled);
        INIT_LIST_HEAD(&sched->ring_mirror_list);
        spin_lock_init(&sched->job_list_lock);
        atomic_set(&sched->hw_rq_count, 0);
        atomic64_set(&sched->job_id_count, 0);

        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_run(drm_sched_main, sched, sched->name);
        if (IS_ERR(sched->thread)) {
                DRM_ERROR("Failed to create scheduler for %s.\n", name);
                return PTR_ERR(sched->thread);
        }

        return 0;
}
EXPORT_SYMBOL(drm_sched_init);

/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void drm_sched_fini(struct drm_gpu_scheduler *sched)
{
        if (sched->thread)
                kthread_stop(sched->thread);
}
EXPORT_SYMBOL(drm_sched_fini);