drivers/gpu/drm/scheduler/sched_entity.c (Linux v6.13.7)
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler when
 * submitting jobs to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *           entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note that the &sched_list must have at least one element to schedule the entity.
 *
 * For changing @priority later on at runtime see
 * drm_sched_entity_set_priority(). For changing the set of schedulers
 * @sched_list at runtime see drm_sched_entity_modify_sched().
 *
 * An entity is cleaned up by calling drm_sched_entity_fini(). See also
 * drm_sched_entity_destroy().
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	/*
	 * It's perfectly valid to initialize an entity without having a valid
	 * scheduler attached. It's just not valid to use the scheduler before it
	 * is initialized itself.
	 */
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
	RB_CLEAR_NODE(&entity->rb_tree_node);

	if (num_sched_list && !sched_list[0]->sched_rq) {
		/* Since every entry covered by num_sched_list should be
		 * non-NULL, warn drivers not to do this and to fix their
		 * DRM calling order.
		 */
		pr_warn("%s: called with uninitialized scheduler\n", __func__);
	} else if (num_sched_list) {
		/* The "priority" of an entity cannot exceed the number of
		 * run-queues of a scheduler. Protect against num_rqs being 0,
		 * by converting to signed. Choose the lowest priority
		 * available.
		 */
		if (entity->priority >= sched_list[0]->num_rqs) {
			drm_err(sched_list[0], "entity with out-of-bounds priority:%u num_rqs:%u\n",
				entity->priority, sched_list[0]->num_rqs);
			entity->priority = max_t(s32, (s32) sched_list[0]->num_rqs - 1,
						 (s32) DRM_SCHED_PRIORITY_KERNEL);
		}
		entity->rq = sched_list[0]->sched_rq[entity->priority];
	}

	init_completion(&entity->entity_idle);

	/* We start in an idle state. */
	complete_all(&entity->entity_idle);

	spin_lock_init(&entity->lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);

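/*
 * Example (hypothetical driver code, not from any in-tree driver; "my_dev"
 * and "my_ctx" are made-up names). A typical driver creates one entity per
 * context and points it at a single scheduler:
 *
 *	struct drm_gpu_scheduler *scheds[] = { &my_dev->gfx_sched };
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&my_ctx->entity,
 *				    DRM_SCHED_PRIORITY_NORMAL,
 *				    scheds, ARRAY_SIZE(scheds), NULL);
 *	if (ret)
 *		return ret;
 */
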
/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		 existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 *
 * Note that this must be called under the same common lock for @entity as
 * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
 * guarantee through some other means that this is never called while new jobs
 * can be pushed to @entity.
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				    struct drm_gpu_scheduler **sched_list,
				    unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	spin_lock(&entity->lock);
	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
	spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);

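/*
 * Example (hypothetical; assumes the driver serializes submission with its
 * own "submit_lock", satisfying the locking rule documented above):
 *
 *	struct drm_gpu_scheduler *new_scheds[] = { &my_dev->sched_a,
 *						   &my_dev->sched_b };
 *
 *	mutex_lock(&my_ctx->submit_lock);
 *	drm_sched_entity_modify_sched(&my_ctx->entity, new_scheds,
 *				      ARRAY_SIZE(new_scheds));
 *	mutex_unlock(&my_ctx->submit_lock);
 */
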
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0 ||
	    entity->stopped)
		return true;

	return false;
}

/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_error - return error of last scheduled job
 * @entity: scheduler entity to check
 *
 * Opportunistically return the error of the last scheduled job. The result
 * can change any time when new jobs are pushed to the hw.
 */
int drm_sched_entity_error(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	int r;

	rcu_read_lock();
	fence = rcu_dereference(entity->last_scheduled);
	r = fence ? fence->error : 0;
	rcu_read_unlock();

	return r;
}
EXPORT_SYMBOL(drm_sched_entity_error);

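/*
 * Example (hypothetical): a driver can poll this in its submission path to
 * stop feeding a context whose last job failed:
 *
 *	ret = drm_sched_entity_error(&my_ctx->entity);
 *	if (ret)
 *		return ret;	/* last scheduled job carried an error */
 */
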
static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk)
{
	struct drm_sched_job *job = container_of(wrk, typeof(*job), work);

	drm_sched_fence_finished(job->s_fence, -ESRCH);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);
	unsigned long index;

	dma_fence_put(f);

	/* Wait for all dependencies to avoid data corruptions */
	xa_for_each(&job->dependencies, index, f) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

		if (s_fence && f == &s_fence->scheduled) {
			/* The dependencies array had a reference on the scheduled
			 * fence, and the finished fence refcount might have
			 * dropped to zero. Use dma_fence_get_rcu() so we get
			 * a NULL fence in that case.
			 */
			f = dma_fence_get_rcu(&s_fence->finished);

			/* Now that we have a reference on the finished fence,
			 * we can release the reference the dependencies array
			 * had on the scheduled fence.
			 */
			dma_fence_put(&s_fence->scheduled);
		}

		xa_erase(&job->dependencies, index);
		if (f && !dma_fence_add_callback(f, &job->finish_cb,
						 drm_sched_entity_kill_jobs_cb))
			return;

		dma_fence_put(f);
	}

	INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
	schedule_work(&job->work);
}

/* Remove the entity from the scheduler and kill all pending jobs */
static void drm_sched_entity_kill(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	struct dma_fence *prev;

	if (!entity->rq)
		return;

	spin_lock(&entity->lock);
	entity->stopped = true;
	drm_sched_rq_remove_entity(entity->rq, entity);
	spin_unlock(&entity->lock);

	/* Make sure this entity is not used by the scheduler at the moment */
	wait_for_completion(&entity->entity_idle);

	/* The entity is guaranteed to not be used by the scheduler */
	prev = rcu_dereference_check(entity->last_scheduled, true);
	dma_fence_get(prev);
	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		dma_fence_get(&s_fence->finished);
		if (!prev || dma_fence_add_callback(prev, &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb))
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);

		prev = &s_fence->finished;
	}
	dma_fence_put(prev);
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions. This first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable any more IB enqueues right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
		drm_sched_entity_kill(entity);

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

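/*
 * Example (hypothetical): waiting up to one second for the queue to drain
 * on context teardown; drm_sched_entity_destroy() passes
 * MAX_WAIT_SCHED_ENTITY_Q_EMPTY instead:
 *
 *	long remaining;
 *
 *	remaining = drm_sched_entity_flush(&my_ctx->entity,
 *					   msecs_to_jiffies(1000));
 *	if (!remaining)
 *		pr_debug("entity queue did not drain in time\n");
 */
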
/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Cleans up @entity which has been initialized by drm_sched_entity_init().
 *
 * If there are potentially jobs still in flight or getting newly queued
 * drm_sched_entity_flush() must be called first. This function then goes over
 * the entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	/*
	 * If consumption of existing IBs wasn't completed, forcefully remove
	 * them here. This also makes sure that the scheduler won't touch this
	 * entity any more.
	 */
	drm_sched_entity_kill(entity);

	if (entity->dependency) {
		dma_fence_remove_callback(entity->dependency, &entity->cb);
		dma_fence_put(entity->dependency);
		entity->dependency = NULL;
	}

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	RCU_INIT_POINTER(entity->last_scheduled, NULL);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
 * convenience wrapper.
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

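/*
 * Example (hypothetical): most drivers tear an entity down with this wrapper
 * rather than calling flush and fini separately:
 *
 *	drm_sched_entity_destroy(&my_ctx->entity);
 */
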
/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/*
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->lock);
	entity->priority = priority;
	spin_unlock(&entity->lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);

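/*
 * Example (hypothetical): bumping a context's priority at runtime, e.g. in
 * response to an ioctl:
 *
 *	drm_sched_entity_set_priority(&my_ctx->entity,
 *				      DRM_SCHED_PRIORITY_HIGH);
 */
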
/*
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity, we can ignore
		 * fences from ourself
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (!fence->error && s_fence && s_fence->sched == sched &&
	    !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

static struct dma_fence *
drm_sched_job_dependency(struct drm_sched_job *job,
			 struct drm_sched_entity *entity)
{
	struct dma_fence *f;

	/* We keep the fence around, so we can iterate over all dependencies
	 * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
	 * before killing the job.
	 */
	f = xa_load(&job->dependencies, job->last_dependency);
	if (f) {
		job->last_dependency++;
		return dma_fence_get(f);
	}

	if (job->sched->ops->prepare_job)
		return job->sched->ops->prepare_job(job, entity);

	return NULL;
}

struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			drm_sched_job_dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(rcu_dereference_check(entity->last_scheduled, true));
	rcu_assign_pointer(entity->last_scheduled,
			   dma_fence_get(&sched_job->s_fence->finished));

	/*
	 * If the queue is empty we allow drm_sched_entity_select_rq() to
	 * locklessly access ->last_scheduled. This only works if we set the
	 * pointer before we dequeue and if we have a write barrier here.
	 */
	smp_wmb();

	spsc_queue_pop(&entity->job_queue);

	/*
	 * Update the entity's location in the min heap according to
	 * the timestamp of the next job, if any.
	 */
	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
		struct drm_sched_job *next;

		next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
		if (next) {
			struct drm_sched_rq *rq;

			spin_lock(&entity->lock);
			rq = entity->rq;
			spin_lock(&rq->lock);
			drm_sched_rq_update_fifo_locked(entity, rq,
							next->submit_ts);
			spin_unlock(&rq->lock);
			spin_unlock(&entity->lock);
		}
	}

	/* Jobs and entities might have different lifecycles. Since we're
	 * removing the job from the entity's queue, set the job's entity
	 * pointer to NULL to prevent any future access of the entity through
	 * this job.
	 */
	sched_job->entity = NULL;

	return sched_job;
}

/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	/* single possible engine and already selected */
	if (!entity->sched_list)
		return;

	/* queue non-empty, stay on the same engine */
	if (spsc_queue_count(&entity->job_queue))
		return;

	/*
	 * Only when the queue is empty are we guaranteed that the scheduler
	 * thread cannot change ->last_scheduled. To enforce ordering we need
	 * a read barrier here. See drm_sched_entity_pop_job() for the other
	 * side.
	 */
	smp_rmb();

	fence = rcu_dereference_check(entity->last_scheduled, true);

	/* stay on the same engine if the previous job hasn't finished */
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}
	spin_unlock(&entity->lock);

	if (entity->num_sched_list == 1)
		entity->sched_list = NULL;
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 *
 * Note: To guarantee that the order of insertion to queue matches the job's
 * fence sequence number, this function should be called with drm_sched_job_arm()
 * under the common lock for the struct drm_sched_entity that was set up for
 * @sched_job in drm_sched_job_init().
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
	struct drm_sched_entity *entity = sched_job->entity;
	bool first;
	ktime_t submit_ts;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);

	/*
	 * After the sched_job is pushed into the entity queue, it may be
	 * completed and freed up at any time. We can no longer access it.
	 * Make sure to set the submit_ts first, to avoid a race.
	 */
	sched_job->submit_ts = submit_ts = ktime_get();
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		struct drm_gpu_scheduler *sched;
		struct drm_sched_rq *rq;

		/* Add the entity to the run queue */
		spin_lock(&entity->lock);
		if (entity->stopped) {
			spin_unlock(&entity->lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}

		rq = entity->rq;
		sched = rq->sched;

		spin_lock(&rq->lock);
		drm_sched_rq_add_entity(rq, entity);

		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			drm_sched_rq_update_fifo_locked(entity, rq, submit_ts);

		spin_unlock(&rq->lock);
		spin_unlock(&entity->lock);

		drm_sched_wakeup(sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
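
/*
 * Example (hypothetical driver submission path; "my_job" embeds a struct
 * drm_sched_job as "base", and "submit_lock" is the common lock required by
 * the kernel-doc above):
 *
 *	ret = drm_sched_job_init(&my_job->base, &my_ctx->entity, 1, my_ctx);
 *	if (ret)
 *		return ret;
 *
 *	mutex_lock(&my_ctx->submit_lock);
 *	drm_sched_job_arm(&my_job->base);
 *	drm_sched_entity_push_job(&my_job->base);
 *	mutex_unlock(&my_ctx->submit_lock);
 */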