v5.4
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler
 * when submitting to a HW ring.
 *
 * @entity: scheduler entity to init
 * @rq_list: the list of run queues on which jobs from this
 *           entity can be submitted
 * @num_rq_list: number of run queues in rq_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the rq_list should have at least one element to schedule
 *       the entity
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  struct drm_sched_rq **rq_list,
			  unsigned int num_rq_list,
			  atomic_t *guilty)
{
	int i;

	if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_rq_list = num_rq_list;
	entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
				  GFP_KERNEL);
	if (!entity->rq_list)
		return -ENOMEM;

	for (i = 0; i < num_rq_list; ++i)
		entity->rq_list[i] = rq_list[i];

	if (num_rq_list)
		entity->rq = rq_list[0];

	entity->last_scheduled = NULL;

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);

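Example (editor's sketch, not part of the kernel source): a driver that only
submits to one scheduler would typically pass a one-element rq_list selected
by priority. The names "sched", "entity" and "example_entity_setup" are
illustrative assumptions.

static int example_entity_setup(struct drm_gpu_scheduler *sched,
				struct drm_sched_entity *entity)
{
	/* Pick one run queue of this scheduler by priority. */
	struct drm_sched_rq *rq = &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];

	/* drm_sched_entity_init() copies the list, so it may live on the stack. */
	return drm_sched_entity_init(entity, &rq, 1, NULL);
}
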
/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0)
		return true;

	return false;
}

/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Return true if entity could provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_get_free_sched - Get the rq from rq_list with the least load
 *
 * @entity: scheduler entity
 *
 * Return the pointer to the rq with the least load.
 */
static struct drm_sched_rq *
drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
{
	struct drm_sched_rq *rq = NULL;
	unsigned int min_jobs = UINT_MAX, num_jobs;
	int i;

	for (i = 0; i < entity->num_rq_list; ++i) {
		struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched;

		if (!entity->rq_list[i]->sched->ready) {
			DRM_WARN("sched%s is not ready, skipping", sched->name);
			continue;
		}

		num_jobs = atomic_read(&sched->num_jobs);
		if (num_jobs < min_jobs) {
			min_jobs = num_jobs;
			rq = entity->rq_list[i];
		}
	}

	return rq;
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions; this first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable any further IB enqueue right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

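A hedged sketch of the split teardown described above (editor's addition, not
in the file): drivers that want a custom timeout call flush and fini
separately; drm_sched_entity_destroy() below wraps the same pair. The name
"example_entity_teardown" is an assumption.

static void example_entity_teardown(struct drm_sched_entity *entity)
{
	/* Give queued jobs up to one second to drain, then finalize. */
	drm_sched_entity_flush(entity, msecs_to_jiffies(1000));
	drm_sched_entity_fini(entity);
}
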
/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipe is hung by an older entity, a new entity
		 * might not even have had a chance to submit its first job
		 * to the HW, so entity->last_scheduled will remain NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	/* Consumption of existing IBs wasn't completed. Forcefully
	 * remove them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/* Park the scheduler thread for a moment to make
			 * sure it isn't processing this entity.
			 */
			kthread_park(sched->thread);
			kthread_unpark(sched->thread);
		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
	kfree(entity->rq_list);
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini()
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

/**
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/**
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority
 */
static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq,
					     enum drm_sched_priority priority)
{
	*rq = &(*rq)->sched->sched_rq[priority];
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	unsigned int i;

	spin_lock(&entity->rq_lock);

	for (i = 0; i < entity->num_rq_list; ++i)
		drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);

	if (entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		drm_sched_entity_set_rq_priority(&entity->rq, priority);
		drm_sched_rq_add_entity(entity->rq, entity);
	}

	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);

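Illustrative call (editor's sketch): bumping a context to a higher priority.
The entity is moved between run queues under rq_lock, so this should be safe
while jobs are in flight; DRM_SCHED_PRIORITY_HIGH_SW is assumed to be present
in this kernel's drm_sched_priority enum.

static void example_boost_entity(struct drm_sched_entity *entity)
{
	/* Moves the entity onto each scheduler's high-priority run queue. */
	drm_sched_entity_set_priority(entity, DRM_SCHED_PRIORITY_HIGH_SW);
}
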
/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity; we can ignore
		 * fences from ourselves.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

/**
 * drm_sched_entity_pop_job - get a ready-to-be-scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}

/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_rq_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	rq = drm_sched_entity_get_free_sched(entity);
	if (rq == entity->rq)
		return;

	spin_lock(&entity->rq_lock);
	drm_sched_rq_remove_entity(entity->rq, entity);
	entity->rq = rq;
	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion to queue matches the job's
 * fence sequence number, this function should be called with
 * drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->num_jobs);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);
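
Putting init and push together (editor's sketch, not part of the file):
drm_sched_job_init() lives in sched_main.c and its owner argument is
driver-defined; per the note above, both calls belong under one common lock.
The name "example_submit_job" is an assumption.

static int example_submit_job(struct drm_sched_job *job,
			      struct drm_sched_entity *entity,
			      void *owner)
{
	int ret;

	/* Ties the job to the entity and allocates its scheduler fences. */
	ret = drm_sched_job_init(job, entity, owner);
	if (ret)
		return ret;

	/* Queue order now matches the fence seqno order. */
	drm_sched_entity_push_job(job, entity);
	return 0;
}
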
v5.9
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/completion.h>

#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>

#include "gpu_scheduler_trace.h"

#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)

/**
 * drm_sched_entity_init - Init a context entity used by the scheduler
 * when submitting to a HW ring.
 *
 * @entity: scheduler entity to init
 * @priority: priority of the entity
 * @sched_list: the list of drm scheds on which jobs from this
 *              entity can be submitted
 * @num_sched_list: number of drm scheds in sched_list
 * @guilty: atomic_t set to 1 when a job on this queue
 *          is found to be guilty causing a timeout
 *
 * Note: the sched_list should have at least one element to schedule
 *       the entity
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty)
{
	if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0])))
		return -EINVAL;

	memset(entity, 0, sizeof(struct drm_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = NULL;
	entity->guilty = guilty;
	entity->num_sched_list = num_sched_list;
	entity->priority = priority;
	entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
	entity->last_scheduled = NULL;

	if (num_sched_list)
		entity->rq = &sched_list[0]->sched_rq[entity->priority];

	init_completion(&entity->entity_idle);

	spin_lock_init(&entity->rq_lock);
	spsc_queue_init(&entity->job_queue);

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = dma_fence_context_alloc(2);

	return 0;
}
EXPORT_SYMBOL(drm_sched_entity_init);

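Example for the v5.9 signature (editor's sketch, not part of the file): load
balancing is enabled by passing more than one scheduler; with a single
element, sched_list is not retained and the entity stays on sched_list[0].
The names "rings", "num_rings" and "example_entity_setup" are illustrative.

static int example_entity_setup(struct drm_gpu_scheduler **rings,
				unsigned int num_rings,
				struct drm_sched_entity *entity)
{
	/* The entity starts on rings[0] and may migrate if num_rings > 1. */
	return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
				     rings, num_rings, NULL);
}
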
/**
 * drm_sched_entity_modify_sched - Modify sched of an entity
 * @entity: scheduler entity to modify
 * @sched_list: the list of new drm scheds which will replace
 *		existing entity->sched_list
 * @num_sched_list: number of drm scheds in sched_list
 */
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
				   struct drm_gpu_scheduler **sched_list,
				   unsigned int num_sched_list)
{
	WARN_ON(!num_sched_list || !sched_list);

	entity->sched_list = sched_list;
	entity->num_sched_list = num_sched_list;
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);

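A hedged caller sketch (editor's addition): the function only swaps the
pointers, so the caller is responsible for serializing it against job
submission. "one_ring" and "example_restrict_entity" are illustrative names.

static void example_restrict_entity(struct drm_sched_entity *entity,
				    struct drm_gpu_scheduler **one_ring)
{
	/* From now on, run-queue selection only considers one_ring[0]. */
	drm_sched_entity_modify_sched(entity, one_ring, 1);
}
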
/**
 * drm_sched_entity_is_idle - Check if entity is idle
 *
 * @entity: scheduler entity
 *
 * Returns true if the entity does not have any unscheduled jobs.
 */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
	rmb(); /* for list_empty to work without lock */

	if (list_empty(&entity->list) ||
	    spsc_queue_count(&entity->job_queue) == 0)
		return true;

	return false;
}

/**
 * drm_sched_entity_is_ready - Check if entity is ready
 *
 * @entity: scheduler entity
 *
 * Return true if entity could provide a job.
 */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
	if (spsc_queue_peek(&entity->job_queue) == NULL)
		return false;

	if (READ_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * drm_sched_entity_flush - Flush a context entity
 *
 * @entity: scheduler entity
 * @timeout: time to wait, in jiffies, for the queue to become empty
 *
 * drm_sched_entity_fini() is split into two functions; this first one does
 * the waiting, removes the entity from the runqueue and returns an error
 * when the process was killed.
 *
 * Returns the remaining time in jiffies left from the input timeout
 */
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
	struct drm_gpu_scheduler *sched;
	struct task_struct *last_user;
	long ret = timeout;

	if (!entity->rq)
		return 0;

	sched = entity->rq->sched;
	/*
	 * The client will not queue more IBs during this fini; consume
	 * existing queued IBs or discard them on SIGKILL.
	 */
	if (current->flags & PF_EXITING) {
		if (timeout)
			ret = wait_event_timeout(
					sched->job_scheduled,
					drm_sched_entity_is_idle(entity),
					timeout);
	} else {
		wait_event_killable(sched->job_scheduled,
				    drm_sched_entity_is_idle(entity));
	}

	/* For a killed process, disable any further IB enqueue right now */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) {
		spin_lock(&entity->rq_lock);
		entity->stopped = true;
		drm_sched_rq_remove_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
	}

	return ret;
}
EXPORT_SYMBOL(drm_sched_entity_flush);

/**
 * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
 *
 * @f: signaled fence
 * @cb: our callback structure
 *
 * Signal the scheduler finished fence when the entity in question is killed.
 */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
					  struct dma_fence_cb *cb)
{
	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
						 finish_cb);

	drm_sched_fence_finished(job->s_fence);
	WARN_ON(job->s_fence->parent);
	job->sched->ops->free_job(job);
}

/**
 * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
 *
 * @entity: entity which is cleaned up
 *
 * Makes sure that all remaining jobs in an entity are killed before it is
 * destroyed.
 */
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
	struct drm_sched_job *job;
	int r;

	while ((job = to_drm_sched_job(spsc_queue_pop(&entity->job_queue)))) {
		struct drm_sched_fence *s_fence = job->s_fence;

		drm_sched_fence_scheduled(s_fence);
		dma_fence_set_error(&s_fence->finished, -ESRCH);

		/*
		 * When the pipe is hung by an older entity, a new entity
		 * might not even have had a chance to submit its first job
		 * to the HW, so entity->last_scheduled will remain NULL.
		 */
		if (!entity->last_scheduled) {
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
			continue;
		}

		r = dma_fence_add_callback(entity->last_scheduled,
					   &job->finish_cb,
					   drm_sched_entity_kill_jobs_cb);
		if (r == -ENOENT)
			drm_sched_entity_kill_jobs_cb(NULL, &job->finish_cb);
		else if (r)
			DRM_ERROR("fence add callback failed (%d)\n", r);
	}
}

/**
 * drm_sched_entity_fini - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * This should be called after drm_sched_entity_flush(). It goes over the
 * entity and signals all jobs with an error code if the process was killed.
 */
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = NULL;

	if (entity->rq) {
		sched = entity->rq->sched;
		drm_sched_rq_remove_entity(entity->rq, entity);
	}

	/* Consumption of existing IBs wasn't completed. Forcefully
	 * remove them here.
	 */
	if (spsc_queue_count(&entity->job_queue)) {
		if (sched) {
			/*
			 * Wait for the scheduler thread to go idle to make
			 * sure it isn't processing this entity.
			 */
			wait_for_completion(&entity->entity_idle);
		}
		if (entity->dependency) {
			dma_fence_remove_callback(entity->dependency,
						  &entity->cb);
			dma_fence_put(entity->dependency);
			entity->dependency = NULL;
		}

		drm_sched_entity_kill_jobs(entity);
	}

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = NULL;
}
EXPORT_SYMBOL(drm_sched_entity_fini);

/**
 * drm_sched_entity_destroy - Destroy a context entity
 *
 * @entity: scheduler entity
 *
 * Calls drm_sched_entity_flush() and drm_sched_entity_fini()
 */
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
	drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
	drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);

/**
 * drm_sched_entity_clear_dep - callback to clear the entity's dependency
 */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
				       struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	entity->dependency = NULL;
	dma_fence_put(f);
}

/**
 * drm_sched_entity_wakeup - callback to clear the entity's dependency and
 * wake up the scheduler
 */
static void drm_sched_entity_wakeup(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct drm_sched_entity *entity =
		container_of(cb, struct drm_sched_entity, cb);

	drm_sched_entity_clear_dep(f, cb);
	drm_sched_wakeup(entity->rq->sched);
}

/**
 * drm_sched_entity_set_priority - Sets priority of the entity
 *
 * @entity: scheduler entity
 * @priority: scheduler priority
 *
 * Update the priority of runqueues used for the entity.
 */
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority)
{
	spin_lock(&entity->rq_lock);
	entity->priority = priority;
	spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);

/**
 * drm_sched_entity_add_dependency_cb - add callback for the entity's dependency
 *
 * @entity: entity with dependency
 *
 * Add a callback to the current dependency of the entity to wake up the
 * scheduler when the entity becomes available.
 */
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct dma_fence *fence = entity->dependency;
	struct drm_sched_fence *s_fence;

	if (fence->context == entity->fence_context ||
	    fence->context == entity->fence_context + 1) {
		/*
		 * Fence is a scheduled/finished fence from a job
		 * which belongs to the same entity; we can ignore
		 * fences from ourselves.
		 */
		dma_fence_put(entity->dependency);
		return false;
	}

	s_fence = to_drm_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {

		/*
		 * Fence is from the same scheduler, only need to wait for
		 * it to be scheduled
		 */
		fence = dma_fence_get(&s_fence->scheduled);
		dma_fence_put(entity->dependency);
		entity->dependency = fence;
		if (!dma_fence_add_callback(fence, &entity->cb,
					    drm_sched_entity_clear_dep))
			return true;

		/* Ignore it when it is already scheduled */
		dma_fence_put(fence);
		return false;
	}

	if (!dma_fence_add_callback(entity->dependency, &entity->cb,
				    drm_sched_entity_wakeup))
		return true;

	dma_fence_put(entity->dependency);
	return false;
}

/**
 * drm_sched_entity_pop_job - get a ready-to-be-scheduled job from the entity
 *
 * @entity: entity to get the job from
 *
 * Process all dependencies and try to get one job from the entity's queue.
 */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
	struct drm_gpu_scheduler *sched = entity->rq->sched;
	struct drm_sched_job *sched_job;

	sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
	if (!sched_job)
		return NULL;

	while ((entity->dependency =
			sched->ops->dependency(sched_job, entity))) {
		trace_drm_sched_job_wait_dep(sched_job, entity->dependency);

		if (drm_sched_entity_add_dependency_cb(entity))
			return NULL;
	}

	/* skip jobs from an entity that was marked guilty */
	if (entity->guilty && atomic_read(entity->guilty))
		dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);

	dma_fence_put(entity->last_scheduled);
	entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);

	spsc_queue_pop(&entity->job_queue);
	return sched_job;
}

/**
 * drm_sched_entity_select_rq - select a new rq for the entity
 *
 * @entity: scheduler entity
 *
 * Check all prerequisites and select a new rq for the entity for load
 * balancing.
 */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
	struct dma_fence *fence;
	struct drm_gpu_scheduler *sched;
	struct drm_sched_rq *rq;

	if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
		return;

	fence = READ_ONCE(entity->last_scheduled);
	if (fence && !dma_fence_is_signaled(fence))
		return;

	spin_lock(&entity->rq_lock);
	sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
	rq = sched ? &sched->sched_rq[entity->priority] : NULL;
	if (rq != entity->rq) {
		drm_sched_rq_remove_entity(entity->rq, entity);
		entity->rq = rq;
	}

	spin_unlock(&entity->rq_lock);
}

/**
 * drm_sched_entity_push_job - Submit a job to the entity's job queue
 *
 * @sched_job: job to submit
 * @entity: scheduler entity
 *
 * Note: To guarantee that the order of insertion to queue matches the job's
 * fence sequence number, this function should be called with
 * drm_sched_job_init() under a common lock.
 */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity)
{
	bool first;

	trace_drm_sched_job(sched_job, entity);
	atomic_inc(&entity->rq->sched->score);
	WRITE_ONCE(entity->last_user, current->group_leader);
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		spin_lock(&entity->rq_lock);
		if (entity->stopped) {
			spin_unlock(&entity->rq_lock);

			DRM_ERROR("Trying to push to a killed entity\n");
			return;
		}
		drm_sched_rq_add_entity(entity->rq, entity);
		spin_unlock(&entity->rq_lock);
		drm_sched_wakeup(entity->rq->sched);
	}
}
EXPORT_SYMBOL(drm_sched_entity_push_job);