// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gpu_scheduler.h"

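/*
 * Queue the message-processing worker, unless submission on this scheduler
 * is currently paused.
 */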
static void xe_sched_process_msg_queue(struct xe_gpu_scheduler *sched)
{
	if (!READ_ONCE(sched->base.pause_submit))
		queue_work(sched->base.submit_wq, &sched->work_process_msg);
}

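/*
 * Re-queue the message-processing worker if any messages remain on the
 * scheduler's message list.
 */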
static void xe_sched_process_msg_queue_if_ready(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_msg *msg;

	spin_lock(&sched->base.job_list_lock);
	msg = list_first_entry_or_null(&sched->msgs, struct xe_sched_msg, link);
	if (msg)
		xe_sched_process_msg_queue(sched);
	spin_unlock(&sched->base.job_list_lock);
}

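/*
 * Pop the first message off the scheduler's message list, or return NULL if
 * the list is empty. The list is protected by the DRM scheduler's
 * job_list_lock.
 */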
static struct xe_sched_msg *
xe_sched_get_msg(struct xe_gpu_scheduler *sched)
{
	struct xe_sched_msg *msg;

	spin_lock(&sched->base.job_list_lock);
	msg = list_first_entry_or_null(&sched->msgs,
				       struct xe_sched_msg, link);
	if (msg)
		list_del(&msg->link);
	spin_unlock(&sched->base.job_list_lock);

	return msg;
}

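/*
 * Work item that bails out if submission is paused, otherwise dequeues one
 * message, hands it to the backend's process_msg() hook and re-queues itself
 * if more messages are pending.
 */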
static void xe_sched_process_msg_work(struct work_struct *w)
{
	struct xe_gpu_scheduler *sched =
		container_of(w, struct xe_gpu_scheduler, work_process_msg);
	struct xe_sched_msg *msg;

	if (READ_ONCE(sched->base.pause_submit))
		return;

	msg = xe_sched_get_msg(sched);
	if (msg) {
		sched->ops->process_msg(msg);

		xe_sched_process_msg_queue_if_ready(sched);
	}
}

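/*
 * Initialize the Xe scheduler wrapper: record the Xe backend ops, set up the
 * message list and message-processing work item, then initialize the
 * embedded drm_gpu_scheduler.
 */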
int xe_sched_init(struct xe_gpu_scheduler *sched,
		  const struct drm_sched_backend_ops *ops,
		  const struct xe_sched_backend_ops *xe_ops,
		  struct workqueue_struct *submit_wq,
		  uint32_t hw_submission, unsigned hang_limit,
		  long timeout, struct workqueue_struct *timeout_wq,
		  atomic_t *score, const char *name,
		  struct device *dev)
{
	sched->ops = xe_ops;
	INIT_LIST_HEAD(&sched->msgs);
	INIT_WORK(&sched->work_process_msg, xe_sched_process_msg_work);

	return drm_sched_init(&sched->base, ops, submit_wq, 1, hw_submission,
			      hang_limit, timeout, timeout_wq, score, name,
			      dev);
}

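/* Stop submission and finalize the embedded DRM scheduler. */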
void xe_sched_fini(struct xe_gpu_scheduler *sched)
{
	xe_sched_submission_stop(sched);
	drm_sched_fini(&sched->base);
}

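/*
 * Resume the DRM scheduler's submission workqueue and kick the message
 * worker so that any messages queued while submission was stopped get
 * processed.
 */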
void xe_sched_submission_start(struct xe_gpu_scheduler *sched)
{
	drm_sched_wqueue_start(&sched->base);
	queue_work(sched->base.submit_wq, &sched->work_process_msg);
}

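/*
 * Stop the DRM scheduler's submission workqueue and cancel the message
 * worker, waiting for it to finish if it is currently running.
 */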
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched)
{
	drm_sched_wqueue_stop(&sched->base);
	cancel_work_sync(&sched->work_process_msg);
}

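/*
 * Append a message to the scheduler's message list and kick the worker to
 * process it via the backend's process_msg() hook.
 */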
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
		      struct xe_sched_msg *msg)
{
	spin_lock(&sched->base.job_list_lock);
	list_add_tail(&msg->link, &sched->msgs);
	spin_unlock(&sched->base.job_list_lock);

	xe_sched_process_msg_queue(sched);
}