/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _XE_GPU_SCHEDULER_H_
#define _XE_GPU_SCHEDULER_H_

#include "xe_gpu_scheduler_types.h"
#include "xe_sched_job_types.h"

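/*
 * Scheduler create/destroy. The Xe scheduler wraps a DRM GPU scheduler
 * (sched->base) and takes both the core drm_sched_backend_ops and the
 * Xe-specific xe_sched_backend_ops.
 */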
int xe_sched_init(struct xe_gpu_scheduler *sched,
		  const struct drm_sched_backend_ops *ops,
		  const struct xe_sched_backend_ops *xe_ops,
		  struct workqueue_struct *submit_wq,
		  uint32_t hw_submission, unsigned hang_limit,
		  long timeout, struct workqueue_struct *timeout_wq,
		  atomic_t *score, const char *name,
		  struct device *dev);
void xe_sched_fini(struct xe_gpu_scheduler *sched);

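/* Start/stop job submission on the scheduler. */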
void xe_sched_submission_start(struct xe_gpu_scheduler *sched);
void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);

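/* Resume the scheduler's timeout detection and recovery (TDR) handling. */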
void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched);

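/*
 * Queue a message for the scheduler to process; the _locked variant
 * expects the caller to already hold the message lock (see
 * xe_sched_msg_lock()/xe_sched_msg_unlock() below).
 */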
void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
		      struct xe_sched_msg *msg);
void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
			     struct xe_sched_msg *msg);

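/* The message lock reuses the base DRM scheduler's job_list_lock. */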
static inline void xe_sched_msg_lock(struct xe_gpu_scheduler *sched)
{
	spin_lock(&sched->base.job_list_lock);
}

static inline void xe_sched_msg_unlock(struct xe_gpu_scheduler *sched)
{
	spin_unlock(&sched->base.job_list_lock);
}

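/* Stop the underlying DRM scheduler without designating a bad job. */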
static inline void xe_sched_stop(struct xe_gpu_scheduler *sched)
{
	drm_sched_stop(&sched->base, NULL);
}

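/* Queue the DRM scheduler's timeout (TDR) work for immediate execution. */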
static inline void xe_sched_tdr_queue_imm(struct xe_gpu_scheduler *sched)
{
	drm_sched_tdr_queue_imm(&sched->base);
}

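/* Resubmit the scheduler's pending jobs (typically after a reset). */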
static inline void xe_sched_resubmit_jobs(struct xe_gpu_scheduler *sched)
{
	drm_sched_resubmit_jobs(&sched->base);
}

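/* Wrapper around drm_sched_invalidate_job() for the embedded drm_sched_job. */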
static inline bool
xe_sched_invalidate_job(struct xe_sched_job *job, int threshold)
{
	return drm_sched_invalidate_job(&job->drm, threshold);
}

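/* Add @job to the head of the base scheduler's pending list, under the list lock. */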
static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched,
					    struct xe_sched_job *job)
{
	spin_lock(&sched->base.job_list_lock);
	list_add(&job->drm.list, &sched->base.pending_list);
	spin_unlock(&sched->base.job_list_lock);
}

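/* Return the first job on the pending list, or NULL if the list is empty. */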
static inline
struct xe_sched_job *xe_sched_first_pending_job(struct xe_gpu_scheduler *sched)
{
	return list_first_entry_or_null(&sched->base.pending_list,
					struct xe_sched_job, drm.list);
}

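/*
 * Initialize @entity against this scheduler only; each Xe entity is
 * backed by a single DRM GPU scheduler.
 */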
static inline int
xe_sched_entity_init(struct xe_sched_entity *entity,
		     struct xe_gpu_scheduler *sched)
{
	return drm_sched_entity_init(entity, 0,
				     (struct drm_gpu_scheduler **)&sched,
				     1, NULL);
}

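/* Entity teardown maps directly onto the DRM scheduler entity helper. */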
#define xe_sched_entity_fini drm_sched_entity_fini

#endif