Linux Audio

Check our new training course

Linux kernel drivers training

Mar 31-Apr 9, 2025, special US time zones
Register
Loading...
v6.13.7
 1/* SPDX-License-Identifier: MIT */
 2/*
 3 * Copyright © 2021 Intel Corporation
 4 */
 5
 6#ifndef _XE_SCHED_JOB_H_
 7#define _XE_SCHED_JOB_H_
 8
 9#include "xe_sched_job_types.h"
10
11struct drm_printer;
12struct xe_vm;
13struct xe_sync_entry;
14
15#define XE_SCHED_HANG_LIMIT 1
16#define XE_SCHED_JOB_TIMEOUT LONG_MAX
17
18int xe_sched_job_module_init(void);
19void xe_sched_job_module_exit(void);
20
21struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
22					 u64 *batch_addr);
23void xe_sched_job_destroy(struct kref *ref);
24
/**
 * xe_sched_job_get - get reference to XE schedule job
 * @job: XE schedule job object
 *
 * Increment XE schedule job's reference count.
 *
 * Return: @job, so the call can be chained at the point the reference
 * is taken.
 */
static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job)
{
	kref_get(&job->refcount);
	return job;
}
36
/**
 * xe_sched_job_put - put reference to XE schedule job
 * @job: XE schedule job object
 *
 * Decrement XE schedule job's reference count; xe_sched_job_destroy() is
 * invoked by kref_put() when the reference count reaches 0.
 */
static inline void xe_sched_job_put(struct xe_sched_job *job)
{
	kref_put(&job->refcount, xe_sched_job_destroy);
}
48
49void xe_sched_job_set_error(struct xe_sched_job *job, int error);
50static inline bool xe_sched_job_is_error(struct xe_sched_job *job)
51{
52	return job->fence->error < 0;
53}
54
55bool xe_sched_job_started(struct xe_sched_job *job);
56bool xe_sched_job_completed(struct xe_sched_job *job);
57
58void xe_sched_job_arm(struct xe_sched_job *job);
59void xe_sched_job_push(struct xe_sched_job *job);
60
61int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm);
62void xe_sched_job_init_user_fence(struct xe_sched_job *job,
63				  struct xe_sync_entry *sync);
64
/**
 * to_xe_sched_job - cast a drm_sched_job to its containing xe_sched_job
 * @drm: DRM scheduler job embedded in a &struct xe_sched_job
 *
 * Return: the &struct xe_sched_job that embeds @drm.
 */
static inline struct xe_sched_job *
to_xe_sched_job(struct drm_sched_job *drm)
{
	return container_of(drm, struct xe_sched_job, drm);
}
70
71static inline u32 xe_sched_job_seqno(struct xe_sched_job *job)
72{
73	return job->fence ? job->fence->seqno : 0;
74}
75
/**
 * xe_sched_job_lrc_seqno - LRC seqno of a schedule job
 * @job: XE schedule job object
 *
 * Return: the job's cached LRC seqno (@job->lrc_seqno).
 */
static inline u32 xe_sched_job_lrc_seqno(struct xe_sched_job *job)
{
	return job->lrc_seqno;
}
80
/**
 * xe_sched_job_add_migrate_flush - record migrate-flush flags on a job
 * @job: XE schedule job object
 * @flags: flush flags to store in @job->migrate_flush_flags
 *
 * Only stores the flags on the job; presumably they are consumed later when
 * the migration job's ring instructions are emitted — confirm against the
 * migrate code path.
 */
static inline void
xe_sched_job_add_migrate_flush(struct xe_sched_job *job, u32 flags)
{
	job->migrate_flush_flags = flags;
}
86
87bool xe_sched_job_is_migration(struct xe_exec_queue *q);
88
89struct xe_sched_job_snapshot *xe_sched_job_snapshot_capture(struct xe_sched_job *job);
90void xe_sched_job_snapshot_free(struct xe_sched_job_snapshot *snapshot);
91void xe_sched_job_snapshot_print(struct xe_sched_job_snapshot *snapshot, struct drm_printer *p);
92
93int xe_sched_job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
94			  enum dma_resv_usage usage);
95
96#endif
v6.9.4
 1/* SPDX-License-Identifier: MIT */
 2/*
 3 * Copyright © 2021 Intel Corporation
 4 */
 5
 6#ifndef _XE_SCHED_JOB_H_
 7#define _XE_SCHED_JOB_H_
 8
 9#include "xe_sched_job_types.h"
10
11struct drm_printer;
12struct xe_vm;
 
13
14#define XE_SCHED_HANG_LIMIT 1
15#define XE_SCHED_JOB_TIMEOUT LONG_MAX
16
17int xe_sched_job_module_init(void);
18void xe_sched_job_module_exit(void);
19
20struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
21					 u64 *batch_addr);
22void xe_sched_job_destroy(struct kref *ref);
23
/**
 * xe_sched_job_get - get reference to XE schedule job
 * @job: XE schedule job object
 *
 * Increment XE schedule job's reference count.
 *
 * Return: @job, so the call can be chained at the point the reference
 * is taken.
 */
static inline struct xe_sched_job *xe_sched_job_get(struct xe_sched_job *job)
{
	kref_get(&job->refcount);
	return job;
}
35
/**
 * xe_sched_job_put - put reference to XE schedule job
 * @job: XE schedule job object
 *
 * Decrement XE schedule job's reference count; xe_sched_job_destroy() is
 * invoked by kref_put() when the reference count reaches 0.
 */
static inline void xe_sched_job_put(struct xe_sched_job *job)
{
	kref_put(&job->refcount, xe_sched_job_destroy);
}
47
48void xe_sched_job_set_error(struct xe_sched_job *job, int error);
/**
 * xe_sched_job_is_error - check whether a schedule job carries an error
 * @job: XE schedule job object
 *
 * An error is recorded on the job's fence via xe_sched_job_set_error().
 *
 * Return: true if the job's fence error status is negative, false otherwise.
 */
static inline bool xe_sched_job_is_error(struct xe_sched_job *job)
{
	return job->fence->error < 0;
}
53
54bool xe_sched_job_started(struct xe_sched_job *job);
55bool xe_sched_job_completed(struct xe_sched_job *job);
56
57void xe_sched_job_arm(struct xe_sched_job *job);
58void xe_sched_job_push(struct xe_sched_job *job);
59
60int xe_sched_job_last_fence_add_dep(struct xe_sched_job *job, struct xe_vm *vm);
 
 
61
/**
 * to_xe_sched_job - cast a drm_sched_job to its containing xe_sched_job
 * @drm: DRM scheduler job embedded in a &struct xe_sched_job
 *
 * Return: the &struct xe_sched_job that embeds @drm.
 */
static inline struct xe_sched_job *
to_xe_sched_job(struct drm_sched_job *drm)
{
	return container_of(drm, struct xe_sched_job, drm);
}
67
68static inline u32 xe_sched_job_seqno(struct xe_sched_job *job)
69{
70	return job->fence->seqno;
 
 
 
 
 
71}
72
/**
 * xe_sched_job_add_migrate_flush - record migrate-flush flags on a job
 * @job: XE schedule job object
 * @flags: flush flags to store in @job->migrate_flush_flags
 *
 * Only stores the flags on the job; presumably they are consumed later when
 * the migration job's ring instructions are emitted — confirm against the
 * migrate code path.
 */
static inline void
xe_sched_job_add_migrate_flush(struct xe_sched_job *job, u32 flags)
{
	job->migrate_flush_flags = flags;
}
78
79bool xe_sched_job_is_migration(struct xe_exec_queue *q);
80
81struct xe_sched_job_snapshot *xe_sched_job_snapshot_capture(struct xe_sched_job *job);
82void xe_sched_job_snapshot_free(struct xe_sched_job_snapshot *snapshot);
83void xe_sched_job_snapshot_print(struct xe_sched_job_snapshot *snapshot, struct drm_printer *p);
 
 
 
84
85#endif