1/* SPDX-License-Identifier: MIT */
2/*
3 * Copyright © 2019 Intel Corporation
4 */
5
6#ifndef INTEL_ENGINE_PM_H
7#define INTEL_ENGINE_PM_H
8
9#include "i915_drv.h"
10#include "i915_request.h"
11#include "intel_engine_types.h"
12#include "intel_wakeref.h"
13#include "intel_gt_pm.h"
14
/*
 * intel_engine_pm_is_awake - check whether the engine holds an active wakeref
 * @engine: engine to query
 *
 * Return: true if the engine's wakeref refcount is currently active, i.e.
 * the engine has not been parked.
 */
static inline bool
intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
{
	return intel_wakeref_is_active(&engine->wakeref);
}
20
/*
 * __intel_engine_pm_get - raw increment of the engine's wakeref count
 * @engine: engine to keep awake
 *
 * NOTE(review): presumably only legal while the engine is already awake
 * (a plain refcount bump with no wakeup side effects) -- confirm against
 * __intel_wakeref_get() in intel_wakeref.h.
 */
static inline void __intel_engine_pm_get(struct intel_engine_cs *engine)
{
	__intel_wakeref_get(&engine->wakeref);
}
25
/*
 * intel_engine_pm_get - acquire a wakeref for the engine
 * @engine: engine to wake and keep awake
 *
 * Takes a full wakeref via intel_wakeref_get(); NOTE(review): this is
 * expected to power up (unpark) the engine on the first reference --
 * confirm against intel_wakeref.h.
 */
static inline void intel_engine_pm_get(struct intel_engine_cs *engine)
{
	intel_wakeref_get(&engine->wakeref);
}
30
/*
 * intel_engine_pm_get_if_awake - take a wakeref only if the engine is awake
 * @engine: engine to query
 *
 * Return: true if a reference was obtained (the engine was already active);
 * false if the engine is parked, in which case no reference is taken.
 */
static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
{
	return intel_wakeref_get_if_active(&engine->wakeref);
}
35
/*
 * intel_engine_pm_might_get - annotate that the caller may acquire the
 * engine's wakeref
 * @engine: engine (physical or virtual) whose wakeref may be taken
 *
 * For a physical engine, annotate just its own wakeref. For a virtual
 * engine, annotate the wakeref of every physical engine in its mask --
 * NOTE(review): presumably because a virtual engine has no wakeref of its
 * own and proxies onto its siblings; confirm against the virtual-engine
 * implementation. The GT-level wakeref is annotated in both cases.
 */
static inline void intel_engine_pm_might_get(struct intel_engine_cs *engine)
{
	if (!intel_engine_is_virtual(engine)) {
		intel_wakeref_might_get(&engine->wakeref);
	} else {
		struct intel_gt *gt = engine->gt;
		struct intel_engine_cs *tengine;
		intel_engine_mask_t tmp, mask = engine->mask;

		/* Annotate each physical engine backing the virtual engine */
		for_each_engine_masked(tengine, gt, mask, tmp)
			intel_wakeref_might_get(&tengine->wakeref);
	}
	intel_gt_pm_might_get(engine->gt);
}
50
/*
 * intel_engine_pm_put - release a wakeref for the engine
 * @engine: engine to release
 *
 * Drops one reference; NOTE(review): releasing the last reference is
 * expected to park the engine -- see intel_wakeref_put().
 */
static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
{
	intel_wakeref_put(&engine->wakeref);
}
55
/*
 * intel_engine_pm_put_async - release a wakeref without blocking
 * @engine: engine to release
 *
 * Drops one reference, deferring any final release work; NOTE(review):
 * presumably safe from atomic context -- confirm against
 * intel_wakeref_put_async().
 */
static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
{
	intel_wakeref_put_async(&engine->wakeref);
}
60
/*
 * intel_engine_pm_put_delay - release a wakeref after a delay
 * @engine: engine to release
 * @delay: delay before the final release takes effect
 *
 * NOTE(review): units of @delay (jiffies vs. msec) are defined by
 * intel_wakeref_put_delay() -- confirm there.
 */
static inline void intel_engine_pm_put_delay(struct intel_engine_cs *engine,
					     unsigned long delay)
{
	intel_wakeref_put_delay(&engine->wakeref, delay);
}
66
/*
 * intel_engine_pm_flush - wait for pending wakeref release work
 * @engine: engine to flush
 *
 * Waits until the wakeref is unlocked, i.e. any in-flight put/park
 * callback has completed.
 */
static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
{
	intel_wakeref_unlock_wait(&engine->wakeref);
}
71
/*
 * intel_engine_pm_might_put - annotate that the caller may release the
 * engine's wakeref
 * @engine: engine (physical or virtual) whose wakeref may be dropped
 *
 * Mirror of intel_engine_pm_might_get(): a physical engine annotates its
 * own wakeref, while a virtual engine annotates every physical engine in
 * its mask. The GT-level wakeref is annotated in both cases.
 */
static inline void intel_engine_pm_might_put(struct intel_engine_cs *engine)
{
	if (!intel_engine_is_virtual(engine)) {
		intel_wakeref_might_put(&engine->wakeref);
	} else {
		struct intel_gt *gt = engine->gt;
		struct intel_engine_cs *tengine;
		intel_engine_mask_t tmp, mask = engine->mask;

		/* Annotate each physical engine backing the virtual engine */
		for_each_engine_masked(tengine, gt, mask, tmp)
			intel_wakeref_might_put(&tengine->wakeref);
	}
	intel_gt_pm_might_put(engine->gt);
}
86
/*
 * intel_engine_create_kernel_request - create a request on the engine's
 * kernel context, holding the engine wakeref across creation
 * @engine: engine whose kernel_context the request is created on
 *
 * Return: the new request; NOTE(review): i915_request_create() is expected
 * to return an ERR_PTR on failure -- callers should check with IS_ERR().
 */
static inline struct i915_request *
intel_engine_create_kernel_request(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	/*
	 * The engine->kernel_context is special as it is used inside
	 * the engine-pm barrier (see __engine_park()), circumventing
	 * the usual mutexes and relying on the engine-pm barrier
	 * instead. So whenever we use the engine->kernel_context
	 * outside of the barrier, we must manually handle the
	 * engine wakeref to serialise with the use inside.
	 */
	intel_engine_pm_get(engine);
	rq = i915_request_create(engine->kernel_context);
	intel_engine_pm_put(engine);

	return rq;
}
106
107void intel_engine_init__pm(struct intel_engine_cs *engine);
108
109void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine);
110
111#endif /* INTEL_ENGINE_PM_H */
1/* SPDX-License-Identifier: MIT */
2/*
3 * Copyright © 2019 Intel Corporation
4 */
5
6#ifndef INTEL_ENGINE_PM_H
7#define INTEL_ENGINE_PM_H
8
9#include "i915_drv.h"
10#include "i915_request.h"
11#include "intel_engine_types.h"
12#include "intel_wakeref.h"
13#include "intel_gt.h"
14#include "intel_gt_pm.h"
15
/*
 * intel_engine_pm_is_awake - check whether the engine holds an active wakeref
 * @engine: engine to query
 *
 * Return: true if the engine's wakeref refcount is currently active, i.e.
 * the engine has not been parked.
 */
static inline bool
intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
{
	return intel_wakeref_is_active(&engine->wakeref);
}
21
/*
 * __intel_engine_pm_get - raw increment of the engine's wakeref count
 * @engine: engine to keep awake
 *
 * NOTE(review): presumably only legal while the engine is already awake
 * (a plain refcount bump with no wakeup side effects) -- confirm against
 * __intel_wakeref_get() in intel_wakeref.h.
 */
static inline void __intel_engine_pm_get(struct intel_engine_cs *engine)
{
	__intel_wakeref_get(&engine->wakeref);
}
26
/*
 * intel_engine_pm_get - acquire a wakeref for the engine
 * @engine: engine to wake and keep awake
 *
 * Takes a full wakeref via intel_wakeref_get(); NOTE(review): this is
 * expected to power up (unpark) the engine on the first reference --
 * confirm against intel_wakeref.h.
 */
static inline void intel_engine_pm_get(struct intel_engine_cs *engine)
{
	intel_wakeref_get(&engine->wakeref);
}
31
/*
 * intel_engine_pm_get_if_awake - take a wakeref only if the engine is awake
 * @engine: engine to query
 *
 * Return: true if a reference was obtained (the engine was already active);
 * false if the engine is parked, in which case no reference is taken.
 */
static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
{
	return intel_wakeref_get_if_active(&engine->wakeref);
}
36
/*
 * intel_engine_pm_might_get - annotate that the caller may acquire the
 * engine's wakeref
 * @engine: engine (physical or virtual) whose wakeref may be taken
 *
 * For a physical engine, annotate just its own wakeref. For a virtual
 * engine, annotate the wakeref of every physical engine in its mask --
 * NOTE(review): presumably because a virtual engine has no wakeref of its
 * own and proxies onto its siblings; confirm against the virtual-engine
 * implementation. The GT-level wakeref is annotated in both cases.
 */
static inline void intel_engine_pm_might_get(struct intel_engine_cs *engine)
{
	if (!intel_engine_is_virtual(engine)) {
		intel_wakeref_might_get(&engine->wakeref);
	} else {
		struct intel_gt *gt = engine->gt;
		struct intel_engine_cs *tengine;
		intel_engine_mask_t tmp, mask = engine->mask;

		/* Annotate each physical engine backing the virtual engine */
		for_each_engine_masked(tengine, gt, mask, tmp)
			intel_wakeref_might_get(&tengine->wakeref);
	}
	intel_gt_pm_might_get(engine->gt);
}
51
/*
 * intel_engine_pm_put - release a wakeref for the engine
 * @engine: engine to release
 *
 * Drops one reference; NOTE(review): releasing the last reference is
 * expected to park the engine -- see intel_wakeref_put().
 */
static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
{
	intel_wakeref_put(&engine->wakeref);
}
56
/*
 * intel_engine_pm_put_async - release a wakeref without blocking
 * @engine: engine to release
 *
 * Drops one reference, deferring any final release work; NOTE(review):
 * presumably safe from atomic context -- confirm against
 * intel_wakeref_put_async().
 */
static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
{
	intel_wakeref_put_async(&engine->wakeref);
}
61
/*
 * intel_engine_pm_put_delay - release a wakeref after a delay
 * @engine: engine to release
 * @delay: delay before the final release takes effect
 *
 * NOTE(review): units of @delay (jiffies vs. msec) are defined by
 * intel_wakeref_put_delay() -- confirm there.
 */
static inline void intel_engine_pm_put_delay(struct intel_engine_cs *engine,
					     unsigned long delay)
{
	intel_wakeref_put_delay(&engine->wakeref, delay);
}
67
/*
 * intel_engine_pm_flush - wait for pending wakeref release work
 * @engine: engine to flush
 *
 * Waits until the wakeref is unlocked, i.e. any in-flight put/park
 * callback has completed.
 */
static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
{
	intel_wakeref_unlock_wait(&engine->wakeref);
}
72
/*
 * intel_engine_pm_might_put - annotate that the caller may release the
 * engine's wakeref
 * @engine: engine (physical or virtual) whose wakeref may be dropped
 *
 * Mirror of intel_engine_pm_might_get(): a physical engine annotates its
 * own wakeref, while a virtual engine annotates every physical engine in
 * its mask. The GT-level wakeref is annotated in both cases.
 */
static inline void intel_engine_pm_might_put(struct intel_engine_cs *engine)
{
	if (!intel_engine_is_virtual(engine)) {
		intel_wakeref_might_put(&engine->wakeref);
	} else {
		struct intel_gt *gt = engine->gt;
		struct intel_engine_cs *tengine;
		intel_engine_mask_t tmp, mask = engine->mask;

		/* Annotate each physical engine backing the virtual engine */
		for_each_engine_masked(tengine, gt, mask, tmp)
			intel_wakeref_might_put(&tengine->wakeref);
	}
	intel_gt_pm_might_put(engine->gt);
}
87
/*
 * intel_engine_create_kernel_request - create a request on the engine's
 * kernel context, holding the engine wakeref across creation
 * @engine: engine whose kernel_context the request is created on
 *
 * Return: the new request; NOTE(review): i915_request_create() is expected
 * to return an ERR_PTR on failure -- callers should check with IS_ERR().
 */
static inline struct i915_request *
intel_engine_create_kernel_request(struct intel_engine_cs *engine)
{
	struct i915_request *rq;

	/*
	 * The engine->kernel_context is special as it is used inside
	 * the engine-pm barrier (see __engine_park()), circumventing
	 * the usual mutexes and relying on the engine-pm barrier
	 * instead. So whenever we use the engine->kernel_context
	 * outside of the barrier, we must manually handle the
	 * engine wakeref to serialise with the use inside.
	 */
	intel_engine_pm_get(engine);
	rq = i915_request_create(engine->kernel_context);
	intel_engine_pm_put(engine);

	return rq;
}
107
108void intel_engine_init__pm(struct intel_engine_cs *engine);
109
110void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine);
111
112#endif /* INTEL_ENGINE_PM_H */