/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_H__
#define __INTEL_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/types.h>

#include "i915_active.h"
#include "i915_drv.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_ring_types.h"
#include "intel_timeline_types.h"

#define CE_TRACE(ce, fmt, ...) do {					\
	const struct intel_context *ce__ = (ce);			\
	ENGINE_TRACE(ce__->engine, "context:%llx " fmt,			\
		     ce__->timeline->fence_context,			\
		     ##__VA_ARGS__);					\
} while (0)
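
/*
 * Illustrative use (not part of this header): CE_TRACE() stamps an
 * ENGINE_TRACE() message with the context's timeline fence id, e.g.
 *
 *	CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
 *		 ce->ring->head, ce->ring->tail);
 *
 * so the log line identifies both the engine and the context.
 */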

void intel_context_init(struct intel_context *ce,
			struct intel_engine_cs *engine);
void intel_context_fini(struct intel_context *ce);

struct intel_context *
intel_context_create(struct intel_engine_cs *engine);

int intel_context_alloc_state(struct intel_context *ce);

void intel_context_free(struct intel_context *ce);

int intel_context_reconfigure_sseu(struct intel_context *ce,
				   const struct intel_sseu sseu);

/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
 * @ce: the context
 *
 * Acquire a lock on the pinned status of the HW context, such that the context
 * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
 * intel_context_is_pinned() remains stable.
 */
static inline int intel_context_lock_pinned(struct intel_context *ce)
	__acquires(ce->pin_mutex)
{
	return mutex_lock_interruptible(&ce->pin_mutex);
}
55
56/**
57 * intel_context_is_pinned - Reports the 'pinned' status
58 * @ce - the context
59 *
60 * While in use by the GPU, the context, along with its ring and page
61 * tables is pinned into memory and the GTT.
62 *
63 * Returns: true if the context is currently pinned for use by the GPU.
64 */
65static inline bool
66intel_context_is_pinned(struct intel_context *ce)
67{
68 return atomic_read(&ce->pin_count);
69}
70
71/**
72 * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
73 * @ce - the context
74 *
75 * Releases the lock earlier acquired by intel_context_unlock_pinned().
76 */
77static inline void intel_context_unlock_pinned(struct intel_context *ce)
78 __releases(ce->pin_mutex)
79{
80 mutex_unlock(&ce->pin_mutex);
81}
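
/*
 * Typical pattern (a sketch built from the helpers above): stabilise the
 * pinned status before inspecting state that is only valid while pinned.
 *
 *	err = intel_context_lock_pinned(ce);
 *	if (err)
 *		return err;
 *
 *	if (intel_context_is_pinned(ce))
 *		... safely inspect the pinned HW state ...
 *
 *	intel_context_unlock_pinned(ce);
 */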

int __intel_context_do_pin(struct intel_context *ce);

static inline bool intel_context_pin_if_active(struct intel_context *ce)
{
	return atomic_inc_not_zero(&ce->pin_count);
}

static inline int intel_context_pin(struct intel_context *ce)
{
	if (likely(intel_context_pin_if_active(ce)))
		return 0;

	return __intel_context_do_pin(ce);
}

static inline void __intel_context_pin(struct intel_context *ce)
{
	GEM_BUG_ON(!intel_context_is_pinned(ce));
	atomic_inc(&ce->pin_count);
}

void intel_context_unpin(struct intel_context *ce);

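/*
 * Sketch of the expected pin/unpin bracketing: intel_context_pin() takes
 * the slow path (__intel_context_do_pin) only for the first pin, while
 * later callers merely bump pin_count; every successful pin must be
 * balanced by intel_context_unpin().
 *
 *	err = intel_context_pin(ce);
 *	if (err)
 *		return err;
 *	... submit work on ce ...
 *	intel_context_unpin(ce);
 *
 * __intel_context_pin() is the cheap variant for callers that know the
 * context is already pinned, hence the GEM_BUG_ON above.
 */
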
void intel_context_enter_engine(struct intel_context *ce);
void intel_context_exit_engine(struct intel_context *ce);

static inline void intel_context_enter(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	if (!ce->active_count++)
		ce->ops->enter(ce);
}

static inline void intel_context_mark_active(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	++ce->active_count;
}

static inline void intel_context_exit(struct intel_context *ce)
{
	lockdep_assert_held(&ce->timeline->mutex);
	GEM_BUG_ON(!ce->active_count);
	if (!--ce->active_count)
		ce->ops->exit(ce);
}

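/*
 * A sketch of the intended bracketing (assuming the caller already holds
 * ce->timeline->mutex, as the lockdep assertions require): the backend's
 * ops->enter() runs only on the 0 -> 1 transition of active_count (e.g.
 * intel_context_enter_engine() takes an engine-pm wakeref) and ops->exit()
 * only on the final 1 -> 0 transition.
 *
 *	intel_context_enter(ce);
 *	... queue work against the context ...
 *	intel_context_exit(ce);
 */
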
static inline struct intel_context *intel_context_get(struct intel_context *ce)
{
	kref_get(&ce->ref);
	return ce;
}

static inline void intel_context_put(struct intel_context *ce)
{
	kref_put(&ce->ref, ce->ops->destroy);
}

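/*
 * Standard kref usage: intel_context_get() may only be called while a
 * reference is already held, and the final intel_context_put() releases
 * the context through ce->ops->destroy. A minimal sketch:
 *
 *	struct intel_context *tmp = intel_context_get(ce);
 *	... use tmp independently of the original reference ...
 *	intel_context_put(tmp);
 */
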
static inline struct intel_timeline *__must_check
intel_context_timeline_lock(struct intel_context *ce)
	__acquires(&ce->timeline->mutex)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	err = mutex_lock_interruptible(&tl->mutex);
	if (err)
		return ERR_PTR(err);

	return tl;
}

static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
	__releases(&tl->mutex)
{
	mutex_unlock(&tl->mutex);
}

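/*
 * Since the lock is interruptible and the return is __must_check, callers
 * are expected to handle an ERR_PTR result, e.g.:
 *
 *	struct intel_timeline *tl;
 *
 *	tl = intel_context_timeline_lock(ce);
 *	if (IS_ERR(tl))
 *		return PTR_ERR(tl);
 *	... operate on the timeline ...
 *	intel_context_timeline_unlock(tl);
 */
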
int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq);

struct i915_request *intel_context_create_request(struct intel_context *ce);

static inline struct intel_ring *__intel_context_ring_size(u64 sz)
{
	return u64_to_ptr(struct intel_ring, sz);
}

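/*
 * Until the context's state is allocated, ce->ring may carry an encoded
 * ring size rather than a real pointer; __intel_context_ring_size()
 * produces that encoding. An illustrative assignment:
 *
 *	ce->ring = __intel_context_ring_size(SZ_16K);
 *
 * which the backend decodes when it finally creates the ring.
 */
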
static inline bool intel_context_is_barrier(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
}

static inline bool intel_context_is_closed(const struct intel_context *ce)
{
	return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
}

static inline bool intel_context_use_semaphores(const struct intel_context *ce)
{
	return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_set_use_semaphores(struct intel_context *ce)
{
	set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
{
	clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
}

static inline bool intel_context_is_banned(const struct intel_context *ce)
{
	return test_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool intel_context_set_banned(struct intel_context *ce)
{
	return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
}

static inline bool
intel_context_force_single_submission(const struct intel_context *ce)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline void
intel_context_set_single_submission(struct intel_context *ce)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
}

static inline bool
intel_context_nopreempt(const struct intel_context *ce)
{
	return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_set_nopreempt(struct intel_context *ce)
{
	set_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

static inline void
intel_context_clear_nopreempt(struct intel_context *ce)
{
	clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}

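/*
 * The helpers above are thin wrappers around atomic bitops on ce->flags.
 * Note that intel_context_set_banned() is a test-and-set, so a caller can
 * detect the first offender; a sketch:
 *
 *	if (!intel_context_set_banned(ce))
 *		... first time banned: flush in-flight requests ...
 */
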
static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
{
	const u32 period =
		RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;

	return READ_ONCE(ce->runtime.total) * period;
}

static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
{
	const u32 period =
		RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;

	return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period);
}
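
/*
 * The runtime counters accumulate CS timestamp ticks; multiplying by the
 * platform's cs_timestamp_period_ns converts ticks to nanoseconds. For
 * example (a sketch):
 *
 *	u64 total = intel_context_get_total_runtime_ns(ce);
 *	u64 avg = intel_context_get_avg_runtime_ns(ce);
 *
 *	pr_info("context ran for %lluns (avg %lluns)\n", total, avg);
 */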

#endif /* __INTEL_CONTEXT_H__ */