/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_GEM_CONTEXT_H__
#define __I915_GEM_CONTEXT_H__

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>

#include "i915_gem.h"

struct pid;

struct drm_device;
struct drm_file;

struct drm_i915_private;
struct drm_i915_file_private;
struct i915_hw_ppgtt;
struct i915_request;
struct i915_vma;
struct intel_ring;

#define DEFAULT_CONTEXT_HANDLE 0

/**
 * struct i915_gem_context - client state
 *
 * The struct i915_gem_context represents the combined view of the driver and
 * logical hardware state for a particular client.
 */
struct i915_gem_context {
	/** i915: i915 device backpointer */
	struct drm_i915_private *i915;

	/** file_priv: owning file descriptor */
	struct drm_i915_file_private *file_priv;

	/**
	 * @ppgtt: unique address space (GTT)
	 *
	 * In full-ppgtt mode, each context has its own address space ensuring
	 * complete separation of one client from all others.
	 *
	 * In other modes, this is a NULL pointer with the expectation that
	 * the caller uses the shared global GTT.
	 */
	struct i915_hw_ppgtt *ppgtt;

	/**
	 * @pid: process id of creator
	 *
	 * Note that who created the context may not be the principal user,
	 * as the context may be shared across a local socket. However,
	 * that should only affect the default context; all contexts created
	 * explicitly by the client are expected to be isolated.
	 */
	struct pid *pid;

	/**
	 * @name: arbitrary name
	 *
	 * A name is constructed for the context from the creator's process
	 * name, pid and user handle in order to uniquely identify the
	 * context in messages.
	 */
	const char *name;

	/** link: place within &drm_i915_private.context_list */
	struct list_head link;
	struct llist_node free_link;

	/**
	 * @ref: reference count
	 *
	 * A reference to a context is held both by the client who created it
	 * and by each request submitted to the hardware using its state (to
	 * ensure the hardware has access to the state until it has finished
	 * all pending writes). See i915_gem_context_get() and
	 * i915_gem_context_put() for access.
	 */
	struct kref ref;

	/**
	 * @rcu: rcu_head for deferred freeing.
	 */
	struct rcu_head rcu;

	/**
	 * @flags: small set of booleans
	 */
	unsigned long flags;
#define CONTEXT_NO_ZEROMAP		BIT(0)
#define CONTEXT_NO_ERROR_CAPTURE	1
#define CONTEXT_CLOSED			2
#define CONTEXT_BANNABLE		3
#define CONTEXT_BANNED			4
#define CONTEXT_FORCE_SINGLE_SUBMISSION	5
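/*
 * Note the mixed convention above: CONTEXT_NO_ZEROMAP is defined as a mask
 * (BIT(0)), while the remaining values are bit numbers intended for
 * test_bit()/__set_bit(), as used by the helpers later in this header.
 * Callers of CONTEXT_NO_ZEROMAP are therefore expected to test @flags with
 * the mask directly rather than via a helper.
 */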

	/**
	 * @hw_id: unique identifier for the context
	 *
	 * The hardware needs to uniquely identify the context for a few
	 * functions like fault reporting, PASID, scheduling. The
	 * &drm_i915_private.context_hw_ida is used to assign a unique
	 * id for the lifetime of the context.
	 */
	unsigned int hw_id;

	/**
	 * @user_handle: userspace identifier
	 *
	 * A unique per-file identifier is generated from
	 * &drm_i915_file_private.contexts.
	 */
	u32 user_handle;

	/**
	 * @priority: execution and service priority
	 *
	 * All clients are equal, but some are more equal than others!
	 *
	 * Requests from a context with a greater (more positive) value of
	 * @priority will be executed before those with a lower @priority
	 * value, forming a simple QoS.
	 *
	 * The &drm_i915_private.kernel_context is assigned the lowest priority.
	 */
	int priority;

	/** ggtt_offset_bias: placement restriction for context objects */
	u32 ggtt_offset_bias;

	/** engine: per-engine logical HW state */
	struct intel_context {
		struct i915_vma *state;
		struct intel_ring *ring;
		u32 *lrc_reg_state;
		u64 lrc_desc;
		int pin_count;
	} engine[I915_NUM_ENGINES];

	/** ring_size: size for allocating the per-engine ring buffer */
	u32 ring_size;
	/** desc_template: invariant fields for the HW context descriptor */
	u32 desc_template;

	/** guilty_count: How many times this context has caused a GPU hang. */
	atomic_t guilty_count;
	/**
	 * @active_count: How many times this context was active during a GPU
	 * hang, but did not cause it.
	 */
	atomic_t active_count;

#define CONTEXT_SCORE_GUILTY		10
#define CONTEXT_SCORE_BAN_THRESHOLD	40
	/** ban_score: Accumulated score of all hangs caused by this context. */
	atomic_t ban_score;

	/** remap_slice: Bitmask of cache lines that need remapping */
	u8 remap_slice;

	/** handles_vma: radix tree to look up our context-specific obj/vma for
	 * the user handle. (user handles are per fd, but the binding is
	 * per vm, which may be one per context or shared with the global GTT)
	 */
	struct radix_tree_root handles_vma;

	/** handles_list: reverse list of all the radix tree entries in use for
	 * this context, which allows us to free all the allocations on
	 * context close.
	 */
	struct list_head handles_list;
};
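
/*
 * Illustrative sketch (not part of the upstream interface): @handles_vma maps
 * a per-file user handle to the vma already bound into this context's address
 * space, so resolving a handle during execbuf can be a simple radix tree walk
 * rather than a search of the object's vma list. The helper name below is
 * hypothetical.
 */
static inline struct i915_vma *
example_ctx_lookup_vma(struct i915_gem_context *ctx, u32 handle)
{
	/* Returns NULL if the handle has not been bound in this context. */
	return radix_tree_lookup(&ctx->handles_vma, handle);
}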

static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline void i915_gem_context_set_closed(struct i915_gem_context *ctx)
{
	GEM_BUG_ON(i915_gem_context_is_closed(ctx));
	__set_bit(CONTEXT_CLOSED, &ctx->flags);
}

static inline bool i915_gem_context_no_error_capture(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
}

static inline void i915_gem_context_set_no_error_capture(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
}

static inline void i915_gem_context_clear_no_error_capture(struct i915_gem_context *ctx)
{
	__clear_bit(CONTEXT_NO_ERROR_CAPTURE, &ctx->flags);
}

static inline bool i915_gem_context_is_bannable(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_BANNABLE, &ctx->flags);
}

static inline void i915_gem_context_set_bannable(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_BANNABLE, &ctx->flags);
}

static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
{
	__clear_bit(CONTEXT_BANNABLE, &ctx->flags);
}

static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_BANNED, &ctx->flags);
}

static inline void i915_gem_context_set_banned(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_BANNED, &ctx->flags);
}
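
/*
 * Illustrative sketch (not the driver's actual implementation, which lives in
 * the .c files): how the ban bookkeeping fields above are meant to combine
 * with the helpers in this header. A hang handler can charge
 * CONTEXT_SCORE_GUILTY to @ban_score and, once CONTEXT_SCORE_BAN_THRESHOLD is
 * crossed and the context is bannable, mark it banned. The function name is
 * hypothetical.
 */
static inline bool example_context_mark_guilty(struct i915_gem_context *ctx)
{
	unsigned int score;

	atomic_inc(&ctx->guilty_count);
	score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);

	if (!i915_gem_context_is_bannable(ctx))
		return false;

	if (score < CONTEXT_SCORE_BAN_THRESHOLD)
		return false;

	i915_gem_context_set_banned(ctx);
	return true;
}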

static inline bool i915_gem_context_force_single_submission(const struct i915_gem_context *ctx)
{
	return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}

static inline void i915_gem_context_set_force_single_submission(struct i915_gem_context *ctx)
{
	__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ctx->flags);
}

static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
{
	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}

static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
{
	return !ctx->file_priv;
}

/* i915_gem_context.c */
int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
void i915_gem_contexts_fini(struct drm_i915_private *dev_priv);

int i915_gem_context_open(struct drm_i915_private *i915,
			  struct drm_file *file);
void i915_gem_context_close(struct drm_file *file);

int i915_switch_context(struct i915_request *rq);
int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);

void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file);

struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);

static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
	kref_get(&ctx->ref);
	return ctx;
}

static inline void i915_gem_context_put(struct i915_gem_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_release);
}
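
/*
 * Illustrative sketch (not part of the upstream interface): the reference
 * counting documented at &i915_gem_context.ref. Code that needs the context
 * to stay alive beyond the scope that owns it pins a reference with
 * i915_gem_context_get() and drops it with i915_gem_context_put() once the
 * last user of the state is done. The function name is hypothetical.
 */
static inline void example_borrow_context(struct i915_gem_context *ctx)
{
	struct i915_gem_context *ref = i915_gem_context_get(ctx);

	/* ... use ref's logical state, e.g. while building a request ... */

	i915_gem_context_put(ref);
}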

#endif /* !__I915_GEM_CONTEXT_H__ */