/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "trace.h"

/*
 * Defined in Intel Open Source PRM.
 * Ref: https://01.org/linuxgraphics/documentation/hardware-specification-prms
 */
#define TRVATTL3PTRDW(i)	_MMIO(0x4de0 + (i)*4)
#define TRNULLDETCT	_MMIO(0x4de8)
#define TRINVTILEDETCT	_MMIO(0x4dec)
#define TRVADR	_MMIO(0x4df0)
#define TRTTE	_MMIO(0x4df4)
#define RING_EXCC(base)	_MMIO((base) + 0x28)
#define RING_GFX_MODE(base)	_MMIO((base) + 0x29c)
#define VF_GUARDBAND	_MMIO(0x83a4)

#define GEN9_MOCS_SIZE	64

/* Raw offset is appended to each line for convenience. */
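/* Each entry is {ring_id, reg, write mask, in_context}. A non-zero mask marks
 * a masked register: on restore the value is written as value | (mask << 16),
 * the upper 16 bits acting as the write-enable bits, e.g. INSTPM (mask
 * 0xffff) is written back as vreg | 0xffff0000.
 */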
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
	{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
	{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
	{RCS, INVALID_MMIO_REG, 0, false} /* Terminator */
};

static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
	{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
	{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
	{RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
	{RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
	{RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
	{RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
	{RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
	{RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
	{RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
	{RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
	{RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
	{RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
	{RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
	{RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
	{RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
	{RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
	{RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */
	{RCS, TRVADR, 0, false}, /* 0x4df0 */
	{RCS, TRTTE, 0, false}, /* 0x4df4 */

	{BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */

	{VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */

	{VECS, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */

	{RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
	{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
	{RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */

	{RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
	{RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */

	{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
	{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
	{RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
	{RCS, INVALID_MMIO_REG, 0, false} /* Terminator */
};

static struct {
	bool initialized;
	u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
	u32 l3cc_table[GEN9_MOCS_SIZE / 2];
} gen9_render_mocs;

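/* Capture the host's MOCS programming into gen9_render_mocs above; it stands
 * in for a vGPU register image whenever an engine is switched to or from the
 * host (i.e. pre or next is NULL in switch_mocs()).
 */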
static void load_render_mocs(struct drm_i915_private *dev_priv)
{
	i915_reg_t offset;
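	/* Per-engine bases of the GEN9 MOCS control registers (GEN9_GFX_MOCS). */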
	u32 regs[] = {
		[RCS] = 0xc800,
		[VCS] = 0xc900,
		[VCS2] = 0xca00,
		[BCS] = 0xcc00,
		[VECS] = 0xcb00,
	};
	int ring_id, i;

	for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
		offset.reg = regs[ring_id];
		for (i = 0; i < GEN9_MOCS_SIZE; i++) {
			gen9_render_mocs.control_table[ring_id][i] =
				I915_READ_FW(offset);
			offset.reg += 4;
		}
	}

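	/* 0xb020 is the base of GEN9_LNCFCMOCS; each 32-bit register packs two
	 * L3CC entries, hence only GEN9_MOCS_SIZE / 2 reads are needed.
	 */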
	offset.reg = 0xb020;
	for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
		gen9_render_mocs.l3cc_table[i] =
			I915_READ_FW(offset);
		offset.reg += 4;
	}
	gen9_render_mocs.initialized = true;
}

static int
restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
				 struct i915_request *req)
{
	u32 *cs;
	int ret;
	struct engine_mmio *mmio;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = req->engine->id;
	int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];

	if (count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

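	/* One MI_LOAD_REGISTER_IMM header, count offset/value pairs and a
	 * trailing MI_NOOP: count * 2 + 2 dwords in total.
	 */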
	cs = intel_ring_begin(req, count * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(count);
	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->ring_id != ring_id ||
		    !mmio->in_context)
			continue;

		*cs++ = i915_mmio_reg_offset(mmio->reg);
		*cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
			(mmio->mask << 16);
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs-2), *(cs-1), vgpu->id, ring_id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

static int
restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
					struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);

	for (index = 0; index < GEN9_MOCS_SIZE; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs-2), *(cs-1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int
restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
				     struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);

	for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs-2), *(cs-1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

/*
 * Use the LRI command to initialize the MMIO registers that live in the
 * context state image of an inhibit context: the tracked engine MMIOs,
 * the render MOCS control table and the render MOCS L3CC table.
 */
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
				       struct i915_request *req)
{
	int ret;
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

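	/* Disable command arbitration so the restore sequence below cannot be
	 * preempted; arbitration is re-enabled at the end of this function.
	 */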
	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = restore_context_mmio_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	/* no MOCS registers in context except on the render engine */
	if (req->engine->id != RCS)
		goto out;

	ret = restore_render_mocs_control_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
	if (ret)
		goto out;

out:
	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return ret;
}

static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_vgpu_submission *s = &vgpu->submission;
	enum forcewake_domains fw;
	i915_reg_t reg;
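	/* These match the GEN8 per-engine TLB invalidation (clear) registers:
	 * writing 1 starts an invalidation and the bit reads back as 0 once
	 * the hardware has finished.
	 */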
	u32 regs[] = {
		[RCS] = 0x4260,
		[VCS] = 0x4264,
		[VCS2] = 0x4268,
		[BCS] = 0x426c,
		[VECS] = 0x4270,
	};

	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

	if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
		return;

	reg = _MMIO(regs[ring_id]);

	/* WaForceWakeRenderDuringMmioTLBInvalidate:skl
	 * We need to hold a forcewake while invalidating RCS TLB caches,
	 * otherwise the device can drop into RC6 and interrupt the
	 * invalidation process.
	 */
	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
					    FW_REG_READ | FW_REG_WRITE);
	if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
		fw |= FORCEWAKE_RENDER;

	intel_uncore_forcewake_get(dev_priv, fw);

	I915_WRITE_FW(reg, 0x1);

	if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
		gvt_vgpu_err("timeout invalidating ring (%d) tlb\n", ring_id);
	else
		vgpu_vreg_t(vgpu, reg) = 0;

	intel_uncore_forcewake_put(dev_priv, fw);

	gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}

static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
			int ring_id)
{
	struct drm_i915_private *dev_priv;
	i915_reg_t offset, l3_offset;
	u32 old_v, new_v;

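	/* Per-engine MOCS control register bases; same table as in
	 * load_render_mocs().
	 */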
	u32 regs[] = {
		[RCS] = 0xc800,
		[VCS] = 0xc900,
		[VCS2] = 0xca00,
		[BCS] = 0xcc00,
		[VECS] = 0xcb00,
	};
	int i;

	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

	if (IS_KABYLAKE(dev_priv) && ring_id == RCS)
		return;

	if (!pre && !gen9_render_mocs.initialized)
		load_render_mocs(dev_priv);

	offset.reg = regs[ring_id];
	for (i = 0; i < GEN9_MOCS_SIZE; i++) {
		if (pre)
			old_v = vgpu_vreg_t(pre, offset);
		else
			old_v = gen9_render_mocs.control_table[ring_id][i];
		if (next)
			new_v = vgpu_vreg_t(next, offset);
		else
			new_v = gen9_render_mocs.control_table[ring_id][i];

		if (old_v != new_v)
			I915_WRITE_FW(offset, new_v);

		offset.reg += 4;
	}

	if (ring_id == RCS) {
		l3_offset.reg = 0xb020;
		for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
			if (pre)
				old_v = vgpu_vreg_t(pre, l3_offset);
			else
				old_v = gen9_render_mocs.l3cc_table[i];
			if (next)
				new_v = vgpu_vreg_t(next, l3_offset);
			else
				new_v = gen9_render_mocs.l3cc_table[i];

			if (old_v != new_v)
				I915_WRITE_FW(l3_offset, new_v);

			l3_offset.reg += 4;
		}
	}
}

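/* Dword index of the RING_CONTEXT_CONTROL value in the LRC register state. */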
#define CTX_CONTEXT_CONTROL_VAL	0x03

bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
{
	u32 *reg_state = ctx->engine[ring_id].lrc_reg_state;
	u32 inhibit_mask =
		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

	return inhibit_mask ==
		(reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
}

/* Switch ring mmio values (context). */
static void switch_mmio(struct intel_vgpu *pre,
			struct intel_vgpu *next,
			int ring_id)
{
	struct drm_i915_private *dev_priv;
	struct intel_vgpu_submission *s;
	struct engine_mmio *mmio;
	u32 old_v, new_v;

	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		switch_mocs(pre, next, ring_id);

	for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->ring_id != ring_id)
			continue;
		/*
		 * No need to save or restore the MMIO registers that sit in
		 * the context state image on Kabylake: they are initialized
		 * by the LRI command and saved or restored together with the
		 * context.
		 */
		if (IS_KABYLAKE(dev_priv) && mmio->in_context)
			continue;

		/* Save the outgoing owner's view of the register; for masked
		 * registers, drop the write-enable bits from the snapshot.
		 */
		if (pre) {
			vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
			if (mmio->mask)
				vgpu_vreg_t(pre, mmio->reg) &=
					~(mmio->mask << 16);
			old_v = vgpu_vreg_t(pre, mmio->reg);
		} else
			old_v = mmio->value = I915_READ_FW(mmio->reg);

		/* Restore the incoming owner's value, re-applying the
		 * write-enable mask for masked registers.
		 */
		if (next) {
			s = &next->submission;
			/*
			 * No need to restore the MMIO registers that sit in
			 * the context state image if this is not an inhibit
			 * context: the context restore will reload them
			 * itself.
			 */
			if (mmio->in_context &&
			    !is_inhibit_context(s->shadow_ctx, ring_id))
				continue;

			if (mmio->mask)
				new_v = vgpu_vreg_t(next, mmio->reg) |
					(mmio->mask << 16);
			else
				new_v = vgpu_vreg_t(next, mmio->reg);
		} else {
			if (mmio->in_context)
				continue;
			if (mmio->mask)
				new_v = mmio->value | (mmio->mask << 16);
			else
				new_v = mmio->value;
		}

		I915_WRITE_FW(mmio->reg, new_v);

		trace_render_mmio(pre ? pre->id : 0,
				  next ? next->id : 0,
				  "switch",
				  i915_mmio_reg_offset(mmio->reg),
				  old_v, new_v);
	}

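	/* Replay any TLB invalidation the incoming vGPU requested while it
	 * did not own the engine.
	 */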
	if (next)
		handle_tlb_pending_event(next, ring_id);
}

/**
 * intel_gvt_switch_mmio - switch mmio context of specific engine
 * @pre: the last vGPU that owned the engine
 * @next: the vGPU to switch to
 * @ring_id: specify the engine
 *
 * If pre is null, the engine was owned by the host. If next is null,
 * we are switching to a host workload.
 */
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
			   struct intel_vgpu *next, int ring_id)
{
	struct drm_i915_private *dev_priv;

	if (WARN_ON(!pre && !next))
		return;

	gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
		       pre ? "vGPU" : "host", next ? "vGPU" : "host");

	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;

	/*
	 * We are using the raw MMIO access wrappers to improve the
	 * performance of batched MMIO reads/writes, so we need to handle
	 * forcewake manually.
	 */
	intel_runtime_pm_get(dev_priv);
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	switch_mmio(pre, next, ring_id);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_runtime_pm_put(dev_priv);
}

/**
 * intel_gvt_init_engine_mmio_context - initialize the engine mmio list
 * @gvt: GVT device
 */
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
	struct engine_mmio *mmio;

	if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
	else
		gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;

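	/* Count the in-context MMIOs per ring; restore_context_mmio_for_inhibit()
	 * uses these counts to size its LRI packets.
	 */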
	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->in_context)
			gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
	}
}