/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Yulei Zhang <yulei.zhang@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include <linux/slab.h>
#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

#define INVALID_OP    (~0U)

#define OP_LEN_MI           9
#define OP_LEN_2D           10
#define OP_LEN_3D_MEDIA     16
#define OP_LEN_MFX_VC       16
#define OP_LEN_VEBOX	    16

#define CMD_TYPE(cmd)	(((cmd) >> 29) & 7)

struct sub_op_bits {
	int hi;
	int low;
};
struct decode_info {
	char *name;
	int op_len;
	int nr_sub_op;
	struct sub_op_bits *sub_op;
};
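
/*
 * Decoding sketch: bits 31:29 of the first command DWord give the command
 * type, which selects a per-ring struct decode_info below; the opcode is
 * then the top op_len bits of that DWord.  E.g. on the render ring,
 * cmd = 0x18800000 has CMD_TYPE(cmd) == 0 (MI), so its 9-bit opcode is
 * 0x18800000 >> 23 == 0x31, i.e. OP_MI_BATCH_BUFFER_START.
 */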

#define   MAX_CMD_BUDGET			0x7fffffff
#define   MI_WAIT_FOR_PLANE_C_FLIP_PENDING      (1<<15)
#define   MI_WAIT_FOR_PLANE_B_FLIP_PENDING      (1<<9)
#define   MI_WAIT_FOR_PLANE_A_FLIP_PENDING      (1<<1)

#define   MI_WAIT_FOR_SPRITE_C_FLIP_PENDING      (1<<20)
#define   MI_WAIT_FOR_SPRITE_B_FLIP_PENDING      (1<<10)
#define   MI_WAIT_FOR_SPRITE_A_FLIP_PENDING      (1<<2)

/* Render Command Map */

/* MI_* command Opcode (28:23) */
#define OP_MI_NOOP                          0x0
#define OP_MI_SET_PREDICATE                 0x1  /* HSW+ */
#define OP_MI_USER_INTERRUPT                0x2
#define OP_MI_WAIT_FOR_EVENT                0x3
#define OP_MI_FLUSH                         0x4
#define OP_MI_ARB_CHECK                     0x5
#define OP_MI_RS_CONTROL                    0x6  /* HSW+ */
#define OP_MI_REPORT_HEAD                   0x7
#define OP_MI_ARB_ON_OFF                    0x8
#define OP_MI_URB_ATOMIC_ALLOC              0x9  /* HSW+ */
#define OP_MI_BATCH_BUFFER_END              0xA
#define OP_MI_SUSPEND_FLUSH                 0xB
#define OP_MI_PREDICATE                     0xC  /* IVB+ */
#define OP_MI_TOPOLOGY_FILTER               0xD  /* IVB+ */
#define OP_MI_SET_APPID                     0xE  /* IVB+ */
#define OP_MI_RS_CONTEXT                    0xF  /* HSW+ */
#define OP_MI_LOAD_SCAN_LINES_INCL          0x12 /* HSW+ */
#define OP_MI_DISPLAY_FLIP                  0x14
#define OP_MI_SEMAPHORE_MBOX                0x16
#define OP_MI_SET_CONTEXT                   0x18
#define OP_MI_URB_CLEAR                     0x19
#define OP_MI_MATH                          0x1A
#define OP_MI_SEMAPHORE_SIGNAL		    0x1B  /* BDW+ */
#define OP_MI_SEMAPHORE_WAIT		    0x1C  /* BDW+ */

#define OP_MI_STORE_DATA_IMM                0x20
#define OP_MI_STORE_DATA_INDEX              0x21
#define OP_MI_LOAD_REGISTER_IMM             0x22
#define OP_MI_UPDATE_GTT                    0x23
#define OP_MI_STORE_REGISTER_MEM            0x24
#define OP_MI_FLUSH_DW                      0x26
#define OP_MI_CLFLUSH                       0x27
#define OP_MI_REPORT_PERF_COUNT             0x28
#define OP_MI_LOAD_REGISTER_MEM             0x29  /* HSW+ */
#define OP_MI_LOAD_REGISTER_REG             0x2A  /* HSW+ */
#define OP_MI_RS_STORE_DATA_IMM             0x2B  /* HSW+ */
#define OP_MI_LOAD_URB_MEM                  0x2C  /* HSW+ */
#define OP_MI_STORE_URM_MEM                 0x2D  /* HSW+ */
#define OP_MI_2E			    0x2E  /* BDW+ */
#define OP_MI_2F			    0x2F  /* BDW+ */
#define OP_MI_BATCH_BUFFER_START            0x31

/* Bit definition for dword 0 */
#define _CMDBIT_BB_START_IN_PPGTT	(1UL << 8)

#define OP_MI_CONDITIONAL_BATCH_BUFFER_END  0x36

#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
#define BATCH_BUFFER_ADR_SPACE_BIT(x)	(((x) >> 8) & 1U)
#define BATCH_BUFFER_2ND_LEVEL_BIT(x)   ((x) >> 22 & 1U)

/* 2D command: Opcode (28:22) */
#define OP_2D(x)    ((2<<7) | (x))

#define OP_XY_SETUP_BLT                             OP_2D(0x1)
#define OP_XY_SETUP_CLIP_BLT                        OP_2D(0x3)
#define OP_XY_SETUP_MONO_PATTERN_SL_BLT             OP_2D(0x11)
#define OP_XY_PIXEL_BLT                             OP_2D(0x24)
#define OP_XY_SCANLINES_BLT                         OP_2D(0x25)
#define OP_XY_TEXT_BLT                              OP_2D(0x26)
#define OP_XY_TEXT_IMMEDIATE_BLT                    OP_2D(0x31)
#define OP_XY_COLOR_BLT                             OP_2D(0x50)
#define OP_XY_PAT_BLT                               OP_2D(0x51)
#define OP_XY_MONO_PAT_BLT                          OP_2D(0x52)
#define OP_XY_SRC_COPY_BLT                          OP_2D(0x53)
#define OP_XY_MONO_SRC_COPY_BLT                     OP_2D(0x54)
#define OP_XY_FULL_BLT                              OP_2D(0x55)
#define OP_XY_FULL_MONO_SRC_BLT                     OP_2D(0x56)
#define OP_XY_FULL_MONO_PATTERN_BLT                 OP_2D(0x57)
#define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT        OP_2D(0x58)
#define OP_XY_MONO_PAT_FIXED_BLT                    OP_2D(0x59)
#define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT           OP_2D(0x71)
#define OP_XY_PAT_BLT_IMMEDIATE                     OP_2D(0x72)
#define OP_XY_SRC_COPY_CHROMA_BLT                   OP_2D(0x73)
#define OP_XY_FULL_IMMEDIATE_PATTERN_BLT            OP_2D(0x74)
#define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT   OP_2D(0x75)
#define OP_XY_PAT_CHROMA_BLT                        OP_2D(0x76)
#define OP_XY_PAT_CHROMA_BLT_IMMEDIATE              OP_2D(0x77)

/* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
#define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
	((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
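
/*
 * The token built here mirrors the top 16 opcode bits of the command DWord:
 * e.g. OP_PIPE_CONTROL == OP_3D_MEDIA(0x3, 0x2, 0x00) ==
 * (3 << 13) | (3 << 11) | (2 << 8) == 0x7a00, which is exactly what
 * get_opcode() returns for a PIPE_CONTROL DWord (0x7a000000 >> 16).
 */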

#define OP_STATE_PREFETCH                       OP_3D_MEDIA(0x0, 0x0, 0x03)

#define OP_STATE_BASE_ADDRESS                   OP_3D_MEDIA(0x0, 0x1, 0x01)
#define OP_STATE_SIP                            OP_3D_MEDIA(0x0, 0x1, 0x02)
#define OP_3D_MEDIA_0_1_4			OP_3D_MEDIA(0x0, 0x1, 0x04)

#define OP_3DSTATE_VF_STATISTICS_GM45           OP_3D_MEDIA(0x1, 0x0, 0x0B)

#define OP_PIPELINE_SELECT                      OP_3D_MEDIA(0x1, 0x1, 0x04)

#define OP_MEDIA_VFE_STATE                      OP_3D_MEDIA(0x2, 0x0, 0x0)
#define OP_MEDIA_CURBE_LOAD                     OP_3D_MEDIA(0x2, 0x0, 0x1)
#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD      OP_3D_MEDIA(0x2, 0x0, 0x2)
#define OP_MEDIA_GATEWAY_STATE                  OP_3D_MEDIA(0x2, 0x0, 0x3)
#define OP_MEDIA_STATE_FLUSH                    OP_3D_MEDIA(0x2, 0x0, 0x4)

#define OP_MEDIA_OBJECT                         OP_3D_MEDIA(0x2, 0x1, 0x0)
#define OP_MEDIA_OBJECT_PRT                     OP_3D_MEDIA(0x2, 0x1, 0x2)
#define OP_MEDIA_OBJECT_WALKER                  OP_3D_MEDIA(0x2, 0x1, 0x3)
#define OP_GPGPU_WALKER                         OP_3D_MEDIA(0x2, 0x1, 0x5)

#define OP_3DSTATE_CLEAR_PARAMS                 OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
#define OP_3DSTATE_DEPTH_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
#define OP_3DSTATE_STENCIL_BUFFER               OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
#define OP_3DSTATE_HIER_DEPTH_BUFFER            OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
#define OP_3DSTATE_VERTEX_BUFFERS               OP_3D_MEDIA(0x3, 0x0, 0x08)
#define OP_3DSTATE_VERTEX_ELEMENTS              OP_3D_MEDIA(0x3, 0x0, 0x09)
#define OP_3DSTATE_INDEX_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x0A)
#define OP_3DSTATE_VF_STATISTICS                OP_3D_MEDIA(0x3, 0x0, 0x0B)
#define OP_3DSTATE_VF                           OP_3D_MEDIA(0x3, 0x0, 0x0C)  /* HSW+ */
#define OP_3DSTATE_CC_STATE_POINTERS            OP_3D_MEDIA(0x3, 0x0, 0x0E)
#define OP_3DSTATE_SCISSOR_STATE_POINTERS       OP_3D_MEDIA(0x3, 0x0, 0x0F)
#define OP_3DSTATE_VS                           OP_3D_MEDIA(0x3, 0x0, 0x10)
#define OP_3DSTATE_GS                           OP_3D_MEDIA(0x3, 0x0, 0x11)
#define OP_3DSTATE_CLIP                         OP_3D_MEDIA(0x3, 0x0, 0x12)
#define OP_3DSTATE_SF                           OP_3D_MEDIA(0x3, 0x0, 0x13)
#define OP_3DSTATE_WM                           OP_3D_MEDIA(0x3, 0x0, 0x14)
#define OP_3DSTATE_CONSTANT_VS                  OP_3D_MEDIA(0x3, 0x0, 0x15)
#define OP_3DSTATE_CONSTANT_GS                  OP_3D_MEDIA(0x3, 0x0, 0x16)
#define OP_3DSTATE_CONSTANT_PS                  OP_3D_MEDIA(0x3, 0x0, 0x17)
#define OP_3DSTATE_SAMPLE_MASK                  OP_3D_MEDIA(0x3, 0x0, 0x18)
#define OP_3DSTATE_CONSTANT_HS                  OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
#define OP_3DSTATE_CONSTANT_DS                  OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
#define OP_3DSTATE_HS                           OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
#define OP_3DSTATE_TE                           OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
#define OP_3DSTATE_DS                           OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
#define OP_3DSTATE_STREAMOUT                    OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
#define OP_3DSTATE_SBE                          OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
#define OP_3DSTATE_PS                           OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC   OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
#define OP_3DSTATE_BLEND_STATE_POINTERS         OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
#define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
#define OP_3DSTATE_URB_VS                       OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
#define OP_3DSTATE_URB_HS                       OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
#define OP_3DSTATE_URB_DS                       OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
#define OP_3DSTATE_URB_GS                       OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
#define OP_3DSTATE_GATHER_CONSTANT_VS           OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_GS           OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_HS           OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_DS           OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_PS           OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTF_VS             OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTF_PS             OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTI_VS             OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTI_PS             OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTB_VS             OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTB_PS             OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
#define OP_3DSTATE_DX9_LOCAL_VALID_VS           OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
#define OP_3DSTATE_DX9_LOCAL_VALID_PS           OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
#define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS       OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
#define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS       OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_VS        OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_GS        OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_HS        OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_DS        OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_PS        OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */

#define OP_3DSTATE_VF_INSTANCING 		OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
#define OP_3DSTATE_VF_SGVS  			OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
#define OP_3DSTATE_VF_TOPOLOGY   		OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
#define OP_3DSTATE_WM_CHROMAKEY   		OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
#define OP_3DSTATE_PS_BLEND   			OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
#define OP_3DSTATE_WM_DEPTH_STENCIL   		OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
#define OP_3DSTATE_PS_EXTRA   			OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
#define OP_3DSTATE_RASTER   			OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
#define OP_3DSTATE_SBE_SWIZ   			OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
#define OP_3DSTATE_WM_HZ_OP   			OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
#define OP_3DSTATE_COMPONENT_PACKING		OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */

#define OP_3DSTATE_DRAWING_RECTANGLE            OP_3D_MEDIA(0x3, 0x1, 0x00)
#define OP_3DSTATE_SAMPLER_PALETTE_LOAD0        OP_3D_MEDIA(0x3, 0x1, 0x02)
#define OP_3DSTATE_CHROMA_KEY                   OP_3D_MEDIA(0x3, 0x1, 0x04)
#define OP_SNB_3DSTATE_DEPTH_BUFFER             OP_3D_MEDIA(0x3, 0x1, 0x05)
#define OP_3DSTATE_POLY_STIPPLE_OFFSET          OP_3D_MEDIA(0x3, 0x1, 0x06)
#define OP_3DSTATE_POLY_STIPPLE_PATTERN         OP_3D_MEDIA(0x3, 0x1, 0x07)
#define OP_3DSTATE_LINE_STIPPLE                 OP_3D_MEDIA(0x3, 0x1, 0x08)
#define OP_3DSTATE_AA_LINE_PARAMS               OP_3D_MEDIA(0x3, 0x1, 0x0A)
#define OP_3DSTATE_GS_SVB_INDEX                 OP_3D_MEDIA(0x3, 0x1, 0x0B)
#define OP_3DSTATE_SAMPLER_PALETTE_LOAD1        OP_3D_MEDIA(0x3, 0x1, 0x0C)
#define OP_3DSTATE_MULTISAMPLE_BDW		OP_3D_MEDIA(0x3, 0x0, 0x0D)
#define OP_SNB_3DSTATE_STENCIL_BUFFER           OP_3D_MEDIA(0x3, 0x1, 0x0E)
#define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER        OP_3D_MEDIA(0x3, 0x1, 0x0F)
#define OP_SNB_3DSTATE_CLEAR_PARAMS             OP_3D_MEDIA(0x3, 0x1, 0x10)
#define OP_3DSTATE_MONOFILTER_SIZE              OP_3D_MEDIA(0x3, 0x1, 0x11)
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS       OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS       OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS       OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS       OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS       OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
#define OP_3DSTATE_SO_DECL_LIST                 OP_3D_MEDIA(0x3, 0x1, 0x17)
#define OP_3DSTATE_SO_BUFFER                    OP_3D_MEDIA(0x3, 0x1, 0x18)
#define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC     OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
#define OP_3DSTATE_GATHER_POOL_ALLOC            OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
#define OP_3DSTATE_SAMPLE_PATTERN               OP_3D_MEDIA(0x3, 0x1, 0x1C)
#define OP_PIPE_CONTROL                         OP_3D_MEDIA(0x3, 0x2, 0x00)
#define OP_3DPRIMITIVE                          OP_3D_MEDIA(0x3, 0x3, 0x00)

/* VCCP Command Parser */

/*
 * The MFX and VBE command definitions below are from the vaapi intel-driver
 * project (BSD License):
 * git://anongit.freedesktop.org/vaapi/intel-driver
 * src/i965_defines.h
 */

#define OP_MFX(pipeline, op, sub_opa, sub_opb)     \
	(3 << 13 | \
	 (pipeline) << 11 | \
	 (op) << 8 | \
	 (sub_opa) << 5 | \
	 (sub_opb))

#define OP_MFX_PIPE_MODE_SELECT                    OP_MFX(2, 0, 0, 0)  /* ALL */
#define OP_MFX_SURFACE_STATE                       OP_MFX(2, 0, 0, 1)  /* ALL */
#define OP_MFX_PIPE_BUF_ADDR_STATE                 OP_MFX(2, 0, 0, 2)  /* ALL */
#define OP_MFX_IND_OBJ_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 3)  /* ALL */
#define OP_MFX_BSP_BUF_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 4)  /* ALL */
#define OP_2_0_0_5                                 OP_MFX(2, 0, 0, 5)  /* ALL */
#define OP_MFX_STATE_POINTER                       OP_MFX(2, 0, 0, 6)  /* ALL */
#define OP_MFX_QM_STATE                            OP_MFX(2, 0, 0, 7)  /* IVB+ */
#define OP_MFX_FQM_STATE                           OP_MFX(2, 0, 0, 8)  /* IVB+ */
#define OP_MFX_PAK_INSERT_OBJECT                   OP_MFX(2, 0, 2, 8)  /* IVB+ */
#define OP_MFX_STITCH_OBJECT                       OP_MFX(2, 0, 2, 0xA)  /* IVB+ */

#define OP_MFD_IT_OBJECT                           OP_MFX(2, 0, 1, 9) /* ALL */

#define OP_MFX_WAIT                                OP_MFX(1, 0, 0, 0) /* IVB+ */
#define OP_MFX_AVC_IMG_STATE                       OP_MFX(2, 1, 0, 0) /* ALL */
#define OP_MFX_AVC_QM_STATE                        OP_MFX(2, 1, 0, 1) /* ALL */
#define OP_MFX_AVC_DIRECTMODE_STATE                OP_MFX(2, 1, 0, 2) /* ALL */
#define OP_MFX_AVC_SLICE_STATE                     OP_MFX(2, 1, 0, 3) /* ALL */
#define OP_MFX_AVC_REF_IDX_STATE                   OP_MFX(2, 1, 0, 4) /* ALL */
#define OP_MFX_AVC_WEIGHTOFFSET_STATE              OP_MFX(2, 1, 0, 5) /* ALL */
#define OP_MFD_AVC_PICID_STATE                     OP_MFX(2, 1, 1, 5) /* HSW+ */
#define OP_MFD_AVC_DPB_STATE			   OP_MFX(2, 1, 1, 6) /* IVB+ */
#define OP_MFD_AVC_SLICEADDR                       OP_MFX(2, 1, 1, 7) /* IVB+ */
#define OP_MFD_AVC_BSD_OBJECT                      OP_MFX(2, 1, 1, 8) /* ALL */
#define OP_MFC_AVC_PAK_OBJECT                      OP_MFX(2, 1, 2, 9) /* ALL */

#define OP_MFX_VC1_PRED_PIPE_STATE                 OP_MFX(2, 2, 0, 1) /* ALL */
#define OP_MFX_VC1_DIRECTMODE_STATE                OP_MFX(2, 2, 0, 2) /* ALL */
#define OP_MFD_VC1_SHORT_PIC_STATE                 OP_MFX(2, 2, 1, 0) /* IVB+ */
#define OP_MFD_VC1_LONG_PIC_STATE                  OP_MFX(2, 2, 1, 1) /* IVB+ */
#define OP_MFD_VC1_BSD_OBJECT                      OP_MFX(2, 2, 1, 8) /* ALL */

#define OP_MFX_MPEG2_PIC_STATE                     OP_MFX(2, 3, 0, 0) /* ALL */
#define OP_MFX_MPEG2_QM_STATE                      OP_MFX(2, 3, 0, 1) /* ALL */
#define OP_MFD_MPEG2_BSD_OBJECT                    OP_MFX(2, 3, 1, 8) /* ALL */
#define OP_MFC_MPEG2_SLICEGROUP_STATE              OP_MFX(2, 3, 2, 3) /* ALL */
#define OP_MFC_MPEG2_PAK_OBJECT                    OP_MFX(2, 3, 2, 9) /* ALL */

#define OP_MFX_2_6_0_0                             OP_MFX(2, 6, 0, 0) /* IVB+ */
#define OP_MFX_2_6_0_8                             OP_MFX(2, 6, 0, 8) /* IVB+ */
#define OP_MFX_2_6_0_9                             OP_MFX(2, 6, 0, 9) /* IVB+ */

#define OP_MFX_JPEG_PIC_STATE                      OP_MFX(2, 7, 0, 0)
#define OP_MFX_JPEG_HUFF_TABLE_STATE               OP_MFX(2, 7, 0, 2)
#define OP_MFD_JPEG_BSD_OBJECT                     OP_MFX(2, 7, 1, 8)

#define OP_VEB(pipeline, op, sub_opa, sub_opb) \
	(3 << 13 | \
	 (pipeline) << 11 | \
	 (op) << 8 | \
	 (sub_opa) << 5 | \
	 (sub_opb))

#define OP_VEB_SURFACE_STATE                       OP_VEB(2, 4, 0, 0)
#define OP_VEB_STATE                               OP_VEB(2, 4, 0, 2)
#define OP_VEB_DNDI_IECP_STATE                     OP_VEB(2, 4, 0, 3)

struct parser_exec_state;

typedef int (*parser_cmd_handler)(struct parser_exec_state *s);

#define GVT_CMD_HASH_BITS   7

/* which DWords need address fix */
#define ADDR_FIX_1(x1)			(1 << (x1))
#define ADDR_FIX_2(x1, x2)		(ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
#define ADDR_FIX_3(x1, x2, x3)		(ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
#define ADDR_FIX_4(x1, x2, x3, x4)	(ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
#define ADDR_FIX_5(x1, x2, x3, x4, x5)  (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
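
/*
 * Example: a command carrying a graphics address in DWords 1 and 2 would
 * use ADDR_FIX_2(1, 2) as its addr_bitmap, marking those DWords for
 * address audit during the scan.
 */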

struct cmd_info {
	char *name;
	u32 opcode;

#define F_LEN_MASK	(1U<<0)
#define F_LEN_CONST  1U
#define F_LEN_VAR    0U

/*
 * command has its own ip advance logic
 * e.g. MI_BATCH_BUFFER_START, MI_BATCH_BUFFER_END
 */
#define F_IP_ADVANCE_CUSTOM (1<<1)

#define F_POST_HANDLE	(1<<2)
	u32 flag;

#define R_RCS	(1 << RCS)
#define R_VCS1  (1 << VCS)
#define R_VCS2  (1 << VCS2)
#define R_VCS	(R_VCS1 | R_VCS2)
#define R_BCS	(1 << BCS)
#define R_VECS	(1 << VECS)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
	/* rings that support this cmd: BLT/RCS/VCS/VECS */
	uint16_t rings;

	/* devices that support this cmd: SNB/IVB/HSW/... */
	uint16_t devices;

	/* which DWords are addresses that need fix up.
	 * A 0 bit means the DWord is a plain 32-bit operand;
	 * a 1 bit means it is an address operand, which can be 32-bit
	 * or 64-bit depending on the architecture (see
	 * "gmadr_bytes_in_cmd" in the device info).
	 * Whatever the address width, each address takes exactly one
	 * bit in the bitmap.
	 */
	uint16_t addr_bitmap;

	/* flag == F_LEN_CONST : command length
	 * flag == F_LEN_VAR : length bias bits
	 * Note: length is in DWord
	 */
	uint8_t	len;

	parser_cmd_handler handler;
};

struct cmd_entry {
	struct hlist_node hlist;
	struct cmd_info *info;
};

enum {
	RING_BUFFER_INSTRUCTION,
	BATCH_BUFFER_INSTRUCTION,
	BATCH_BUFFER_2ND_LEVEL,
};

enum {
	GTT_BUFFER,
	PPGTT_BUFFER
};

struct parser_exec_state {
	struct intel_vgpu *vgpu;
	int ring_id;

	int buf_type;

	/* batch buffer address type */
	int buf_addr_type;

	/* graphics memory address of ring buffer start */
	unsigned long ring_start;
	unsigned long ring_size;
	unsigned long ring_head;
	unsigned long ring_tail;

	/* instruction graphics memory address */
	unsigned long ip_gma;

	/* mapped va of ip_gma */
	void *ip_va;
	void *rb_va;

	void *ret_bb_va;
	/* next instruction when return from batch buffer to ring buffer */
	unsigned long ret_ip_gma_ring;

	/* next instruction when return from 2nd batch buffer to batch buffer */
	unsigned long ret_ip_gma_bb;

	/* batch buffer address type (GTT or PPGTT)
	 * used when ret from 2nd level batch buffer
	 */
	int saved_buf_addr_type;

	struct cmd_info *info;

	struct intel_vgpu_workload *workload;
};

#define gmadr_dw_number(s)	\
	(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
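/* e.g. gmadr_bytes_in_cmd == 8 on the BDW/SKL parts this file supports,
 * so an in-command address occupies 2 DWords
 */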

static unsigned long bypass_scan_mask = 0;

/* ring ALL, type = 0 */
static struct sub_op_bits sub_op_mi[] = {
	{31, 29},
	{28, 23},
};

static struct decode_info decode_info_mi = {
	"MI",
	OP_LEN_MI,
	ARRAY_SIZE(sub_op_mi),
	sub_op_mi,
};

/* ring RCS, command type 2 */
static struct sub_op_bits sub_op_2d[] = {
	{31, 29},
	{28, 22},
};

static struct decode_info decode_info_2d = {
	"2D",
	OP_LEN_2D,
	ARRAY_SIZE(sub_op_2d),
	sub_op_2d,
};

/* ring RCS, command type 3 */
static struct sub_op_bits sub_op_3d_media[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 16},
};

static struct decode_info decode_info_3d_media = {
	"3D_Media",
	OP_LEN_3D_MEDIA,
	ARRAY_SIZE(sub_op_3d_media),
	sub_op_3d_media,
};

/* ring VCS, command type 3 */
static struct sub_op_bits sub_op_mfx_vc[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 21},
	{20, 16},
};

static struct decode_info decode_info_mfx_vc = {
	"MFX_VC",
	OP_LEN_MFX_VC,
	ARRAY_SIZE(sub_op_mfx_vc),
	sub_op_mfx_vc,
};

/* ring VECS, command type 3 */
static struct sub_op_bits sub_op_vebox[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 21},
	{20, 16},
};

static struct decode_info decode_info_vebox = {
	"VEBOX",
	OP_LEN_VEBOX,
	ARRAY_SIZE(sub_op_vebox),
	sub_op_vebox,
};

static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
	[RCS] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_3d_media,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VCS] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_mfx_vc,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[BCS] = {
		&decode_info_mi,
		NULL,
		&decode_info_2d,
		NULL,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VECS] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_vebox,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VCS2] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_mfx_vc,
		NULL,
		NULL,
		NULL,
		NULL,
	},
};

static inline u32 get_opcode(u32 cmd, int ring_id)
{
	struct decode_info *d_info;

	if (ring_id >= I915_NUM_ENGINES)
		return INVALID_OP;

	d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
	if (d_info == NULL)
		return INVALID_OP;

	return cmd >> (32 - d_info->op_len);
}

static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
		unsigned int opcode, int ring_id)
{
	struct cmd_entry *e;

	hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
		if ((opcode == e->info->opcode) &&
				(e->info->rings & (1 << ring_id)))
			return e->info;
	}
	return NULL;
}

static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
		u32 cmd, int ring_id)
{
	u32 opcode;

	opcode = get_opcode(cmd, ring_id);
	if (opcode == INVALID_OP)
		return NULL;

	return find_cmd_entry(gvt, opcode, ring_id);
}

static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
{
	return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
}
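
/* e.g. sub_op_val(cmd, 28, 23) extracts the 6-bit MI opcode field of cmd */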

static inline void print_opcode(u32 cmd, int ring_id)
{
	struct decode_info *d_info;
	int i;

	if (ring_id >= I915_NUM_ENGINES)
		return;

	d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
	if (d_info == NULL)
		return;

	gvt_err("opcode=0x%x %s sub_ops:",
			cmd >> (32 - d_info->op_len), d_info->name);

	for (i = 0; i < d_info->nr_sub_op; i++)
		pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi,
					d_info->sub_op[i].low));

	pr_err("\n");
}

static inline u32 *cmd_ptr(struct parser_exec_state *s, int index)
{
	return s->ip_va + (index << 2);
}

static inline u32 cmd_val(struct parser_exec_state *s, int index)
{
	return *cmd_ptr(s, index);
}

static void parser_exec_state_dump(struct parser_exec_state *s)
{
	int cnt = 0;
	int i;

	gvt_err("  vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
			" ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
			s->ring_id, s->ring_start, s->ring_start + s->ring_size,
			s->ring_head, s->ring_tail);

	gvt_err("  %s %s ip_gma(%08lx) ",
			s->buf_type == RING_BUFFER_INSTRUCTION ?
			"RING_BUFFER" : "BATCH_BUFFER",
			s->buf_addr_type == GTT_BUFFER ?
			"GTT" : "PPGTT", s->ip_gma);

	if (s->ip_va == NULL) {
		gvt_err(" ip_va(NULL)");
		return;
	}

	gvt_err("  ip_va=%p: %08x %08x %08x %08x\n",
			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
			cmd_val(s, 2), cmd_val(s, 3));

	print_opcode(cmd_val(s, 0), s->ring_id);

	/* print the whole page to trace */
	pr_err("    ip_va=%p: %08x %08x %08x %08x\n",
			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
			cmd_val(s, 2), cmd_val(s, 3));

	s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);

	while (cnt < 1024) {
		pr_err("ip_va=%p: ", s->ip_va);
		for (i = 0; i < 8; i++)
			pr_err("%08x ", cmd_val(s, i));
		pr_err("\n");

		s->ip_va += 8 * sizeof(u32);
		cnt += 8;
	}
}

static inline void update_ip_va(struct parser_exec_state *s)
{
	unsigned long len = 0;

	if (WARN_ON(s->ring_head == s->ring_tail))
		return;

	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
		unsigned long ring_top = s->ring_start + s->ring_size;

		if (s->ring_head > s->ring_tail) {
			if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top)
				len = (s->ip_gma - s->ring_head);
			else if (s->ip_gma >= s->ring_start &&
					s->ip_gma <= s->ring_tail)
				len = (ring_top - s->ring_head) +
					(s->ip_gma - s->ring_start);
		} else
			len = (s->ip_gma - s->ring_head);

		s->ip_va = s->rb_va + len;
	} else {/* shadow batch buffer */
		s->ip_va = s->ret_bb_va;
	}
}
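
/*
 * Ring-wrap example: with ring_start = 0x1000, ring_size = 0x1000
 * (ring_top = 0x2000), ring_head = 0x1f00 and ip_gma = 0x1100, the
 * instruction sits in the wrapped region, so len = (0x2000 - 0x1f00) +
 * (0x1100 - 0x1000) = 0x200 and ip_va = rb_va + 0x200.
 */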

static inline int ip_gma_set(struct parser_exec_state *s,
		unsigned long ip_gma)
{
	WARN_ON(!IS_ALIGNED(ip_gma, 4));

	s->ip_gma = ip_gma;
	update_ip_va(s);
	return 0;
}

static inline int ip_gma_advance(struct parser_exec_state *s,
		unsigned int dw_len)
{
	s->ip_gma += (dw_len << 2);

	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
		if (s->ip_gma >= s->ring_start + s->ring_size)
			s->ip_gma -= s->ring_size;
		update_ip_va(s);
	} else {
		s->ip_va += (dw_len << 2);
	}

	return 0;
}

static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
{
	if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
		return info->len;
	else
		return (cmd & ((1U << info->len) - 1)) + 2;
}
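
/*
 * For F_LEN_VAR commands, info->len is the bit width of the DWord-length
 * field at the bottom of the header: e.g. with info->len == 8, a field
 * value of 0x02 means the command is 0x02 + 2 == 4 DWords long.
 */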

static inline int cmd_length(struct parser_exec_state *s)
{
	return get_cmd_length(s->info, cmd_val(s, 0));
}

/* do not remove this, some platform may need clflush here */
#define patch_value(s, addr, val) do { \
	*addr = val; \
} while (0)

static bool is_shadowed_mmio(unsigned int offset)
{
	bool ret = false;

	if ((offset == 0x2168) || /*BB current head register UDW */
	    (offset == 0x2140) || /*BB current header register */
	    (offset == 0x211c) || /*second BB header register UDW */
	    (offset == 0x2114)) { /*second BB header register */
		ret = true;
	}
	return ret;
}

static int cmd_reg_handler(struct parser_exec_state *s,
	unsigned int offset, unsigned int index, char *cmd)
{
	struct intel_vgpu *vgpu = s->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;

	if (offset + 4 > gvt->device_info.mmio_size) {
		gvt_err("%s access to (%x) outside of MMIO range\n",
				cmd, offset);
		return -EINVAL;
	}

	if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
		gvt_err("vgpu%d: %s access to non-render register (%x)\n",
				s->vgpu->id, cmd, offset);
		return 0;
	}

	if (is_shadowed_mmio(offset)) {
		gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
				s->vgpu->id, offset);
		return 0;
	}

	if (offset == i915_mmio_reg_offset(DERRMR) ||
		offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
		/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
	}

	/* TODO: Update the global mask if this MMIO is a masked-MMIO */
	intel_gvt_mmio_set_cmd_accessed(gvt, offset);
	return 0;
}

#define cmd_reg(s, i) \
	(cmd_val(s, i) & GENMASK(22, 2))

#define cmd_reg_inhibit(s, i) \
	(cmd_val(s, i) & GENMASK(22, 18))

#define cmd_gma(s, i) \
	(cmd_val(s, i) & GENMASK(31, 2))

#define cmd_gma_hi(s, i) \
	(cmd_val(s, i) & GENMASK(15, 0))

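/*
 * MI_LOAD_REGISTER_IMM payload is (offset, value) DWord pairs: cmd_reg()
 * masks bits 22:2 of the offset DWord to get the MMIO offset, while
 * cmd_reg_inhibit() tests bits 22:18; a non-zero value there makes the
 * BDW LRI/LRR/LRM handlers below reject the command.
 */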
static int cmd_handler_lri(struct parser_exec_state *s)
{
	int i, ret = 0;
	int cmd_len = cmd_length(s);
	struct intel_gvt *gvt = s->vgpu->gvt;

	for (i = 1; i < cmd_len; i += 2) {
		if (IS_BROADWELL(gvt->dev_priv) &&
				(s->ring_id != RCS)) {
			if (s->ring_id == BCS &&
					cmd_reg(s, i) ==
					i915_mmio_reg_offset(DERRMR))
				ret |= 0;
			else
				ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
		}
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
	}
	return ret;
}

static int cmd_handler_lrr(struct parser_exec_state *s)
{
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len; i += 2) {
		if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
			ret |= ((cmd_reg_inhibit(s, i) ||
					(cmd_reg_inhibit(s, i + 1)))) ?
				-EINVAL : 0;
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
		ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
	}
	return ret;
}

static inline int cmd_address_audit(struct parser_exec_state *s,
		unsigned long guest_gma, int op_size, bool index_mode);

static int cmd_handler_lrm(struct parser_exec_state *s)
{
	struct intel_gvt *gvt = s->vgpu->gvt;
	int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len;) {
		if (IS_BROADWELL(gvt->dev_priv))
			ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
		if (cmd_val(s, 0) & (1 << 22)) {
			gma = cmd_gma(s, i + 1);
			if (gmadr_bytes == 8)
				gma |= (cmd_gma_hi(s, i + 2)) << 32;
			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
		}
		i += gmadr_dw_number(s) + 1;
	}
	return ret;
}

static int cmd_handler_srm(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len;) {
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
		if (cmd_val(s, 0) & (1 << 22)) {
			gma = cmd_gma(s, i + 1);
			if (gmadr_bytes == 8)
				gma |= (cmd_gma_hi(s, i + 2)) << 32;
			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
		}
		i += gmadr_dw_number(s) + 1;
	}
	return ret;
}

struct cmd_interrupt_event {
	int pipe_control_notify;
	int mi_flush_dw;
	int mi_user_interrupt;
};

static struct cmd_interrupt_event cmd_interrupt_events[] = {
	[RCS] = {
		.pipe_control_notify = RCS_PIPE_CONTROL,
		.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
		.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
	},
	[BCS] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = BCS_MI_FLUSH_DW,
		.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
	},
	[VCS] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VCS_MI_FLUSH_DW,
		.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
	},
	[VCS2] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VCS2_MI_FLUSH_DW,
		.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
	},
	[VECS] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VECS_MI_FLUSH_DW,
		.mi_user_interrupt = VECS_MI_USER_INTERRUPT,
	},
};

static int cmd_handler_pipe_control(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	bool index_mode = false;
	unsigned int post_sync;
	int ret = 0;

	post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;

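	/*
	 * post_sync op: 1 == write immediate data (qword at the address in
	 * DWords 2-3), 2 == write PS_DEPTH_COUNT (0x2350), 3 == write
	 * TIMESTAMP (0x2358); audited below.
	 */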
	/* LRI post sync */
	if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
		ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
	/* post sync */
	else if (post_sync) {
		if (post_sync == 2)
			ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
		else if (post_sync == 3)
			ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
		else if (post_sync == 1) {
			/* check ggtt */
			if ((cmd_val(s, 2) & (1 << 2))) {
				gma = cmd_val(s, 2) & GENMASK(31, 3);
				if (gmadr_bytes == 8)
					gma |= (cmd_gma_hi(s, 3)) << 32;
				/* Store Data Index */
				if (cmd_val(s, 1) & (1 << 21))
					index_mode = true;
				ret |= cmd_address_audit(s, gma, sizeof(u64),
						index_mode);
			}
		}
	}

	if (ret)
		return ret;

	if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
		set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
				s->workload->pending_events);
	return 0;
}

static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
	set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
			s->workload->pending_events);
	return 0;
}

static int cmd_advance_default(struct parser_exec_state *s)
{
	return ip_gma_advance(s, cmd_length(s));
}

static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
{
	int ret;

	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
		s->buf_type = BATCH_BUFFER_INSTRUCTION;
		ret = ip_gma_set(s, s->ret_ip_gma_bb);
		s->buf_addr_type = s->saved_buf_addr_type;
	} else {
		s->buf_type = RING_BUFFER_INSTRUCTION;
		s->buf_addr_type = GTT_BUFFER;
		if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
			s->ret_ip_gma_ring -= s->ring_size;
		ret = ip_gma_set(s, s->ret_ip_gma_ring);
	}
	return ret;
}

struct mi_display_flip_command_info {
	int pipe;
	int plane;
	int event;
	i915_reg_t stride_reg;
	i915_reg_t ctrl_reg;
	i915_reg_t surf_reg;
	u64 stride_val;
	u64 tile_val;
	u64 surf_val;
	bool async_flip;
};

struct plane_code_mapping {
	int pipe;
	int plane;
	int event;
};

static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
	struct plane_code_mapping gen8_plane_code[] = {
		[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
		[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
		[2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
		[3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
		[4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
		[5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
	};
	u32 dword0, dword1, dword2;
	u32 v;

	dword0 = cmd_val(s, 0);
	dword1 = cmd_val(s, 1);
	dword2 = cmd_val(s, 2);

	v = (dword0 & GENMASK(21, 19)) >> 19;
	if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
		return -EINVAL;

	info->pipe = gen8_plane_code[v].pipe;
	info->plane = gen8_plane_code[v].plane;
	info->event = gen8_plane_code[v].event;
	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
	info->tile_val = (dword1 & 0x1);
	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);

	if (info->plane == PLANE_A) {
		info->ctrl_reg = DSPCNTR(info->pipe);
		info->stride_reg = DSPSTRIDE(info->pipe);
		info->surf_reg = DSPSURF(info->pipe);
	} else if (info->plane == PLANE_B) {
		info->ctrl_reg = SPRCTL(info->pipe);
		info->stride_reg = SPRSTRIDE(info->pipe);
		info->surf_reg = SPRSURF(info->pipe);
	} else {
		WARN_ON(1);
		return -EINVAL;
	}
	return 0;
}

static int skl_decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
	u32 dword0 = cmd_val(s, 0);
	u32 dword1 = cmd_val(s, 1);
	u32 dword2 = cmd_val(s, 2);
	u32 plane = (dword0 & GENMASK(12, 8)) >> 8;

	switch (plane) {
	case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
		info->pipe = PIPE_A;
		info->event = PRIMARY_A_FLIP_DONE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
		info->pipe = PIPE_B;
		info->event = PRIMARY_B_FLIP_DONE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
		info->pipe = PIPE_C;
		info->event = PRIMARY_C_FLIP_DONE;
		break;
	default:
		gvt_err("unknown plane code %d\n", plane);
		return -EINVAL;
	}

	/* info->plane, not info->pipe: the pipe was set in the switch above */
	info->plane = PRIMARY_PLANE;
	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
	info->tile_val = (dword1 & GENMASK(2, 0));
	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);

	info->ctrl_reg = DSPCNTR(info->pipe);
	info->stride_reg = DSPSTRIDE(info->pipe);
	info->surf_reg = DSPSURF(info->pipe);

	return 0;
}

static int gen8_check_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
	u32 stride, tile;

	if (!info->async_flip)
		return 0;

	if (IS_SKYLAKE(dev_priv)) {
		stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
		tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
				GENMASK(12, 10)) >> 10;
	} else {
		stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
				GENMASK(15, 6)) >> 6;
		tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
	}

	if (stride != info->stride_val)
		gvt_dbg_cmd("cannot change stride during async flip\n");

	if (tile != info->tile_val)
		gvt_dbg_cmd("cannot change tile during async flip\n");

	return 0;
}

static int gen8_update_plane_mmio_from_mi_display_flip(
		struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
	struct intel_vgpu *vgpu = s->vgpu;

	set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
		      info->surf_val << 12);
	if (IS_SKYLAKE(dev_priv)) {
		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
			      info->stride_val);
		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
			      info->tile_val << 10);
	} else {
		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
			      info->stride_val << 6);
		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
			      info->tile_val << 10);
	}

	vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
	intel_vgpu_trigger_virtual_event(vgpu, info->event);
	return 0;
}

static int decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;

	if (IS_BROADWELL(dev_priv))
		return gen8_decode_mi_display_flip(s, info);
	if (IS_SKYLAKE(dev_priv))
		return skl_decode_mi_display_flip(s, info);

	return -ENODEV;
}

static int check_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;

	if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
		return gen8_check_mi_display_flip(s, info);
	return -ENODEV;
}

static int update_plane_mmio_from_mi_display_flip(
		struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;

	if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
		return gen8_update_plane_mmio_from_mi_display_flip(s, info);
	return -ENODEV;
}

static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
{
	struct mi_display_flip_command_info info;
	int ret;
	int i;
	int len = cmd_length(s);

	ret = decode_mi_display_flip(s, &info);
	if (ret) {
		gvt_err("fail to decode MI display flip command\n");
		return ret;
	}

	ret = check_mi_display_flip(s, &info);
	if (ret) {
		gvt_err("invalid MI display flip command\n");
		return ret;
	}

	ret = update_plane_mmio_from_mi_display_flip(s, &info);
	if (ret) {
		gvt_err("fail to update plane mmio\n");
		return ret;
	}

	for (i = 0; i < len; i++)
		patch_value(s, cmd_ptr(s, i), MI_NOOP);
	return 0;
}

static bool is_wait_for_flip_pending(u32 cmd)
{
	return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
			MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
			MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
			MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
			MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
			MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
}

static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
{
	u32 cmd = cmd_val(s, 0);

	if (!is_wait_for_flip_pending(cmd))
		return 0;

	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
	return 0;
}

static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
{
	unsigned long addr;
	unsigned long gma_high, gma_low;
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;

	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
		return INTEL_GVT_INVALID_ADDR;

	gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
	if (gmadr_bytes == 4) {
		addr = gma_low;
	} else {
		gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
		addr = (((unsigned long)gma_high) << 32) | gma_low;
	}
	return addr;
}
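
/*
 * With gmadr_bytes == 8 the batch-buffer address spans two DWords:
 * DWord 'index' holds bits 31:2 and DWord 'index + 1' holds bits 47:32
 * (BATCH_BUFFER_ADDR_HIGH_MASK keeps only the low 16 bits), giving a
 * 48-bit graphics memory address.
 */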

static inline int cmd_address_audit(struct parser_exec_state *s,
		unsigned long guest_gma, int op_size, bool index_mode)
{
	struct intel_vgpu *vgpu = s->vgpu;
	u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
	int i;
	int ret;

	if (op_size > max_surface_size) {
		gvt_err("command address audit fail name %s\n", s->info->name);
		return -EINVAL;
	}

	if (index_mode)	{
		if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
			ret = -EINVAL;
			goto err;
		}
	} else if ((!vgpu_gmadr_is_valid(s->vgpu, guest_gma)) ||
			(!vgpu_gmadr_is_valid(s->vgpu,
					      guest_gma + op_size - 1))) {
		ret = -EINVAL;
		goto err;
	}
	return 0;

err:
	gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
			s->info->name, guest_gma, op_size);

	pr_err("cmd dump: ");
	for (i = 0; i < cmd_length(s); i++) {
		if (!(i % 4))
			pr_err("\n%08x ", cmd_val(s, i));
		else
			pr_err("%08x ", cmd_val(s, i));
	}
	pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
			vgpu->id,
			vgpu_aperture_gmadr_base(vgpu),
			vgpu_aperture_gmadr_end(vgpu),
			vgpu_hidden_gmadr_base(vgpu),
			vgpu_hidden_gmadr_end(vgpu));
	return ret;
}

static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	int op_size = (cmd_length(s) - 3) * sizeof(u32);
	int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
	unsigned long gma, gma_low, gma_high;
	int ret = 0;

	/* check ppgtt */
	if (!(cmd_val(s, 0) & (1 << 22)))
		return 0;

	gma = cmd_val(s, 2) & GENMASK(31, 2);

	if (gmadr_bytes == 8) {
		gma_low = cmd_val(s, 1) & GENMASK(31, 2);
		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
		gma = (gma_high << 32) | gma_low;
		core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
	}
	ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
	return ret;
}

static inline int unexpected_cmd(struct parser_exec_state *s)
{
	gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
			s->vgpu->id, s->info->name);
	return -EINVAL;
}

static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
			sizeof(u32);
	unsigned long gma, gma_high;
	int ret = 0;

	if (!(cmd_val(s, 0) & (1 << 22)))
		return ret;

	gma = cmd_val(s, 1) & GENMASK(31, 2);
	if (gmadr_bytes == 8) {
		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
		gma = (gma_high << 32) | gma;
	}
	ret = cmd_address_audit(s, gma, op_size, false);
	return ret;
}

static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_clflush(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_conditional_batch_buffer_end(
		struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	bool index_mode = false;
	int ret = 0;

	/* Check post-sync and ppgtt bit */
	if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
		gma = cmd_val(s, 1) & GENMASK(31, 3);
		if (gmadr_bytes == 8)
			gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32;
		/* Store Data Index */
		if (cmd_val(s, 0) & (1 << 21))
			index_mode = true;
		ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
	}
	/* Check notify bit */
	if ((cmd_val(s, 0) & (1 << 8)))
		set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
				s->workload->pending_events);
	return ret;
}

static void addr_type_update_snb(struct parser_exec_state *s)
{
	if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
			(BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
		s->buf_addr_type = PPGTT_BUFFER;
	}
}


static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
		unsigned long gma, unsigned long end_gma, void *va)
{
	unsigned long copy_len, offset;
	unsigned long len = 0;
	unsigned long gpa;

	while (gma != end_gma) {
		gpa = intel_vgpu_gma_to_gpa(mm, gma);
		if (gpa == INTEL_GVT_INVALID_ADDR) {
			gvt_err("invalid gma address: %lx\n", gma);
			return -EFAULT;
		}

		offset = gma & (GTT_PAGE_SIZE - 1);

		copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
			GTT_PAGE_SIZE - offset : end_gma - gma;

		intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);

		len += copy_len;
		gma += copy_len;
	}
	return 0;
}
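
/*
 * copy_gma_to_hva() walks [gma, end_gma) page by page: each round
 * translates gma to a guest physical address, copies up to the end of
 * the current 4K GTT page, then advances.  E.g. a 6KB range starting
 * 1KB into a page is read as a 3KB chunk followed by a 3KB chunk.
 */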


/*
 * Check whether a batch buffer needs to be scanned. Currently
 * the only criterion is based on privilege.
 */
static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
	struct intel_gvt *gvt = s->vgpu->gvt;

	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
		/* BDW decides privilege based on address space */
		if (cmd_val(s, 0) & (1 << 8))
			return 0;
	}
	return 1;
}

static uint32_t find_bb_size(struct parser_exec_state *s)
{
	unsigned long gma = 0;
	struct cmd_info *info;
	uint32_t bb_size = 0;
	uint32_t cmd_len = 0;
	bool met_bb_end = false;
	u32 cmd;

	/* get the start gm address of the batch buffer */
	gma = get_gma_bb_from_cmd(s, 1);
	cmd = cmd_val(s, 0);

	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
	if (info == NULL) {
		gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
				cmd, get_opcode(cmd, s->ring_id));
		return -EINVAL;
	}
	do {
		copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
				gma, gma + 4, &cmd);
		info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
		if (info == NULL) {
			gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
				cmd, get_opcode(cmd, s->ring_id));
			return -EINVAL;
		}

		if (info->opcode == OP_MI_BATCH_BUFFER_END) {
			met_bb_end = true;
		} else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
			if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) {
				/* chained batch buffer */
				met_bb_end = true;
			}
		}
		cmd_len = get_cmd_length(info, cmd) << 2;
		bb_size += cmd_len;
		gma += cmd_len;

	} while (!met_bb_end);

	return bb_size;
}
1580
1581static int perform_bb_shadow(struct parser_exec_state *s)
1582{
1583	struct intel_shadow_bb_entry *entry_obj;
 
1584	unsigned long gma = 0;
1585	uint32_t bb_size;
1586	void *dst = NULL;
1587	int ret = 0;
 
 
 
1588
1589	/* get the start gm address of the batch buffer */
1590	gma = get_gma_bb_from_cmd(s, 1);
 
 
1591
1592	/* get the size of the batch buffer */
1593	bb_size = find_bb_size(s);
 
1594
1595	/* allocate shadow batch buffer */
1596	entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
1597	if (entry_obj == NULL)
1598		return -ENOMEM;
1599
1600	entry_obj->obj =
1601		i915_gem_object_create(&(s->vgpu->gvt->dev_priv->drm),
1602				       roundup(bb_size, PAGE_SIZE));
1603	if (IS_ERR(entry_obj->obj)) {
1604		ret = PTR_ERR(entry_obj->obj);
1605		goto free_entry;
1606	}
1607	entry_obj->len = bb_size;
1608	INIT_LIST_HEAD(&entry_obj->list);
1609
1610	dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
1611	if (IS_ERR(dst)) {
1612		ret = PTR_ERR(dst);
1613		goto put_obj;
1614	}
1615
1616	ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
1617	if (ret) {
1618		gvt_err("failed to set shadow batch to CPU\n");
1619		goto unmap_src;
1620	}
1621
1622	entry_obj->va = dst;
1623	entry_obj->bb_start_cmd_va = s->ip_va;
1624
1625	/* copy batch buffer to shadow batch buffer*/
1626	ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
1627			      gma, gma + bb_size,
1628			      dst);
1629	if (ret) {
1630		gvt_err("fail to copy guest batch buffer\n");
1631		goto unmap_src;
1632	}
1633
1634	list_add(&entry_obj->list, &s->workload->shadow_bb);
1635	/*
1636	 * ip_va saves the virtual address of the shadow batch buffer, while
1637	 * ip_gma saves the graphics address of the original batch buffer.
1638	 * As the shadow batch buffer is just a copy of the original one,
1639	 * it is safe to use the shadow buffer's va together with the
1640	 * original buffer's gma. After all, we don't want to pin the shadow
1641	 * buffer here (too early).
1642	 */
1643	s->ip_va = dst;
1644	s->ip_gma = gma;
1645
1646	return 0;
1647
1648unmap_src:
1649	i915_gem_object_unpin_map(entry_obj->obj);
1650put_obj:
1651	i915_gem_object_put(entry_obj->obj);
1652free_entry:
1653	kfree(entry_obj);
1654	return ret;
1655}
1656
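/*
 * MI_BATCH_BUFFER_START handler: only one level of nesting into a
 * second-level batch buffer is allowed. Record the return address for
 * the matching MI_BATCH_BUFFER_END and shadow the target buffer when it
 * needs scanning.
 */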
1657static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
1658{
1659	bool second_level;
1660	int ret = 0;
1661
1662	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
1663		gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
1664		return -EINVAL;
1665	}
1666
1667	second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
1668	if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
1669		gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
1670		return -EINVAL;
1671	}
1672
1673	s->saved_buf_addr_type = s->buf_addr_type;
1674	addr_type_update_snb(s);
1675	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
1676		s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32);
1677		s->buf_type = BATCH_BUFFER_INSTRUCTION;
1678	} else if (second_level) {
1679		s->buf_type = BATCH_BUFFER_2ND_LEVEL;
1680		s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32);
1681		s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32);
1682	}
1683
1684	if (batch_buffer_needs_scan(s)) {
1685		ret = perform_bb_shadow(s);
1686		if (ret < 0)
1687			gvt_err("invalid shadow batch buffer\n");
1688	} else {
1689		/* emulate a batch buffer end so the return is handled correctly */
1690		ret = cmd_handler_mi_batch_buffer_end(s);
1691		if (ret < 0)
1692			return ret;
1693	}
1694
1695	return ret;
1696}
1697
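/*
 * Command description table. Each entry gives the command name, its
 * opcode, decode flags, the rings and devices it is valid on, a bitmap
 * (ADDR_FIX_*) of the dwords that carry graphics addresses, the command
 * length in dwords, and an optional handler.
 */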
1698static struct cmd_info cmd_info[] = {
1699	{"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
1700
1701	{"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
1702		0, 1, NULL},
1703
1704	{"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
1705		0, 1, cmd_handler_mi_user_interrupt},
1706
1707	{"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
1708		D_ALL, 0, 1, cmd_handler_mi_wait_for_event},
1709
1710	{"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
1711
1712	{"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1713		NULL},
1714
1715	{"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1716		NULL},
1717
1718	{"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1719		NULL},
1720
1721	{"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1722		NULL},
1723
1724	{"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
1725		D_ALL, 0, 1, NULL},
1726
1727	{"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
1728		F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1729		cmd_handler_mi_batch_buffer_end},
1730
1731	{"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
1732		0, 1, NULL},
1733
1734	{"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1735		NULL},
1736
1737	{"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
1738		D_ALL, 0, 1, NULL},
1739
1740	{"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1741		NULL},
1742
1743	{"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1744		NULL},
1745
1746	{"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
1747		R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
1748
1749	{"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
1750		0, 8, NULL},
1751
1752	{"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
1753
1754	{"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1755
1756	{"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
1757		D_BDW_PLUS, 0, 8, NULL},
1758
1759	{"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
1760		ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
1761
1762	{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
1763		ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
1764
1765	{"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL,
1766		0, 8, cmd_handler_mi_store_data_index},
1767
1768	{"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
1769		D_ALL, 0, 8, cmd_handler_lri},
1770
1771	{"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
1772		cmd_handler_mi_update_gtt},
1773
1774	{"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
1775		D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
1776
1777	{"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
1778		cmd_handler_mi_flush_dw},
1779
1780	{"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
1781		10, cmd_handler_mi_clflush},
1782
1783	{"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
1784		D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
1785
1786	{"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
1787		D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
1788
1789	{"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
1790		D_ALL, 0, 8, cmd_handler_lrr},
1791
1792	{"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
1793		D_ALL, 0, 8, NULL},
1794
1795	{"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
1796		ADDR_FIX_1(2), 8, NULL},
1797
1798	{"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
1799		ADDR_FIX_1(2), 8, NULL},
1800
1801	{"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
1802		8, cmd_handler_mi_op_2e},
1803
1804	{"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
1805		8, cmd_handler_mi_op_2f},
1806
1807	{"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
1808		F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
1809		cmd_handler_mi_batch_buffer_start},
1810
1811	{"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
1812		F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
1813		cmd_handler_mi_conditional_batch_buffer_end},
1814
1815	{"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
1816		R_RCS | R_BCS, D_ALL, 0, 2, NULL},
1817
1818	{"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL,
1819		ADDR_FIX_2(4, 7), 8, NULL},
1820
1821	{"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL,
1822		0, 8, NULL},
1823
1824	{"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT,
1825		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
1826
1827	{"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
1828
1829	{"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL,
1830		0, 8, NULL},
1831
1832	{"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL,
1833		ADDR_FIX_1(3), 8, NULL},
1834
1835	{"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS,
1836		D_ALL, 0, 8, NULL},
1837
1838	{"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL,
1839		ADDR_FIX_1(4), 8, NULL},
1840
1841	{"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
1842		ADDR_FIX_2(4, 5), 8, NULL},
1843
1844	{"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
1845		ADDR_FIX_1(4), 8, NULL},
1846
1847	{"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL,
1848		ADDR_FIX_2(4, 7), 8, NULL},
1849
1850	{"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS,
1851		D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
1852
1853	{"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
1854
1855	{"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS,
1856		D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL},
1857
1858	{"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR,
1859		R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
1860
1861	{"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
1862		OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT,
1863		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
1864
1865	{"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS,
1866		D_ALL, ADDR_FIX_1(4), 8, NULL},
1867
1868	{"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT,
1869		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
1870
1871	{"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS,
1872		D_ALL, ADDR_FIX_1(4), 8, NULL},
1873
1874	{"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS,
1875		D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
1876
1877	{"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT,
1878		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
1879
1880	{"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
1881		OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT,
1882		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
1883
1884	{"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL,
1885		ADDR_FIX_2(4, 5), 8, NULL},
1886
1887	{"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE,
1888		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
1889
1890	{"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
1891		OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
1892		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1893
1894	{"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
1895		OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
1896		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1897
1898	{"3DSTATE_BLEND_STATE_POINTERS",
1899		OP_3DSTATE_BLEND_STATE_POINTERS,
1900		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1901
1902	{"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
1903		OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
1904		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1905
1906	{"3DSTATE_BINDING_TABLE_POINTERS_VS",
1907		OP_3DSTATE_BINDING_TABLE_POINTERS_VS,
1908		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1909
1910	{"3DSTATE_BINDING_TABLE_POINTERS_HS",
1911		OP_3DSTATE_BINDING_TABLE_POINTERS_HS,
1912		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1913
1914	{"3DSTATE_BINDING_TABLE_POINTERS_DS",
1915		OP_3DSTATE_BINDING_TABLE_POINTERS_DS,
1916		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1917
1918	{"3DSTATE_BINDING_TABLE_POINTERS_GS",
1919		OP_3DSTATE_BINDING_TABLE_POINTERS_GS,
1920		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1921
1922	{"3DSTATE_BINDING_TABLE_POINTERS_PS",
1923		OP_3DSTATE_BINDING_TABLE_POINTERS_PS,
1924		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1925
1926	{"3DSTATE_SAMPLER_STATE_POINTERS_VS",
1927		OP_3DSTATE_SAMPLER_STATE_POINTERS_VS,
1928		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1929
1930	{"3DSTATE_SAMPLER_STATE_POINTERS_HS",
1931		OP_3DSTATE_SAMPLER_STATE_POINTERS_HS,
1932		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1933
1934	{"3DSTATE_SAMPLER_STATE_POINTERS_DS",
1935		OP_3DSTATE_SAMPLER_STATE_POINTERS_DS,
1936		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1937
1938	{"3DSTATE_SAMPLER_STATE_POINTERS_GS",
1939		OP_3DSTATE_SAMPLER_STATE_POINTERS_GS,
1940		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1941
1942	{"3DSTATE_SAMPLER_STATE_POINTERS_PS",
1943		OP_3DSTATE_SAMPLER_STATE_POINTERS_PS,
1944		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1945
1946	{"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL,
1947		0, 8, NULL},
1948
1949	{"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL,
1950		0, 8, NULL},
1951
1952	{"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL,
1953		0, 8, NULL},
1954
1955	{"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL,
1956		0, 8, NULL},
1957
1958	{"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS,
1959		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1960
1961	{"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS,
1962		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1963
1964	{"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS,
1965		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1966
1967	{"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS,
1968		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1969
1970	{"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS,
1971		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1972
1973	{"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS,
1974		F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
1975
1976	{"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS,
1977		F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
1978
1979	{"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS,
1980		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1981
1982	{"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS,
1983		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1984
1985	{"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS,
1986		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1987
1988	{"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS,
1989		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1990
1991	{"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS,
1992		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1993
1994	{"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS,
1995		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1996
1997	{"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS,
1998		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1999
2000	{"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS,
2001		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2002
2003	{"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS,
2004		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2005
2006	{"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS,
2007		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2008
2009	{"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS,
2010		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2011
2012	{"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS,
2013		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2014
2015	{"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS,
2016		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2017
2018	{"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS,
2019		D_BDW_PLUS, 0, 8, NULL},
2020
2021	{"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2022		NULL},
2023
2024	{"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS,
2025		D_BDW_PLUS, 0, 8, NULL},
2026
2027	{"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS,
2028		D_BDW_PLUS, 0, 8, NULL},
2029
2030	{"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2031		8, NULL},
2032
2033	{"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR,
2034		R_RCS, D_BDW_PLUS, 0, 8, NULL},
2035
2036	{"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2037		8, NULL},
2038
2039	{"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2040		NULL},
2041
2042	{"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2043		NULL},
2044
2045	{"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2046		NULL},
2047
2048	{"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS,
2049		D_BDW_PLUS, 0, 8, NULL},
2050
2051	{"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR,
2052		R_RCS, D_ALL, 0, 8, NULL},
2053
2054	{"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS,
2055		D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL},
2056
2057	{"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST,
2058		R_RCS, D_ALL, 0, 1, NULL},
2059
2060	{"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2061
2062	{"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR,
2063		R_RCS, D_ALL, 0, 8, NULL},
2064
2065	{"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS,
2066		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2067
2068	{"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2069
2070	{"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2071
2072	{"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2073
2074	{"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS,
2075		D_BDW_PLUS, 0, 8, NULL},
2076
2077	{"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS,
2078		D_BDW_PLUS, 0, 8, NULL},
2079
2080	{"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS,
2081		D_ALL, 0, 8, NULL},
2082
2083	{"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS,
2084		D_BDW_PLUS, 0, 8, NULL},
2085
2086	{"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS,
2087		D_BDW_PLUS, 0, 8, NULL},
2088
2089	{"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2090
2091	{"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2092
2093	{"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2094
2095	{"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS,
2096		D_ALL, 0, 8, NULL},
2097
2098	{"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2099
2100	{"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2101
2102	{"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR,
2103		R_RCS, D_ALL, 0, 8, NULL},
2104
2105	{"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0,
2106		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2107
2108	{"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL,
2109		0, 8, NULL},
2110
2111	{"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS,
2112		D_ALL, ADDR_FIX_1(2), 8, NULL},
2113
2114	{"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET,
2115		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2116
2117	{"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN,
2118		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2119
2120	{"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS,
2121		D_ALL, 0, 8, NULL},
2122
2123	{"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS,
2124		D_ALL, 0, 8, NULL},
2125
2126	{"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS,
2127		D_ALL, 0, 8, NULL},
2128
2129	{"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1,
2130		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2131
2132	{"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS,
2133		D_BDW_PLUS, 0, 8, NULL},
2134
2135	{"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS,
2136		D_ALL, ADDR_FIX_1(2), 8, NULL},
2137
2138	{"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR,
2139		R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL},
2140
2141	{"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR,
2142		R_RCS, D_ALL, 0, 8, NULL},
2143
2144	{"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
2145		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2146
2147	{"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS,
2148		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2149
2150	{"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS,
2151		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2152
2153	{"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
2154		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2155
2156	{"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
2157		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2158
2159	{"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR,
2160		R_RCS, D_ALL, 0, 8, NULL},
2161
2162	{"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS,
2163		D_ALL, 0, 9, NULL},
2164
2165	{"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2166		ADDR_FIX_2(2, 4), 8, NULL},
2167
2168	{"3DSTATE_BINDING_TABLE_POOL_ALLOC",
2169		OP_3DSTATE_BINDING_TABLE_POOL_ALLOC,
2170		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2171
2172	{"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC,
2173		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2174
2175	{"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
2176		OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC,
2177		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2178
2179	{"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS,
2180		D_BDW_PLUS, 0, 8, NULL},
2181
2182	{"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL,
2183		ADDR_FIX_1(2), 8, cmd_handler_pipe_control},
2184
2185	{"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2186
2187	{"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0,
2188		1, NULL},
2189
2190	{"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL,
2191		ADDR_FIX_1(1), 8, NULL},
2192
2193	{"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2194
2195	{"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2196		ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL},
2197
2198	{"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
2199		ADDR_FIX_1(1), 8, NULL},
2200
2201	{"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2202
2203	{"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2204
2205	{"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2206		0, 8, NULL},
2207
2208	{"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR, R_RCS,
2209		D_SKL_PLUS, 0, 8, NULL},
2210
2211	{"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
2212		F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2213
2214	{"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL,
2215		0, 16, NULL},
2216
2217	{"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
2218		0, 16, NULL},
2219
2220	{"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2221
2222	{"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
2223		0, 16, NULL},
2224
2225	{"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL,
2226		0, 16, NULL},
2227
2228	{"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2229		0, 16, NULL},
2230
2231	{"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2232		0, 8, NULL},
2233
2234	{"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16,
2235		NULL},
2236
2237	{"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45,
2238		F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
2239
2240	{"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR,
2241		R_VCS, D_ALL, 0, 12, NULL},
2242
2243	{"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR,
2244		R_VCS, D_ALL, 0, 12, NULL},
2245
2246	{"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR,
2247		R_VCS, D_BDW_PLUS, 0, 12, NULL},
2248
2249	{"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE,
2250		F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2251
2252	{"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE,
2253		F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL},
2254
2255	{"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2256
2257	{"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR,
2258		R_VCS, D_ALL, 0, 12, NULL},
2259
2260	{"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR,
2261		R_VCS, D_ALL, 0, 12, NULL},
2262
2263	{"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR,
2264		R_VCS, D_ALL, 0, 12, NULL},
2265
2266	{"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR,
2267		R_VCS, D_ALL, 0, 12, NULL},
2268
2269	{"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR,
2270		R_VCS, D_ALL, 0, 12, NULL},
2271
2272	{"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR,
2273		R_VCS, D_ALL, 0, 12, NULL},
2274
2275	{"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR,
2276		R_VCS, D_ALL, 0, 6, NULL},
2277
2278	{"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR,
2279		R_VCS, D_ALL, 0, 12, NULL},
2280
2281	{"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR,
2282		R_VCS, D_ALL, 0, 12, NULL},
2283
2284	{"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR,
2285		R_VCS, D_ALL, 0, 12, NULL},
2286
2287	{"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR,
2288		R_VCS, D_ALL, 0, 12, NULL},
2289
2290	{"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR,
2291		R_VCS, D_ALL, 0, 12, NULL},
2292
2293	{"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR,
2294		R_VCS, D_ALL, 0, 12, NULL},
2295
2296	{"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR,
2297		R_VCS, D_ALL, 0, 12, NULL},
2298	{"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR,
2299		R_VCS, D_ALL, 0, 12, NULL},
2300
2301	{"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR,
2302		R_VCS, D_ALL, 0, 12, NULL},
2303
2304	{"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR,
2305		R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL},
2306
2307	{"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR,
2308		R_VCS, D_ALL, 0, 12, NULL},
2309
2310	{"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR,
2311		R_VCS, D_ALL, 0, 12, NULL},
2312
2313	{"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR,
2314		R_VCS, D_ALL, 0, 12, NULL},
2315
2316	{"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR,
2317		R_VCS, D_ALL, 0, 12, NULL},
2318
2319	{"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR,
2320		R_VCS, D_ALL, 0, 12, NULL},
2321
2322	{"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR,
2323		R_VCS, D_ALL, 0, 12, NULL},
2324
2325	{"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR,
2326		R_VCS, D_ALL, 0, 12, NULL},
2327
2328	{"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR,
2329		R_VCS, D_ALL, 0, 12, NULL},
2330
2331	{"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR,
2332		R_VCS, D_ALL, 0, 12, NULL},
2333
2334	{"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR,
2335		R_VCS, D_ALL, 0, 12, NULL},
2336
2337	{"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR,
2338		R_VCS, D_ALL, 0, 12, NULL},
2339
2340	{"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL,
2341		0, 16, NULL},
2342
2343	{"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2344
2345	{"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2346
2347	{"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR,
2348		R_VCS, D_ALL, 0, 12, NULL},
2349
2350	{"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR,
2351		R_VCS, D_ALL, 0, 12, NULL},
2352
2353	{"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR,
2354		R_VCS, D_ALL, 0, 12, NULL},
2355
2356	{"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL},
2357
2358	{"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL,
2359		0, 12, NULL},
2360
2361	{"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
2362		0, 20, NULL},
2363};
2364
2365static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
2366{
2367	hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
2368}
2369
2370#define GVT_MAX_CMD_LENGTH     20  /* in dwords */
2371
2372static void trace_cs_command(struct parser_exec_state *s,
2373		cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler)
2374{
2375	/* This buffer is used by ftrace to store all commands copied from
2376	 * guest gma space. A command can cross a page boundary, which
2377	 * ftrace cannot handle, so this is just used as a
2378	 * 'bounce buffer'.
2379	 */
2380	u32 cmd_trace_buf[GVT_MAX_CMD_LENGTH];
2381	int i;
2382	u32 cmd_len = cmd_length(s);
2383	/* The chosen value of GVT_MAX_CMD_LENGTH is based on the
2384	 * following two considerations:
2385	 * 1) From observation, most common ring commands are not that long.
2386	 *    But there are exceptions, so it does make sense to observe
2387	 *    longer commands.
2388	 * 2) From the performance and debugging point of view, dumping the
2389	 *    full contents of every command is not necessary.
2390	 * We might shrink GVT_MAX_CMD_LENGTH or remove this trace event in
2391	 * the future for performance reasons.
2392	 */
2393	if (unlikely(cmd_len > GVT_MAX_CMD_LENGTH)) {
2394		gvt_dbg_cmd("cmd length exceeds tracing limitation!\n");
2395		cmd_len = GVT_MAX_CMD_LENGTH;
2396	}
2397
2398	for (i = 0; i < cmd_len; i++)
2399		cmd_trace_buf[i] = cmd_val(s, i);
2400
2401	trace_gvt_command(s->vgpu->id, s->ring_id, s->ip_gma, cmd_trace_buf,
2402			cmd_len, s->buf_type == RING_BUFFER_INSTRUCTION,
2403			cost_pre_cmd_handler, cost_cmd_handler);
2404}
2405
2406/* call the cmd handler, and advance ip */
2407static int cmd_parser_exec(struct parser_exec_state *s)
2408{
2409	struct cmd_info *info;
2410	u32 cmd;
2411	int ret = 0;
2412	cycles_t t0, t1, t2;
2413	struct parser_exec_state s_before_advance_custom;
2414
2415	t0 = get_cycles();
2416
2417	cmd = cmd_val(s, 0);
2418
2419	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
2420	if (info == NULL) {
2421		gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
2422				cmd, get_opcode(cmd, s->ring_id));
2423		return -EINVAL;
2424	}
2425
2426	gvt_dbg_cmd("%s\n", info->name);
2427
2428	s->info = info;
2429
2430	t1 = get_cycles();
2431
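	/*
	 * Snapshot the parser state: a handler with F_IP_ADVANCE_CUSTOM
	 * may modify s, and the trace below should report the command at
	 * its original location.
	 */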
2432	memcpy(&s_before_advance_custom, s, sizeof(struct parser_exec_state));
2433
2434	if (info->handler) {
2435		ret = info->handler(s);
2436		if (ret < 0) {
2437			gvt_err("%s handler error\n", info->name);
2438			return ret;
2439		}
2440	}
2441	t2 = get_cycles();
2442
2443	trace_cs_command(&s_before_advance_custom, t1 - t0, t2 - t1);
2444
2445	if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
2446		ret = cmd_advance_default(s);
2447		if (ret) {
2448			gvt_err("%s IP advance error\n", info->name);
2449			return ret;
2450		}
2451	}
2452	return 0;
2453}
2454
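/*
 * The ring is circular: if the tail has not wrapped, valid addresses
 * lie inside [head, tail]; if it has wrapped, valid addresses lie
 * outside (tail, head).
 */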
2455static inline bool gma_out_of_range(unsigned long gma,
2456		unsigned long gma_head, unsigned long gma_tail)
2457{
2458	if (gma_tail >= gma_head)
2459		return (gma < gma_head) || (gma > gma_tail);
2460	else
2461		return (gma > gma_tail) && (gma < gma_head);
2462}
2463
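/*
 * Scan the commands between head and tail, checking on each step that
 * the instruction pointer stays within the ring before handing the
 * command to cmd_parser_exec().
 */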
2464static int command_scan(struct parser_exec_state *s,
2465		unsigned long rb_head, unsigned long rb_tail,
2466		unsigned long rb_start, unsigned long rb_len)
2467{
2468
2469	unsigned long gma_head, gma_tail, gma_bottom;
2470	int ret = 0;
2471
2472	gma_head = rb_start + rb_head;
2473	gma_tail = rb_start + rb_tail;
2474	gma_bottom = rb_start + rb_len;
2475
2476	gvt_dbg_cmd("scan_start: start=%lx end=%lx\n", gma_head, gma_tail);
2477
2478	while (s->ip_gma != gma_tail) {
2479		if (s->buf_type == RING_BUFFER_INSTRUCTION) {
2480			if (s->ip_gma < rb_start ||
2481				s->ip_gma >= gma_bottom) {
2482				gvt_err("ip_gma %lx out of ring scope "
2483					"(base:0x%lx, bottom: 0x%lx)\n",
2484					s->ip_gma, rb_start,
2485					gma_bottom);
2486				parser_exec_state_dump(s);
2487				return -EINVAL;
2488			}
2489			if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
2490				gvt_err("ip_gma %lx out of range, "
2491					"base 0x%lx head 0x%lx tail 0x%lx\n",
2492					s->ip_gma, rb_start,
2493					rb_head, rb_tail);
2494				parser_exec_state_dump(s);
2495				break;
2496			}
2497		}
2498		ret = cmd_parser_exec(s);
2499		if (ret) {
2500			gvt_err("cmd parser error\n");
2501			parser_exec_state_dump(s);
2502			break;
2503		}
2504	}
2505
2506	gvt_dbg_cmd("scan_end\n");
2507
2508	return ret;
2509}
2510
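/*
 * Set up a parser for a workload's ring buffer and scan it, unless
 * scanning is bypassed for this ring or the buffer is empty.
 */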
2511static int scan_workload(struct intel_vgpu_workload *workload)
2512{
2513	unsigned long gma_head, gma_tail, gma_bottom;
2514	struct parser_exec_state s;
2515	int ret = 0;
2516
2517	/* ring base is page aligned */
2518	if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
2519		return -EINVAL;
2520
2521	gma_head = workload->rb_start + workload->rb_head;
2522	gma_tail = workload->rb_start + workload->rb_tail;
2523	gma_bottom = workload->rb_start + _RING_CTL_BUF_SIZE(workload->rb_ctl);
2524
2525	s.buf_type = RING_BUFFER_INSTRUCTION;
2526	s.buf_addr_type = GTT_BUFFER;
2527	s.vgpu = workload->vgpu;
2528	s.ring_id = workload->ring_id;
2529	s.ring_start = workload->rb_start;
2530	s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2531	s.ring_head = gma_head;
2532	s.ring_tail = gma_tail;
2533	s.rb_va = workload->shadow_ring_buffer_va;
2534	s.workload = workload;
2535
2536	if ((bypass_scan_mask & (1 << workload->ring_id)) ||
2537		gma_head == gma_tail)
2538		return 0;
2539
2540	ret = ip_gma_set(&s, gma_head);
2541	if (ret)
2542		goto out;
2543
2544	ret = command_scan(&s, workload->rb_head, workload->rb_tail,
2545		workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));
2546
2547out:
2548	return ret;
2549}
2550
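/*
 * Scan the shadowed indirect context like a small ring buffer. The
 * tail extends 3 dwords past the context to cover the
 * MI_BATCH_BUFFER_START that combine_wa_ctx() appends.
 */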
2551static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2552{
2553
2554	unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
2555	struct parser_exec_state s;
2556	int ret = 0;
2557
2558	/* ring base is page aligned */
2559	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
2560		return -EINVAL;
2561
2562	ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
2563	ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
2564			PAGE_SIZE);
2565	gma_head = wa_ctx->indirect_ctx.guest_gma;
2566	gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
2567	gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;
2568
2569	s.buf_type = RING_BUFFER_INSTRUCTION;
2570	s.buf_addr_type = GTT_BUFFER;
2571	s.vgpu = wa_ctx->workload->vgpu;
2572	s.ring_id = wa_ctx->workload->ring_id;
2573	s.ring_start = wa_ctx->indirect_ctx.guest_gma;
2574	s.ring_size = ring_size;
2575	s.ring_head = gma_head;
2576	s.ring_tail = gma_tail;
2577	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
2578	s.workload = wa_ctx->workload;
2579
2580	ret = ip_gma_set(&s, gma_head);
2581	if (ret)
2582		goto out;
2583
2584	ret = command_scan(&s, 0, ring_tail,
2585		wa_ctx->indirect_ctx.guest_gma, ring_size);
2586out:
2587	return ret;
2588}
2589
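/*
 * Copy the guest ring buffer contents between head and tail into the
 * shadow ring buffer; when head > tail the copy is done in two pieces
 * because the guest tail has wrapped past the top of the ring.
 */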
2590static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2591{
2592	struct intel_vgpu *vgpu = workload->vgpu;
2593	int ring_id = workload->ring_id;
2594	struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
2595	struct intel_ring *ring = shadow_ctx->engine[ring_id].ring;
2596	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
2597	unsigned int copy_len = 0;
2598	int ret;
2599
2600	guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2601
2602	/* calculate workload ring buffer size */
2603	workload->rb_len = (workload->rb_tail + guest_rb_size -
2604			workload->rb_head) % guest_rb_size;
2605
2606	gma_head = workload->rb_start + workload->rb_head;
2607	gma_tail = workload->rb_start + workload->rb_tail;
2608	gma_top = workload->rb_start + guest_rb_size;
2609
2610	/* allocate shadow ring buffer */
2611	ret = intel_ring_begin(workload->req, workload->rb_len / 4);
2612	if (ret)
2613		return ret;
2614
2615	/* get shadow ring buffer va */
2616	workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;
2617
2618	/* head > tail --> copy head <-> top */
2619	if (gma_head > gma_tail) {
2620		ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
2621				gma_head, gma_top,
2622				workload->shadow_ring_buffer_va);
2623		if (ret) {
2624			gvt_err("fail to copy guest ring buffer\n");
2625			return ret;
2626		}
2627		copy_len = gma_top - gma_head;
2628		gma_head = workload->rb_start;
2629	}
2630
2631	/* copy head or start <-> tail */
2632	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
2633			gma_head, gma_tail,
2634			workload->shadow_ring_buffer_va + copy_len);
2635	if (ret) {
2636		gvt_err("fail to copy guest ring buffer\n");
2637		return ret;
2638	}
2639	ring->tail += workload->rb_len;
2640	intel_ring_advance(ring);
2641	return 0;
2642}
2643
2644int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
2645{
2646	int ret;
2647
2648	ret = shadow_workload_ring_buffer(workload);
2649	if (ret) {
2650		gvt_err("fail to shadow workload ring_buffer\n");
2651		return ret;
2652	}
2653
2654	ret = scan_workload(workload);
2655	if (ret) {
2656		gvt_err("scan workload error\n");
2657		return ret;
2658	}
2659	return 0;
2660}
2661
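/*
 * Shadow the workaround indirect context: allocate a GEM object large
 * enough for the context plus one cache line (for the per-context
 * batch-buffer-start appended later) and copy the guest contents in.
 */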
2662static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2663{
2664	struct drm_device *dev = &wa_ctx->workload->vgpu->gvt->dev_priv->drm;
2665	int ctx_size = wa_ctx->indirect_ctx.size;
2666	unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
2667	struct drm_i915_gem_object *obj;
2668	int ret = 0;
2669	void *map;
2670
2671	obj = i915_gem_object_create(dev,
2672				     roundup(ctx_size + CACHELINE_BYTES,
2673					     PAGE_SIZE));
2674	if (IS_ERR(obj))
2675		return PTR_ERR(obj);
2676
2677	/* get the va of the shadow batch buffer */
2678	map = i915_gem_object_pin_map(obj, I915_MAP_WB);
2679	if (IS_ERR(map)) {
2680		gvt_err("failed to vmap shadow indirect ctx\n");
2681		ret = PTR_ERR(map);
2682		goto put_obj;
2683	}
2684
2685	ret = i915_gem_object_set_to_cpu_domain(obj, false);
2686	if (ret) {
2687		gvt_err("failed to set shadow indirect ctx to CPU\n");
2688		goto unmap_src;
2689	}
2690
2691	ret = copy_gma_to_hva(wa_ctx->workload->vgpu,
2692				wa_ctx->workload->vgpu->gtt.ggtt_mm,
2693				guest_gma, guest_gma + ctx_size,
2694				map);
2695	if (ret) {
2696		gvt_err("fail to copy guest indirect ctx\n");
2697		goto unmap_src;
2698	}
2699
2700	wa_ctx->indirect_ctx.obj = obj;
2701	wa_ctx->indirect_ctx.shadow_va = map;
2702	return 0;
2703
2704unmap_src:
2705	i915_gem_object_unpin_map(obj);
2706put_obj:
2707	i915_gem_object_put(obj);
2708	return ret;
2709}
2710
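/*
 * Append an MI_BATCH_BUFFER_START (0x18800001: opcode 0x31 << 23,
 * dword length 1, i.e. 3 dwords total) after the shadowed indirect
 * context, pointing at the guest's per-context workaround buffer.
 */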
2711static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2712{
2713	uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
2714	unsigned char *bb_start_sva;
2715
2716	per_ctx_start[0] = 0x18800001;
2717	per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
2718
2719	bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
2720				wa_ctx->indirect_ctx.size;
2721
2722	memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);
2723
2724	return 0;
2725}
2726
2727int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2728{
2729	int ret;
2730
2731	if (wa_ctx->indirect_ctx.size == 0)
2732		return 0;
2733
2734	ret = shadow_indirect_ctx(wa_ctx);
2735	if (ret) {
2736		gvt_err("fail to shadow indirect ctx\n");
2737		return ret;
2738	}
2739
2740	combine_wa_ctx(wa_ctx);
2741
2742	ret = scan_wa_ctx(wa_ctx);
2743	if (ret) {
2744		gvt_err("scan wa ctx error\n");
2745		return ret;
2746	}
2747
2748	return 0;
2749}
2750
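/*
 * Look up an opcode on every ring in the given mask and return the
 * first matching command description; used at init time to detect
 * duplicated entries.
 */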
2751static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
2752		unsigned int opcode, int rings)
2753{
2754	struct cmd_info *info = NULL;
2755	unsigned int ring;
2756
2757	for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) {
2758		info = find_cmd_entry(gvt, opcode, ring);
2759		if (info)
2760			break;
2761	}
2762	return info;
2763}
2764
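/*
 * Build the opcode hash table from cmd_info[], skipping commands that
 * do not exist on this device generation and rejecting duplicate
 * opcode/ring combinations.
 */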
2765static int init_cmd_table(struct intel_gvt *gvt)
2766{
2767	int i;
2768	struct cmd_entry *e;
2769	struct cmd_info	*info;
2770	unsigned int gen_type;
2771
2772	gen_type = intel_gvt_get_device_type(gvt);
2773
2774	for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
2775		if (!(cmd_info[i].devices & gen_type))
2776			continue;
2777
2778		e = kzalloc(sizeof(*e), GFP_KERNEL);
2779		if (!e)
2780			return -ENOMEM;
2781
2782		e->info = &cmd_info[i];
2783		info = find_cmd_entry_any_ring(gvt,
2784				e->info->opcode, e->info->rings);
2785		if (info) {
2786			gvt_err("%s %s duplicated\n", e->info->name,
2787					info->name);
2788			return -EEXIST;
2789		}
2790
2791		INIT_HLIST_NODE(&e->hlist);
2792		add_cmd_entry(gvt, e);
2793		gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
2794				e->info->name, e->info->opcode, e->info->flag,
2795				e->info->devices, e->info->rings);
2796	}
2797	return 0;
2798}
2799
2800static void clean_cmd_table(struct intel_gvt *gvt)
2801{
2802	struct hlist_node *tmp;
2803	struct cmd_entry *e;
2804	int i;
2805
2806	hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
2807		kfree(e);
2808
2809	hash_init(gvt->cmd_table);
2810}
2811
2812void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
2813{
2814	clean_cmd_table(gvt);
2815}
2816
2817int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
2818{
2819	int ret;
2820
2821	ret = init_cmd_table(gvt);
2822	if (ret) {
2823		intel_gvt_clean_cmd_parser(gvt);
2824		return ret;
2825	}
2826	return 0;
2827}
v5.14.15
   1/*
   2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  21 * SOFTWARE.
  22 *
  23 * Authors:
  24 *    Ke Yu
  25 *    Kevin Tian <kevin.tian@intel.com>
  26 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
  27 *
  28 * Contributors:
  29 *    Min He <min.he@intel.com>
  30 *    Ping Gao <ping.a.gao@intel.com>
  31 *    Tina Zhang <tina.zhang@intel.com>
  32 *    Yulei Zhang <yulei.zhang@intel.com>
  33 *    Zhi Wang <zhi.a.wang@intel.com>
  34 *
  35 */
  36
  37#include <linux/slab.h>
  38
  39#include "i915_drv.h"
  40#include "gt/intel_gpu_commands.h"
  41#include "gt/intel_lrc.h"
  42#include "gt/intel_ring.h"
  43#include "gt/intel_gt_requests.h"
  44#include "gt/shmem_utils.h"
  45#include "gvt.h"
  46#include "i915_pvinfo.h"
  47#include "trace.h"
  48
  49#include "gem/i915_gem_context.h"
  50#include "gem/i915_gem_pm.h"
  51#include "gt/intel_context.h"
  52
  53#define INVALID_OP    (~0U)
  54
  55#define OP_LEN_MI           9
  56#define OP_LEN_2D           10
  57#define OP_LEN_3D_MEDIA     16
  58#define OP_LEN_MFX_VC       16
  59#define OP_LEN_VEBOX	    16
  60
  61#define CMD_TYPE(cmd)	(((cmd) >> 29) & 7)
  62
  63struct sub_op_bits {
  64	int hi;
  65	int low;
  66};
  67struct decode_info {
  68	const char *name;
  69	int op_len;
  70	int nr_sub_op;
  71	const struct sub_op_bits *sub_op;
  72};
  73
  74#define   MAX_CMD_BUDGET			0x7fffffff
  75#define   MI_WAIT_FOR_PLANE_C_FLIP_PENDING      (1<<15)
  76#define   MI_WAIT_FOR_PLANE_B_FLIP_PENDING      (1<<9)
  77#define   MI_WAIT_FOR_PLANE_A_FLIP_PENDING      (1<<1)
  78
  79#define   MI_WAIT_FOR_SPRITE_C_FLIP_PENDING      (1<<20)
  80#define   MI_WAIT_FOR_SPRITE_B_FLIP_PENDING      (1<<10)
  81#define   MI_WAIT_FOR_SPRITE_A_FLIP_PENDING      (1<<2)
  82
  83/* Render Command Map */
  84
  85/* MI_* command Opcode (28:23) */
  86#define OP_MI_NOOP                          0x0
  87#define OP_MI_SET_PREDICATE                 0x1  /* HSW+ */
  88#define OP_MI_USER_INTERRUPT                0x2
  89#define OP_MI_WAIT_FOR_EVENT                0x3
  90#define OP_MI_FLUSH                         0x4
  91#define OP_MI_ARB_CHECK                     0x5
  92#define OP_MI_RS_CONTROL                    0x6  /* HSW+ */
  93#define OP_MI_REPORT_HEAD                   0x7
  94#define OP_MI_ARB_ON_OFF                    0x8
  95#define OP_MI_URB_ATOMIC_ALLOC              0x9  /* HSW+ */
  96#define OP_MI_BATCH_BUFFER_END              0xA
  97#define OP_MI_SUSPEND_FLUSH                 0xB
  98#define OP_MI_PREDICATE                     0xC  /* IVB+ */
  99#define OP_MI_TOPOLOGY_FILTER               0xD  /* IVB+ */
 100#define OP_MI_SET_APPID                     0xE  /* IVB+ */
 101#define OP_MI_RS_CONTEXT                    0xF  /* HSW+ */
 102#define OP_MI_LOAD_SCAN_LINES_INCL          0x12 /* HSW+ */
 103#define OP_MI_DISPLAY_FLIP                  0x14
 104#define OP_MI_SEMAPHORE_MBOX                0x16
 105#define OP_MI_SET_CONTEXT                   0x18
 106#define OP_MI_MATH                          0x1A
 107#define OP_MI_URB_CLEAR                     0x19
 108#define OP_MI_SEMAPHORE_SIGNAL		    0x1B  /* BDW+ */
 109#define OP_MI_SEMAPHORE_WAIT		    0x1C  /* BDW+ */
 110
 111#define OP_MI_STORE_DATA_IMM                0x20
 112#define OP_MI_STORE_DATA_INDEX              0x21
 113#define OP_MI_LOAD_REGISTER_IMM             0x22
 114#define OP_MI_UPDATE_GTT                    0x23
 115#define OP_MI_STORE_REGISTER_MEM            0x24
 116#define OP_MI_FLUSH_DW                      0x26
 117#define OP_MI_CLFLUSH                       0x27
 118#define OP_MI_REPORT_PERF_COUNT             0x28
 119#define OP_MI_LOAD_REGISTER_MEM             0x29  /* HSW+ */
 120#define OP_MI_LOAD_REGISTER_REG             0x2A  /* HSW+ */
 121#define OP_MI_RS_STORE_DATA_IMM             0x2B  /* HSW+ */
 122#define OP_MI_LOAD_URB_MEM                  0x2C  /* HSW+ */
 123#define OP_MI_STORE_URM_MEM                 0x2D  /* HSW+ */
 124#define OP_MI_2E			    0x2E  /* BDW+ */
 125#define OP_MI_2F			    0x2F  /* BDW+ */
 126#define OP_MI_BATCH_BUFFER_START            0x31
 127
 128/* Bit definition for dword 0 */
 129#define _CMDBIT_BB_START_IN_PPGTT	(1UL << 8)
 130
 131#define OP_MI_CONDITIONAL_BATCH_BUFFER_END  0x36
 132
 133#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
 134#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
 135#define BATCH_BUFFER_ADR_SPACE_BIT(x)	(((x) >> 8) & 1U)
 136#define BATCH_BUFFER_2ND_LEVEL_BIT(x)   ((x) >> 22 & 1U)
 137
 138/* 2D command: Opcode (28:22) */
 139#define OP_2D(x)    ((2<<7) | x)
 140
 141#define OP_XY_SETUP_BLT                             OP_2D(0x1)
 142#define OP_XY_SETUP_CLIP_BLT                        OP_2D(0x3)
 143#define OP_XY_SETUP_MONO_PATTERN_SL_BLT             OP_2D(0x11)
 144#define OP_XY_PIXEL_BLT                             OP_2D(0x24)
 145#define OP_XY_SCANLINES_BLT                         OP_2D(0x25)
 146#define OP_XY_TEXT_BLT                              OP_2D(0x26)
 147#define OP_XY_TEXT_IMMEDIATE_BLT                    OP_2D(0x31)
 148#define OP_XY_COLOR_BLT                             OP_2D(0x50)
 149#define OP_XY_PAT_BLT                               OP_2D(0x51)
 150#define OP_XY_MONO_PAT_BLT                          OP_2D(0x52)
 151#define OP_XY_SRC_COPY_BLT                          OP_2D(0x53)
 152#define OP_XY_MONO_SRC_COPY_BLT                     OP_2D(0x54)
 153#define OP_XY_FULL_BLT                              OP_2D(0x55)
 154#define OP_XY_FULL_MONO_SRC_BLT                     OP_2D(0x56)
 155#define OP_XY_FULL_MONO_PATTERN_BLT                 OP_2D(0x57)
 156#define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT        OP_2D(0x58)
 157#define OP_XY_MONO_PAT_FIXED_BLT                    OP_2D(0x59)
 158#define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT           OP_2D(0x71)
 159#define OP_XY_PAT_BLT_IMMEDIATE                     OP_2D(0x72)
 160#define OP_XY_SRC_COPY_CHROMA_BLT                   OP_2D(0x73)
 161#define OP_XY_FULL_IMMEDIATE_PATTERN_BLT            OP_2D(0x74)
 162#define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT   OP_2D(0x75)
 163#define OP_XY_PAT_CHROMA_BLT                        OP_2D(0x76)
 164#define OP_XY_PAT_CHROMA_BLT_IMMEDIATE              OP_2D(0x77)
 165
 166/* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
 167#define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
 168	((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
 169
 170#define OP_STATE_PREFETCH                       OP_3D_MEDIA(0x0, 0x0, 0x03)
 171
 172#define OP_STATE_BASE_ADDRESS                   OP_3D_MEDIA(0x0, 0x1, 0x01)
 173#define OP_STATE_SIP                            OP_3D_MEDIA(0x0, 0x1, 0x02)
 174#define OP_3D_MEDIA_0_1_4			OP_3D_MEDIA(0x0, 0x1, 0x04)
 175#define OP_SWTESS_BASE_ADDRESS			OP_3D_MEDIA(0x0, 0x1, 0x03)
 176
 177#define OP_3DSTATE_VF_STATISTICS_GM45           OP_3D_MEDIA(0x1, 0x0, 0x0B)
 178
 179#define OP_PIPELINE_SELECT                      OP_3D_MEDIA(0x1, 0x1, 0x04)
 180
 181#define OP_MEDIA_VFE_STATE                      OP_3D_MEDIA(0x2, 0x0, 0x0)
 182#define OP_MEDIA_CURBE_LOAD                     OP_3D_MEDIA(0x2, 0x0, 0x1)
 183#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD      OP_3D_MEDIA(0x2, 0x0, 0x2)
 184#define OP_MEDIA_GATEWAY_STATE                  OP_3D_MEDIA(0x2, 0x0, 0x3)
 185#define OP_MEDIA_STATE_FLUSH                    OP_3D_MEDIA(0x2, 0x0, 0x4)
 186#define OP_MEDIA_POOL_STATE                     OP_3D_MEDIA(0x2, 0x0, 0x5)
 187
 188#define OP_MEDIA_OBJECT                         OP_3D_MEDIA(0x2, 0x1, 0x0)
 189#define OP_MEDIA_OBJECT_PRT                     OP_3D_MEDIA(0x2, 0x1, 0x2)
 190#define OP_MEDIA_OBJECT_WALKER                  OP_3D_MEDIA(0x2, 0x1, 0x3)
 191#define OP_GPGPU_WALKER                         OP_3D_MEDIA(0x2, 0x1, 0x5)
 192
 193#define OP_3DSTATE_CLEAR_PARAMS                 OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
 194#define OP_3DSTATE_DEPTH_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
 195#define OP_3DSTATE_STENCIL_BUFFER               OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
 196#define OP_3DSTATE_HIER_DEPTH_BUFFER            OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
 197#define OP_3DSTATE_VERTEX_BUFFERS               OP_3D_MEDIA(0x3, 0x0, 0x08)
 198#define OP_3DSTATE_VERTEX_ELEMENTS              OP_3D_MEDIA(0x3, 0x0, 0x09)
 199#define OP_3DSTATE_INDEX_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x0A)
 200#define OP_3DSTATE_VF_STATISTICS                OP_3D_MEDIA(0x3, 0x0, 0x0B)
 201#define OP_3DSTATE_VF                           OP_3D_MEDIA(0x3, 0x0, 0x0C)  /* HSW+ */
 202#define OP_3DSTATE_CC_STATE_POINTERS            OP_3D_MEDIA(0x3, 0x0, 0x0E)
 203#define OP_3DSTATE_SCISSOR_STATE_POINTERS       OP_3D_MEDIA(0x3, 0x0, 0x0F)
 204#define OP_3DSTATE_VS                           OP_3D_MEDIA(0x3, 0x0, 0x10)
 205#define OP_3DSTATE_GS                           OP_3D_MEDIA(0x3, 0x0, 0x11)
 206#define OP_3DSTATE_CLIP                         OP_3D_MEDIA(0x3, 0x0, 0x12)
 207#define OP_3DSTATE_SF                           OP_3D_MEDIA(0x3, 0x0, 0x13)
 208#define OP_3DSTATE_WM                           OP_3D_MEDIA(0x3, 0x0, 0x14)
 209#define OP_3DSTATE_CONSTANT_VS                  OP_3D_MEDIA(0x3, 0x0, 0x15)
 210#define OP_3DSTATE_CONSTANT_GS                  OP_3D_MEDIA(0x3, 0x0, 0x16)
 211#define OP_3DSTATE_CONSTANT_PS                  OP_3D_MEDIA(0x3, 0x0, 0x17)
 212#define OP_3DSTATE_SAMPLE_MASK                  OP_3D_MEDIA(0x3, 0x0, 0x18)
 213#define OP_3DSTATE_CONSTANT_HS                  OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
 214#define OP_3DSTATE_CONSTANT_DS                  OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
 215#define OP_3DSTATE_HS                           OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
 216#define OP_3DSTATE_TE                           OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
 217#define OP_3DSTATE_DS                           OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
 218#define OP_3DSTATE_STREAMOUT                    OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
 219#define OP_3DSTATE_SBE                          OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
 220#define OP_3DSTATE_PS                           OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
 221#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
 222#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC   OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
 223#define OP_3DSTATE_BLEND_STATE_POINTERS         OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
 224#define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
 225#define OP_3DSTATE_BINDING_TABLE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
 226#define OP_3DSTATE_BINDING_TABLE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
 227#define OP_3DSTATE_BINDING_TABLE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
 228#define OP_3DSTATE_BINDING_TABLE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
 229#define OP_3DSTATE_BINDING_TABLE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
 230#define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
 231#define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
 232#define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
 233#define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
 234#define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
 235#define OP_3DSTATE_URB_VS                       OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
 236#define OP_3DSTATE_URB_HS                       OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
 237#define OP_3DSTATE_URB_DS                       OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
 238#define OP_3DSTATE_URB_GS                       OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
 239#define OP_3DSTATE_GATHER_CONSTANT_VS           OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
 240#define OP_3DSTATE_GATHER_CONSTANT_GS           OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
 241#define OP_3DSTATE_GATHER_CONSTANT_HS           OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
 242#define OP_3DSTATE_GATHER_CONSTANT_DS           OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
 243#define OP_3DSTATE_GATHER_CONSTANT_PS           OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
 244#define OP_3DSTATE_DX9_CONSTANTF_VS             OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
 245#define OP_3DSTATE_DX9_CONSTANTF_PS             OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
 246#define OP_3DSTATE_DX9_CONSTANTI_VS             OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
 247#define OP_3DSTATE_DX9_CONSTANTI_PS             OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
 248#define OP_3DSTATE_DX9_CONSTANTB_VS             OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
 249#define OP_3DSTATE_DX9_CONSTANTB_PS             OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
 250#define OP_3DSTATE_DX9_LOCAL_VALID_VS           OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
 251#define OP_3DSTATE_DX9_LOCAL_VALID_PS           OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
 252#define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS       OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
 253#define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS       OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
 254#define OP_3DSTATE_BINDING_TABLE_EDIT_VS        OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
 255#define OP_3DSTATE_BINDING_TABLE_EDIT_GS        OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
 256#define OP_3DSTATE_BINDING_TABLE_EDIT_HS        OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
 257#define OP_3DSTATE_BINDING_TABLE_EDIT_DS        OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
 258#define OP_3DSTATE_BINDING_TABLE_EDIT_PS        OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */
 259
 260#define OP_3DSTATE_VF_INSTANCING 		OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
 261#define OP_3DSTATE_VF_SGVS  			OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
 262#define OP_3DSTATE_VF_TOPOLOGY   		OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
 263#define OP_3DSTATE_WM_CHROMAKEY   		OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
 264#define OP_3DSTATE_PS_BLEND   			OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
 265#define OP_3DSTATE_WM_DEPTH_STENCIL   		OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
 266#define OP_3DSTATE_PS_EXTRA   			OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
 267#define OP_3DSTATE_RASTER   			OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
 268#define OP_3DSTATE_SBE_SWIZ   			OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
 269#define OP_3DSTATE_WM_HZ_OP   			OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
 270#define OP_3DSTATE_COMPONENT_PACKING		OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */
 271
 272#define OP_3DSTATE_DRAWING_RECTANGLE            OP_3D_MEDIA(0x3, 0x1, 0x00)
 273#define OP_3DSTATE_SAMPLER_PALETTE_LOAD0        OP_3D_MEDIA(0x3, 0x1, 0x02)
 274#define OP_3DSTATE_CHROMA_KEY                   OP_3D_MEDIA(0x3, 0x1, 0x04)
 275#define OP_SNB_3DSTATE_DEPTH_BUFFER             OP_3D_MEDIA(0x3, 0x1, 0x05)
 276#define OP_3DSTATE_POLY_STIPPLE_OFFSET          OP_3D_MEDIA(0x3, 0x1, 0x06)
 277#define OP_3DSTATE_POLY_STIPPLE_PATTERN         OP_3D_MEDIA(0x3, 0x1, 0x07)
 278#define OP_3DSTATE_LINE_STIPPLE                 OP_3D_MEDIA(0x3, 0x1, 0x08)
 279#define OP_3DSTATE_AA_LINE_PARAMS               OP_3D_MEDIA(0x3, 0x1, 0x0A)
 280#define OP_3DSTATE_GS_SVB_INDEX                 OP_3D_MEDIA(0x3, 0x1, 0x0B)
 281#define OP_3DSTATE_SAMPLER_PALETTE_LOAD1        OP_3D_MEDIA(0x3, 0x1, 0x0C)
 282#define OP_3DSTATE_MULTISAMPLE_BDW		OP_3D_MEDIA(0x3, 0x0, 0x0D)
 283#define OP_SNB_3DSTATE_STENCIL_BUFFER           OP_3D_MEDIA(0x3, 0x1, 0x0E)
 284#define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER        OP_3D_MEDIA(0x3, 0x1, 0x0F)
 285#define OP_SNB_3DSTATE_CLEAR_PARAMS             OP_3D_MEDIA(0x3, 0x1, 0x10)
 286#define OP_3DSTATE_MONOFILTER_SIZE              OP_3D_MEDIA(0x3, 0x1, 0x11)
 287#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS       OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
 288#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS       OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
 289#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS       OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
 290#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS       OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
 291#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS       OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
 292#define OP_3DSTATE_SO_DECL_LIST                 OP_3D_MEDIA(0x3, 0x1, 0x17)
 293#define OP_3DSTATE_SO_BUFFER                    OP_3D_MEDIA(0x3, 0x1, 0x18)
 294#define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC     OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
 295#define OP_3DSTATE_GATHER_POOL_ALLOC            OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
 296#define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
 297#define OP_3DSTATE_SAMPLE_PATTERN               OP_3D_MEDIA(0x3, 0x1, 0x1C)
 298#define OP_PIPE_CONTROL                         OP_3D_MEDIA(0x3, 0x2, 0x00)
 299#define OP_3DPRIMITIVE                          OP_3D_MEDIA(0x3, 0x3, 0x00)
 300
 301/* VCCP Command Parser */
 302
  303/*
  304 * The MFX and VEBOX cmd definitions below are from the vaapi
  305 * intel-driver project (BSD License):
  306 * git://anongit.freedesktop.org/vaapi/intel-driver
  307 * src/i965_defines.h
  308 */
 309
 310#define OP_MFX(pipeline, op, sub_opa, sub_opb)     \
 311	(3 << 13 | \
 312	 (pipeline) << 11 | \
 313	 (op) << 8 | \
 314	 (sub_opa) << 5 | \
 315	 (sub_opb))
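
/*
 * For illustration: OP_MFX() packs the command-type constant 3 into
 * bits 15:13 and the pipeline/op/sub_opa/sub_opb fields into bits
 * 12:11, 10:8, 7:5 and 4:0 of the 16-bit opcode key (mirroring the
 * sub_op_mfx_vc field layout below, shifted down by 16), e.g.:
 *
 *   OP_MFX_PIPE_MODE_SELECT = OP_MFX(2, 0, 0, 0)
 *                           = (3 << 13) | (2 << 11) = 0x7000
 */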
 316
 317#define OP_MFX_PIPE_MODE_SELECT                    OP_MFX(2, 0, 0, 0)  /* ALL */
 318#define OP_MFX_SURFACE_STATE                       OP_MFX(2, 0, 0, 1)  /* ALL */
 319#define OP_MFX_PIPE_BUF_ADDR_STATE                 OP_MFX(2, 0, 0, 2)  /* ALL */
 320#define OP_MFX_IND_OBJ_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 3)  /* ALL */
 321#define OP_MFX_BSP_BUF_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 4)  /* ALL */
 322#define OP_2_0_0_5                                 OP_MFX(2, 0, 0, 5)  /* ALL */
 323#define OP_MFX_STATE_POINTER                       OP_MFX(2, 0, 0, 6)  /* ALL */
 324#define OP_MFX_QM_STATE                            OP_MFX(2, 0, 0, 7)  /* IVB+ */
 325#define OP_MFX_FQM_STATE                           OP_MFX(2, 0, 0, 8)  /* IVB+ */
 326#define OP_MFX_PAK_INSERT_OBJECT                   OP_MFX(2, 0, 2, 8)  /* IVB+ */
 327#define OP_MFX_STITCH_OBJECT                       OP_MFX(2, 0, 2, 0xA)  /* IVB+ */
 328
 329#define OP_MFD_IT_OBJECT                           OP_MFX(2, 0, 1, 9) /* ALL */
 330
 331#define OP_MFX_WAIT                                OP_MFX(1, 0, 0, 0) /* IVB+ */
 332#define OP_MFX_AVC_IMG_STATE                       OP_MFX(2, 1, 0, 0) /* ALL */
 333#define OP_MFX_AVC_QM_STATE                        OP_MFX(2, 1, 0, 1) /* ALL */
 334#define OP_MFX_AVC_DIRECTMODE_STATE                OP_MFX(2, 1, 0, 2) /* ALL */
 335#define OP_MFX_AVC_SLICE_STATE                     OP_MFX(2, 1, 0, 3) /* ALL */
 336#define OP_MFX_AVC_REF_IDX_STATE                   OP_MFX(2, 1, 0, 4) /* ALL */
 337#define OP_MFX_AVC_WEIGHTOFFSET_STATE              OP_MFX(2, 1, 0, 5) /* ALL */
 338#define OP_MFD_AVC_PICID_STATE                     OP_MFX(2, 1, 1, 5) /* HSW+ */
 339#define OP_MFD_AVC_DPB_STATE			   OP_MFX(2, 1, 1, 6) /* IVB+ */
 340#define OP_MFD_AVC_SLICEADDR                       OP_MFX(2, 1, 1, 7) /* IVB+ */
 341#define OP_MFD_AVC_BSD_OBJECT                      OP_MFX(2, 1, 1, 8) /* ALL */
 342#define OP_MFC_AVC_PAK_OBJECT                      OP_MFX(2, 1, 2, 9) /* ALL */
 343
 344#define OP_MFX_VC1_PRED_PIPE_STATE                 OP_MFX(2, 2, 0, 1) /* ALL */
 345#define OP_MFX_VC1_DIRECTMODE_STATE                OP_MFX(2, 2, 0, 2) /* ALL */
 346#define OP_MFD_VC1_SHORT_PIC_STATE                 OP_MFX(2, 2, 1, 0) /* IVB+ */
 347#define OP_MFD_VC1_LONG_PIC_STATE                  OP_MFX(2, 2, 1, 1) /* IVB+ */
 348#define OP_MFD_VC1_BSD_OBJECT                      OP_MFX(2, 2, 1, 8) /* ALL */
 349
 350#define OP_MFX_MPEG2_PIC_STATE                     OP_MFX(2, 3, 0, 0) /* ALL */
 351#define OP_MFX_MPEG2_QM_STATE                      OP_MFX(2, 3, 0, 1) /* ALL */
 352#define OP_MFD_MPEG2_BSD_OBJECT                    OP_MFX(2, 3, 1, 8) /* ALL */
 353#define OP_MFC_MPEG2_SLICEGROUP_STATE              OP_MFX(2, 3, 2, 3) /* ALL */
 354#define OP_MFC_MPEG2_PAK_OBJECT                    OP_MFX(2, 3, 2, 9) /* ALL */
 355
 356#define OP_MFX_2_6_0_0                             OP_MFX(2, 6, 0, 0) /* IVB+ */
 357#define OP_MFX_2_6_0_8                             OP_MFX(2, 6, 0, 8) /* IVB+ */
 358#define OP_MFX_2_6_0_9                             OP_MFX(2, 6, 0, 9) /* IVB+ */
 359
 360#define OP_MFX_JPEG_PIC_STATE                      OP_MFX(2, 7, 0, 0)
 361#define OP_MFX_JPEG_HUFF_TABLE_STATE               OP_MFX(2, 7, 0, 2)
 362#define OP_MFD_JPEG_BSD_OBJECT                     OP_MFX(2, 7, 1, 8)
 363
 364#define OP_VEB(pipeline, op, sub_opa, sub_opb) \
 365	(3 << 13 | \
 366	 (pipeline) << 11 | \
 367	 (op) << 8 | \
 368	 (sub_opa) << 5 | \
 369	 (sub_opb))
 370
 371#define OP_VEB_SURFACE_STATE                       OP_VEB(2, 4, 0, 0)
 372#define OP_VEB_STATE                               OP_VEB(2, 4, 0, 2)
 373#define OP_VEB_DNDI_IECP_STATE                     OP_VEB(2, 4, 0, 3)
 374
 375struct parser_exec_state;
 376
 377typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
 378
 379#define GVT_CMD_HASH_BITS   7
 380
 381/* which DWords need address fix */
 382#define ADDR_FIX_1(x1)			(1 << (x1))
 383#define ADDR_FIX_2(x1, x2)		(ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
 384#define ADDR_FIX_3(x1, x2, x3)		(ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
 385#define ADDR_FIX_4(x1, x2, x3, x4)	(ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
 386#define ADDR_FIX_5(x1, x2, x3, x4, x5)  (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
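
/*
 * For illustration: ADDR_FIX_2(4, 7) == (1 << 4) | (1 << 7) == 0x90,
 * marking DWords 4 and 7 of a command as graphics addresses to be
 * audited/fixed up (used by e.g. "XY_SETUP_BLT" below).
 */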
 387
 388#define DWORD_FIELD(dword, end, start) \
 389	FIELD_GET(GENMASK(end, start), cmd_val(s, dword))
 390
 391#define OP_LENGTH_BIAS 2
  392#define CMD_LEN(value)  ((value) + OP_LENGTH_BIAS)
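
/*
 * For illustration: a variable-length command's DWord-length field
 * excludes the first OP_LENGTH_BIAS (2) DWords, so CMD_LEN(2) means
 * 4 DWords in total (see e.g. "MI_STORE_REGISTER_MEM" below).
 */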
 393
 394static int gvt_check_valid_cmd_length(int len, int valid_len)
 395{
 396	if (valid_len != len) {
  397		gvt_err("len is not valid: len=%d valid_len=%d\n",
  398			len, valid_len);
 399		return -EFAULT;
 400	}
 401	return 0;
 402}
 403
 404struct cmd_info {
 405	const char *name;
 406	u32 opcode;
 407
 408#define F_LEN_MASK	3U
 409#define F_LEN_CONST  1U
 410#define F_LEN_VAR    0U
  411/* value is const even though LEN may be variable */
 412#define F_LEN_VAR_FIXED    (1<<1)
 413
 414/*
 415 * command has its own ip advance logic
  416 * e.g. MI_BATCH_BUFFER_START, MI_BATCH_BUFFER_END
 417 */
 418#define F_IP_ADVANCE_CUSTOM (1<<2)
 419	u32 flag;
 420
 421#define R_RCS	BIT(RCS0)
 422#define R_VCS1  BIT(VCS0)
 423#define R_VCS2  BIT(VCS1)
 424#define R_VCS	(R_VCS1 | R_VCS2)
 425#define R_BCS	BIT(BCS0)
 426#define R_VECS	BIT(VECS0)
 427#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
 428	/* rings that support this cmd: BLT/RCS/VCS/VECS */
 429	u16 rings;
 430
 431	/* devices that support this cmd: SNB/IVB/HSW/... */
 432	u16 devices;
 433
  434	/* which DWords are addresses that need to be fixed up.
  435	 * A bit of 0 means a 32-bit non-address operand in the
  436	 * command; a bit of 1 means an address operand, which can
  437	 * be 32-bit or 64-bit depending on the architecture (as
  438	 * defined by "gmadr_bytes_in_cmd" in intel_gvt).
  439	 * No matter the address length, each address only takes
  440	 * one bit in the bitmap.
  441	 */
 442	u16 addr_bitmap;
 443
 444	/* flag == F_LEN_CONST : command length
 445	 * flag == F_LEN_VAR : length bias bits
  446	 * Note: length is in DWords
 447	 */
 448	u32 len;
 449
 450	parser_cmd_handler handler;
 451
  452	/* valid length in DWords */
 453	u32 valid_len;
 454};
 455
 456struct cmd_entry {
 457	struct hlist_node hlist;
 458	const struct cmd_info *info;
 459};
 460
 461enum {
 462	RING_BUFFER_INSTRUCTION,
 463	BATCH_BUFFER_INSTRUCTION,
 464	BATCH_BUFFER_2ND_LEVEL,
 465	RING_BUFFER_CTX,
 466};
 467
 468enum {
 469	GTT_BUFFER,
 470	PPGTT_BUFFER
 471};
 472
 473struct parser_exec_state {
 474	struct intel_vgpu *vgpu;
 475	const struct intel_engine_cs *engine;
 476
 477	int buf_type;
 478
 479	/* batch buffer address type */
 480	int buf_addr_type;
 481
 482	/* graphics memory address of ring buffer start */
 483	unsigned long ring_start;
 484	unsigned long ring_size;
 485	unsigned long ring_head;
 486	unsigned long ring_tail;
 487
 488	/* instruction graphics memory address */
 489	unsigned long ip_gma;
 490
  491	/* mapped va of the current ip_gma */
 492	void *ip_va;
 493	void *rb_va;
 494
 495	void *ret_bb_va;
  496	/* next instruction when returning from batch buffer to ring buffer */
 497	unsigned long ret_ip_gma_ring;
 498
  499	/* next instruction when returning from 2nd level batch buffer to batch buffer */
 500	unsigned long ret_ip_gma_bb;
 501
  502	/* batch buffer address type (GTT or PPGTT)
  503	 * used when returning from a 2nd level batch buffer
  504	 */
 505	int saved_buf_addr_type;
 506	bool is_ctx_wa;
 507	bool is_init_ctx;
 508
 509	const struct cmd_info *info;
 510
 511	struct intel_vgpu_workload *workload;
 512};
 513
 514#define gmadr_dw_number(s)	\
 515	(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
 516
  517static unsigned long bypass_scan_mask;
 518
  519/* ring ALL, command type 0 */
 520static const struct sub_op_bits sub_op_mi[] = {
 521	{31, 29},
 522	{28, 23},
 523};
 524
 525static const struct decode_info decode_info_mi = {
 526	"MI",
 527	OP_LEN_MI,
 528	ARRAY_SIZE(sub_op_mi),
 529	sub_op_mi,
 530};
 531
 532/* ring RCS, command type 2 */
 533static const struct sub_op_bits sub_op_2d[] = {
 534	{31, 29},
 535	{28, 22},
 536};
 537
 538static const struct decode_info decode_info_2d = {
 539	"2D",
 540	OP_LEN_2D,
 541	ARRAY_SIZE(sub_op_2d),
 542	sub_op_2d,
 543};
 544
 545/* ring RCS, command type 3 */
 546static const struct sub_op_bits sub_op_3d_media[] = {
 547	{31, 29},
 548	{28, 27},
 549	{26, 24},
 550	{23, 16},
 551};
 552
 553static const struct decode_info decode_info_3d_media = {
 554	"3D_Media",
 555	OP_LEN_3D_MEDIA,
 556	ARRAY_SIZE(sub_op_3d_media),
 557	sub_op_3d_media,
 558};
 559
 560/* ring VCS, command type 3 */
 561static const struct sub_op_bits sub_op_mfx_vc[] = {
 562	{31, 29},
 563	{28, 27},
 564	{26, 24},
 565	{23, 21},
 566	{20, 16},
 567};
 568
 569static const struct decode_info decode_info_mfx_vc = {
 570	"MFX_VC",
 571	OP_LEN_MFX_VC,
 572	ARRAY_SIZE(sub_op_mfx_vc),
 573	sub_op_mfx_vc,
 574};
 575
 576/* ring VECS, command type 3 */
 577static const struct sub_op_bits sub_op_vebox[] = {
 578	{31, 29},
 579	{28, 27},
 580	{26, 24},
 581	{23, 21},
 582	{20, 16},
 583};
 584
 585static const struct decode_info decode_info_vebox = {
 586	"VEBOX",
 587	OP_LEN_VEBOX,
 588	ARRAY_SIZE(sub_op_vebox),
 589	sub_op_vebox,
 590};
 591
 592static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
 593	[RCS0] = {
 594		&decode_info_mi,
 595		NULL,
 596		NULL,
 597		&decode_info_3d_media,
 598		NULL,
 599		NULL,
 600		NULL,
 601		NULL,
 602	},
 603
 604	[VCS0] = {
 605		&decode_info_mi,
 606		NULL,
 607		NULL,
 608		&decode_info_mfx_vc,
 609		NULL,
 610		NULL,
 611		NULL,
 612		NULL,
 613	},
 614
 615	[BCS0] = {
 616		&decode_info_mi,
 617		NULL,
 618		&decode_info_2d,
 619		NULL,
 620		NULL,
 621		NULL,
 622		NULL,
 623		NULL,
 624	},
 625
 626	[VECS0] = {
 627		&decode_info_mi,
 628		NULL,
 629		NULL,
 630		&decode_info_vebox,
 631		NULL,
 632		NULL,
 633		NULL,
 634		NULL,
 635	},
 636
 637	[VCS1] = {
 638		&decode_info_mi,
 639		NULL,
 640		NULL,
 641		&decode_info_mfx_vc,
 642		NULL,
 643		NULL,
 644		NULL,
 645		NULL,
 646	},
 647};
 648
 649static inline u32 get_opcode(u32 cmd, const struct intel_engine_cs *engine)
 650{
 651	const struct decode_info *d_info;
 652
 653	d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
 654	if (d_info == NULL)
 655		return INVALID_OP;
 656
 657	return cmd >> (32 - d_info->op_len);
 658}
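
/*
 * For illustration: an MI command has CMD_TYPE == 0, so the lookup
 * above yields &decode_info_mi with op_len == OP_LEN_MI == 9, and the
 * returned opcode key is cmd >> 23, i.e. the type bits 31:29 plus the
 * MI opcode field in bits 28:23.
 */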
 659
 660static inline const struct cmd_info *
 661find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode,
 662	       const struct intel_engine_cs *engine)
 663{
 664	struct cmd_entry *e;
 665
 666	hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
 667		if (opcode == e->info->opcode &&
 668		    e->info->rings & engine->mask)
 669			return e->info;
 670	}
 671	return NULL;
 672}
 673
 674static inline const struct cmd_info *
 675get_cmd_info(struct intel_gvt *gvt, u32 cmd,
 676	     const struct intel_engine_cs *engine)
 677{
 678	u32 opcode;
 679
 680	opcode = get_opcode(cmd, engine);
 681	if (opcode == INVALID_OP)
 682		return NULL;
 683
 684	return find_cmd_entry(gvt, opcode, engine);
 685}
 686
 687static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
 688{
 689	return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
 690}
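
/*
 * For illustration: sub_op_val(cmd, 28, 23) masks with
 * (1 << 6) - 1 after the shift and returns bits 28:23 of cmd,
 * i.e. the MI sub-opcode printed by print_opcode() below.
 */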
 691
 692static inline void print_opcode(u32 cmd, const struct intel_engine_cs *engine)
 693{
 694	const struct decode_info *d_info;
 695	int i;
 696
 697	d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
 698	if (d_info == NULL)
 699		return;
 700
 701	gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
 702			cmd >> (32 - d_info->op_len), d_info->name);
 703
 704	for (i = 0; i < d_info->nr_sub_op; i++)
 705		pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi,
 706					d_info->sub_op[i].low));
 707
 708	pr_err("\n");
 709}
 710
 711static inline u32 *cmd_ptr(struct parser_exec_state *s, int index)
 712{
 713	return s->ip_va + (index << 2);
 714}
 715
 716static inline u32 cmd_val(struct parser_exec_state *s, int index)
 717{
 718	return *cmd_ptr(s, index);
 719}
 720
 721static inline bool is_init_ctx(struct parser_exec_state *s)
 722{
 723	return (s->buf_type == RING_BUFFER_CTX && s->is_init_ctx);
 724}
 725
 726static void parser_exec_state_dump(struct parser_exec_state *s)
 727{
 728	int cnt = 0;
 729	int i;
 730
 731	gvt_dbg_cmd("  vgpu%d RING%s: ring_start(%08lx) ring_end(%08lx)"
 732		    " ring_head(%08lx) ring_tail(%08lx)\n",
 733		    s->vgpu->id, s->engine->name,
 734		    s->ring_start, s->ring_start + s->ring_size,
 735		    s->ring_head, s->ring_tail);
 736
 737	gvt_dbg_cmd("  %s %s ip_gma(%08lx) ",
 738			s->buf_type == RING_BUFFER_INSTRUCTION ?
 739			"RING_BUFFER" : ((s->buf_type == RING_BUFFER_CTX) ?
 740				"CTX_BUFFER" : "BATCH_BUFFER"),
 741			s->buf_addr_type == GTT_BUFFER ?
 742			"GTT" : "PPGTT", s->ip_gma);
 743
 744	if (s->ip_va == NULL) {
 745		gvt_dbg_cmd(" ip_va(NULL)");
 746		return;
 747	}
 748
 749	gvt_dbg_cmd("  ip_va=%p: %08x %08x %08x %08x\n",
 750			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
 751			cmd_val(s, 2), cmd_val(s, 3));
 752
 753	print_opcode(cmd_val(s, 0), s->engine);
 754
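	/* align ip_va down to its 4 KiB page before the dump below */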
 755	s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
 756
 757	while (cnt < 1024) {
 758		gvt_dbg_cmd("ip_va=%p: ", s->ip_va);
 759		for (i = 0; i < 8; i++)
 760			gvt_dbg_cmd("%08x ", cmd_val(s, i));
 761		gvt_dbg_cmd("\n");
 762
 763		s->ip_va += 8 * sizeof(u32);
 764		cnt += 8;
 765	}
 766}
 767
 768static inline void update_ip_va(struct parser_exec_state *s)
 769{
 770	unsigned long len = 0;
 771
 772	if (WARN_ON(s->ring_head == s->ring_tail))
 773		return;
 774
 775	if (s->buf_type == RING_BUFFER_INSTRUCTION ||
 776			s->buf_type == RING_BUFFER_CTX) {
 777		unsigned long ring_top = s->ring_start + s->ring_size;
 778
 779		if (s->ring_head > s->ring_tail) {
 780			if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top)
 781				len = (s->ip_gma - s->ring_head);
 782			else if (s->ip_gma >= s->ring_start &&
 783					s->ip_gma <= s->ring_tail)
 784				len = (ring_top - s->ring_head) +
 785					(s->ip_gma - s->ring_start);
 786		} else
 787			len = (s->ip_gma - s->ring_head);
 788
 789		s->ip_va = s->rb_va + len;
 790	} else {/* shadow batch buffer */
 791		s->ip_va = s->ret_bb_va;
 792	}
 793}
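
/*
 * Worked example for the wrap case above: with ring_start = 0x1000,
 * ring_size = 0x1000 (so ring_top = 0x2000), ring_head = 0x1c00 and
 * ring_tail = 0x1400, an ip_gma of 0x1100 lies in the wrapped part,
 * so len = (0x2000 - 0x1c00) + (0x1100 - 0x1000) = 0x500.
 */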
 794
 795static inline int ip_gma_set(struct parser_exec_state *s,
 796		unsigned long ip_gma)
 797{
 798	WARN_ON(!IS_ALIGNED(ip_gma, 4));
 799
 800	s->ip_gma = ip_gma;
 801	update_ip_va(s);
 802	return 0;
 803}
 804
 805static inline int ip_gma_advance(struct parser_exec_state *s,
 806		unsigned int dw_len)
 807{
 808	s->ip_gma += (dw_len << 2);
 809
 810	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
 811		if (s->ip_gma >= s->ring_start + s->ring_size)
 812			s->ip_gma -= s->ring_size;
 813		update_ip_va(s);
 814	} else {
 815		s->ip_va += (dw_len << 2);
 816	}
 817
 818	return 0;
 819}
 820
 821static inline int get_cmd_length(const struct cmd_info *info, u32 cmd)
 822{
 823	if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
 824		return info->len;
 825	else
 826		return (cmd & ((1U << info->len) - 1)) + 2;
 828}
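
/*
 * For illustration: for an F_LEN_VAR command, info->len is the width
 * of the hardware length field, so a command whose length field reads
 * 2 decodes to 2 + 2 = 4 total DWords (the CMD_LEN() bias above).
 */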
 829
 830static inline int cmd_length(struct parser_exec_state *s)
 831{
 832	return get_cmd_length(s->info, cmd_val(s, 0));
 833}
 834
  835/* do not remove this; some platforms may need a clflush here */
 836#define patch_value(s, addr, val) do { \
 837	*addr = val; \
 838} while (0)
 839
 840static inline bool is_mocs_mmio(unsigned int offset)
 841{
 842	return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
 843		((offset >= 0xb020) && (offset <= 0xb0a0));
 844}
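
/*
 * The two ranges above are assumed to correspond to the graphics MOCS
 * (0xc800..0xcff8) and LNCF MOCS (0xb020..0xb0a0) register blocks.
 */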
 845
 846static int is_cmd_update_pdps(unsigned int offset,
 847			      struct parser_exec_state *s)
 848{
 849	u32 base = s->workload->engine->mmio_base;
 850	return i915_mmio_reg_equal(_MMIO(offset), GEN8_RING_PDP_UDW(base, 0));
 851}
 852
 853static int cmd_pdp_mmio_update_handler(struct parser_exec_state *s,
 854				       unsigned int offset, unsigned int index)
 855{
 856	struct intel_vgpu *vgpu = s->vgpu;
 857	struct intel_vgpu_mm *shadow_mm = s->workload->shadow_mm;
 858	struct intel_vgpu_mm *mm;
 859	u64 pdps[GEN8_3LVL_PDPES];
 860
 861	if (shadow_mm->ppgtt_mm.root_entry_type ==
 862	    GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
 863		pdps[0] = (u64)cmd_val(s, 2) << 32;
 864		pdps[0] |= cmd_val(s, 4);
 865
 866		mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
 867		if (!mm) {
 868			gvt_vgpu_err("failed to get the 4-level shadow vm\n");
 869			return -EINVAL;
 870		}
 871		intel_vgpu_mm_get(mm);
 872		list_add_tail(&mm->ppgtt_mm.link,
 873			      &s->workload->lri_shadow_mm);
 874		*cmd_ptr(s, 2) = upper_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
 875		*cmd_ptr(s, 4) = lower_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
 876	} else {
  877		/* Currently all guests use PML4 tables, so we can't yet
  878		 * have a guest that uses a 3-level table but updates the
  879		 * PPGTT via LRI. This is simply untestable. */
 880		GEM_BUG_ON(1);
 881		gvt_vgpu_err("invalid shared shadow vm type\n");
 882		return -EINVAL;
 883	}
 884	return 0;
 885}
 886
 887static int cmd_reg_handler(struct parser_exec_state *s,
 888	unsigned int offset, unsigned int index, char *cmd)
 889{
 890	struct intel_vgpu *vgpu = s->vgpu;
 891	struct intel_gvt *gvt = vgpu->gvt;
 892	u32 ctx_sr_ctl;
 893	u32 *vreg, vreg_old;
 894
 895	if (offset + 4 > gvt->device_info.mmio_size) {
 896		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
 897				cmd, offset);
 898		return -EFAULT;
 899	}
 900
 901	if (is_init_ctx(s)) {
 902		struct intel_gvt_mmio_info *mmio_info;
 903
 904		intel_gvt_mmio_set_cmd_accessible(gvt, offset);
 905		mmio_info = intel_gvt_find_mmio_info(gvt, offset);
 906		if (mmio_info && mmio_info->write)
 907			intel_gvt_mmio_set_cmd_write_patch(gvt, offset);
 908		return 0;
 909	}
 910
 911	if (!intel_gvt_mmio_is_cmd_accessible(gvt, offset)) {
 912		gvt_vgpu_err("%s access to non-render register (%x)\n",
 913				cmd, offset);
 914		return -EBADRQC;
 915	}
 916
 917	if (!strncmp(cmd, "srm", 3) ||
 918			!strncmp(cmd, "lrm", 3)) {
 919		if (offset == i915_mmio_reg_offset(GEN8_L3SQCREG4) ||
 920		    offset == 0x21f0 ||
 921		    (IS_BROADWELL(gvt->gt->i915) &&
 922		     offset == i915_mmio_reg_offset(INSTPM)))
 923			return 0;
 924		else {
  925			gvt_vgpu_err("%s access to register (%x) not allowed\n",
  926					cmd, offset);
 927			return -EPERM;
 928		}
 929	}
 930
 931	if (!strncmp(cmd, "lrr-src", 7) ||
 932			!strncmp(cmd, "lrr-dst", 7)) {
 933		if (IS_BROADWELL(gvt->gt->i915) && offset == 0x215c)
 934			return 0;
 935		else {
  936			gvt_vgpu_err("cmd %s to reg (%x) not allowed\n", cmd, offset);
 937			return -EPERM;
 938		}
 939	}
 940
 941	if (!strncmp(cmd, "pipe_ctrl", 9)) {
 942		/* TODO: add LRI POST logic here */
 943		return 0;
 944	}
 945
 946	if (strncmp(cmd, "lri", 3))
 947		return -EPERM;
 948
 949	/* below are all lri handlers */
 950	vreg = &vgpu_vreg(s->vgpu, offset);
 951
 952	if (is_cmd_update_pdps(offset, s) &&
 953	    cmd_pdp_mmio_update_handler(s, offset, index))
 954		return -EINVAL;
 955
 956	if (offset == i915_mmio_reg_offset(DERRMR) ||
 957		offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
 958		/* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
 959		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
 960	}
 961
 962	if (is_mocs_mmio(offset))
 963		*vreg = cmd_val(s, index + 1);
 964
 965	vreg_old = *vreg;
 966
 967	if (intel_gvt_mmio_is_cmd_write_patch(gvt, offset)) {
 968		u32 cmdval_new, cmdval;
 969		struct intel_gvt_mmio_info *mmio_info;
 970
 971		cmdval = cmd_val(s, index + 1);
 972
 973		mmio_info = intel_gvt_find_mmio_info(gvt, offset);
 974		if (!mmio_info) {
 975			cmdval_new = cmdval;
 976		} else {
 977			u64 ro_mask = mmio_info->ro_mask;
 978			int ret;
 979
 980			if (likely(!ro_mask))
 981				ret = mmio_info->write(s->vgpu, offset,
 982						&cmdval, 4);
 983			else {
 984				gvt_vgpu_err("try to write RO reg %x\n",
 985						offset);
 986				ret = -EBADRQC;
 987			}
 988			if (ret)
 989				return ret;
 990			cmdval_new = *vreg;
 991		}
 992		if (cmdval_new != cmdval)
 993			patch_value(s, cmd_ptr(s, index+1), cmdval_new);
 994	}
 995
  996	/* only patch the cmd; restore the vreg value if it was changed in the mmio write handler */
 997	*vreg = vreg_old;
 998
  999	/* TODO
 1000	 * In order to let a workload with an inhibit context generate
 1001	 * correct image data in memory, vreg values will be loaded to
 1002	 * hw via LRIs in the workload with the inhibit context. But as
 1003	 * the indirect context is loaded prior to the LRIs in the
 1004	 * workload, we don't want reg values in the indirect context
 1005	 * overwritten by LRIs in workloads. So, when scanning an
 1006	 * indirect context, we update its reg values into vregs, so that
 1007	 * LRIs in a workload with an inhibit context restore correctly.
 1008	 */
1009	if (GRAPHICS_VER(s->engine->i915) == 9 &&
1010	    intel_gvt_mmio_is_sr_in_ctx(gvt, offset) &&
1011	    !strncmp(cmd, "lri", 3)) {
1012		intel_gvt_hypervisor_read_gpa(s->vgpu,
1013			s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
1014		/* check inhibit context */
1015		if (ctx_sr_ctl & 1) {
1016			u32 data = cmd_val(s, index + 1);
1017
1018			if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
1019				intel_vgpu_mask_mmio_write(vgpu,
1020							offset, &data, 4);
1021			else
1022				vgpu_vreg(vgpu, offset) = data;
1023		}
1024	}
1025
1026	return 0;
1027}
1028
1029#define cmd_reg(s, i) \
1030	(cmd_val(s, i) & GENMASK(22, 2))
1031
1032#define cmd_reg_inhibit(s, i) \
1033	(cmd_val(s, i) & GENMASK(22, 18))
1034
1035#define cmd_gma(s, i) \
1036	(cmd_val(s, i) & GENMASK(31, 2))
1037
1038#define cmd_gma_hi(s, i) \
1039	(cmd_val(s, i) & GENMASK(15, 0))
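
/*
 * For illustration: an LRI payload is (reg, value) DWord pairs after
 * the header, which is why cmd_handler_lri() below steps i by 2,
 * takes the register offset from cmd_reg(s, i) and the value from
 * cmd_val(s, i + 1).
 */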
1040
1041static int cmd_handler_lri(struct parser_exec_state *s)
1042{
1043	int i, ret = 0;
1044	int cmd_len = cmd_length(s);
1045
1046	for (i = 1; i < cmd_len; i += 2) {
1047		if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) {
1048			if (s->engine->id == BCS0 &&
1049			    cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
1050				ret |= 0;
1051			else
1052				ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0;
1053		}
1054		if (ret)
1055			break;
1056		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
1057		if (ret)
1058			break;
1059	}
1060	return ret;
1061}
1062
1063static int cmd_handler_lrr(struct parser_exec_state *s)
1064{
1065	int i, ret = 0;
1066	int cmd_len = cmd_length(s);
1067
1068	for (i = 1; i < cmd_len; i += 2) {
1069		if (IS_BROADWELL(s->engine->i915))
1070			ret |= ((cmd_reg_inhibit(s, i) ||
1071				 (cmd_reg_inhibit(s, i + 1)))) ?
1072				-EBADRQC : 0;
1073		if (ret)
1074			break;
1075		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
1076		if (ret)
1077			break;
1078		ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
1079		if (ret)
1080			break;
1081	}
1082	return ret;
1083}
1084
1085static inline int cmd_address_audit(struct parser_exec_state *s,
1086		unsigned long guest_gma, int op_size, bool index_mode);
1087
1088static int cmd_handler_lrm(struct parser_exec_state *s)
1089{
1090	struct intel_gvt *gvt = s->vgpu->gvt;
1091	int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
1092	unsigned long gma;
1093	int i, ret = 0;
1094	int cmd_len = cmd_length(s);
1095
1096	for (i = 1; i < cmd_len;) {
1097		if (IS_BROADWELL(s->engine->i915))
1098			ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
1099		if (ret)
1100			break;
1101		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
1102		if (ret)
1103			break;
1104		if (cmd_val(s, 0) & (1 << 22)) {
1105			gma = cmd_gma(s, i + 1);
1106			if (gmadr_bytes == 8)
1107				gma |= (cmd_gma_hi(s, i + 2)) << 32;
1108			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
1109			if (ret)
1110				break;
1111		}
1112		i += gmadr_dw_number(s) + 1;
1113	}
1114	return ret;
1115}
1116
1117static int cmd_handler_srm(struct parser_exec_state *s)
1118{
1119	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1120	unsigned long gma;
1121	int i, ret = 0;
1122	int cmd_len = cmd_length(s);
1123
1124	for (i = 1; i < cmd_len;) {
1125		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
1126		if (ret)
1127			break;
1128		if (cmd_val(s, 0) & (1 << 22)) {
1129			gma = cmd_gma(s, i + 1);
1130			if (gmadr_bytes == 8)
1131				gma |= (cmd_gma_hi(s, i + 2)) << 32;
1132			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
1133			if (ret)
1134				break;
1135		}
1136		i += gmadr_dw_number(s) + 1;
1137	}
1138	return ret;
1139}
1140
1141struct cmd_interrupt_event {
1142	int pipe_control_notify;
1143	int mi_flush_dw;
1144	int mi_user_interrupt;
1145};
1146
1147static struct cmd_interrupt_event cmd_interrupt_events[] = {
1148	[RCS0] = {
1149		.pipe_control_notify = RCS_PIPE_CONTROL,
1150		.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
1151		.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
1152	},
1153	[BCS0] = {
1154		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
1155		.mi_flush_dw = BCS_MI_FLUSH_DW,
1156		.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
1157	},
1158	[VCS0] = {
1159		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
1160		.mi_flush_dw = VCS_MI_FLUSH_DW,
1161		.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
1162	},
1163	[VCS1] = {
1164		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
1165		.mi_flush_dw = VCS2_MI_FLUSH_DW,
1166		.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
1167	},
1168	[VECS0] = {
1169		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
1170		.mi_flush_dw = VECS_MI_FLUSH_DW,
1171		.mi_user_interrupt = VECS_MI_USER_INTERRUPT,
1172	},
1173};
1174
1175static int cmd_handler_pipe_control(struct parser_exec_state *s)
1176{
1177	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1178	unsigned long gma;
1179	bool index_mode = false;
1180	unsigned int post_sync;
1181	int ret = 0;
1182	u32 hws_pga, val;
1183
1184	post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
1185
1186	/* LRI post sync */
1187	if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
1188		ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
1189	/* post sync */
1190	else if (post_sync) {
1191		if (post_sync == 2)
1192			ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
1193		else if (post_sync == 3)
1194			ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
1195		else if (post_sync == 1) {
 1196			/* check ggtt */
1197			if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
1198				gma = cmd_val(s, 2) & GENMASK(31, 3);
1199				if (gmadr_bytes == 8)
1200					gma |= (cmd_gma_hi(s, 3)) << 32;
1201				/* Store Data Index */
1202				if (cmd_val(s, 1) & (1 << 21))
1203					index_mode = true;
1204				ret |= cmd_address_audit(s, gma, sizeof(u64),
1205						index_mode);
1206				if (ret)
1207					return ret;
1208				if (index_mode) {
1209					hws_pga = s->vgpu->hws_pga[s->engine->id];
1210					gma = hws_pga + gma;
1211					patch_value(s, cmd_ptr(s, 2), gma);
1212					val = cmd_val(s, 1) & (~(1 << 21));
1213					patch_value(s, cmd_ptr(s, 1), val);
1214				}
1215			}
1216		}
1217	}
1218
1219	if (ret)
1220		return ret;
1221
1222	if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
1223		set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify,
1224			s->workload->pending_events);
1225	return 0;
1226}
1227
1228static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
1229{
1230	set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt,
1231		s->workload->pending_events);
1232	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
1233	return 0;
1234}
1235
1236static int cmd_advance_default(struct parser_exec_state *s)
1237{
1238	return ip_gma_advance(s, cmd_length(s));
1239}
1240
1241static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
1242{
1243	int ret;
1244
1245	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
1246		s->buf_type = BATCH_BUFFER_INSTRUCTION;
1247		ret = ip_gma_set(s, s->ret_ip_gma_bb);
1248		s->buf_addr_type = s->saved_buf_addr_type;
1249	} else if (s->buf_type == RING_BUFFER_CTX) {
1250		ret = ip_gma_set(s, s->ring_tail);
1251	} else {
1252		s->buf_type = RING_BUFFER_INSTRUCTION;
1253		s->buf_addr_type = GTT_BUFFER;
1254		if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
1255			s->ret_ip_gma_ring -= s->ring_size;
1256		ret = ip_gma_set(s, s->ret_ip_gma_ring);
1257	}
1258	return ret;
1259}
1260
1261struct mi_display_flip_command_info {
1262	int pipe;
1263	int plane;
1264	int event;
1265	i915_reg_t stride_reg;
1266	i915_reg_t ctrl_reg;
1267	i915_reg_t surf_reg;
1268	u64 stride_val;
1269	u64 tile_val;
1270	u64 surf_val;
1271	bool async_flip;
1272};
1273
1274struct plane_code_mapping {
1275	int pipe;
1276	int plane;
1277	int event;
1278};
1279
1280static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
1281		struct mi_display_flip_command_info *info)
1282{
1283	struct drm_i915_private *dev_priv = s->engine->i915;
1284	struct plane_code_mapping gen8_plane_code[] = {
1285		[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
1286		[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
1287		[2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
1288		[3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
1289		[4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
1290		[5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
1291	};
1292	u32 dword0, dword1, dword2;
1293	u32 v;
1294
1295	dword0 = cmd_val(s, 0);
1296	dword1 = cmd_val(s, 1);
1297	dword2 = cmd_val(s, 2);
1298
1299	v = (dword0 & GENMASK(21, 19)) >> 19;
1300	if (drm_WARN_ON(&dev_priv->drm, v >= ARRAY_SIZE(gen8_plane_code)))
1301		return -EBADRQC;
1302
1303	info->pipe = gen8_plane_code[v].pipe;
1304	info->plane = gen8_plane_code[v].plane;
1305	info->event = gen8_plane_code[v].event;
1306	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
1307	info->tile_val = (dword1 & 0x1);
1308	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
1309	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
1310
1311	if (info->plane == PLANE_A) {
1312		info->ctrl_reg = DSPCNTR(info->pipe);
1313		info->stride_reg = DSPSTRIDE(info->pipe);
1314		info->surf_reg = DSPSURF(info->pipe);
1315	} else if (info->plane == PLANE_B) {
1316		info->ctrl_reg = SPRCTL(info->pipe);
1317		info->stride_reg = SPRSTRIDE(info->pipe);
1318		info->surf_reg = SPRSURF(info->pipe);
1319	} else {
1320		drm_WARN_ON(&dev_priv->drm, 1);
1321		return -EBADRQC;
1322	}
1323	return 0;
1324}
1325
1326static int skl_decode_mi_display_flip(struct parser_exec_state *s,
1327		struct mi_display_flip_command_info *info)
1328{
1329	struct drm_i915_private *dev_priv = s->engine->i915;
1330	struct intel_vgpu *vgpu = s->vgpu;
1331	u32 dword0 = cmd_val(s, 0);
1332	u32 dword1 = cmd_val(s, 1);
1333	u32 dword2 = cmd_val(s, 2);
1334	u32 plane = (dword0 & GENMASK(12, 8)) >> 8;
1335
1336	info->plane = PRIMARY_PLANE;
1337
1338	switch (plane) {
1339	case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
1340		info->pipe = PIPE_A;
1341		info->event = PRIMARY_A_FLIP_DONE;
1342		break;
1343	case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
1344		info->pipe = PIPE_B;
1345		info->event = PRIMARY_B_FLIP_DONE;
1346		break;
1347	case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
1348		info->pipe = PIPE_C;
1349		info->event = PRIMARY_C_FLIP_DONE;
1350		break;
1351
1352	case MI_DISPLAY_FLIP_SKL_PLANE_2_A:
1353		info->pipe = PIPE_A;
1354		info->event = SPRITE_A_FLIP_DONE;
1355		info->plane = SPRITE_PLANE;
1356		break;
1357	case MI_DISPLAY_FLIP_SKL_PLANE_2_B:
1358		info->pipe = PIPE_B;
1359		info->event = SPRITE_B_FLIP_DONE;
1360		info->plane = SPRITE_PLANE;
1361		break;
1362	case MI_DISPLAY_FLIP_SKL_PLANE_2_C:
1363		info->pipe = PIPE_C;
1364		info->event = SPRITE_C_FLIP_DONE;
1365		info->plane = SPRITE_PLANE;
1366		break;
1367
1368	default:
1369		gvt_vgpu_err("unknown plane code %d\n", plane);
1370		return -EBADRQC;
1371	}
1372
1373	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
1374	info->tile_val = (dword1 & GENMASK(2, 0));
1375	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
1376	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
1377
1378	info->ctrl_reg = DSPCNTR(info->pipe);
1379	info->stride_reg = DSPSTRIDE(info->pipe);
1380	info->surf_reg = DSPSURF(info->pipe);
1381
1382	return 0;
1383}
1384
1385static int gen8_check_mi_display_flip(struct parser_exec_state *s,
1386		struct mi_display_flip_command_info *info)
1387{
1388	u32 stride, tile;
1389
1390	if (!info->async_flip)
1391		return 0;
1392
1393	if (GRAPHICS_VER(s->engine->i915) >= 9) {
1394		stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
1395		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
1396				GENMASK(12, 10)) >> 10;
1397	} else {
1398		stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) &
1399				GENMASK(15, 6)) >> 6;
1400		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
1401	}
1402
1403	if (stride != info->stride_val)
1404		gvt_dbg_cmd("cannot change stride during async flip\n");
1405
1406	if (tile != info->tile_val)
1407		gvt_dbg_cmd("cannot change tile during async flip\n");
1408
1409	return 0;
1410}
1411
1412static int gen8_update_plane_mmio_from_mi_display_flip(
1413		struct parser_exec_state *s,
1414		struct mi_display_flip_command_info *info)
1415{
1416	struct drm_i915_private *dev_priv = s->engine->i915;
1417	struct intel_vgpu *vgpu = s->vgpu;
1418
1419	set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
1420		      info->surf_val << 12);
1421	if (GRAPHICS_VER(dev_priv) >= 9) {
1422		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
1423			      info->stride_val);
1424		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
1425			      info->tile_val << 10);
1426	} else {
1427		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(15, 6),
1428			      info->stride_val << 6);
1429		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(10, 10),
1430			      info->tile_val << 10);
1431	}
1432
1433	if (info->plane == PLANE_PRIMARY)
1434		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(info->pipe))++;
1435
1436	if (info->async_flip)
1437		intel_vgpu_trigger_virtual_event(vgpu, info->event);
1438	else
1439		set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]);
1440
1441	return 0;
1442}
1443
1444static int decode_mi_display_flip(struct parser_exec_state *s,
1445		struct mi_display_flip_command_info *info)
1446{
1447	if (IS_BROADWELL(s->engine->i915))
1448		return gen8_decode_mi_display_flip(s, info);
1449	if (GRAPHICS_VER(s->engine->i915) >= 9)
1450		return skl_decode_mi_display_flip(s, info);
1451
1452	return -ENODEV;
1453}
1454
1455static int check_mi_display_flip(struct parser_exec_state *s,
1456		struct mi_display_flip_command_info *info)
1457{
1458	return gen8_check_mi_display_flip(s, info);
1459}
1460
1461static int update_plane_mmio_from_mi_display_flip(
1462		struct parser_exec_state *s,
1463		struct mi_display_flip_command_info *info)
1464{
1465	return gen8_update_plane_mmio_from_mi_display_flip(s, info);
1466}
1467
1468static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
1469{
1470	struct mi_display_flip_command_info info;
1471	struct intel_vgpu *vgpu = s->vgpu;
1472	int ret;
1473	int i;
1474	int len = cmd_length(s);
1475	u32 valid_len = CMD_LEN(1);
1476
1477	/* Flip Type == Stereo 3D Flip */
1478	if (DWORD_FIELD(2, 1, 0) == 2)
1479		valid_len++;
1480	ret = gvt_check_valid_cmd_length(cmd_length(s),
1481			valid_len);
1482	if (ret)
1483		return ret;
1484
1485	ret = decode_mi_display_flip(s, &info);
1486	if (ret) {
1487		gvt_vgpu_err("fail to decode MI display flip command\n");
1488		return ret;
1489	}
1490
1491	ret = check_mi_display_flip(s, &info);
1492	if (ret) {
1493		gvt_vgpu_err("invalid MI display flip command\n");
1494		return ret;
1495	}
1496
1497	ret = update_plane_mmio_from_mi_display_flip(s, &info);
1498	if (ret) {
1499		gvt_vgpu_err("fail to update plane mmio\n");
1500		return ret;
1501	}
1502
1503	for (i = 0; i < len; i++)
1504		patch_value(s, cmd_ptr(s, i), MI_NOOP);
1505	return 0;
1506}
1507
1508static bool is_wait_for_flip_pending(u32 cmd)
1509{
1510	return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
1511			MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
1512			MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
1513			MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
1514			MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
1515			MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
1516}
1517
1518static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
1519{
1520	u32 cmd = cmd_val(s, 0);
1521
1522	if (!is_wait_for_flip_pending(cmd))
1523		return 0;
1524
1525	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
1526	return 0;
1527}
1528
1529static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
1530{
1531	unsigned long addr;
1532	unsigned long gma_high, gma_low;
1533	struct intel_vgpu *vgpu = s->vgpu;
1534	int gmadr_bytes = vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1535
1536	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) {
1537		gvt_vgpu_err("invalid gma bytes %d\n", gmadr_bytes);
1538		return INTEL_GVT_INVALID_ADDR;
1539	}
1540
1541	gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
1542	if (gmadr_bytes == 4) {
1543		addr = gma_low;
1544	} else {
1545		gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
1546		addr = (((unsigned long)gma_high) << 32) | gma_low;
1547	}
1548	return addr;
1549}
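
/*
 * For illustration: with gmadr_bytes == 8 a batch buffer address is
 * assembled from two DWords, e.g. gma_low = 0xabcd0000 and
 * gma_high = 0x1 give addr = 0x1abcd0000.
 */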
1550
1551static inline int cmd_address_audit(struct parser_exec_state *s,
1552		unsigned long guest_gma, int op_size, bool index_mode)
1553{
1554	struct intel_vgpu *vgpu = s->vgpu;
1555	u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
1556	int i;
1557	int ret;
1558
1559	if (op_size > max_surface_size) {
1560		gvt_vgpu_err("command address audit fail name %s\n",
1561			s->info->name);
1562		return -EFAULT;
1563	}
1564
1565	if (index_mode)	{
1566		if (guest_gma >= I915_GTT_PAGE_SIZE) {
1567			ret = -EFAULT;
1568			goto err;
1569		}
1570	} else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
1571		ret = -EFAULT;
1572		goto err;
1573	}
1574
1575	return 0;
1576
1577err:
1578	gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
1579			s->info->name, guest_gma, op_size);
1580
1581	pr_err("cmd dump: ");
1582	for (i = 0; i < cmd_length(s); i++) {
1583		if (!(i % 4))
1584			pr_err("\n%08x ", cmd_val(s, i));
1585		else
1586			pr_err("%08x ", cmd_val(s, i));
1587	}
1588	pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
1589			vgpu->id,
1590			vgpu_aperture_gmadr_base(vgpu),
1591			vgpu_aperture_gmadr_end(vgpu),
1592			vgpu_hidden_gmadr_base(vgpu),
1593			vgpu_hidden_gmadr_end(vgpu));
1594	return ret;
1595}
1596
1597static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
1598{
1599	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1600	int op_size = (cmd_length(s) - 3) * sizeof(u32);
1601	int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
1602	unsigned long gma, gma_low, gma_high;
1603	u32 valid_len = CMD_LEN(2);
1604	int ret = 0;
1605
 1606	/* check ppgtt */
1607	if (!(cmd_val(s, 0) & (1 << 22)))
1608		return 0;
1609
1610	/* check if QWORD */
1611	if (DWORD_FIELD(0, 21, 21))
1612		valid_len++;
1613	ret = gvt_check_valid_cmd_length(cmd_length(s),
1614			valid_len);
1615	if (ret)
1616		return ret;
1617
1618	gma = cmd_val(s, 2) & GENMASK(31, 2);
1619
1620	if (gmadr_bytes == 8) {
1621		gma_low = cmd_val(s, 1) & GENMASK(31, 2);
1622		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
1623		gma = (gma_high << 32) | gma_low;
1624		core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
1625	}
1626	ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
1627	return ret;
1628}
1629
1630static inline int unexpected_cmd(struct parser_exec_state *s)
1631{
1632	struct intel_vgpu *vgpu = s->vgpu;
1633
1634	gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
1635
1636	return -EBADRQC;
1637}
1638
1639static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
1640{
1641	return unexpected_cmd(s);
1642}
1643
1644static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
1645{
1646	return unexpected_cmd(s);
1647}
1648
1649static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
1650{
1651	return unexpected_cmd(s);
1652}
1653
1654static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
1655{
1656	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1657	int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
1658			sizeof(u32);
1659	unsigned long gma, gma_high;
1660	u32 valid_len = CMD_LEN(1);
1661	int ret = 0;
1662
1663	if (!(cmd_val(s, 0) & (1 << 22)))
1664		return ret;
1665
1666	/* check inline data */
1667	if (cmd_val(s, 0) & BIT(18))
1668		valid_len = CMD_LEN(9);
1669	ret = gvt_check_valid_cmd_length(cmd_length(s),
1670			valid_len);
1671	if (ret)
1672		return ret;
1673
1674	gma = cmd_val(s, 1) & GENMASK(31, 2);
1675	if (gmadr_bytes == 8) {
1676		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
1677		gma = (gma_high << 32) | gma;
1678	}
1679	ret = cmd_address_audit(s, gma, op_size, false);
1680	return ret;
1681}
1682
1683static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
1684{
1685	return unexpected_cmd(s);
1686}
1687
1688static int cmd_handler_mi_clflush(struct parser_exec_state *s)
1689{
1690	return unexpected_cmd(s);
1691}
1692
1693static int cmd_handler_mi_conditional_batch_buffer_end(
1694		struct parser_exec_state *s)
1695{
1696	return unexpected_cmd(s);
1697}
1698
1699static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
1700{
1701	return unexpected_cmd(s);
1702}
1703
1704static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
1705{
1706	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1707	unsigned long gma;
1708	bool index_mode = false;
1709	int ret = 0;
1710	u32 hws_pga, val;
1711	u32 valid_len = CMD_LEN(2);
1712
1713	ret = gvt_check_valid_cmd_length(cmd_length(s),
1714			valid_len);
1715	if (ret) {
1716		/* Check again for Qword */
1717		ret = gvt_check_valid_cmd_length(cmd_length(s),
1718			++valid_len);
1719		return ret;
1720	}
1721
1722	/* Check post-sync and ppgtt bit */
1723	if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
1724		gma = cmd_val(s, 1) & GENMASK(31, 3);
1725		if (gmadr_bytes == 8)
1726			gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32;
1727		/* Store Data Index */
1728		if (cmd_val(s, 0) & (1 << 21))
1729			index_mode = true;
1730		ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
1731		if (ret)
1732			return ret;
1733		if (index_mode) {
1734			hws_pga = s->vgpu->hws_pga[s->engine->id];
1735			gma = hws_pga + gma;
1736			patch_value(s, cmd_ptr(s, 1), gma);
1737			val = cmd_val(s, 0) & (~(1 << 21));
1738			patch_value(s, cmd_ptr(s, 0), val);
1739		}
1740	}
1741	/* Check notify bit */
1742	if ((cmd_val(s, 0) & (1 << 8)))
1743		set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw,
1744			s->workload->pending_events);
1745	return ret;
1746}
1747
1748static void addr_type_update_snb(struct parser_exec_state *s)
1749{
1750	if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
1751			(BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
1752		s->buf_addr_type = PPGTT_BUFFER;
1753	}
1754}
1755
1756
1757static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
1758		unsigned long gma, unsigned long end_gma, void *va)
1759{
1760	unsigned long copy_len, offset;
1761	unsigned long len = 0;
1762	unsigned long gpa;
1763
1764	while (gma != end_gma) {
1765		gpa = intel_vgpu_gma_to_gpa(mm, gma);
1766		if (gpa == INTEL_GVT_INVALID_ADDR) {
1767			gvt_vgpu_err("invalid gma address: %lx\n", gma);
1768			return -EFAULT;
1769		}
1770
1771		offset = gma & (I915_GTT_PAGE_SIZE - 1);
1772
1773		copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
1774			I915_GTT_PAGE_SIZE - offset : end_gma - gma;
1775
1776		intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
1777
1778		len += copy_len;
1779		gma += copy_len;
1780	}
1781	return len;
1782}
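
/*
 * Usage example from this file: find_bb_size() below reads a batch
 * buffer one DWord at a time with
 *
 *	copy_gma_to_hva(s->vgpu, mm, gma, gma + 4, &cmd);
 *
 * letting the gma -> gpa translation deal with page-crossing copies.
 */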
1783
1784
1785/*
1786 * Check whether a batch buffer needs to be scanned. Currently
 1787 * the only criterion is privilege.
1788 */
1789static int batch_buffer_needs_scan(struct parser_exec_state *s)
1790{
1791	/* Decide privilege based on address space */
1792	if (cmd_val(s, 0) & BIT(8) &&
1793	    !(s->vgpu->scan_nonprivbb & s->engine->mask))
1794		return 0;
1795
1796	return 1;
1797}
1798
1799static const char *repr_addr_type(unsigned int type)
1800{
1801	return type == PPGTT_BUFFER ? "ppgtt" : "ggtt";
1802}
1803
1804static int find_bb_size(struct parser_exec_state *s,
1805			unsigned long *bb_size,
1806			unsigned long *bb_end_cmd_offset)
1807{
1808	unsigned long gma = 0;
1809	const struct cmd_info *info;
1810	u32 cmd_len = 0;
1811	bool bb_end = false;
1812	struct intel_vgpu *vgpu = s->vgpu;
1813	u32 cmd;
1814	struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
1815		s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
1816
1817	*bb_size = 0;
1818	*bb_end_cmd_offset = 0;
1819
1820	/* get the start gm address of the batch buffer */
1821	gma = get_gma_bb_from_cmd(s, 1);
1822	if (gma == INTEL_GVT_INVALID_ADDR)
1823		return -EFAULT;
1824
1825	cmd = cmd_val(s, 0);
1826	info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
1827	if (info == NULL) {
1828		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
1829			     cmd, get_opcode(cmd, s->engine),
1830			     repr_addr_type(s->buf_addr_type),
1831			     s->engine->name, s->workload);
1832		return -EBADRQC;
1833	}
1834	do {
1835		if (copy_gma_to_hva(s->vgpu, mm,
1836				    gma, gma + 4, &cmd) < 0)
1837			return -EFAULT;
1838		info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
1839		if (info == NULL) {
1840			gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
1841				     cmd, get_opcode(cmd, s->engine),
1842				     repr_addr_type(s->buf_addr_type),
1843				     s->engine->name, s->workload);
1844			return -EBADRQC;
1845		}
1846
1847		if (info->opcode == OP_MI_BATCH_BUFFER_END) {
1848			bb_end = true;
1849		} else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
1850			if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)
1851				/* chained batch buffer */
1852				bb_end = true;
1853		}
1854
1855		if (bb_end)
1856			*bb_end_cmd_offset = *bb_size;
1857
1858		cmd_len = get_cmd_length(info, cmd) << 2;
1859		*bb_size += cmd_len;
1860		gma += cmd_len;
1861	} while (!bb_end);
1862
1863	return 0;
1864}
1865
1866static int audit_bb_end(struct parser_exec_state *s, void *va)
1867{
1868	struct intel_vgpu *vgpu = s->vgpu;
1869	u32 cmd = *(u32 *)va;
1870	const struct cmd_info *info;
1871
1872	info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
1873	if (info == NULL) {
1874		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
1875			     cmd, get_opcode(cmd, s->engine),
1876			     repr_addr_type(s->buf_addr_type),
1877			     s->engine->name, s->workload);
1878		return -EBADRQC;
1879	}
1880
1881	if ((info->opcode == OP_MI_BATCH_BUFFER_END) ||
1882	    ((info->opcode == OP_MI_BATCH_BUFFER_START) &&
1883	     (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)))
1884		return 0;
1885
1886	return -EBADRQC;
1887}
1888
1889static int perform_bb_shadow(struct parser_exec_state *s)
1890{
1891	struct intel_vgpu *vgpu = s->vgpu;
1892	struct intel_vgpu_shadow_bb *bb;
1893	unsigned long gma = 0;
1894	unsigned long bb_size;
1895	unsigned long bb_end_cmd_offset;
1896	int ret = 0;
1897	struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
1898		s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
1899	unsigned long start_offset = 0;
1900
1901	/* get the start gm address of the batch buffer */
1902	gma = get_gma_bb_from_cmd(s, 1);
1903	if (gma == INTEL_GVT_INVALID_ADDR)
1904		return -EFAULT;
1905
1906	ret = find_bb_size(s, &bb_size, &bb_end_cmd_offset);
1907	if (ret)
1908		return ret;
1909
1910	bb = kzalloc(sizeof(*bb), GFP_KERNEL);
1911	if (!bb)
1912		return -ENOMEM;
1913
 1914	bb->ppgtt = (s->buf_addr_type != GTT_BUFFER);
1915
 1916	/* the start_offset stores the batch buffer's start gma's
 1917	 * offset relative to the page boundary. So for a non-privileged
 1918	 * batch buffer, the shadowed gem object holds exactly the same
 1919	 * page layout as the original gem object. This is for the
 1920	 * convenience of replacing the whole non-privileged batch
 1921	 * buffer page with this shadowed one in the PPGTT at the same
 1922	 * gma address. (This replacing action is not implemented yet,
 1923	 * but may be necessary in the future.)
 1924	 * For a privileged batch buffer, we just change the start gma
 1925	 * address to that of the shadowed page.
 1926	 */
1927	if (bb->ppgtt)
1928		start_offset = gma & ~I915_GTT_PAGE_MASK;
1929
1930	bb->obj = i915_gem_object_create_shmem(s->engine->i915,
1931					       round_up(bb_size + start_offset,
1932							PAGE_SIZE));
1933	if (IS_ERR(bb->obj)) {
1934		ret = PTR_ERR(bb->obj);
1935		goto err_free_bb;
1936	}
1937
1938	bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
1939	if (IS_ERR(bb->va)) {
1940		ret = PTR_ERR(bb->va);
1941		goto err_free_obj;
1942	}
1943
1944	ret = copy_gma_to_hva(s->vgpu, mm,
1945			      gma, gma + bb_size,
1946			      bb->va + start_offset);
1947	if (ret < 0) {
 1948		gvt_vgpu_err("failed to copy guest batch buffer\n");
1949		ret = -EFAULT;
1950		goto err_unmap;
1951	}
1952
1953	ret = audit_bb_end(s, bb->va + start_offset + bb_end_cmd_offset);
1954	if (ret)
1955		goto err_unmap;
1956
1957	i915_gem_object_unlock(bb->obj);
1958	INIT_LIST_HEAD(&bb->list);
1959	list_add(&bb->list, &s->workload->shadow_bb);
1960
1961	bb->bb_start_cmd_va = s->ip_va;
1962
1963	if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
1964		bb->bb_offset = s->ip_va - s->rb_va;
1965	else
1966		bb->bb_offset = 0;
1967
 1968	/*
 1969	 * ip_va saves the virtual address of the shadow batch buffer,
 1970	 * while ip_gma saves the graphics address of the original batch
 1971	 * buffer. As the shadow batch buffer is just a copy of the
 1972	 * original one, it is correct to use the shadow batch buffer's
 1973	 * va and the original batch buffer's gma in pair. After all, we
 1974	 * don't want to pin the shadow buffer here (too early).
 1975	 */
1976	s->ip_va = bb->va + start_offset;
1977	s->ip_gma = gma;
1978	return 0;
1979err_unmap:
1980	i915_gem_object_unpin_map(bb->obj);
1981err_free_obj:
1982	i915_gem_object_put(bb->obj);
1983err_free_bb:
1984	kfree(bb);
1985	return ret;
1986}
1987
1988static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
1989{
1990	bool second_level;
1991	int ret = 0;
1992	struct intel_vgpu *vgpu = s->vgpu;
1993
1994	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
1995		gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
1996		return -EFAULT;
1997	}
1998
1999	second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
2000	if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
2001		gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
2002		return -EFAULT;
2003	}
2004
2005	s->saved_buf_addr_type = s->buf_addr_type;
2006	addr_type_update_snb(s);
2007	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
2008		s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32);
2009		s->buf_type = BATCH_BUFFER_INSTRUCTION;
2010	} else if (second_level) {
2011		s->buf_type = BATCH_BUFFER_2ND_LEVEL;
2012		s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32);
2013		s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32);
2014	}
2015
2016	if (batch_buffer_needs_scan(s)) {
2017		ret = perform_bb_shadow(s);
2018		if (ret < 0)
2019			gvt_vgpu_err("invalid shadow batch buffer\n");
2020	} else {
 2021		/* emulate a batch buffer end to handle the return correctly */
2022		ret = cmd_handler_mi_batch_buffer_end(s);
2023		if (ret < 0)
2024			return ret;
2025	}
2026	return ret;
2027}
2028
2029static int mi_noop_index;
2030
2031static const struct cmd_info cmd_info[] = {
2032	{"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
2033
2034	{"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
2035		0, 1, NULL},
2036
2037	{"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
2038		0, 1, cmd_handler_mi_user_interrupt},
2039
2040	{"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
2041		D_ALL, 0, 1, cmd_handler_mi_wait_for_event},
2042
2043	{"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
2044
2045	{"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
2046		NULL},
2047
2048	{"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
2049		NULL},
2050
2051	{"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
2052		NULL},
2053
2054	{"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
2055		NULL},
2056
2057	{"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
2058		D_ALL, 0, 1, NULL},
2059
2060	{"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
2061		F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
2062		cmd_handler_mi_batch_buffer_end},
2063
2064	{"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
2065		0, 1, NULL},
2066
2067	{"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
2068		NULL},
2069
2070	{"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
2071		D_ALL, 0, 1, NULL},
2072
2073	{"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
2074		NULL},
2075
2076	{"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
2077		NULL},
2078
2079	{"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR,
2080		R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
2081
2082	{"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR | F_LEN_VAR_FIXED,
2083		R_ALL, D_ALL, 0, 8, NULL, CMD_LEN(1)},
2084
2085	{"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
2086
2087	{"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS,
2088		D_ALL, 0, 8, NULL, CMD_LEN(0)},
2089
2090	{"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL,
2091		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, 0, 8,
2092		NULL, CMD_LEN(0)},
2093
2094	{"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT,
2095		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, ADDR_FIX_1(2),
2096		8, cmd_handler_mi_semaphore_wait, CMD_LEN(2)},
2097
2098	{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
2099		ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
2100
2101	{"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL,
2102		0, 8, cmd_handler_mi_store_data_index},
2103
2104	{"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
2105		D_ALL, 0, 8, cmd_handler_lri},
2106
2107	{"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
2108		cmd_handler_mi_update_gtt},
2109
2110	{"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM,
2111		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
2112		cmd_handler_srm, CMD_LEN(2)},
2113
2114	{"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
2115		cmd_handler_mi_flush_dw},
2116
2117	{"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
2118		10, cmd_handler_mi_clflush},
2119
2120	{"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT,
2121		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(1), 6,
2122		cmd_handler_mi_report_perf_count, CMD_LEN(2)},
2123
2124	{"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM,
2125		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
2126		cmd_handler_lrm, CMD_LEN(2)},
2127
2128	{"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG,
2129		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, 0, 8,
2130		cmd_handler_lrr, CMD_LEN(1)},
2131
2132	{"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM,
2133		F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, D_ALL, 0,
2134		8, NULL, CMD_LEN(2)},
2135
2136	{"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR | F_LEN_VAR_FIXED,
2137		R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL, CMD_LEN(2)},
2138
2139	{"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
2140		ADDR_FIX_1(2), 8, NULL},
2141
2142	{"MI_OP_2E", OP_MI_2E, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS,
2143		ADDR_FIX_2(1, 2), 8, cmd_handler_mi_op_2e, CMD_LEN(3)},
2144
2145	{"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
2146		8, cmd_handler_mi_op_2f},
2147
2148	{"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
2149		F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
2150		cmd_handler_mi_batch_buffer_start},
2151
2152	{"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
2153		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
2154		cmd_handler_mi_conditional_batch_buffer_end, CMD_LEN(2)},
2155
2156	{"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
2157		R_RCS | R_BCS, D_ALL, 0, 2, NULL},
2158
2159	{"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL,
2160		ADDR_FIX_2(4, 7), 8, NULL},
2161
2162	{"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL,
2163		0, 8, NULL},
2164
2165	{"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT,
2166		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
2167
2168	{"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
2169
2170	{"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL,
2171		0, 8, NULL},
2172
2173	{"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL,
2174		ADDR_FIX_1(3), 8, NULL},
2175
2176	{"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS,
2177		D_ALL, 0, 8, NULL},
2178
2179	{"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL,
2180		ADDR_FIX_1(4), 8, NULL},
2181
2182	{"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
2183		ADDR_FIX_2(4, 5), 8, NULL},
2184
2185	{"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
2186		ADDR_FIX_1(4), 8, NULL},
2187
2188	{"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL,
2189		ADDR_FIX_2(4, 7), 8, NULL},
2190
2191	{"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS,
2192		D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
2193
2194	{"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
2195
2196	{"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS,
2197		D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL},
2198
2199	{"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR,
2200		R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
2201
2202	{"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
2203		OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT,
2204		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
2205
2206	{"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS,
2207		D_ALL, ADDR_FIX_1(4), 8, NULL},
2208
2209	{"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT,
2210		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
2211
2212	{"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS,
2213		D_ALL, ADDR_FIX_1(4), 8, NULL},
2214
2215	{"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS,
2216		D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
2217
2218	{"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT,
2219		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
2220
2221	{"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
2222		OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT,
2223		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
2224
2225	{"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL,
2226		ADDR_FIX_2(4, 5), 8, NULL},
2227
2228	{"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE,
2229		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
2230
2231	{"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
2232		OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
2233		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2234
2235	{"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
2236		OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
2237		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2238
2239	{"3DSTATE_BLEND_STATE_POINTERS",
2240		OP_3DSTATE_BLEND_STATE_POINTERS,
2241		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2242
2243	{"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
2244		OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
2245		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2246
2247	{"3DSTATE_BINDING_TABLE_POINTERS_VS",
2248		OP_3DSTATE_BINDING_TABLE_POINTERS_VS,
2249		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2250
2251	{"3DSTATE_BINDING_TABLE_POINTERS_HS",
2252		OP_3DSTATE_BINDING_TABLE_POINTERS_HS,
2253		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2254
2255	{"3DSTATE_BINDING_TABLE_POINTERS_DS",
2256		OP_3DSTATE_BINDING_TABLE_POINTERS_DS,
2257		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2258
2259	{"3DSTATE_BINDING_TABLE_POINTERS_GS",
2260		OP_3DSTATE_BINDING_TABLE_POINTERS_GS,
2261		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2262
2263	{"3DSTATE_BINDING_TABLE_POINTERS_PS",
2264		OP_3DSTATE_BINDING_TABLE_POINTERS_PS,
2265		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2266
2267	{"3DSTATE_SAMPLER_STATE_POINTERS_VS",
2268		OP_3DSTATE_SAMPLER_STATE_POINTERS_VS,
2269		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2270
2271	{"3DSTATE_SAMPLER_STATE_POINTERS_HS",
2272		OP_3DSTATE_SAMPLER_STATE_POINTERS_HS,
2273		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2274
2275	{"3DSTATE_SAMPLER_STATE_POINTERS_DS",
2276		OP_3DSTATE_SAMPLER_STATE_POINTERS_DS,
2277		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2278
2279	{"3DSTATE_SAMPLER_STATE_POINTERS_GS",
2280		OP_3DSTATE_SAMPLER_STATE_POINTERS_GS,
2281		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2282
2283	{"3DSTATE_SAMPLER_STATE_POINTERS_PS",
2284		OP_3DSTATE_SAMPLER_STATE_POINTERS_PS,
2285		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2286
2287	{"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL,
2288		0, 8, NULL},
2289
2290	{"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL,
2291		0, 8, NULL},
2292
2293	{"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL,
2294		0, 8, NULL},
2295
2296	{"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL,
2297		0, 8, NULL},
2298
2299	{"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS,
2300		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2301
2302	{"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS,
2303		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2304
2305	{"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS,
2306		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2307
2308	{"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS,
2309		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2310
2311	{"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS,
2312		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2313
2314	{"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS,
2315		F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
2316
2317	{"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS,
2318		F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
2319
2320	{"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS,
2321		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2322
2323	{"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS,
2324		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2325
2326	{"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS,
2327		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2328
2329	{"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS,
2330		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2331
2332	{"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS,
2333		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2334
2335	{"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS,
2336		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2337
2338	{"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS,
2339		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2340
2341	{"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS,
2342		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2343
2344	{"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS,
2345		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2346
2347	{"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS,
2348		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2349
2350	{"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS,
2351		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2352
2353	{"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS,
2354		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2355
2356	{"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS,
2357		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2358
2359	{"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS,
2360		D_BDW_PLUS, 0, 8, NULL},
2361
2362	{"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2363		NULL},
2364
2365	{"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS,
2366		D_BDW_PLUS, 0, 8, NULL},
2367
2368	{"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS,
2369		D_BDW_PLUS, 0, 8, NULL},
2370
2371	{"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2372		8, NULL},
2373
2374	{"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR,
2375		R_RCS, D_BDW_PLUS, 0, 8, NULL},
2376
2377	{"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2378		8, NULL},
2379
2380	{"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2381		NULL},
2382
2383	{"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2384		NULL},
2385
2386	{"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2387		NULL},
2388
2389	{"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS,
2390		D_BDW_PLUS, 0, 8, NULL},
2391
2392	{"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR,
2393		R_RCS, D_ALL, 0, 8, NULL},
2394
2395	{"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS,
2396		D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL},
2397
2398	{"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST,
2399		R_RCS, D_ALL, 0, 1, NULL},
2400
2401	{"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2402
2403	{"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR,
2404		R_RCS, D_ALL, 0, 8, NULL},
2405
2406	{"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS,
2407		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2408
2409	{"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2410
2411	{"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2412
2413	{"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2414
2415	{"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS,
2416		D_BDW_PLUS, 0, 8, NULL},
2417
2418	{"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS,
2419		D_BDW_PLUS, 0, 8, NULL},
2420
2421	{"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS,
2422		D_ALL, 0, 8, NULL},
2423
2424	{"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS,
2425		D_BDW_PLUS, 0, 8, NULL},
2426
2427	{"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS,
2428		D_BDW_PLUS, 0, 8, NULL},
2429
2430	{"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2431
2432	{"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2433
2434	{"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2435
2436	{"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS,
2437		D_ALL, 0, 8, NULL},
2438
2439	{"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2440
2441	{"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2442
2443	{"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR,
2444		R_RCS, D_ALL, 0, 8, NULL},
2445
2446	{"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0,
2447		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2448
2449	{"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL,
2450		0, 8, NULL},
2451
2452	{"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS,
2453		D_ALL, ADDR_FIX_1(2), 8, NULL},
2454
2455	{"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET,
2456		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2457
2458	{"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN,
2459		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2460
2461	{"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS,
2462		D_ALL, 0, 8, NULL},
2463
2464	{"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS,
2465		D_ALL, 0, 8, NULL},
2466
2467	{"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS,
2468		D_ALL, 0, 8, NULL},
2469
2470	{"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1,
2471		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2472
2473	{"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS,
2474		D_BDW_PLUS, 0, 8, NULL},
2475
2476	{"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS,
2477		D_ALL, ADDR_FIX_1(2), 8, NULL},
2478
2479	{"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR,
2480		R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL},
2481
2482	{"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR,
2483		R_RCS, D_ALL, 0, 8, NULL},
2484
2485	{"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
2486		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2487
2488	{"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS,
2489		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2490
2491	{"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS,
2492		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2493
2494	{"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
2495		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2496
2497	{"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
2498		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2499
2500	{"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR,
2501		R_RCS, D_ALL, 0, 8, NULL},
2502
2503	{"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS,
2504		D_ALL, 0, 9, NULL},
2505
2506	{"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2507		ADDR_FIX_2(2, 4), 8, NULL},
2508
2509	{"3DSTATE_BINDING_TABLE_POOL_ALLOC",
2510		OP_3DSTATE_BINDING_TABLE_POOL_ALLOC,
2511		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2512
2513	{"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC,
2514		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2515
2516	{"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
2517		OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC,
2518		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2519
2520	{"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS,
2521		D_BDW_PLUS, 0, 8, NULL},
2522
2523	{"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL,
2524		ADDR_FIX_1(2), 8, cmd_handler_pipe_control},
2525
2526	{"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2527
2528	{"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0,
2529		1, NULL},
2530
2531	{"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL,
2532		ADDR_FIX_1(1), 8, NULL},
2533
2534	{"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2535
2536	{"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2537		ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL},
2538
2539	{"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
2540		ADDR_FIX_1(1), 8, NULL},
2541
2542	{"OP_SWTESS_BASE_ADDRESS", OP_SWTESS_BASE_ADDRESS,
2543		F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_2(1, 2), 3, NULL},
2544
2545	{"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2546
2547	{"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2548
2549	{"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2550		0, 8, NULL},
2551
2552	{"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR, R_RCS,
2553		D_SKL_PLUS, 0, 8, NULL},
2554
2555	{"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
2556		F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2557
2558	{"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL,
2559		0, 16, NULL},
2560
2561	{"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
2562		0, 16, NULL},
2563
2564	{"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL,
2565		0, 16, NULL},
2566
2567	{"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2568
2569	{"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
2570		0, 16, NULL},
2571
2572	{"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL,
2573		0, 16, NULL},
2574
2575	{"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2576		0, 16, NULL},
2577
2578	{"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2579		0, 8, NULL},
2580
2581	{"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16,
2582		NULL},
2583
2584	{"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45,
2585		F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
2586
2587	{"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR,
2588		R_VCS, D_ALL, 0, 12, NULL},
2589
2590	{"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR,
2591		R_VCS, D_ALL, 0, 12, NULL},
2592
2593	{"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR,
2594		R_VCS, D_BDW_PLUS, 0, 12, NULL},
2595
2596	{"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE,
2597		F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2598
2599	{"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE,
2600		F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL},
2601
2602	{"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2603
2604	{"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR,
2605		R_VCS, D_ALL, 0, 12, NULL},
2606
2607	{"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR,
2608		R_VCS, D_ALL, 0, 12, NULL},
2609
2610	{"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR,
2611		R_VCS, D_ALL, 0, 12, NULL},
2612
2613	{"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR,
2614		R_VCS, D_ALL, 0, 12, NULL},
2615
2616	{"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR,
2617		R_VCS, D_ALL, 0, 12, NULL},
2618
2619	{"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR,
2620		R_VCS, D_ALL, 0, 12, NULL},
2621
2622	{"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR,
2623		R_VCS, D_ALL, 0, 6, NULL},
2624
2625	{"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR,
2626		R_VCS, D_ALL, 0, 12, NULL},
2627
2628	{"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR,
2629		R_VCS, D_ALL, 0, 12, NULL},
2630
2631	{"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR,
2632		R_VCS, D_ALL, 0, 12, NULL},
2633
2634	{"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR,
2635		R_VCS, D_ALL, 0, 12, NULL},
2636
2637	{"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR,
2638		R_VCS, D_ALL, 0, 12, NULL},
2639
2640	{"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR,
2641		R_VCS, D_ALL, 0, 12, NULL},
2642
2643	{"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR,
2644		R_VCS, D_ALL, 0, 12, NULL},
2645	{"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR,
2646		R_VCS, D_ALL, 0, 12, NULL},
2647
2648	{"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR,
2649		R_VCS, D_ALL, 0, 12, NULL},
2650
2651	{"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR,
2652		R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL},
2653
2654	{"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR,
2655		R_VCS, D_ALL, 0, 12, NULL},
2656
2657	{"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR,
2658		R_VCS, D_ALL, 0, 12, NULL},
2659
2660	{"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR,
2661		R_VCS, D_ALL, 0, 12, NULL},
2662
2663	{"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR,
2664		R_VCS, D_ALL, 0, 12, NULL},
2665
2666	{"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR,
2667		R_VCS, D_ALL, 0, 12, NULL},
2668
2669	{"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR,
2670		R_VCS, D_ALL, 0, 12, NULL},
2671
2672	{"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR,
2673		R_VCS, D_ALL, 0, 12, NULL},
2674
2675	{"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR,
2676		R_VCS, D_ALL, 0, 12, NULL},
2677
2678	{"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR,
2679		R_VCS, D_ALL, 0, 12, NULL},
2680
2681	{"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR,
2682		R_VCS, D_ALL, 0, 12, NULL},
2683
2684	{"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR,
2685		R_VCS, D_ALL, 0, 12, NULL},
2686
2687	{"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL,
2688		0, 16, NULL},
2689
2690	{"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2691
2692	{"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2693
2694	{"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR,
2695		R_VCS, D_ALL, 0, 12, NULL},
2696
2697	{"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR,
2698		R_VCS, D_ALL, 0, 12, NULL},
2699
2700	{"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR,
2701		R_VCS, D_ALL, 0, 12, NULL},
2702
2703	{"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL},
2704
2705	{"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL,
2706		0, 12, NULL},
2707
2708	{"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
2709		0, 12, NULL},
2710};
2711
2712static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
2713{
2714	hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
2715}
2716
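/*
 * A minimal sketch of the reverse lookup, assuming the standard
 * <linux/hashtable.h> helpers; the helper name and the ring-mask check
 * are illustrative assumptions, not the exact in-tree implementation.
 */
static inline const struct cmd_info *example_find_cmd_info(
		struct intel_gvt *gvt, unsigned int opcode,
		const struct intel_engine_cs *engine)
{
	struct cmd_entry *e;

	/* walk only the bucket that add_cmd_entry() hashed this opcode to */
	hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
		if (opcode == e->info->opcode &&
		    (e->info->rings & engine->mask))
			return e->info;
	}
	return NULL;
}
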
2717/* call the cmd handler, and advance ip */
2718static int cmd_parser_exec(struct parser_exec_state *s)
2719{
2720	struct intel_vgpu *vgpu = s->vgpu;
2721	const struct cmd_info *info;
2722	u32 cmd;
2723	int ret = 0;
2724
2725	cmd = cmd_val(s, 0);
2726
2727	/* fastpath for MI_NOOP */
2728	if (cmd == MI_NOOP)
2729		info = &cmd_info[mi_noop_index];
2730	else
2731		info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
2732
2733	if (info == NULL) {
2734		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
2735			     cmd, get_opcode(cmd, s->engine),
2736			     repr_addr_type(s->buf_addr_type),
2737			     s->engine->name, s->workload);
2738		return -EBADRQC;
2739	}
2740
2741	s->info = info;
2742
2743	trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va,
2744			  cmd_length(s), s->buf_type, s->buf_addr_type,
2745			  s->workload, info->name);
2746
2747	if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
2748		ret = gvt_check_valid_cmd_length(cmd_length(s),
2749						 info->valid_len);
2750		if (ret)
2751			return ret;
2752	}
2753
2754	if (info->handler) {
2755		ret = info->handler(s);
2756		if (ret < 0) {
2757			gvt_vgpu_err("%s handler error\n", info->name);
2758			return ret;
2759		}
2760	}
2761
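	/* cmds flagged F_IP_ADVANCE_CUSTOM, e.g. MI_BATCH_BUFFER_START/END,
	 * advance the instruction pointer in their own handlers
	 */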
2762	if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
2763		ret = cmd_advance_default(s);
2764		if (ret) {
2765			gvt_vgpu_err("%s IP advance error\n", info->name);
2766			return ret;
2767		}
2768	}
2769	return 0;
2770}
2771
2772static inline bool gma_out_of_range(unsigned long gma,
2773		unsigned long gma_head, unsigned int gma_tail)
2774{
2775	if (gma_tail >= gma_head)
2776		return (gma < gma_head) || (gma > gma_tail);
2777	else
2778		return (gma > gma_tail) && (gma < gma_head);
2779}
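
/*
 * Worked example: in a 0x1000-byte ring with gma_head = base + 0xc00 and
 * gma_tail = base + 0x400 (a wrapped submission), the valid window is
 * [head, ring top) plus [ring base, tail], so only addresses strictly
 * between tail and head are reported as out of range.
 */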
2780
2781/* Keep a consistent return type: e.g. EBADRQC for an unknown
2782 * cmd, EFAULT for an invalid address, EPERM for a non-privileged
2783 * access. These values later serve as input to the VM health status.
2784 */
2785static int command_scan(struct parser_exec_state *s,
2786		unsigned long rb_head, unsigned long rb_tail,
2787		unsigned long rb_start, unsigned long rb_len)
2788{
2789
2790	unsigned long gma_head, gma_tail, gma_bottom;
2791	int ret = 0;
2792	struct intel_vgpu *vgpu = s->vgpu;
2793
2794	gma_head = rb_start + rb_head;
2795	gma_tail = rb_start + rb_tail;
2796	gma_bottom = rb_start +  rb_len;
2797
2798	while (s->ip_gma != gma_tail) {
2799		if (s->buf_type == RING_BUFFER_INSTRUCTION ||
2800				s->buf_type == RING_BUFFER_CTX) {
2801			if (!(s->ip_gma >= rb_start) ||
2802				!(s->ip_gma < gma_bottom)) {
2803				gvt_vgpu_err("ip_gma %lx out of ring scope."
2804					"(base:0x%lx, bottom: 0x%lx)\n",
2805					s->ip_gma, rb_start,
2806					gma_bottom);
2807				parser_exec_state_dump(s);
2808				return -EFAULT;
2809			}
2810			if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
2811				gvt_vgpu_err("ip_gma %lx out of range."
2812					"base 0x%lx head 0x%lx tail 0x%lx\n",
2813					s->ip_gma, rb_start,
2814					rb_head, rb_tail);
2815				parser_exec_state_dump(s);
2816				break;
2817			}
2818		}
2819		ret = cmd_parser_exec(s);
2820		if (ret) {
2821			gvt_vgpu_err("cmd parser error\n");
2822			parser_exec_state_dump(s);
2823			break;
2824		}
2825	}
2826
2827	return ret;
2828}
2829
2830static int scan_workload(struct intel_vgpu_workload *workload)
2831{
2832	unsigned long gma_head, gma_tail, gma_bottom;
2833	struct parser_exec_state s;
2834	int ret = 0;
2835
2836	/* ring base is page aligned */
2837	if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
2838		return -EINVAL;
2839
2840	gma_head = workload->rb_start + workload->rb_head;
2841	gma_tail = workload->rb_start + workload->rb_tail;
2842	gma_bottom = workload->rb_start +  _RING_CTL_BUF_SIZE(workload->rb_ctl);
2843
2844	s.buf_type = RING_BUFFER_INSTRUCTION;
2845	s.buf_addr_type = GTT_BUFFER;
2846	s.vgpu = workload->vgpu;
2847	s.engine = workload->engine;
2848	s.ring_start = workload->rb_start;
2849	s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2850	s.ring_head = gma_head;
2851	s.ring_tail = gma_tail;
2852	s.rb_va = workload->shadow_ring_buffer_va;
2853	s.workload = workload;
2854	s.is_ctx_wa = false;
2855
2856	if (bypass_scan_mask & workload->engine->mask || gma_head == gma_tail)
2857		return 0;
2858
2859	ret = ip_gma_set(&s, gma_head);
2860	if (ret)
2861		goto out;
2862
2863	ret = command_scan(&s, workload->rb_head, workload->rb_tail,
2864		workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));
2865
2866out:
2867	return ret;
2868}
2869
2870static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2871{
2872
2873	unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
2874	struct parser_exec_state s;
2875	int ret = 0;
2876	struct intel_vgpu_workload *workload = container_of(wa_ctx,
2877				struct intel_vgpu_workload,
2878				wa_ctx);
2879
2880	/* ring base is page aligned */
2881	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
2882					I915_GTT_PAGE_SIZE)))
2883		return -EINVAL;
2884
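	/*
	 * Scan 3 dwords past the indirect ctx so that the
	 * MI_BATCH_BUFFER_START appended by combine_wa_ctx() (a 3-dword
	 * command on gen8+) is covered by the scan as well.
	 */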
2885	ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(u32);
2886	ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
2887			PAGE_SIZE);
2888	gma_head = wa_ctx->indirect_ctx.guest_gma;
2889	gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
2890	gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;
2891
2892	s.buf_type = RING_BUFFER_INSTRUCTION;
2893	s.buf_addr_type = GTT_BUFFER;
2894	s.vgpu = workload->vgpu;
2895	s.engine = workload->engine;
2896	s.ring_start = wa_ctx->indirect_ctx.guest_gma;
2897	s.ring_size = ring_size;
2898	s.ring_head = gma_head;
2899	s.ring_tail = gma_tail;
2900	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
2901	s.workload = workload;
2902	s.is_ctx_wa = true;
2903
2904	ret = ip_gma_set(&s, gma_head);
2905	if (ret)
2906		goto out;
2907
2908	ret = command_scan(&s, 0, ring_tail,
2909		wa_ctx->indirect_ctx.guest_gma, ring_size);
2910out:
2911	return ret;
2912}
2913
2914static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2915{
2916	struct intel_vgpu *vgpu = workload->vgpu;
2917	struct intel_vgpu_submission *s = &vgpu->submission;
2918	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
2919	void *shadow_ring_buffer_va;
2920	int ret;
2921
2922	guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2923
2924	/* calculate workload ring buffer size */
2925	workload->rb_len = (workload->rb_tail + guest_rb_size -
2926			workload->rb_head) % guest_rb_size;
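	/*
	 * Worked example: with guest_rb_size = 0x1000, rb_head = 0xc00 and
	 * rb_tail = 0x400, rb_len = (0x400 + 0x1000 - 0xc00) % 0x1000 =
	 * 0x800, i.e. the modulo handles the wrapped (tail < head) case.
	 */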
2927
2928	gma_head = workload->rb_start + workload->rb_head;
2929	gma_tail = workload->rb_start + workload->rb_tail;
2930	gma_top = workload->rb_start + guest_rb_size;
2931
2932	if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) {
2933		void *p;
2934
2935		/* realloc the new ring buffer if needed */
2936		p = krealloc(s->ring_scan_buffer[workload->engine->id],
2937			     workload->rb_len, GFP_KERNEL);
2938		if (!p) {
2939			gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
2940			return -ENOMEM;
2941		}
2942		s->ring_scan_buffer[workload->engine->id] = p;
2943		s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len;
2944	}
2945
2946	shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id];
2947
2948	/* get shadow ring buffer va */
2949	workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
2950
2951	/* ring wrapped (head > tail): first copy the [head, top) chunk */
2952	if (gma_head > gma_tail) {
2953		ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
2954				      gma_head, gma_top, shadow_ring_buffer_va);
2955		if (ret < 0) {
2956			gvt_vgpu_err("fail to copy guest ring buffer\n");
2957			return ret;
2958		}
2959		shadow_ring_buffer_va += ret;
2960		gma_head = workload->rb_start;
2961	}
2962
2963	/* then copy the remaining [head or start, tail) chunk */
2964	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail,
2965				shadow_ring_buffer_va);
2966	if (ret < 0) {
2967		gvt_vgpu_err("fail to copy guest ring buffer\n");
2968		return ret;
2969	}
2970	return 0;
2971}
2972
2973int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
2974{
2975	int ret;
2976	struct intel_vgpu *vgpu = workload->vgpu;
2977
2978	ret = shadow_workload_ring_buffer(workload);
2979	if (ret) {
2980		gvt_vgpu_err("fail to shadow workload ring_buffer\n");
2981		return ret;
2982	}
2983
2984	ret = scan_workload(workload);
2985	if (ret) {
2986		gvt_vgpu_err("scan workload error\n");
2987		return ret;
2988	}
2989	return 0;
2990}
2991
2992static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2993{
2994	int ctx_size = wa_ctx->indirect_ctx.size;
2995	unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
2996	struct intel_vgpu_workload *workload = container_of(wa_ctx,
2997					struct intel_vgpu_workload,
2998					wa_ctx);
2999	struct intel_vgpu *vgpu = workload->vgpu;
3000	struct drm_i915_gem_object *obj;
3001	int ret = 0;
3002	void *map;
3003
3004	obj = i915_gem_object_create_shmem(workload->engine->i915,
3005					   roundup(ctx_size + CACHELINE_BYTES,
3006						   PAGE_SIZE));
3007	if (IS_ERR(obj))
3008		return PTR_ERR(obj);
3009
3010	/* get the va of the shadow indirect ctx */
3011	map = i915_gem_object_pin_map(obj, I915_MAP_WB);
3012	if (IS_ERR(map)) {
3013		gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
3014		ret = PTR_ERR(map);
3015		goto put_obj;
3016	}
3017
3018	i915_gem_object_lock(obj, NULL);
3019	ret = i915_gem_object_set_to_cpu_domain(obj, false);
3020	i915_gem_object_unlock(obj);
3021	if (ret) {
3022		gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
3023		goto unmap_src;
3024	}
3025
3026	ret = copy_gma_to_hva(workload->vgpu,
3027				workload->vgpu->gtt.ggtt_mm,
3028				guest_gma, guest_gma + ctx_size,
3029				map);
3030	if (ret < 0) {
3031		gvt_vgpu_err("fail to copy guest indirect ctx\n");
3032		goto unmap_src;
3033	}
3034
3035	wa_ctx->indirect_ctx.obj = obj;
3036	wa_ctx->indirect_ctx.shadow_va = map;
3037	return 0;
3038
3039unmap_src:
3040	i915_gem_object_unpin_map(obj);
3041put_obj:
3042	i915_gem_object_put(obj);
3043	return ret;
3044}
3045
3046static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
3047{
3048	u32 per_ctx_start[CACHELINE_DWORDS] = {0};
3049	unsigned char *bb_start_sva;
3050
3051	if (!wa_ctx->per_ctx.valid)
3052		return 0;
3053
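	/*
	 * 0x18800001 is an MI_BATCH_BUFFER_START dword (opcode 0x31 << 23 =
	 * 0x18800000, low bits length/flags); DW1 carries the guest per-ctx
	 * batch address, chaining the shadowed indirect ctx into it.
	 */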
3054	per_ctx_start[0] = 0x18800001;
3055	per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
3056
3057	bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
3058				wa_ctx->indirect_ctx.size;
3059
3060	memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);
3061
3062	return 0;
3063}
3064
3065int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
3066{
3067	int ret;
3068	struct intel_vgpu_workload *workload = container_of(wa_ctx,
3069					struct intel_vgpu_workload,
3070					wa_ctx);
3071	struct intel_vgpu *vgpu = workload->vgpu;
3072
3073	if (wa_ctx->indirect_ctx.size == 0)
3074		return 0;
3075
3076	ret = shadow_indirect_ctx(wa_ctx);
3077	if (ret) {
3078		gvt_vgpu_err("fail to shadow indirect ctx\n");
3079		return ret;
3080	}
3081
3082	combine_wa_ctx(wa_ctx);
3083
3084	ret = scan_wa_ctx(wa_ctx);
3085	if (ret) {
3086		gvt_vgpu_err("scan wa ctx error\n");
3087		return ret;
3088	}
3089
3090	return 0;
3091}
3092
3093/* Generate dummy contexts by sending empty requests to the HW, and let
3094 * the HW fill the engine contexts. These dummy contexts are used for
3095 * initialization purposes (updating the reg whitelist), so they are
3096 * referred to as init contexts here.
3097 */
3098void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)
3099{
3100	const unsigned long start = LRC_STATE_PN * PAGE_SIZE;
3101	struct intel_gvt *gvt = vgpu->gvt;
3102	struct intel_engine_cs *engine;
3103	enum intel_engine_id id;
3104
3105	if (gvt->is_reg_whitelist_updated)
3106		return;
3107
3108	/* scan init ctx to update cmd accessible list */
3109	for_each_engine(engine, gvt->gt, id) {
3110		struct parser_exec_state s;
3111		void *vaddr;
3112		int ret;
3113
3114		if (!engine->default_state)
3115			continue;
3116
3117		vaddr = shmem_pin_map(engine->default_state);
3118		if (IS_ERR(vaddr)) {
3119			gvt_err("failed to map %s->default state, err:%zd\n",
3120				engine->name, PTR_ERR(vaddr));
3121			return;
3122		}
3123
3124		s.buf_type = RING_BUFFER_CTX;
3125		s.buf_addr_type = GTT_BUFFER;
3126		s.vgpu = vgpu;
3127		s.engine = engine;
3128		s.ring_start = 0;
3129		s.ring_size = engine->context_size - start;
3130		s.ring_head = 0;
3131		s.ring_tail = s.ring_size;
3132		s.rb_va = vaddr + start;
3133		s.workload = NULL;
3134		s.is_ctx_wa = false;
3135		s.is_init_ctx = true;
3136
3137		/* skipping the first RING_CTX_SIZE(0x50) dwords */
3138		ret = ip_gma_set(&s, RING_CTX_SIZE);
3139		if (ret == 0) {
3140			ret = command_scan(&s, 0, s.ring_size, 0, s.ring_size);
3141			if (ret)
3142				gvt_err("Scan init ctx error\n");
3143		}
3144
3145		shmem_unpin_map(engine->default_state, vaddr);
3146		if (ret)
3147			return;
3148	}
3149
3150	gvt->is_reg_whitelist_updated = true;
3151}
3152
3153int intel_gvt_scan_engine_context(struct intel_vgpu_workload *workload)
3154{
3155	struct intel_vgpu *vgpu = workload->vgpu;
3156	unsigned long gma_head, gma_tail, gma_start, ctx_size;
3157	struct parser_exec_state s;
3158	int ring_id = workload->engine->id;
3159	struct intel_context *ce = vgpu->submission.shadow[ring_id];
3160	int ret;
3161
3162	GEM_BUG_ON(atomic_read(&ce->pin_count) < 0);
3163
3164	ctx_size = workload->engine->context_size - PAGE_SIZE;
3165
3166	/* Only the ring context is loaded to HW for an inhibit context, so
3167	 * there is no need to scan the engine context.
3168	 */
3169	if (is_inhibit_context(ce))
3170		return 0;
3171
3172	gma_start = i915_ggtt_offset(ce->state) + LRC_STATE_PN*PAGE_SIZE;
3173	gma_head = 0;
3174	gma_tail = ctx_size;
3175
3176	s.buf_type = RING_BUFFER_CTX;
3177	s.buf_addr_type = GTT_BUFFER;
3178	s.vgpu = workload->vgpu;
3179	s.engine = workload->engine;
3180	s.ring_start = gma_start;
3181	s.ring_size = ctx_size;
3182	s.ring_head = gma_start + gma_head;
3183	s.ring_tail = gma_start + gma_tail;
3184	s.rb_va = ce->lrc_reg_state;
3185	s.workload = workload;
3186	s.is_ctx_wa = false;
3187	s.is_init_ctx = false;
3188
3189	/* don't scan the first RING_CTX_SIZE(0x50) dwords, as it's ring
3190	 * context
3191	 */
3192	ret = ip_gma_set(&s, gma_start + gma_head + RING_CTX_SIZE);
3193	if (ret)
3194		goto out;
3195
3196	ret = command_scan(&s, gma_head, gma_tail,
3197		gma_start, ctx_size);
3198out:
3199	if (ret)
3200		gvt_vgpu_err("scan shadow ctx error\n");
3201
3202	return ret;
3203}
3204
3205static int init_cmd_table(struct intel_gvt *gvt)
3206{
3207	unsigned int gen_type = intel_gvt_get_device_type(gvt);
3208	int i;
3209
3210	for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
3211		struct cmd_entry *e;
3212
3213		if (!(cmd_info[i].devices & gen_type))
3214			continue;
3215
3216		e = kzalloc(sizeof(*e), GFP_KERNEL);
3217		if (!e)
3218			return -ENOMEM;
3219
3220		e->info = &cmd_info[i];
3221		if (cmd_info[i].opcode == OP_MI_NOOP)
3222			mi_noop_index = i;
3223
3224		INIT_HLIST_NODE(&e->hlist);
3225		add_cmd_entry(gvt, e);
3226		gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
3227			    e->info->name, e->info->opcode, e->info->flag,
3228			    e->info->devices, e->info->rings);
3229	}
3230
3231	return 0;
3232}
3233
3234static void clean_cmd_table(struct intel_gvt *gvt)
3235{
3236	struct hlist_node *tmp;
3237	struct cmd_entry *e;
3238	int i;
3239
3240	hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
3241		kfree(e);
3242
3243	hash_init(gvt->cmd_table);
3244}
3245
3246void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
3247{
3248	clean_cmd_table(gvt);
3249}
3250
3251int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
3252{
3253	int ret;
3254
3255	ret = init_cmd_table(gvt);
3256	if (ret) {
3257		intel_gvt_clean_cmd_parser(gvt);
3258		return ret;
3259	}
3260	return 0;
3261}