/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Kevin Tian <kevin.tian@intel.com>
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Ping Gao <ping.a.gao@intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Yulei Zhang <yulei.zhang@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include <linux/slab.h>
#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

#define INVALID_OP    (~0U)

#define OP_LEN_MI           9
#define OP_LEN_2D           10
#define OP_LEN_3D_MEDIA     16
#define OP_LEN_MFX_VC       16
#define OP_LEN_VEBOX	    16

#define CMD_TYPE(cmd)	(((cmd) >> 29) & 7)
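/*
 * Worked example: the header of an MI_BATCH_BUFFER_START (opcode 0x31 in
 * bits 28:23) is 0x18800000 | <dword length>; CMD_TYPE() extracts bits
 * 31:29, giving 0 (the MI command type).
 */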

struct sub_op_bits {
	int hi;
	int low;
};
struct decode_info {
	const char *name;
	int op_len;
	int nr_sub_op;
	const struct sub_op_bits *sub_op;
};

#define   MAX_CMD_BUDGET			0x7fffffff
#define   MI_WAIT_FOR_PLANE_C_FLIP_PENDING      (1<<15)
#define   MI_WAIT_FOR_PLANE_B_FLIP_PENDING      (1<<9)
#define   MI_WAIT_FOR_PLANE_A_FLIP_PENDING      (1<<1)

#define   MI_WAIT_FOR_SPRITE_C_FLIP_PENDING      (1<<20)
#define   MI_WAIT_FOR_SPRITE_B_FLIP_PENDING      (1<<10)
#define   MI_WAIT_FOR_SPRITE_A_FLIP_PENDING      (1<<2)

/* Render Command Map */

/* MI_* command Opcode (28:23) */
#define OP_MI_NOOP                          0x0
#define OP_MI_SET_PREDICATE                 0x1  /* HSW+ */
#define OP_MI_USER_INTERRUPT                0x2
#define OP_MI_WAIT_FOR_EVENT                0x3
#define OP_MI_FLUSH                         0x4
#define OP_MI_ARB_CHECK                     0x5
#define OP_MI_RS_CONTROL                    0x6  /* HSW+ */
#define OP_MI_REPORT_HEAD                   0x7
#define OP_MI_ARB_ON_OFF                    0x8
#define OP_MI_URB_ATOMIC_ALLOC              0x9  /* HSW+ */
#define OP_MI_BATCH_BUFFER_END              0xA
#define OP_MI_SUSPEND_FLUSH                 0xB
#define OP_MI_PREDICATE                     0xC  /* IVB+ */
#define OP_MI_TOPOLOGY_FILTER               0xD  /* IVB+ */
#define OP_MI_SET_APPID                     0xE  /* IVB+ */
#define OP_MI_RS_CONTEXT                    0xF  /* HSW+ */
#define OP_MI_LOAD_SCAN_LINES_INCL          0x12 /* HSW+ */
#define OP_MI_DISPLAY_FLIP                  0x14
#define OP_MI_SEMAPHORE_MBOX                0x16
#define OP_MI_SET_CONTEXT                   0x18
#define OP_MI_URB_CLEAR                     0x19
#define OP_MI_MATH                          0x1A
#define OP_MI_SEMAPHORE_SIGNAL		    0x1B  /* BDW+ */
#define OP_MI_SEMAPHORE_WAIT		    0x1C  /* BDW+ */

#define OP_MI_STORE_DATA_IMM                0x20
#define OP_MI_STORE_DATA_INDEX              0x21
#define OP_MI_LOAD_REGISTER_IMM             0x22
#define OP_MI_UPDATE_GTT                    0x23
#define OP_MI_STORE_REGISTER_MEM            0x24
#define OP_MI_FLUSH_DW                      0x26
#define OP_MI_CLFLUSH                       0x27
#define OP_MI_REPORT_PERF_COUNT             0x28
#define OP_MI_LOAD_REGISTER_MEM             0x29  /* HSW+ */
#define OP_MI_LOAD_REGISTER_REG             0x2A  /* HSW+ */
#define OP_MI_RS_STORE_DATA_IMM             0x2B  /* HSW+ */
#define OP_MI_LOAD_URB_MEM                  0x2C  /* HSW+ */
#define OP_MI_STORE_URM_MEM                 0x2D  /* HSW+ */
#define OP_MI_2E			    0x2E  /* BDW+ */
#define OP_MI_2F			    0x2F  /* BDW+ */
#define OP_MI_BATCH_BUFFER_START            0x31

/* Bit definition for dword 0 */
#define _CMDBIT_BB_START_IN_PPGTT	(1UL << 8)

#define OP_MI_CONDITIONAL_BATCH_BUFFER_END  0x36

#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
#define BATCH_BUFFER_ADR_SPACE_BIT(x)	(((x) >> 8) & 1U)
#define BATCH_BUFFER_2ND_LEVEL_BIT(x)   ((x) >> 22 & 1U)

/* 2D command: Opcode (28:22) */
#define OP_2D(x)    ((2 << 7) | (x))
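/*
 * Worked example: OP_XY_COLOR_BLT below is OP_2D(0x50) = (2 << 7) | 0x50
 * = 0x150, exactly the top OP_LEN_2D (10) bits of a 2D command header:
 * command type 2 in bits 31:29 followed by the 7-bit opcode in bits 28:22.
 */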

#define OP_XY_SETUP_BLT                             OP_2D(0x1)
#define OP_XY_SETUP_CLIP_BLT                        OP_2D(0x3)
#define OP_XY_SETUP_MONO_PATTERN_SL_BLT             OP_2D(0x11)
#define OP_XY_PIXEL_BLT                             OP_2D(0x24)
#define OP_XY_SCANLINES_BLT                         OP_2D(0x25)
#define OP_XY_TEXT_BLT                              OP_2D(0x26)
#define OP_XY_TEXT_IMMEDIATE_BLT                    OP_2D(0x31)
#define OP_XY_COLOR_BLT                             OP_2D(0x50)
#define OP_XY_PAT_BLT                               OP_2D(0x51)
#define OP_XY_MONO_PAT_BLT                          OP_2D(0x52)
#define OP_XY_SRC_COPY_BLT                          OP_2D(0x53)
#define OP_XY_MONO_SRC_COPY_BLT                     OP_2D(0x54)
#define OP_XY_FULL_BLT                              OP_2D(0x55)
#define OP_XY_FULL_MONO_SRC_BLT                     OP_2D(0x56)
#define OP_XY_FULL_MONO_PATTERN_BLT                 OP_2D(0x57)
#define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT        OP_2D(0x58)
#define OP_XY_MONO_PAT_FIXED_BLT                    OP_2D(0x59)
#define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT           OP_2D(0x71)
#define OP_XY_PAT_BLT_IMMEDIATE                     OP_2D(0x72)
#define OP_XY_SRC_COPY_CHROMA_BLT                   OP_2D(0x73)
#define OP_XY_FULL_IMMEDIATE_PATTERN_BLT            OP_2D(0x74)
#define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT   OP_2D(0x75)
#define OP_XY_PAT_CHROMA_BLT                        OP_2D(0x76)
#define OP_XY_PAT_CHROMA_BLT_IMMEDIATE              OP_2D(0x77)

/* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
#define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
	((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
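/*
 * Worked example: OP_PIPE_CONTROL below is OP_3D_MEDIA(0x3, 0x2, 0x00)
 * = (3 << 13) | (0x3 << 11) | (0x2 << 8) = 0x7a00, matching the top
 * OP_LEN_3D_MEDIA (16) bits of a PIPE_CONTROL header (0x7a000000).
 */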

#define OP_STATE_PREFETCH                       OP_3D_MEDIA(0x0, 0x0, 0x03)

#define OP_STATE_BASE_ADDRESS                   OP_3D_MEDIA(0x0, 0x1, 0x01)
#define OP_STATE_SIP                            OP_3D_MEDIA(0x0, 0x1, 0x02)
#define OP_3D_MEDIA_0_1_4			OP_3D_MEDIA(0x0, 0x1, 0x04)

#define OP_3DSTATE_VF_STATISTICS_GM45           OP_3D_MEDIA(0x1, 0x0, 0x0B)

#define OP_PIPELINE_SELECT                      OP_3D_MEDIA(0x1, 0x1, 0x04)

#define OP_MEDIA_VFE_STATE                      OP_3D_MEDIA(0x2, 0x0, 0x0)
#define OP_MEDIA_CURBE_LOAD                     OP_3D_MEDIA(0x2, 0x0, 0x1)
#define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD      OP_3D_MEDIA(0x2, 0x0, 0x2)
#define OP_MEDIA_GATEWAY_STATE                  OP_3D_MEDIA(0x2, 0x0, 0x3)
#define OP_MEDIA_STATE_FLUSH                    OP_3D_MEDIA(0x2, 0x0, 0x4)
#define OP_MEDIA_POOL_STATE                     OP_3D_MEDIA(0x2, 0x0, 0x5)

#define OP_MEDIA_OBJECT                         OP_3D_MEDIA(0x2, 0x1, 0x0)
#define OP_MEDIA_OBJECT_PRT                     OP_3D_MEDIA(0x2, 0x1, 0x2)
#define OP_MEDIA_OBJECT_WALKER                  OP_3D_MEDIA(0x2, 0x1, 0x3)
#define OP_GPGPU_WALKER                         OP_3D_MEDIA(0x2, 0x1, 0x5)

#define OP_3DSTATE_CLEAR_PARAMS                 OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
#define OP_3DSTATE_DEPTH_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
#define OP_3DSTATE_STENCIL_BUFFER               OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
#define OP_3DSTATE_HIER_DEPTH_BUFFER            OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
#define OP_3DSTATE_VERTEX_BUFFERS               OP_3D_MEDIA(0x3, 0x0, 0x08)
#define OP_3DSTATE_VERTEX_ELEMENTS              OP_3D_MEDIA(0x3, 0x0, 0x09)
#define OP_3DSTATE_INDEX_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x0A)
#define OP_3DSTATE_VF_STATISTICS                OP_3D_MEDIA(0x3, 0x0, 0x0B)
#define OP_3DSTATE_VF                           OP_3D_MEDIA(0x3, 0x0, 0x0C)  /* HSW+ */
#define OP_3DSTATE_CC_STATE_POINTERS            OP_3D_MEDIA(0x3, 0x0, 0x0E)
#define OP_3DSTATE_SCISSOR_STATE_POINTERS       OP_3D_MEDIA(0x3, 0x0, 0x0F)
#define OP_3DSTATE_VS                           OP_3D_MEDIA(0x3, 0x0, 0x10)
#define OP_3DSTATE_GS                           OP_3D_MEDIA(0x3, 0x0, 0x11)
#define OP_3DSTATE_CLIP                         OP_3D_MEDIA(0x3, 0x0, 0x12)
#define OP_3DSTATE_SF                           OP_3D_MEDIA(0x3, 0x0, 0x13)
#define OP_3DSTATE_WM                           OP_3D_MEDIA(0x3, 0x0, 0x14)
#define OP_3DSTATE_CONSTANT_VS                  OP_3D_MEDIA(0x3, 0x0, 0x15)
#define OP_3DSTATE_CONSTANT_GS                  OP_3D_MEDIA(0x3, 0x0, 0x16)
#define OP_3DSTATE_CONSTANT_PS                  OP_3D_MEDIA(0x3, 0x0, 0x17)
#define OP_3DSTATE_SAMPLE_MASK                  OP_3D_MEDIA(0x3, 0x0, 0x18)
#define OP_3DSTATE_CONSTANT_HS                  OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
#define OP_3DSTATE_CONSTANT_DS                  OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
#define OP_3DSTATE_HS                           OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
#define OP_3DSTATE_TE                           OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
#define OP_3DSTATE_DS                           OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
#define OP_3DSTATE_STREAMOUT                    OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
#define OP_3DSTATE_SBE                          OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
#define OP_3DSTATE_PS                           OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
#define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC   OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
#define OP_3DSTATE_BLEND_STATE_POINTERS         OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
#define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
#define OP_3DSTATE_BINDING_TABLE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
#define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
#define OP_3DSTATE_URB_VS                       OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
#define OP_3DSTATE_URB_HS                       OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
#define OP_3DSTATE_URB_DS                       OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
#define OP_3DSTATE_URB_GS                       OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
#define OP_3DSTATE_GATHER_CONSTANT_VS           OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_GS           OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_HS           OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_DS           OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
#define OP_3DSTATE_GATHER_CONSTANT_PS           OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTF_VS             OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTF_PS             OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTI_VS             OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTI_PS             OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTB_VS             OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANTB_PS             OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
#define OP_3DSTATE_DX9_LOCAL_VALID_VS           OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
#define OP_3DSTATE_DX9_LOCAL_VALID_PS           OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
#define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS       OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
#define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS       OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_VS        OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_GS        OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_HS        OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_DS        OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
#define OP_3DSTATE_BINDING_TABLE_EDIT_PS        OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */

#define OP_3DSTATE_VF_INSTANCING		OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
#define OP_3DSTATE_VF_SGVS			OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
#define OP_3DSTATE_VF_TOPOLOGY			OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
#define OP_3DSTATE_WM_CHROMAKEY			OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
#define OP_3DSTATE_PS_BLEND			OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
#define OP_3DSTATE_WM_DEPTH_STENCIL		OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
#define OP_3DSTATE_PS_EXTRA			OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
#define OP_3DSTATE_RASTER			OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
#define OP_3DSTATE_SBE_SWIZ			OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
#define OP_3DSTATE_WM_HZ_OP			OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
#define OP_3DSTATE_COMPONENT_PACKING		OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */

#define OP_3DSTATE_DRAWING_RECTANGLE            OP_3D_MEDIA(0x3, 0x1, 0x00)
#define OP_3DSTATE_SAMPLER_PALETTE_LOAD0        OP_3D_MEDIA(0x3, 0x1, 0x02)
#define OP_3DSTATE_CHROMA_KEY                   OP_3D_MEDIA(0x3, 0x1, 0x04)
#define OP_SNB_3DSTATE_DEPTH_BUFFER             OP_3D_MEDIA(0x3, 0x1, 0x05)
#define OP_3DSTATE_POLY_STIPPLE_OFFSET          OP_3D_MEDIA(0x3, 0x1, 0x06)
#define OP_3DSTATE_POLY_STIPPLE_PATTERN         OP_3D_MEDIA(0x3, 0x1, 0x07)
#define OP_3DSTATE_LINE_STIPPLE                 OP_3D_MEDIA(0x3, 0x1, 0x08)
#define OP_3DSTATE_AA_LINE_PARAMS               OP_3D_MEDIA(0x3, 0x1, 0x0A)
#define OP_3DSTATE_GS_SVB_INDEX                 OP_3D_MEDIA(0x3, 0x1, 0x0B)
#define OP_3DSTATE_SAMPLER_PALETTE_LOAD1        OP_3D_MEDIA(0x3, 0x1, 0x0C)
#define OP_3DSTATE_MULTISAMPLE_BDW		OP_3D_MEDIA(0x3, 0x0, 0x0D)
#define OP_SNB_3DSTATE_STENCIL_BUFFER           OP_3D_MEDIA(0x3, 0x1, 0x0E)
#define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER        OP_3D_MEDIA(0x3, 0x1, 0x0F)
#define OP_SNB_3DSTATE_CLEAR_PARAMS             OP_3D_MEDIA(0x3, 0x1, 0x10)
#define OP_3DSTATE_MONOFILTER_SIZE              OP_3D_MEDIA(0x3, 0x1, 0x11)
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS       OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS       OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS       OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS       OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
#define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS       OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
#define OP_3DSTATE_SO_DECL_LIST                 OP_3D_MEDIA(0x3, 0x1, 0x17)
#define OP_3DSTATE_SO_BUFFER                    OP_3D_MEDIA(0x3, 0x1, 0x18)
#define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC     OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
#define OP_3DSTATE_GATHER_POOL_ALLOC            OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
#define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
#define OP_3DSTATE_SAMPLE_PATTERN               OP_3D_MEDIA(0x3, 0x1, 0x1C)
#define OP_PIPE_CONTROL                         OP_3D_MEDIA(0x3, 0x2, 0x00)
#define OP_3DPRIMITIVE                          OP_3D_MEDIA(0x3, 0x3, 0x00)

/* VCCP Command Parser */

/*
 * The MFX and VEBOX command definitions below are taken from the vaapi
 * intel-driver project (BSD license):
 * git://anongit.freedesktop.org/vaapi/intel-driver
 * src/i965_defines.h
 */

#define OP_MFX(pipeline, op, sub_opa, sub_opb)     \
	(3 << 13 | \
	 (pipeline) << 11 | \
	 (op) << 8 | \
	 (sub_opa) << 5 | \
	 (sub_opb))
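/*
 * Worked example: OP_MFX_PIPE_MODE_SELECT below is OP_MFX(2, 0, 0, 0)
 * = (3 << 13) | (2 << 11) = 0x7000. The sub-opcode fields (sub_opa in
 * bits 7:5, sub_opb in bits 4:0 of the 16-bit opcode) correspond to
 * header bits 23:21 and 20:16, matching sub_op_mfx_vc below.
 */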

#define OP_MFX_PIPE_MODE_SELECT                    OP_MFX(2, 0, 0, 0)  /* ALL */
#define OP_MFX_SURFACE_STATE                       OP_MFX(2, 0, 0, 1)  /* ALL */
#define OP_MFX_PIPE_BUF_ADDR_STATE                 OP_MFX(2, 0, 0, 2)  /* ALL */
#define OP_MFX_IND_OBJ_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 3)  /* ALL */
#define OP_MFX_BSP_BUF_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 4)  /* ALL */
#define OP_2_0_0_5                                 OP_MFX(2, 0, 0, 5)  /* ALL */
#define OP_MFX_STATE_POINTER                       OP_MFX(2, 0, 0, 6)  /* ALL */
#define OP_MFX_QM_STATE                            OP_MFX(2, 0, 0, 7)  /* IVB+ */
#define OP_MFX_FQM_STATE                           OP_MFX(2, 0, 0, 8)  /* IVB+ */
#define OP_MFX_PAK_INSERT_OBJECT                   OP_MFX(2, 0, 2, 8)  /* IVB+ */
#define OP_MFX_STITCH_OBJECT                       OP_MFX(2, 0, 2, 0xA)  /* IVB+ */

#define OP_MFD_IT_OBJECT                           OP_MFX(2, 0, 1, 9) /* ALL */

#define OP_MFX_WAIT                                OP_MFX(1, 0, 0, 0) /* IVB+ */
#define OP_MFX_AVC_IMG_STATE                       OP_MFX(2, 1, 0, 0) /* ALL */
#define OP_MFX_AVC_QM_STATE                        OP_MFX(2, 1, 0, 1) /* ALL */
#define OP_MFX_AVC_DIRECTMODE_STATE                OP_MFX(2, 1, 0, 2) /* ALL */
#define OP_MFX_AVC_SLICE_STATE                     OP_MFX(2, 1, 0, 3) /* ALL */
#define OP_MFX_AVC_REF_IDX_STATE                   OP_MFX(2, 1, 0, 4) /* ALL */
#define OP_MFX_AVC_WEIGHTOFFSET_STATE              OP_MFX(2, 1, 0, 5) /* ALL */
#define OP_MFD_AVC_PICID_STATE                     OP_MFX(2, 1, 1, 5) /* HSW+ */
#define OP_MFD_AVC_DPB_STATE			   OP_MFX(2, 1, 1, 6) /* IVB+ */
#define OP_MFD_AVC_SLICEADDR                       OP_MFX(2, 1, 1, 7) /* IVB+ */
#define OP_MFD_AVC_BSD_OBJECT                      OP_MFX(2, 1, 1, 8) /* ALL */
#define OP_MFC_AVC_PAK_OBJECT                      OP_MFX(2, 1, 2, 9) /* ALL */

#define OP_MFX_VC1_PRED_PIPE_STATE                 OP_MFX(2, 2, 0, 1) /* ALL */
#define OP_MFX_VC1_DIRECTMODE_STATE                OP_MFX(2, 2, 0, 2) /* ALL */
#define OP_MFD_VC1_SHORT_PIC_STATE                 OP_MFX(2, 2, 1, 0) /* IVB+ */
#define OP_MFD_VC1_LONG_PIC_STATE                  OP_MFX(2, 2, 1, 1) /* IVB+ */
#define OP_MFD_VC1_BSD_OBJECT                      OP_MFX(2, 2, 1, 8) /* ALL */

#define OP_MFX_MPEG2_PIC_STATE                     OP_MFX(2, 3, 0, 0) /* ALL */
#define OP_MFX_MPEG2_QM_STATE                      OP_MFX(2, 3, 0, 1) /* ALL */
#define OP_MFD_MPEG2_BSD_OBJECT                    OP_MFX(2, 3, 1, 8) /* ALL */
#define OP_MFC_MPEG2_SLICEGROUP_STATE              OP_MFX(2, 3, 2, 3) /* ALL */
#define OP_MFC_MPEG2_PAK_OBJECT                    OP_MFX(2, 3, 2, 9) /* ALL */

#define OP_MFX_2_6_0_0                             OP_MFX(2, 6, 0, 0) /* IVB+ */
#define OP_MFX_2_6_0_8                             OP_MFX(2, 6, 0, 8) /* IVB+ */
#define OP_MFX_2_6_0_9                             OP_MFX(2, 6, 0, 9) /* IVB+ */

#define OP_MFX_JPEG_PIC_STATE                      OP_MFX(2, 7, 0, 0)
#define OP_MFX_JPEG_HUFF_TABLE_STATE               OP_MFX(2, 7, 0, 2)
#define OP_MFD_JPEG_BSD_OBJECT                     OP_MFX(2, 7, 1, 8)

#define OP_VEB(pipeline, op, sub_opa, sub_opb) \
	(3 << 13 | \
	 (pipeline) << 11 | \
	 (op) << 8 | \
	 (sub_opa) << 5 | \
	 (sub_opb))

#define OP_VEB_SURFACE_STATE                       OP_VEB(2, 4, 0, 0)
#define OP_VEB_STATE                               OP_VEB(2, 4, 0, 2)
#define OP_VEB_DNDI_IECP_STATE                     OP_VEB(2, 4, 0, 3)

struct parser_exec_state;

typedef int (*parser_cmd_handler)(struct parser_exec_state *s);

#define GVT_CMD_HASH_BITS   7

/* which DWords need address fix */
#define ADDR_FIX_1(x1)			(1 << (x1))
#define ADDR_FIX_2(x1, x2)		(ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
#define ADDR_FIX_3(x1, x2, x3)		(ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
#define ADDR_FIX_4(x1, x2, x3, x4)	(ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
#define ADDR_FIX_5(x1, x2, x3, x4, x5)  (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
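/*
 * Worked example: ADDR_FIX_2(1, 2) = (1 << 1) | (1 << 2) = 0x6, marking
 * DWords 1 and 2 of a command as graphics-memory addresses that the
 * parser must audit (and possibly patch).
 */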

#define DWORD_FIELD(dword, end, start) \
	FIELD_GET(GENMASK(end, start), cmd_val(s, dword))

#define OP_LENGTH_BIAS 2
#define CMD_LEN(value)	((value) + OP_LENGTH_BIAS)
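/*
 * Worked example: CMD_LEN(1) = 3, i.e. a command whose DWord-length
 * field is 1 occupies 3 DWords in total once the bias of 2 is applied.
 */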

static int gvt_check_valid_cmd_length(int len, int valid_len)
{
	if (valid_len != len) {
		gvt_err("len is not valid:  len=%d  valid_len=%d\n",
			len, valid_len);
		return -EFAULT;
	}
	return 0;
}

struct cmd_info {
	const char *name;
	u32 opcode;

#define F_LEN_MASK	3U
#define F_LEN_CONST  1U
#define F_LEN_VAR    0U
/* value is const although LEN may be variable */
#define F_LEN_VAR_FIXED    (1<<1)

/*
 * command has its own ip advance logic,
 * e.g. MI_BATCH_BUFFER_START, MI_BATCH_BUFFER_END
 */
#define F_IP_ADVANCE_CUSTOM (1<<2)
	u32 flag;

#define R_RCS	BIT(RCS0)
#define R_VCS1  BIT(VCS0)
#define R_VCS2  BIT(VCS1)
#define R_VCS	(R_VCS1 | R_VCS2)
#define R_BCS	BIT(BCS0)
#define R_VECS	BIT(VECS0)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
	/* rings that support this cmd: BLT/RCS/VCS/VECS */
	u16 rings;

	/* devices that support this cmd: SNB/IVB/HSW/... */
	u16 devices;

	/* which DWords are addresses that need to be fixed up.
	 * A 0 bit means a 32-bit non-address operand in the command.
	 * A 1 bit means an address operand, which can be 32-bit or
	 * 64-bit depending on the architecture (defined by
	 * "gmadr_bytes_in_cmd" in intel_gvt). Regardless of the address
	 * length, each address takes only one bit in the bitmap.
	 */
	u16 addr_bitmap;

	/* flag == F_LEN_CONST : command length
	 * flag == F_LEN_VAR : length bias bits
	 * Note: length is in DWord
	 */
	u32 len;

	parser_cmd_handler handler;

	/* valid length in DWord */
	u32 valid_len;
};
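/*
 * Illustrative sketch only (the real command table appears later in this
 * file): an entry for MI_BATCH_BUFFER_END would look roughly like
 *
 *	{"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
 *		F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
 *		cmd_handler_mi_batch_buffer_end},
 *
 * i.e. a constant one-DWord command valid on all rings, with a custom
 * ip-advance handler (assuming the D_ALL device mask that table uses).
 */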

struct cmd_entry {
	struct hlist_node hlist;
	const struct cmd_info *info;
};

enum {
	RING_BUFFER_INSTRUCTION,
	BATCH_BUFFER_INSTRUCTION,
	BATCH_BUFFER_2ND_LEVEL,
};

enum {
	GTT_BUFFER,
	PPGTT_BUFFER
};

struct parser_exec_state {
	struct intel_vgpu *vgpu;
	int ring_id;

	int buf_type;

	/* batch buffer address type */
	int buf_addr_type;

	/* graphics memory address of ring buffer start */
	unsigned long ring_start;
	unsigned long ring_size;
	unsigned long ring_head;
	unsigned long ring_tail;

	/* instruction graphics memory address */
	unsigned long ip_gma;

	/* mapped va of ip_gma */
	void *ip_va;
	void *rb_va;

	void *ret_bb_va;
	/* next instruction when returning from batch buffer to ring buffer */
	unsigned long ret_ip_gma_ring;

	/* next instruction when returning from 2nd-level batch buffer to batch buffer */
	unsigned long ret_ip_gma_bb;

	/* batch buffer address type (GTT or PPGTT)
	 * used when returning from a 2nd-level batch buffer
	 */
	int saved_buf_addr_type;
	bool is_ctx_wa;

	const struct cmd_info *info;

	struct intel_vgpu_workload *workload;
};

#define gmadr_dw_number(s)	\
	(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)

static unsigned long bypass_scan_mask;

/* ring ALL, type = 0 */
static const struct sub_op_bits sub_op_mi[] = {
	{31, 29},
	{28, 23},
};

static const struct decode_info decode_info_mi = {
	"MI",
	OP_LEN_MI,
	ARRAY_SIZE(sub_op_mi),
	sub_op_mi,
};

/* ring RCS, command type 2 */
static const struct sub_op_bits sub_op_2d[] = {
	{31, 29},
	{28, 22},
};

static const struct decode_info decode_info_2d = {
	"2D",
	OP_LEN_2D,
	ARRAY_SIZE(sub_op_2d),
	sub_op_2d,
};

/* ring RCS, command type 3 */
static const struct sub_op_bits sub_op_3d_media[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 16},
};

static const struct decode_info decode_info_3d_media = {
	"3D_Media",
	OP_LEN_3D_MEDIA,
	ARRAY_SIZE(sub_op_3d_media),
	sub_op_3d_media,
};

/* ring VCS, command type 3 */
static const struct sub_op_bits sub_op_mfx_vc[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 21},
	{20, 16},
};

static const struct decode_info decode_info_mfx_vc = {
	"MFX_VC",
	OP_LEN_MFX_VC,
	ARRAY_SIZE(sub_op_mfx_vc),
	sub_op_mfx_vc,
};

/* ring VECS, command type 3 */
static const struct sub_op_bits sub_op_vebox[] = {
	{31, 29},
	{28, 27},
	{26, 24},
	{23, 21},
	{20, 16},
};

static const struct decode_info decode_info_vebox = {
	"VEBOX",
	OP_LEN_VEBOX,
	ARRAY_SIZE(sub_op_vebox),
	sub_op_vebox,
};

static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
	[RCS0] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_3d_media,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VCS0] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_mfx_vc,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[BCS0] = {
		&decode_info_mi,
		NULL,
		&decode_info_2d,
		NULL,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VECS0] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_vebox,
		NULL,
		NULL,
		NULL,
		NULL,
	},

	[VCS1] = {
		&decode_info_mi,
		NULL,
		NULL,
		&decode_info_mfx_vc,
		NULL,
		NULL,
		NULL,
		NULL,
	},
};

static inline u32 get_opcode(u32 cmd, int ring_id)
{
	const struct decode_info *d_info;

	d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
	if (d_info == NULL)
		return INVALID_OP;

	return cmd >> (32 - d_info->op_len);
}
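/*
 * Worked example: for an MI command (op_len 9), get_opcode() returns the
 * top 9 bits of the header, i.e. the type bits 31:29 plus the opcode bits
 * 28:23, so an MI_BATCH_BUFFER_START header 0x188000xx yields 0x31.
 */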

static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
		unsigned int opcode, int ring_id)
{
	struct cmd_entry *e;

	hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
		if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
			return e->info;
	}
	return NULL;
}

static inline const struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
		u32 cmd, int ring_id)
{
	u32 opcode;

	opcode = get_opcode(cmd, ring_id);
	if (opcode == INVALID_OP)
		return NULL;

	return find_cmd_entry(gvt, opcode, ring_id);
}

static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
{
	return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
}
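/*
 * Worked example: sub_op_val(cmd, 28, 23) extracts the 6-bit MI opcode
 * field; with cmd = 0x18800000 it returns (0x18800000 >> 23) & 0x3f = 0x31.
 */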

static inline void print_opcode(u32 cmd, int ring_id)
{
	const struct decode_info *d_info;
	int i;

	d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
	if (d_info == NULL)
		return;

	gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
			cmd >> (32 - d_info->op_len), d_info->name);

	for (i = 0; i < d_info->nr_sub_op; i++)
		pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi,
					d_info->sub_op[i].low));

	pr_err("\n");
}

static inline u32 *cmd_ptr(struct parser_exec_state *s, int index)
{
	return s->ip_va + (index << 2);
}

static inline u32 cmd_val(struct parser_exec_state *s, int index)
{
	return *cmd_ptr(s, index);
}

static void parser_exec_state_dump(struct parser_exec_state *s)
{
	int cnt = 0;
	int i;

	gvt_dbg_cmd("  vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
			" ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
			s->ring_id, s->ring_start, s->ring_start + s->ring_size,
			s->ring_head, s->ring_tail);

	gvt_dbg_cmd("  %s %s ip_gma(%08lx) ",
			s->buf_type == RING_BUFFER_INSTRUCTION ?
			"RING_BUFFER" : "BATCH_BUFFER",
			s->buf_addr_type == GTT_BUFFER ?
			"GTT" : "PPGTT", s->ip_gma);

	if (s->ip_va == NULL) {
		gvt_dbg_cmd(" ip_va(NULL)");
		return;
	}

	gvt_dbg_cmd("  ip_va=%p: %08x %08x %08x %08x\n",
			s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
			cmd_val(s, 2), cmd_val(s, 3));

	print_opcode(cmd_val(s, 0), s->ring_id);

	s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);

	while (cnt < 1024) {
		gvt_dbg_cmd("ip_va=%p: ", s->ip_va);
		for (i = 0; i < 8; i++)
			gvt_dbg_cmd("%08x ", cmd_val(s, i));
		gvt_dbg_cmd("\n");

		s->ip_va += 8 * sizeof(u32);
		cnt += 8;
	}
}

static inline void update_ip_va(struct parser_exec_state *s)
{
	unsigned long len = 0;

	if (WARN_ON(s->ring_head == s->ring_tail))
		return;

	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
		unsigned long ring_top = s->ring_start + s->ring_size;

		if (s->ring_head > s->ring_tail) {
			if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top)
				len = (s->ip_gma - s->ring_head);
			else if (s->ip_gma >= s->ring_start &&
					s->ip_gma <= s->ring_tail)
				len = (ring_top - s->ring_head) +
					(s->ip_gma - s->ring_start);
		} else {
			len = (s->ip_gma - s->ring_head);
		}

		s->ip_va = s->rb_va + len;
	} else {/* shadow batch buffer */
		s->ip_va = s->ret_bb_va;
	}
}
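/*
 * Worked example of the wrap case above: with ring_start = 0,
 * ring_size = 0x10000, ring_head = 0xf000 and ring_tail = 0x1000, an
 * ip_gma of 0x500 lies in the wrapped-around part, so
 * len = (0x10000 - 0xf000) + 0x500 = 0x1500 and ip_va = rb_va + 0x1500.
 */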

static inline int ip_gma_set(struct parser_exec_state *s,
		unsigned long ip_gma)
{
	WARN_ON(!IS_ALIGNED(ip_gma, 4));

	s->ip_gma = ip_gma;
	update_ip_va(s);
	return 0;
}

static inline int ip_gma_advance(struct parser_exec_state *s,
		unsigned int dw_len)
{
	s->ip_gma += (dw_len << 2);

	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
		if (s->ip_gma >= s->ring_start + s->ring_size)
			s->ip_gma -= s->ring_size;
		update_ip_va(s);
	} else {
		s->ip_va += (dw_len << 2);
	}

	return 0;
}

static inline int get_cmd_length(const struct cmd_info *info, u32 cmd)
{
	if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
		return info->len;
	else
		return (cmd & ((1U << info->len) - 1)) + 2;
}
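/*
 * Worked example: for a variable-length command whose cmd_info has
 * len = 6, the length is the low 6 bits of the header plus the bias of
 * 2, so a header ending in ...000011 decodes to a 5-DWord command.
 */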

static inline int cmd_length(struct parser_exec_state *s)
{
	return get_cmd_length(s->info, cmd_val(s, 0));
}

/* do not remove this, some platforms may need clflush here */
#define patch_value(s, addr, val) do { \
	*addr = val; \
} while (0)

static bool is_shadowed_mmio(unsigned int offset)
{
	bool ret = false;

	if ((offset == 0x2168) || /* BB current head register UDW */
	    (offset == 0x2140) || /* BB current head register */
	    (offset == 0x211c) || /* second-level BB head register UDW */
	    (offset == 0x2114)) { /* second-level BB head register */
		ret = true;
	}
	return ret;
}

static inline bool is_force_nonpriv_mmio(unsigned int offset)
{
	return (offset >= 0x24d0 && offset < 0x2500);
}

static int force_nonpriv_reg_handler(struct parser_exec_state *s,
		unsigned int offset, unsigned int index, char *cmd)
{
	struct intel_gvt *gvt = s->vgpu->gvt;
	unsigned int data;
	u32 ring_base;
	u32 nopid;
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;

	if (!strcmp(cmd, "lri"))
		data = cmd_val(s, index + 1);
	else {
		gvt_err("Unexpected forcenonpriv 0x%x write from cmd %s\n",
			offset, cmd);
		return -EINVAL;
	}

	ring_base = dev_priv->engine[s->ring_id]->mmio_base;
	nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));

	if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
			data != nopid) {
		gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
			offset, data);
		patch_value(s, cmd_ptr(s, index), nopid);
		return 0;
	}
	return 0;
}

static inline bool is_mocs_mmio(unsigned int offset)
{
	return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
		((offset >= 0xb020) && (offset <= 0xb0a0));
}

static int mocs_cmd_reg_handler(struct parser_exec_state *s,
				unsigned int offset, unsigned int index)
{
	if (!is_mocs_mmio(offset))
		return -EINVAL;
	vgpu_vreg(s->vgpu, offset) = cmd_val(s, index + 1);
	return 0;
}

static int cmd_reg_handler(struct parser_exec_state *s,
	unsigned int offset, unsigned int index, char *cmd)
{
	struct intel_vgpu *vgpu = s->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	u32 ctx_sr_ctl;

	if (offset + 4 > gvt->device_info.mmio_size) {
		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
				cmd, offset);
		return -EFAULT;
	}

	if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
		gvt_vgpu_err("%s access to non-render register (%x)\n",
				cmd, offset);
		return -EBADRQC;
	}

	if (is_shadowed_mmio(offset)) {
		gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
		return 0;
	}

	if (is_mocs_mmio(offset) &&
	    mocs_cmd_reg_handler(s, offset, index))
		return -EINVAL;

	if (is_force_nonpriv_mmio(offset) &&
		force_nonpriv_reg_handler(s, offset, index, cmd))
		return -EPERM;
	if (offset == i915_mmio_reg_offset(DERRMR) ||
		offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
		/* redirect the write to the VGT_PVINFO_PAGE offset;
		 * HW writes to that page are discarded
		 */
		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
	}

	/* TODO
	 * To let a workload with an inhibit context generate correct image
	 * data in memory, vreg values are loaded to HW via LRIs in that
	 * workload. But as the indirect context is loaded prior to the LRIs
	 * in the workload, we don't want register values specified in the
	 * indirect context to be overwritten by the LRIs in workloads. So,
	 * when scanning an indirect context, we update the register values
	 * in it into vregs, so that LRIs in a workload with an inhibit
	 * context will restore the correct values.
	 */
	if (IS_GEN(gvt->dev_priv, 9) &&
			intel_gvt_mmio_is_in_ctx(gvt, offset) &&
			!strncmp(cmd, "lri", 3)) {
		intel_gvt_hypervisor_read_gpa(s->vgpu,
			s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
		/* check inhibit context */
		if (ctx_sr_ctl & 1) {
			u32 data = cmd_val(s, index + 1);

			if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
				intel_vgpu_mask_mmio_write(vgpu,
							offset, &data, 4);
			else
				vgpu_vreg(vgpu, offset) = data;
		}
	}

	/* TODO: Update the global mask if this MMIO is a masked-MMIO */
	intel_gvt_mmio_set_cmd_accessed(gvt, offset);
	return 0;
}

#define cmd_reg(s, i) \
	(cmd_val(s, i) & GENMASK(22, 2))

#define cmd_reg_inhibit(s, i) \
	(cmd_val(s, i) & GENMASK(22, 18))

#define cmd_gma(s, i) \
	(cmd_val(s, i) & GENMASK(31, 2))

#define cmd_gma_hi(s, i) \
	(cmd_val(s, i) & GENMASK(15, 0))
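/*
 * Worked example: an MI_LOAD_REGISTER_IMM payload is a sequence of
 * (offset, value) DWord pairs; cmd_reg(s, i) masks bits 22:2 of DWord i
 * to recover the DWord-aligned register offset, which is why
 * cmd_handler_lri() below steps i by 2.
 */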

static int cmd_handler_lri(struct parser_exec_state *s)
{
	int i, ret = 0;
	int cmd_len = cmd_length(s);
	struct intel_gvt *gvt = s->vgpu->gvt;
	u32 valid_len = CMD_LEN(1);

	/*
	 * Official Intel docs are somewhat sloppy; check the definition of
	 * MI_LOAD_REGISTER_IMM.
	 */
	#define MAX_VALID_LEN 127
	if ((cmd_len < valid_len) || (cmd_len > MAX_VALID_LEN)) {
		gvt_err("len is not valid:  len=%d  valid_len=%u\n",
			cmd_len, valid_len);
		return -EFAULT;
	}

	for (i = 1; i < cmd_len; i += 2) {
		if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
			if (s->ring_id == BCS0 &&
			    cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
				ret |= 0;
			else
				ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0;
		}
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
		if (ret)
			break;
	}
	return ret;
}

static int cmd_handler_lrr(struct parser_exec_state *s)
{
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len; i += 2) {
		if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
			ret |= ((cmd_reg_inhibit(s, i) ||
					(cmd_reg_inhibit(s, i + 1)))) ?
				-EBADRQC : 0;
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
		if (ret)
			break;
	}
	return ret;
}

static inline int cmd_address_audit(struct parser_exec_state *s,
		unsigned long guest_gma, int op_size, bool index_mode);

static int cmd_handler_lrm(struct parser_exec_state *s)
{
	struct intel_gvt *gvt = s->vgpu->gvt;
	int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len;) {
		if (IS_BROADWELL(gvt->dev_priv))
			ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
		if (ret)
			break;
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
		if (ret)
			break;
		if (cmd_val(s, 0) & (1 << 22)) {
			gma = cmd_gma(s, i + 1);
			if (gmadr_bytes == 8)
				gma |= (cmd_gma_hi(s, i + 2)) << 32;
			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
			if (ret)
				break;
		}
		i += gmadr_dw_number(s) + 1;
	}
	return ret;
}

static int cmd_handler_srm(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	int i, ret = 0;
	int cmd_len = cmd_length(s);

	for (i = 1; i < cmd_len;) {
		ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
		if (ret)
			break;
		if (cmd_val(s, 0) & (1 << 22)) {
			gma = cmd_gma(s, i + 1);
			if (gmadr_bytes == 8)
				gma |= (cmd_gma_hi(s, i + 2)) << 32;
			ret |= cmd_address_audit(s, gma, sizeof(u32), false);
			if (ret)
				break;
		}
		i += gmadr_dw_number(s) + 1;
	}
	return ret;
}

struct cmd_interrupt_event {
	int pipe_control_notify;
	int mi_flush_dw;
	int mi_user_interrupt;
};

static struct cmd_interrupt_event cmd_interrupt_events[] = {
	[RCS0] = {
		.pipe_control_notify = RCS_PIPE_CONTROL,
		.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
		.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
	},
	[BCS0] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = BCS_MI_FLUSH_DW,
		.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
	},
	[VCS0] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VCS_MI_FLUSH_DW,
		.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
	},
	[VCS1] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VCS2_MI_FLUSH_DW,
		.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
	},
	[VECS0] = {
		.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
		.mi_flush_dw = VECS_MI_FLUSH_DW,
		.mi_user_interrupt = VECS_MI_USER_INTERRUPT,
	},
};

static int cmd_handler_pipe_control(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	unsigned long gma;
	bool index_mode = false;
	unsigned int post_sync;
	int ret = 0;
	u32 hws_pga, val;

	post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;

	/* LRI post sync */
	if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
		ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
	/* post sync */
	else if (post_sync) {
		if (post_sync == 2)
			ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
		else if (post_sync == 3)
			ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
		else if (post_sync == 1) {
			/* check ggtt */
			if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
				gma = cmd_val(s, 2) & GENMASK(31, 3);
				if (gmadr_bytes == 8)
					gma |= (cmd_gma_hi(s, 3)) << 32;
				/* Store Data Index */
				if (cmd_val(s, 1) & (1 << 21))
					index_mode = true;
				ret |= cmd_address_audit(s, gma, sizeof(u64),
						index_mode);
				if (ret)
					return ret;
				if (index_mode) {
					hws_pga = s->vgpu->hws_pga[s->ring_id];
					gma = hws_pga + gma;
					patch_value(s, cmd_ptr(s, 2), gma);
					val = cmd_val(s, 1) & (~(1 << 21));
					patch_value(s, cmd_ptr(s, 1), val);
				}
			}
		}
	}

	if (ret)
		return ret;

	if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
		set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
				s->workload->pending_events);
	return 0;
}

static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
	set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
			s->workload->pending_events);
	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
	return 0;
}

static int cmd_advance_default(struct parser_exec_state *s)
{
	return ip_gma_advance(s, cmd_length(s));
}

static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
{
	int ret;

	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
		s->buf_type = BATCH_BUFFER_INSTRUCTION;
		ret = ip_gma_set(s, s->ret_ip_gma_bb);
		s->buf_addr_type = s->saved_buf_addr_type;
	} else {
		s->buf_type = RING_BUFFER_INSTRUCTION;
		s->buf_addr_type = GTT_BUFFER;
		if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
			s->ret_ip_gma_ring -= s->ring_size;
		ret = ip_gma_set(s, s->ret_ip_gma_ring);
	}
	return ret;
}

struct mi_display_flip_command_info {
	int pipe;
	int plane;
	int event;
	i915_reg_t stride_reg;
	i915_reg_t ctrl_reg;
	i915_reg_t surf_reg;
	u64 stride_val;
	u64 tile_val;
	u64 surf_val;
	bool async_flip;
};

struct plane_code_mapping {
	int pipe;
	int plane;
	int event;
};

static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
	struct plane_code_mapping gen8_plane_code[] = {
		[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
		[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
		[2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
		[3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
		[4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
		[5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
	};
	u32 dword0, dword1, dword2;
	u32 v;

	dword0 = cmd_val(s, 0);
	dword1 = cmd_val(s, 1);
	dword2 = cmd_val(s, 2);

	v = (dword0 & GENMASK(21, 19)) >> 19;
	if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
		return -EBADRQC;

	info->pipe = gen8_plane_code[v].pipe;
	info->plane = gen8_plane_code[v].plane;
	info->event = gen8_plane_code[v].event;
	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
	info->tile_val = (dword1 & 0x1);
	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);

	if (info->plane == PLANE_A) {
		info->ctrl_reg = DSPCNTR(info->pipe);
		info->stride_reg = DSPSTRIDE(info->pipe);
		info->surf_reg = DSPSURF(info->pipe);
	} else if (info->plane == PLANE_B) {
		info->ctrl_reg = SPRCTL(info->pipe);
		info->stride_reg = SPRSTRIDE(info->pipe);
		info->surf_reg = SPRSURF(info->pipe);
	} else {
		WARN_ON(1);
		return -EBADRQC;
	}
	return 0;
}

static int skl_decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
	struct intel_vgpu *vgpu = s->vgpu;
	u32 dword0 = cmd_val(s, 0);
	u32 dword1 = cmd_val(s, 1);
	u32 dword2 = cmd_val(s, 2);
	u32 plane = (dword0 & GENMASK(12, 8)) >> 8;

	info->plane = PRIMARY_PLANE;

	switch (plane) {
	case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
		info->pipe = PIPE_A;
		info->event = PRIMARY_A_FLIP_DONE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
		info->pipe = PIPE_B;
		info->event = PRIMARY_B_FLIP_DONE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
		info->pipe = PIPE_C;
		info->event = PRIMARY_C_FLIP_DONE;
		break;

	case MI_DISPLAY_FLIP_SKL_PLANE_2_A:
		info->pipe = PIPE_A;
		info->event = SPRITE_A_FLIP_DONE;
		info->plane = SPRITE_PLANE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_2_B:
		info->pipe = PIPE_B;
		info->event = SPRITE_B_FLIP_DONE;
		info->plane = SPRITE_PLANE;
		break;
	case MI_DISPLAY_FLIP_SKL_PLANE_2_C:
		info->pipe = PIPE_C;
		info->event = SPRITE_C_FLIP_DONE;
		info->plane = SPRITE_PLANE;
		break;

	default:
		gvt_vgpu_err("unknown plane code %d\n", plane);
		return -EBADRQC;
	}

	info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
	info->tile_val = (dword1 & GENMASK(2, 0));
	info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
	info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);

	info->ctrl_reg = DSPCNTR(info->pipe);
	info->stride_reg = DSPSTRIDE(info->pipe);
	info->surf_reg = DSPSURF(info->pipe);

	return 0;
}

static int gen8_check_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
	u32 stride, tile;

	if (!info->async_flip)
		return 0;

	if (INTEL_GEN(dev_priv) >= 9) {
		stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
				GENMASK(12, 10)) >> 10;
	} else {
		stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) &
				GENMASK(15, 6)) >> 6;
		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
	}

	if (stride != info->stride_val)
		gvt_dbg_cmd("cannot change stride during async flip\n");

	if (tile != info->tile_val)
		gvt_dbg_cmd("cannot change tile during async flip\n");

	return 0;
}

static int gen8_update_plane_mmio_from_mi_display_flip(
		struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
	struct intel_vgpu *vgpu = s->vgpu;

	set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
		      info->surf_val << 12);
	if (INTEL_GEN(dev_priv) >= 9) {
		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
			      info->stride_val);
		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
			      info->tile_val << 10);
	} else {
		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(15, 6),
			      info->stride_val << 6);
		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(10, 10),
			      info->tile_val << 10);
	}

	if (info->plane == PLANE_PRIMARY)
		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(info->pipe))++;

	if (info->async_flip)
		intel_vgpu_trigger_virtual_event(vgpu, info->event);
	else
		set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]);

	return 0;
}

static int decode_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;

	if (IS_BROADWELL(dev_priv))
		return gen8_decode_mi_display_flip(s, info);
	if (INTEL_GEN(dev_priv) >= 9)
		return skl_decode_mi_display_flip(s, info);

	return -ENODEV;
}

static int check_mi_display_flip(struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	return gen8_check_mi_display_flip(s, info);
}

static int update_plane_mmio_from_mi_display_flip(
		struct parser_exec_state *s,
		struct mi_display_flip_command_info *info)
{
	return gen8_update_plane_mmio_from_mi_display_flip(s, info);
}

static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
{
	struct mi_display_flip_command_info info;
	struct intel_vgpu *vgpu = s->vgpu;
	int ret;
	int i;
	int len = cmd_length(s);
	u32 valid_len = CMD_LEN(1);

	/* Flip Type == Stereo 3D Flip */
	if (DWORD_FIELD(2, 1, 0) == 2)
		valid_len++;
	ret = gvt_check_valid_cmd_length(cmd_length(s),
			valid_len);
	if (ret)
		return ret;

	ret = decode_mi_display_flip(s, &info);
	if (ret) {
		gvt_vgpu_err("fail to decode MI display flip command\n");
		return ret;
	}

	ret = check_mi_display_flip(s, &info);
	if (ret) {
		gvt_vgpu_err("invalid MI display flip command\n");
		return ret;
	}

	ret = update_plane_mmio_from_mi_display_flip(s, &info);
	if (ret) {
		gvt_vgpu_err("fail to update plane mmio\n");
		return ret;
	}

	for (i = 0; i < len; i++)
		patch_value(s, cmd_ptr(s, i), MI_NOOP);
	return 0;
}

static bool is_wait_for_flip_pending(u32 cmd)
{
	return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
			MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
			MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
			MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
			MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
			MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
}

static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
{
	u32 cmd = cmd_val(s, 0);

	if (!is_wait_for_flip_pending(cmd))
		return 0;

	patch_value(s, cmd_ptr(s, 0), MI_NOOP);
	return 0;
}

static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
{
	unsigned long addr;
	unsigned long gma_high, gma_low;
	struct intel_vgpu *vgpu = s->vgpu;
	int gmadr_bytes = vgpu->gvt->device_info.gmadr_bytes_in_cmd;

	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) {
		gvt_vgpu_err("invalid gma bytes %d\n", gmadr_bytes);
		return INTEL_GVT_INVALID_ADDR;
	}

	gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
	if (gmadr_bytes == 4) {
		addr = gma_low;
	} else {
		gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
		addr = (((unsigned long)gma_high) << 32) | gma_low;
	}
	return addr;
}
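/*
 * Worked example: with gmadr_bytes == 8, the DWord at 'index' carries the
 * low address bits (31:2) and the next DWord the high 16 bits, so a pair
 * 0x00001000 / 0x00000001 assembles to the 48-bit gma 0x1_00001000.
 */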

static inline int cmd_address_audit(struct parser_exec_state *s,
		unsigned long guest_gma, int op_size, bool index_mode)
{
	struct intel_vgpu *vgpu = s->vgpu;
	u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
	int i;
	int ret;

	if (op_size > max_surface_size) {
		gvt_vgpu_err("command address audit fail name %s\n",
			s->info->name);
		return -EFAULT;
	}

	if (index_mode)	{
		if (guest_gma >= I915_GTT_PAGE_SIZE) {
			ret = -EFAULT;
			goto err;
		}
	} else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
		ret = -EFAULT;
		goto err;
	}

	return 0;

err:
	gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
			s->info->name, guest_gma, op_size);

	pr_err("cmd dump: ");
	for (i = 0; i < cmd_length(s); i++) {
		if (!(i % 4))
			pr_err("\n%08x ", cmd_val(s, i));
		else
			pr_err("%08x ", cmd_val(s, i));
	}
	pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
			vgpu->id,
			vgpu_aperture_gmadr_base(vgpu),
			vgpu_aperture_gmadr_end(vgpu),
			vgpu_hidden_gmadr_base(vgpu),
			vgpu_hidden_gmadr_end(vgpu));
	return ret;
}

static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	int op_size = (cmd_length(s) - 3) * sizeof(u32);
	int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
	unsigned long gma, gma_low, gma_high;
	u32 valid_len = CMD_LEN(2);
	int ret = 0;

	/* check ppgtt */
	if (!(cmd_val(s, 0) & (1 << 22)))
		return 0;

	/* check if QWORD */
	if (DWORD_FIELD(0, 21, 21))
		valid_len++;
	ret = gvt_check_valid_cmd_length(cmd_length(s),
			valid_len);
	if (ret)
		return ret;

	gma = cmd_val(s, 2) & GENMASK(31, 2);

	if (gmadr_bytes == 8) {
		gma_low = cmd_val(s, 1) & GENMASK(31, 2);
		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
		gma = (gma_high << 32) | gma_low;
		core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
	}
	ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
	return ret;
}

static inline int unexpected_cmd(struct parser_exec_state *s)
{
	struct intel_vgpu *vgpu = s->vgpu;

	gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);

	return -EBADRQC;
}

static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
{
	return unexpected_cmd(s);
}

static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
{
	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
	int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
			sizeof(u32);
	unsigned long gma, gma_high;
	u32 valid_len = CMD_LEN(1);
	int ret = 0;

	if (!(cmd_val(s, 0) & (1 << 22)))
		return ret;

	/* check if QWORD */
	if (DWORD_FIELD(0, 20, 19) == 1)
		valid_len += 8;
	ret = gvt_check_valid_cmd_length(cmd_length(s),
			valid_len);
	if (ret)
		return ret;

	gma = cmd_val(s, 1) & GENMASK(31, 2);
	if (gmadr_bytes == 8) {
		gma_high = cmd_val(s, 2) & GENMASK(15, 0);
		gma = (gma_high << 32) | gma;
	}
	ret = cmd_address_audit(s, gma, op_size, false);
	return ret;
}
1616
1617static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
1618{
1619	return unexpected_cmd(s);
1620}
1621
1622static int cmd_handler_mi_clflush(struct parser_exec_state *s)
1623{
1624	return unexpected_cmd(s);
1625}
1626
1627static int cmd_handler_mi_conditional_batch_buffer_end(
1628		struct parser_exec_state *s)
1629{
1630	return unexpected_cmd(s);
1631}
1632
1633static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
1634{
1635	return unexpected_cmd(s);
1636}
1637
1638static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
1639{
1640	int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1641	unsigned long gma;
1642	bool index_mode = false;
1643	int ret = 0;
1644	u32 hws_pga, val;
1645	u32 valid_len = CMD_LEN(2);
1646
1647	ret = gvt_check_valid_cmd_length(cmd_length(s),
1648			valid_len);
1649	if (ret) {
1650		/* re-check assuming a QWord write (one extra DWord) */
1651		ret = gvt_check_valid_cmd_length(cmd_length(s),
1652			++valid_len);
1653		return ret;
1654	}
1655
1656	/* Check post-sync and ppgtt bit */
1657	if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
1658		gma = cmd_val(s, 1) & GENMASK(31, 3);
1659		if (gmadr_bytes == 8)
1660			gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32;
1661		/* Store Data Index */
1662		if (cmd_val(s, 0) & (1 << 21))
1663			index_mode = true;
1664		ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
1665		if (ret)
1666			return ret;
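		/*
		 * In index mode the address is an offset into the HW status
		 * page; patch it to the absolute GGTT address of the vGPU's
		 * status page and clear the index bit (bit 21) so the value
		 * is treated as a plain GGTT address.
		 */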
1667		if (index_mode) {
1668			hws_pga = s->vgpu->hws_pga[s->ring_id];
1669			gma = hws_pga + gma;
1670			patch_value(s, cmd_ptr(s, 1), gma);
1671			val = cmd_val(s, 0) & (~(1 << 21));
1672			patch_value(s, cmd_ptr(s, 0), val);
1673		}
1674	}
1675	/* Check notify bit */
1676	if ((cmd_val(s, 0) & (1 << 8)))
1677		set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
1678				s->workload->pending_events);
1679	return ret;
1680}
1681
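/*
 * An MI_BATCH_BUFFER_START issued from the ring selects the address
 * space of the batch buffer it jumps to: when the address-space bit
 * is set, the batch buffer lives in PPGTT rather than the global GTT.
 */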
1682static void addr_type_update_snb(struct parser_exec_state *s)
1683{
1684	if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
1685			(BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
1686		s->buf_addr_type = PPGTT_BUFFER;
1687	}
1688}
1689
1690
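/*
 * Copy [gma, end_gma) from guest graphics memory into a host buffer,
 * splitting the copy at guest page boundaries. E.g. gma 0x1ff0 and
 * end_gma 0x2008 is copied as 0x10 bytes up to the page boundary and
 * then 8 bytes from the following page. Returns the number of bytes
 * copied, or a negative errno for an invalid graphics address.
 */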
1691static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
1692		unsigned long gma, unsigned long end_gma, void *va)
1693{
1694	unsigned long copy_len, offset;
1695	unsigned long len = 0;
1696	unsigned long gpa;
1697
1698	while (gma != end_gma) {
1699		gpa = intel_vgpu_gma_to_gpa(mm, gma);
1700		if (gpa == INTEL_GVT_INVALID_ADDR) {
1701			gvt_vgpu_err("invalid gma address: %lx\n", gma);
1702			return -EFAULT;
1703		}
1704
1705		offset = gma & (I915_GTT_PAGE_SIZE - 1);
1706
1707		copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
1708			I915_GTT_PAGE_SIZE - offset : end_gma - gma;
1709
1710		intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
1711
1712		len += copy_len;
1713		gma += copy_len;
1714	}
1715	return len;
1716}
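/*
 * A minimal usage sketch (it mirrors the call sites below): fetch a
 * single command DWord from a guest buffer through the given mm:
 *
 *	u32 cmd;
 *
 *	if (copy_gma_to_hva(s->vgpu, mm, gma, gma + 4, &cmd) < 0)
 *		return -EFAULT;
 */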
1717
1718
1719/*
1720 * Check whether a batch buffer needs to be scanned. Currently
1721 * the only criterion is privilege.
1722 */
1723static int batch_buffer_needs_scan(struct parser_exec_state *s)
1724{
1725	/* PPGTT (bit 8) batches are non-privileged; skip unless scanning is enabled for this ring */
1726	if (cmd_val(s, 0) & (1 << 8) &&
1727			!(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
1728		return 0;
1729	return 1;
1730}
1731
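/*
 * Walk the guest batch buffer command by command until an
 * MI_BATCH_BUFFER_END or a chained (non-second-level)
 * MI_BATCH_BUFFER_START is found, accumulating the total size in
 * bytes and recording the offset of the terminating command.
 */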
1732static int find_bb_size(struct parser_exec_state *s,
1733			unsigned long *bb_size,
1734			unsigned long *bb_end_cmd_offset)
1735{
1736	unsigned long gma = 0;
1737	const struct cmd_info *info;
1738	u32 cmd_len = 0;
1739	bool bb_end = false;
1740	struct intel_vgpu *vgpu = s->vgpu;
1741	u32 cmd;
1742	struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
1743		s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
1744
1745	*bb_size = 0;
1746	*bb_end_cmd_offset = 0;
1747
1748	/* get the start gm address of the batch buffer */
1749	gma = get_gma_bb_from_cmd(s, 1);
1750	if (gma == INTEL_GVT_INVALID_ADDR)
1751		return -EFAULT;
1752
1753	cmd = cmd_val(s, 0);
1754	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1755	if (info == NULL) {
1756		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
1757				cmd, get_opcode(cmd, s->ring_id),
1758				(s->buf_addr_type == PPGTT_BUFFER) ?
1759				"ppgtt" : "ggtt", s->ring_id, s->workload);
1760		return -EBADRQC;
1761	}
1762	do {
1763		if (copy_gma_to_hva(s->vgpu, mm,
1764				gma, gma + 4, &cmd) < 0)
1765			return -EFAULT;
1766		info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1767		if (info == NULL) {
1768			gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
1769				cmd, get_opcode(cmd, s->ring_id),
1770				(s->buf_addr_type == PPGTT_BUFFER) ?
1771				"ppgtt" : "ggtt", s->ring_id, s->workload);
1772			return -EBADRQC;
1773		}
1774
1775		if (info->opcode == OP_MI_BATCH_BUFFER_END) {
1776			bb_end = true;
1777		} else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
1778			if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)
1779				/* chained batch buffer */
1780				bb_end = true;
1781		}
1782
1783		if (bb_end)
1784			*bb_end_cmd_offset = *bb_size;
1785
1786		cmd_len = get_cmd_length(info, cmd) << 2;
1787		*bb_size += cmd_len;
1788		gma += cmd_len;
1789	} while (!bb_end);
1790
1791	return 0;
1792}
1793
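/*
 * Re-check that the command at the recorded end offset of the shadow
 * copy still terminates the batch buffer; a guest modifying the
 * buffer between sizing and copying could otherwise smuggle extra
 * commands past the scanner.
 */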
1794static int audit_bb_end(struct parser_exec_state *s, void *va)
1795{
1796	struct intel_vgpu *vgpu = s->vgpu;
1797	u32 cmd = *(u32 *)va;
1798	const struct cmd_info *info;
1799
1800	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1801	if (info == NULL) {
1802		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
1803			cmd, get_opcode(cmd, s->ring_id),
1804			(s->buf_addr_type == PPGTT_BUFFER) ?
1805			"ppgtt" : "ggtt", s->ring_id, s->workload);
1806		return -EBADRQC;
1807	}
1808
1809	if ((info->opcode == OP_MI_BATCH_BUFFER_END) ||
1810	    ((info->opcode == OP_MI_BATCH_BUFFER_START) &&
1811	     (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)))
1812		return 0;
1813
1814	return -EBADRQC;
1815}
1816
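/*
 * Copy the guest batch buffer into a host shadow gem object so the
 * scanner can safely parse (and, where needed, patch) the copy
 * instead of the guest-writable original.
 */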
1817static int perform_bb_shadow(struct parser_exec_state *s)
1818{
1819	struct intel_vgpu *vgpu = s->vgpu;
1820	struct intel_vgpu_shadow_bb *bb;
1821	unsigned long gma = 0;
1822	unsigned long bb_size;
1823	unsigned long bb_end_cmd_offset;
1824	int ret = 0;
1825	struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
1826		s->vgpu->gtt.ggtt_mm : s->workload->shadow_mm;
1827	unsigned long start_offset = 0;
1828
1829	/* get the start gm address of the batch buffer */
1830	gma = get_gma_bb_from_cmd(s, 1);
1831	if (gma == INTEL_GVT_INVALID_ADDR)
1832		return -EFAULT;
1833
1834	ret = find_bb_size(s, &bb_size, &bb_end_cmd_offset);
1835	if (ret)
1836		return ret;
1837
1838	bb = kzalloc(sizeof(*bb), GFP_KERNEL);
1839	if (!bb)
1840		return -ENOMEM;
1841
1842	bb->ppgtt = (s->buf_addr_type != GTT_BUFFER);
1843
1844	/* start_offset stores the offset of the batch buffer's start gma
1845	 * relative to the page boundary, so for a non-privileged batch
1846	 * buffer the shadowed gem object holds exactly the same page
1847	 * layout as the original gem object. This is for the convenience
1848	 * of replacing the whole non-privileged batch buffer page with
1849	 * this shadowed one in the PPGTT at the same gma address (this
1850	 * replacing action is not implemented yet, but may be necessary
1851	 * in the future).
1852	 * For a privileged batch buffer, we just change the start gma
1853	 * address to that of the shadowed page.
1854	 */
1855	if (bb->ppgtt)
1856		start_offset = gma & ~I915_GTT_PAGE_MASK;
1857
1858	bb->obj = i915_gem_object_create_shmem(s->vgpu->gvt->dev_priv,
1859					       round_up(bb_size + start_offset,
1860							PAGE_SIZE));
1861	if (IS_ERR(bb->obj)) {
1862		ret = PTR_ERR(bb->obj);
1863		goto err_free_bb;
1864	}
1865
1866	ret = i915_gem_object_prepare_write(bb->obj, &bb->clflush);
1867	if (ret)
1868		goto err_free_obj;
1869
1870	bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
1871	if (IS_ERR(bb->va)) {
1872		ret = PTR_ERR(bb->va);
1873		goto err_finish_shmem_access;
1874	}
1875
1876	if (bb->clflush & CLFLUSH_BEFORE) {
1877		drm_clflush_virt_range(bb->va, bb->obj->base.size);
1878		bb->clflush &= ~CLFLUSH_BEFORE;
1879	}
1880
1881	ret = copy_gma_to_hva(s->vgpu, mm,
1882			      gma, gma + bb_size,
1883			      bb->va + start_offset);
1884	if (ret < 0) {
1885		gvt_vgpu_err("fail to copy guest batch buffer\n");
1886		ret = -EFAULT;
1887		goto err_unmap;
1888	}
1889
1890	ret = audit_bb_end(s, bb->va + start_offset + bb_end_cmd_offset);
1891	if (ret)
1892		goto err_unmap;
1893
1894	INIT_LIST_HEAD(&bb->list);
1895	list_add(&bb->list, &s->workload->shadow_bb);
1896
1897	bb->accessing = true;
1898	bb->bb_start_cmd_va = s->ip_va;
1899
1900	if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
1901		bb->bb_offset = s->ip_va - s->rb_va;
1902	else
1903		bb->bb_offset = 0;
1904
1905	/*
1906	 * ip_va saves the virtual address of the shadow batch buffer, while
1907	 * ip_gma saves the graphics address of the original batch buffer.
1908	 * As the shadow batch buffer is just a copy of the original one,
1909	 * it is correct to use the shadow batch buffer's va and the original
1910	 * batch buffer's gma in pair. After all, we don't want to pin the
1911	 * shadow buffer here (too early).
1912	 */
1913	s->ip_va = bb->va + start_offset;
1914	s->ip_gma = gma;
1915	return 0;
1916err_unmap:
1917	i915_gem_object_unpin_map(bb->obj);
1918err_finish_shmem_access:
1919	i915_gem_object_finish_access(bb->obj);
1920err_free_obj:
1921	i915_gem_object_put(bb->obj);
1922err_free_bb:
1923	kfree(bb);
1924	return ret;
1925}
1926
1927static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
1928{
1929	bool second_level;
1930	int ret = 0;
1931	struct intel_vgpu *vgpu = s->vgpu;
1932
1933	if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
1934		gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
1935		return -EFAULT;
1936	}
1937
1938	second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
1939	if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
1940		gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
1941		return -EFAULT;
1942	}
1943
1944	s->saved_buf_addr_type = s->buf_addr_type;
1945	addr_type_update_snb(s);
1946	if (s->buf_type == RING_BUFFER_INSTRUCTION) {
1947		s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32);
1948		s->buf_type = BATCH_BUFFER_INSTRUCTION;
1949	} else if (second_level) {
1950		s->buf_type = BATCH_BUFFER_2ND_LEVEL;
1951		s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32);
1952		s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32);
1953	}
1954
1955	if (batch_buffer_needs_scan(s)) {
1956		ret = perform_bb_shadow(s);
1957		if (ret < 0)
1958			gvt_vgpu_err("invalid shadow batch buffer\n");
1959	} else {
1960		/* emulate a batch buffer end so the return is handled correctly */
1961		ret = cmd_handler_mi_batch_buffer_end(s);
1962		if (ret < 0)
1963			return ret;
1964	}
1965	return ret;
1966}
1967
1968static int mi_noop_index;
1969
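/*
 * Each entry is positional: name, opcode, length-decoding flags, the
 * rings and then the devices the command is valid on, a bitmap of the
 * DWords that carry graphics addresses (ADDR_FIX_*), the length in
 * DWords, an optional handler, and, for F_LEN_VAR_FIXED commands, the
 * expected length checked via CMD_LEN().
 */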
1970static const struct cmd_info cmd_info[] = {
1971	{"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
1972
1973	{"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
1974		0, 1, NULL},
1975
1976	{"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
1977		0, 1, cmd_handler_mi_user_interrupt},
1978
1979	{"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
1980		D_ALL, 0, 1, cmd_handler_mi_wait_for_event},
1981
1982	{"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
1983
1984	{"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1985		NULL},
1986
1987	{"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1988		NULL},
1989
1990	{"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1991		NULL},
1992
1993	{"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1994		NULL},
1995
1996	{"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
1997		D_ALL, 0, 1, NULL},
1998
1999	{"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
2000		F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
2001		cmd_handler_mi_batch_buffer_end},
2002
2003	{"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
2004		0, 1, NULL},
2005
2006	{"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
2007		NULL},
2008
2009	{"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
2010		D_ALL, 0, 1, NULL},
2011
2012	{"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
2013		NULL},
2014
2015	{"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
2016		NULL},
2017
2018	{"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR,
2019		R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
2020
2021	{"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR | F_LEN_VAR_FIXED,
2022		R_ALL, D_ALL, 0, 8, NULL, CMD_LEN(1)},
2023
2024	{"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
2025
2026	{"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS,
2027		D_ALL, 0, 8, NULL, CMD_LEN(0)},
2028
2029	{"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL,
2030		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, 0, 8,
2031		NULL, CMD_LEN(0)},
2032
2033	{"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT,
2034		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS, ADDR_FIX_1(2),
2035		8, cmd_handler_mi_semaphore_wait, CMD_LEN(2)},
2036
2037	{"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
2038		ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
2039
2040	{"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL,
2041		0, 8, cmd_handler_mi_store_data_index},
2042
2043	{"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
2044		D_ALL, 0, 8, cmd_handler_lri},
2045
2046	{"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
2047		cmd_handler_mi_update_gtt},
2048
2049	{"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM,
2050		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
2051		cmd_handler_srm, CMD_LEN(2)},
2052
2053	{"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
2054		cmd_handler_mi_flush_dw},
2055
2056	{"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
2057		10, cmd_handler_mi_clflush},
2058
2059	{"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT,
2060		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(1), 6,
2061		cmd_handler_mi_report_perf_count, CMD_LEN(2)},
2062
2063	{"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM,
2064		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
2065		cmd_handler_lrm, CMD_LEN(2)},
2066
2067	{"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG,
2068		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, 0, 8,
2069		cmd_handler_lrr, CMD_LEN(1)},
2070
2071	{"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM,
2072		F_LEN_VAR | F_LEN_VAR_FIXED, R_RCS, D_ALL, 0,
2073		8, NULL, CMD_LEN(2)},
2074
2075	{"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR | F_LEN_VAR_FIXED,
2076		R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL, CMD_LEN(2)},
2077
2078	{"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
2079		ADDR_FIX_1(2), 8, NULL},
2080
2081	{"MI_OP_2E", OP_MI_2E, F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_BDW_PLUS,
2082		ADDR_FIX_2(1, 2), 8, cmd_handler_mi_op_2e, CMD_LEN(3)},
2083
2084	{"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
2085		8, cmd_handler_mi_op_2f},
2086
2087	{"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
2088		F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
2089		cmd_handler_mi_batch_buffer_start},
2090
2091	{"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
2092		F_LEN_VAR | F_LEN_VAR_FIXED, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
2093		cmd_handler_mi_conditional_batch_buffer_end, CMD_LEN(2)},
2094
2095	{"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
2096		R_RCS | R_BCS, D_ALL, 0, 2, NULL},
2097
2098	{"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL,
2099		ADDR_FIX_2(4, 7), 8, NULL},
2100
2101	{"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL,
2102		0, 8, NULL},
2103
2104	{"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT,
2105		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
2106
2107	{"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
2108
2109	{"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL,
2110		0, 8, NULL},
2111
2112	{"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL,
2113		ADDR_FIX_1(3), 8, NULL},
2114
2115	{"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS,
2116		D_ALL, 0, 8, NULL},
2117
2118	{"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL,
2119		ADDR_FIX_1(4), 8, NULL},
2120
2121	{"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
2122		ADDR_FIX_2(4, 5), 8, NULL},
2123
2124	{"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
2125		ADDR_FIX_1(4), 8, NULL},
2126
2127	{"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL,
2128		ADDR_FIX_2(4, 7), 8, NULL},
2129
2130	{"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS,
2131		D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
2132
2133	{"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
2134
2135	{"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS,
2136		D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL},
2137
2138	{"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR,
2139		R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
2140
2141	{"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
2142		OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT,
2143		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
2144
2145	{"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS,
2146		D_ALL, ADDR_FIX_1(4), 8, NULL},
2147
2148	{"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT,
2149		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
2150
2151	{"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS,
2152		D_ALL, ADDR_FIX_1(4), 8, NULL},
2153
2154	{"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS,
2155		D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
2156
2157	{"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT,
2158		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
2159
2160	{"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
2161		OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT,
2162		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
2163
2164	{"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL,
2165		ADDR_FIX_2(4, 5), 8, NULL},
2166
2167	{"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE,
2168		F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
2169
2170	{"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
2171		OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
2172		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2173
2174	{"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
2175		OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
2176		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2177
2178	{"3DSTATE_BLEND_STATE_POINTERS",
2179		OP_3DSTATE_BLEND_STATE_POINTERS,
2180		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2181
2182	{"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
2183		OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
2184		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2185
2186	{"3DSTATE_BINDING_TABLE_POINTERS_VS",
2187		OP_3DSTATE_BINDING_TABLE_POINTERS_VS,
2188		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2189
2190	{"3DSTATE_BINDING_TABLE_POINTERS_HS",
2191		OP_3DSTATE_BINDING_TABLE_POINTERS_HS,
2192		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2193
2194	{"3DSTATE_BINDING_TABLE_POINTERS_DS",
2195		OP_3DSTATE_BINDING_TABLE_POINTERS_DS,
2196		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2197
2198	{"3DSTATE_BINDING_TABLE_POINTERS_GS",
2199		OP_3DSTATE_BINDING_TABLE_POINTERS_GS,
2200		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2201
2202	{"3DSTATE_BINDING_TABLE_POINTERS_PS",
2203		OP_3DSTATE_BINDING_TABLE_POINTERS_PS,
2204		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2205
2206	{"3DSTATE_SAMPLER_STATE_POINTERS_VS",
2207		OP_3DSTATE_SAMPLER_STATE_POINTERS_VS,
2208		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2209
2210	{"3DSTATE_SAMPLER_STATE_POINTERS_HS",
2211		OP_3DSTATE_SAMPLER_STATE_POINTERS_HS,
2212		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2213
2214	{"3DSTATE_SAMPLER_STATE_POINTERS_DS",
2215		OP_3DSTATE_SAMPLER_STATE_POINTERS_DS,
2216		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2217
2218	{"3DSTATE_SAMPLER_STATE_POINTERS_GS",
2219		OP_3DSTATE_SAMPLER_STATE_POINTERS_GS,
2220		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2221
2222	{"3DSTATE_SAMPLER_STATE_POINTERS_PS",
2223		OP_3DSTATE_SAMPLER_STATE_POINTERS_PS,
2224		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2225
2226	{"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL,
2227		0, 8, NULL},
2228
2229	{"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL,
2230		0, 8, NULL},
2231
2232	{"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL,
2233		0, 8, NULL},
2234
2235	{"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL,
2236		0, 8, NULL},
2237
2238	{"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS,
2239		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2240
2241	{"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS,
2242		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2243
2244	{"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS,
2245		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2246
2247	{"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS,
2248		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2249
2250	{"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS,
2251		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2252
2253	{"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS,
2254		F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
2255
2256	{"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS,
2257		F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
2258
2259	{"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS,
2260		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2261
2262	{"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS,
2263		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2264
2265	{"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS,
2266		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2267
2268	{"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS,
2269		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2270
2271	{"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS,
2272		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2273
2274	{"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS,
2275		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2276
2277	{"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS,
2278		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2279
2280	{"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS,
2281		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2282
2283	{"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS,
2284		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2285
2286	{"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS,
2287		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2288
2289	{"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS,
2290		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2291
2292	{"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS,
2293		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2294
2295	{"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS,
2296		F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2297
2298	{"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS,
2299		D_BDW_PLUS, 0, 8, NULL},
2300
2301	{"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2302		NULL},
2303
2304	{"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS,
2305		D_BDW_PLUS, 0, 8, NULL},
2306
2307	{"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS,
2308		D_BDW_PLUS, 0, 8, NULL},
2309
2310	{"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2311		8, NULL},
2312
2313	{"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR,
2314		R_RCS, D_BDW_PLUS, 0, 8, NULL},
2315
2316	{"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2317		8, NULL},
2318
2319	{"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2320		NULL},
2321
2322	{"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2323		NULL},
2324
2325	{"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2326		NULL},
2327
2328	{"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS,
2329		D_BDW_PLUS, 0, 8, NULL},
2330
2331	{"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR,
2332		R_RCS, D_ALL, 0, 8, NULL},
2333
2334	{"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS,
2335		D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL},
2336
2337	{"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST,
2338		R_RCS, D_ALL, 0, 1, NULL},
2339
2340	{"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2341
2342	{"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR,
2343		R_RCS, D_ALL, 0, 8, NULL},
2344
2345	{"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS,
2346		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2347
2348	{"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2349
2350	{"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2351
2352	{"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2353
2354	{"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS,
2355		D_BDW_PLUS, 0, 8, NULL},
2356
2357	{"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS,
2358		D_BDW_PLUS, 0, 8, NULL},
2359
2360	{"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS,
2361		D_ALL, 0, 8, NULL},
2362
2363	{"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS,
2364		D_BDW_PLUS, 0, 8, NULL},
2365
2366	{"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS,
2367		D_BDW_PLUS, 0, 8, NULL},
2368
2369	{"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2370
2371	{"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2372
2373	{"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2374
2375	{"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS,
2376		D_ALL, 0, 8, NULL},
2377
2378	{"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2379
2380	{"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2381
2382	{"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR,
2383		R_RCS, D_ALL, 0, 8, NULL},
2384
2385	{"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0,
2386		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2387
2388	{"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL,
2389		0, 8, NULL},
2390
2391	{"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS,
2392		D_ALL, ADDR_FIX_1(2), 8, NULL},
2393
2394	{"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET,
2395		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2396
2397	{"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN,
2398		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2399
2400	{"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS,
2401		D_ALL, 0, 8, NULL},
2402
2403	{"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS,
2404		D_ALL, 0, 8, NULL},
2405
2406	{"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS,
2407		D_ALL, 0, 8, NULL},
2408
2409	{"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1,
2410		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2411
2412	{"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS,
2413		D_BDW_PLUS, 0, 8, NULL},
2414
2415	{"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS,
2416		D_ALL, ADDR_FIX_1(2), 8, NULL},
2417
2418	{"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR,
2419		R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL},
2420
2421	{"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR,
2422		R_RCS, D_ALL, 0, 8, NULL},
2423
2424	{"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
2425		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2426
2427	{"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS,
2428		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2429
2430	{"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS,
2431		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2432
2433	{"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
2434		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2435
2436	{"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
2437		F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2438
2439	{"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR,
2440		R_RCS, D_ALL, 0, 8, NULL},
2441
2442	{"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS,
2443		D_ALL, 0, 9, NULL},
2444
2445	{"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2446		ADDR_FIX_2(2, 4), 8, NULL},
2447
2448	{"3DSTATE_BINDING_TABLE_POOL_ALLOC",
2449		OP_3DSTATE_BINDING_TABLE_POOL_ALLOC,
2450		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2451
2452	{"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC,
2453		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2454
2455	{"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
2456		OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC,
2457		F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2458
2459	{"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS,
2460		D_BDW_PLUS, 0, 8, NULL},
2461
2462	{"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL,
2463		ADDR_FIX_1(2), 8, cmd_handler_pipe_control},
2464
2465	{"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2466
2467	{"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0,
2468		1, NULL},
2469
2470	{"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL,
2471		ADDR_FIX_1(1), 8, NULL},
2472
2473	{"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2474
2475	{"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2476		ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL},
2477
2478	{"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
2479		ADDR_FIX_1(1), 8, NULL},
2480
2481	{"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2482
2483	{"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2484
2485	{"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2486		0, 8, NULL},
2487
2488	{"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR, R_RCS,
2489		D_SKL_PLUS, 0, 8, NULL},
2490
2491	{"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
2492		F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2493
2494	{"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL,
2495		0, 16, NULL},
2496
2497	{"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
2498		0, 16, NULL},
2499
2500	{"MEDIA_POOL_STATE", OP_MEDIA_POOL_STATE, F_LEN_VAR, R_RCS, D_ALL,
2501		0, 16, NULL},
2502
2503	{"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2504
2505	{"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
2506		0, 16, NULL},
2507
2508	{"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL,
2509		0, 16, NULL},
2510
2511	{"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2512		0, 16, NULL},
2513
2514	{"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2515		0, 8, NULL},
2516
2517	{"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16,
2518		NULL},
2519
2520	{"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45,
2521		F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
2522
2523	{"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR,
2524		R_VCS, D_ALL, 0, 12, NULL},
2525
2526	{"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR,
2527		R_VCS, D_ALL, 0, 12, NULL},
2528
2529	{"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR,
2530		R_VCS, D_BDW_PLUS, 0, 12, NULL},
2531
2532	{"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE,
2533		F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2534
2535	{"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE,
2536		F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL},
2537
2538	{"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2539
2540	{"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR,
2541		R_VCS, D_ALL, 0, 12, NULL},
2542
2543	{"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR,
2544		R_VCS, D_ALL, 0, 12, NULL},
2545
2546	{"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR,
2547		R_VCS, D_ALL, 0, 12, NULL},
2548
2549	{"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR,
2550		R_VCS, D_ALL, 0, 12, NULL},
2551
2552	{"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR,
2553		R_VCS, D_ALL, 0, 12, NULL},
2554
2555	{"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR,
2556		R_VCS, D_ALL, 0, 12, NULL},
2557
2558	{"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR,
2559		R_VCS, D_ALL, 0, 6, NULL},
2560
2561	{"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR,
2562		R_VCS, D_ALL, 0, 12, NULL},
2563
2564	{"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR,
2565		R_VCS, D_ALL, 0, 12, NULL},
2566
2567	{"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR,
2568		R_VCS, D_ALL, 0, 12, NULL},
2569
2570	{"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR,
2571		R_VCS, D_ALL, 0, 12, NULL},
2572
2573	{"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR,
2574		R_VCS, D_ALL, 0, 12, NULL},
2575
2576	{"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR,
2577		R_VCS, D_ALL, 0, 12, NULL},
2578
2579	{"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR,
2580		R_VCS, D_ALL, 0, 12, NULL},
2581	{"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR,
2582		R_VCS, D_ALL, 0, 12, NULL},
2583
2584	{"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR,
2585		R_VCS, D_ALL, 0, 12, NULL},
2586
2587	{"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR,
2588		R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL},
2589
2590	{"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR,
2591		R_VCS, D_ALL, 0, 12, NULL},
2592
2593	{"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR,
2594		R_VCS, D_ALL, 0, 12, NULL},
2595
2596	{"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR,
2597		R_VCS, D_ALL, 0, 12, NULL},
2598
2599	{"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR,
2600		R_VCS, D_ALL, 0, 12, NULL},
2601
2602	{"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR,
2603		R_VCS, D_ALL, 0, 12, NULL},
2604
2605	{"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR,
2606		R_VCS, D_ALL, 0, 12, NULL},
2607
2608	{"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR,
2609		R_VCS, D_ALL, 0, 12, NULL},
2610
2611	{"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR,
2612		R_VCS, D_ALL, 0, 12, NULL},
2613
2614	{"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR,
2615		R_VCS, D_ALL, 0, 12, NULL},
2616
2617	{"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR,
2618		R_VCS, D_ALL, 0, 12, NULL},
2619
2620	{"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR,
2621		R_VCS, D_ALL, 0, 12, NULL},
2622
2623	{"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL,
2624		0, 16, NULL},
2625
2626	{"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2627
2628	{"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2629
2630	{"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR,
2631		R_VCS, D_ALL, 0, 12, NULL},
2632
2633	{"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR,
2634		R_VCS, D_ALL, 0, 12, NULL},
2635
2636	{"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR,
2637		R_VCS, D_ALL, 0, 12, NULL},
2638
2639	{"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL},
2640
2641	{"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL,
2642		0, 12, NULL},
2643
2644	{"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
2645		0, 12, NULL},
2646};
2647
2648static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
2649{
2650	hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
2651}
2652
2653/* call the cmd handler, and advance ip */
2654static int cmd_parser_exec(struct parser_exec_state *s)
2655{
2656	struct intel_vgpu *vgpu = s->vgpu;
2657	const struct cmd_info *info;
2658	u32 cmd;
2659	int ret = 0;
2660
2661	cmd = cmd_val(s, 0);
2662
2663	/* fastpath for MI_NOOP */
2664	if (cmd == MI_NOOP)
2665		info = &cmd_info[mi_noop_index];
2666	else
2667		info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
2668
2669	if (info == NULL) {
2670		gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
2671				cmd, get_opcode(cmd, s->ring_id),
2672				(s->buf_addr_type == PPGTT_BUFFER) ?
2673				"ppgtt" : "ggtt", s->ring_id, s->workload);
2674		return -EBADRQC;
2675	}
2676
2677	s->info = info;
2678
2679	trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
2680			  cmd_length(s), s->buf_type, s->buf_addr_type,
2681			  s->workload, info->name);
2682
2683	if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
2684		ret = gvt_check_valid_cmd_length(cmd_length(s),
2685			info->valid_len);
2686		if (ret)
2687			return ret;
2688	}
2689
2690	if (info->handler) {
2691		ret = info->handler(s);
2692		if (ret < 0) {
2693			gvt_vgpu_err("%s handler error\n", info->name);
2694			return ret;
2695		}
2696	}
2697
2698	if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
2699		ret = cmd_advance_default(s);
2700		if (ret) {
2701			gvt_vgpu_err("%s IP advance error\n", info->name);
2702			return ret;
2703		}
2704	}
2705	return 0;
2706}
2707
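/*
 * The valid region runs from gma_head to gma_tail and may wrap around
 * the ring, e.g. with head 0x3000 and tail 0x1000 the addresses
 * strictly between 0x1000 and 0x3000 are out of range, while those
 * at or below the tail and at or above the head are still inside the
 * window.
 */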
2708static inline bool gma_out_of_range(unsigned long gma,
2709		unsigned long gma_head, unsigned long gma_tail)
2710{
2711	if (gma_tail >= gma_head)
2712		return (gma < gma_head) || (gma > gma_tail);
2713	else
2714		return (gma > gma_tail) && (gma < gma_head);
2715}
2716
2717/* Keep a consistent return type, e.g. -EBADRQC for an unknown
2718 * cmd, -EFAULT for an invalid address, -EPERM for a non-privileged
2719 * access. These values later serve as input to the VM health status.
2720 */
2721static int command_scan(struct parser_exec_state *s,
2722		unsigned long rb_head, unsigned long rb_tail,
2723		unsigned long rb_start, unsigned long rb_len)
2724{
2725
2726	unsigned long gma_head, gma_tail, gma_bottom;
2727	int ret = 0;
2728	struct intel_vgpu *vgpu = s->vgpu;
2729
2730	gma_head = rb_start + rb_head;
2731	gma_tail = rb_start + rb_tail;
2732	gma_bottom = rb_start + rb_len;
2733
2734	while (s->ip_gma != gma_tail) {
2735		if (s->buf_type == RING_BUFFER_INSTRUCTION) {
2736			if (s->ip_gma < rb_start ||
2737				s->ip_gma >= gma_bottom) {
2738				gvt_vgpu_err("ip_gma %lx out of ring scope "
2739					"(base: 0x%lx, bottom: 0x%lx)\n",
2740					s->ip_gma, rb_start,
2741					gma_bottom);
2742				parser_exec_state_dump(s);
2743				return -EFAULT;
2744			}
2745			if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
2746				gvt_vgpu_err("ip_gma %lx out of range, "
2747					"base 0x%lx head 0x%lx tail 0x%lx\n",
2748					s->ip_gma, rb_start,
2749					rb_head, rb_tail);
2750				parser_exec_state_dump(s);
2751				break;
2752			}
2753		}
2754		ret = cmd_parser_exec(s);
2755		if (ret) {
2756			gvt_vgpu_err("cmd parser error\n");
2757			parser_exec_state_dump(s);
2758			break;
2759		}
2760	}
2761
2762	return ret;
2763}
2764
2765static int scan_workload(struct intel_vgpu_workload *workload)
2766{
2767	unsigned long gma_head, gma_tail;
2768	struct parser_exec_state s;
2769	int ret = 0;
2770
2771	/* ring base is page aligned */
2772	if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
2773		return -EINVAL;
2774
2775	gma_head = workload->rb_start + workload->rb_head;
2776	gma_tail = workload->rb_start + workload->rb_tail;
2778
2779	s.buf_type = RING_BUFFER_INSTRUCTION;
2780	s.buf_addr_type = GTT_BUFFER;
2781	s.vgpu = workload->vgpu;
2782	s.ring_id = workload->ring_id;
2783	s.ring_start = workload->rb_start;
2784	s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2785	s.ring_head = gma_head;
2786	s.ring_tail = gma_tail;
2787	s.rb_va = workload->shadow_ring_buffer_va;
2788	s.workload = workload;
2789	s.is_ctx_wa = false;
2790
2791	if ((bypass_scan_mask & (1 << workload->ring_id)) ||
2792		gma_head == gma_tail)
2793		return 0;
2794
2795	ret = ip_gma_set(&s, gma_head);
2796	if (ret)
2797		goto out;
2798
2799	ret = command_scan(&s, workload->rb_head, workload->rb_tail,
2800		workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));
2801
2802out:
2803	return ret;
2804}
2805
2806static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2807{
2808
2809	unsigned long gma_head, gma_tail, ring_size, ring_tail;
2810	struct parser_exec_state s;
2811	int ret = 0;
2812	struct intel_vgpu_workload *workload = container_of(wa_ctx,
2813				struct intel_vgpu_workload,
2814				wa_ctx);
2815
2816	/* ring base is page aligned */
2817	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
2818					I915_GTT_PAGE_SIZE)))
2819		return -EINVAL;
2820
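	/*
	 * Scan up to and including the MI_BATCH_BUFFER_START (3 DWords)
	 * that combine_wa_ctx() appends right after the indirect ctx.
	 */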
2821	ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(u32);
2822	ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
2823			PAGE_SIZE);
2824	gma_head = wa_ctx->indirect_ctx.guest_gma;
2825	gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
2827
2828	s.buf_type = RING_BUFFER_INSTRUCTION;
2829	s.buf_addr_type = GTT_BUFFER;
2830	s.vgpu = workload->vgpu;
2831	s.ring_id = workload->ring_id;
2832	s.ring_start = wa_ctx->indirect_ctx.guest_gma;
2833	s.ring_size = ring_size;
2834	s.ring_head = gma_head;
2835	s.ring_tail = gma_tail;
2836	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
2837	s.workload = workload;
2838	s.is_ctx_wa = true;
2839
2840	ret = ip_gma_set(&s, gma_head);
2841	if (ret)
2842		goto out;
2843
2844	ret = command_scan(&s, 0, ring_tail,
2845		wa_ctx->indirect_ctx.guest_gma, ring_size);
2846out:
2847	return ret;
2848}
2849
2850static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2851{
2852	struct intel_vgpu *vgpu = workload->vgpu;
2853	struct intel_vgpu_submission *s = &vgpu->submission;
2854	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
2855	void *shadow_ring_buffer_va;
2856	int ring_id = workload->ring_id;
2857	int ret;
2858
2859	guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2860
2861	/* calculate workload ring buffer size */
2862	workload->rb_len = (workload->rb_tail + guest_rb_size -
2863			workload->rb_head) % guest_rb_size;
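	/* e.g. head 0x3000, tail 0x1000, size 0x4000 -> rb_len 0x2000 */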
2864
2865	gma_head = workload->rb_start + workload->rb_head;
2866	gma_tail = workload->rb_start + workload->rb_tail;
2867	gma_top = workload->rb_start + guest_rb_size;
2868
2869	if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
2870		void *p;
2871
2872		/* grow the ring scan buffer if needed */
2873		p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
2874				GFP_KERNEL);
2875		if (!p) {
2876			gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
2877			return -ENOMEM;
2878		}
2879		s->ring_scan_buffer[ring_id] = p;
2880		s->ring_scan_buffer_size[ring_id] = workload->rb_len;
2881	}
2882
2883	shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
2884
2885	/* get shadow ring buffer va */
2886	workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
2887
2888	/* head > tail --> the buffer wraps: copy [head, top) first */
2889	if (gma_head > gma_tail) {
2890		ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
2891				      gma_head, gma_top, shadow_ring_buffer_va);
2892		if (ret < 0) {
2893			gvt_vgpu_err("fail to copy guest ring buffer\n");
2894			return ret;
2895		}
2896		shadow_ring_buffer_va += ret;
2897		gma_head = workload->rb_start;
2898	}
2899
2900	/* then copy [head or start, tail) */
2901	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail,
2902				shadow_ring_buffer_va);
2903	if (ret < 0) {
2904		gvt_vgpu_err("fail to copy guest ring buffer\n");
2905		return ret;
2906	}
2907	return 0;
2908}
2909
2910int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
2911{
2912	int ret;
2913	struct intel_vgpu *vgpu = workload->vgpu;
2914
2915	ret = shadow_workload_ring_buffer(workload);
2916	if (ret) {
2917		gvt_vgpu_err("fail to shadow workload ring_buffer\n");
2918		return ret;
2919	}
2920
2921	ret = scan_workload(workload);
2922	if (ret) {
2923		gvt_vgpu_err("scan workload error\n");
2924		return ret;
2925	}
2926	return 0;
2927}
2928
2929static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2930{
2931	int ctx_size = wa_ctx->indirect_ctx.size;
2932	unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
2933	struct intel_vgpu_workload *workload = container_of(wa_ctx,
2934					struct intel_vgpu_workload,
2935					wa_ctx);
2936	struct intel_vgpu *vgpu = workload->vgpu;
2937	struct drm_i915_gem_object *obj;
2938	int ret = 0;
2939	void *map;
2940
2941	obj = i915_gem_object_create_shmem(workload->vgpu->gvt->dev_priv,
2942					   roundup(ctx_size + CACHELINE_BYTES,
2943						   PAGE_SIZE));
2944	if (IS_ERR(obj))
2945		return PTR_ERR(obj);
2946
2947	/* get the va of the shadow indirect ctx */
2948	map = i915_gem_object_pin_map(obj, I915_MAP_WB);
2949	if (IS_ERR(map)) {
2950		gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
2951		ret = PTR_ERR(map);
2952		goto put_obj;
2953	}
2954
2955	i915_gem_object_lock(obj);
2956	ret = i915_gem_object_set_to_cpu_domain(obj, false);
2957	i915_gem_object_unlock(obj);
2958	if (ret) {
2959		gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
2960		goto unmap_src;
2961	}
2962
2963	ret = copy_gma_to_hva(workload->vgpu,
2964				workload->vgpu->gtt.ggtt_mm,
2965				guest_gma, guest_gma + ctx_size,
2966				map);
2967	if (ret < 0) {
2968		gvt_vgpu_err("fail to copy guest indirect ctx\n");
2969		goto unmap_src;
2970	}
2971
2972	wa_ctx->indirect_ctx.obj = obj;
2973	wa_ctx->indirect_ctx.shadow_va = map;
2974	return 0;
2975
2976unmap_src:
2977	i915_gem_object_unpin_map(obj);
2978put_obj:
2979	i915_gem_object_put(obj);
2980	return ret;
2981}
2982
2983static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2984{
2985	u32 per_ctx_start[CACHELINE_DWORDS] = {0};
2986	unsigned char *bb_start_sva;
2987
2988	if (!wa_ctx->per_ctx.valid)
2989		return 0;
2990
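	/*
	 * 0x18800001 is MI_BATCH_BUFFER_START (opcode 0x31 << 23) with a
	 * DWord length field of 1, chaining into the guest's per-context
	 * buffer.
	 */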
2991	per_ctx_start[0] = 0x18800001;
2992	per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
2993
2994	bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
2995				wa_ctx->indirect_ctx.size;
2996
2997	memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);
2998
2999	return 0;
3000}
3001
3002int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
3003{
3004	int ret;
3005	struct intel_vgpu_workload *workload = container_of(wa_ctx,
3006					struct intel_vgpu_workload,
3007					wa_ctx);
3008	struct intel_vgpu *vgpu = workload->vgpu;
3009
3010	if (wa_ctx->indirect_ctx.size == 0)
3011		return 0;
3012
3013	ret = shadow_indirect_ctx(wa_ctx);
3014	if (ret) {
3015		gvt_vgpu_err("fail to shadow indirect ctx\n");
3016		return ret;
3017	}
3018
3019	combine_wa_ctx(wa_ctx);
3020
3021	ret = scan_wa_ctx(wa_ctx);
3022	if (ret) {
3023		gvt_vgpu_err("scan wa ctx error\n");
3024		return ret;
3025	}
3026
3027	return 0;
3028}
3029
3030static const struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
3031		unsigned int opcode, unsigned long rings)
3032{
3033	const struct cmd_info *info = NULL;
3034	unsigned int ring;
3035
3036	for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
3037		info = find_cmd_entry(gvt, opcode, ring);
3038		if (info)
3039			break;
3040	}
3041	return info;
3042}
3043
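/*
 * Build the per-device command hash table: every cmd_info entry that
 * applies to this device generation is hashed by opcode, and the
 * MI_NOOP index is cached for the fastpath in cmd_parser_exec().
 */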
3044static int init_cmd_table(struct intel_gvt *gvt)
3045{
3046	int i;
3047	struct cmd_entry *e;
3048	const struct cmd_info *info;
3049	unsigned int gen_type;
3050
3051	gen_type = intel_gvt_get_device_type(gvt);
3052
3053	for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
3054		if (!(cmd_info[i].devices & gen_type))
3055			continue;
3056
3057		e = kzalloc(sizeof(*e), GFP_KERNEL);
3058		if (!e)
3059			return -ENOMEM;
3060
3061		e->info = &cmd_info[i];
3062		info = find_cmd_entry_any_ring(gvt,
3063				e->info->opcode, e->info->rings);
3064		if (info) {
3065			gvt_err("%s %s duplicated\n", e->info->name,
3066					info->name);
3067			kfree(e);
3068			return -EEXIST;
3069		}
3070		if (cmd_info[i].opcode == OP_MI_NOOP)
3071			mi_noop_index = i;
3072
3073		INIT_HLIST_NODE(&e->hlist);
3074		add_cmd_entry(gvt, e);
3075		gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
3076				e->info->name, e->info->opcode, e->info->flag,
3077				e->info->devices, e->info->rings);
3078	}
3079	return 0;
3080}
3081
3082static void clean_cmd_table(struct intel_gvt *gvt)
3083{
3084	struct hlist_node *tmp;
3085	struct cmd_entry *e;
3086	int i;
3087
3088	hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
3089		kfree(e);
3090
3091	hash_init(gvt->cmd_table);
3092}
3093
3094void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
3095{
3096	clean_cmd_table(gvt);
3097}
3098
3099int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
3100{
3101	int ret;
3102
3103	ret = init_cmd_table(gvt);
3104	if (ret) {
3105		intel_gvt_clean_cmd_parser(gvt);
3106		return ret;
3107	}
3108	return 0;
3109}