v3.1
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}
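
/**
 * vmw_cmd_cid_check - Check the context id embedded in a command.
 *
 * Verifies that the command's context id refers to a context the caller
 * may use, caching the last validated id in @sw_context so that repeated
 * commands referencing the same context skip the lookup.
 */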
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
		return 0;

	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use context %u\n",
			  (unsigned) cmd->cid);
		return ret;
	}

	sw_context->last_cid = cmd->cid;
	sw_context->cid_valid = true;

	return 0;
}
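
/**
 * vmw_cmd_sid_check - Check and translate a surface id in the command stream.
 *
 * Replaces the user-space surface id pointed to by @sid with the device
 * surface id, caching the last translation in @sw_context.
 */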
static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     uint32_t *sid)
{
	if (*sid == SVGA3D_INVALID_ID)
		return 0;

	if (unlikely(!sw_context->sid_valid ||
		     *sid != sw_context->last_sid)) {
		int real_id;
		int ret = vmw_surface_check(dev_priv, sw_context->tfile,
					    *sid, &real_id);

		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not find or use surface 0x%08x "
				  "address 0x%08lx\n",
				  (unsigned int) *sid,
				  (unsigned long) sid);
			return ret;
		}

		sw_context->last_sid = *sid;
		sw_context->sid_valid = true;
		*sid = real_id;
		sw_context->sid_translation = real_id;
	} else
		*sid = sw_context->sid_translation;

	return 0;
}
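
/**
 * vmw_cmd_set_render_target_check - Verify a SETRENDERTARGET command by
 * checking both its context id and the render-target surface id.
 */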
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
	return ret;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}
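
/**
 * vmw_translate_guest_ptr - Resolve a guest pointer to a DMA buffer.
 *
 * Looks up the DMA buffer behind @ptr->gmrId, records a relocation for the
 * guest pointer and adds the buffer to the validation list if it isn't
 * there already. On success a reference is returned in @vmw_bo_p.
 */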
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	uint32_t cur_validate_node;
	struct ttm_validate_buffer *val_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
	if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
		DRM_ERROR("Max number of DMA buffers per submission"
			  " exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc->index = cur_validate_node;
	if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
		val_buf = &sw_context->val_bufs[cur_validate_node];
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->new_sync_obj_arg = (void *) dev_priv;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		++sw_context->cur_val_buf;
	}
	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}
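
/**
 * vmw_cmd_end_query - Verify an END_QUERY command: check the context id
 * and the guest pointer that receives the query result.
 */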
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}
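
/**
 * vmw_cmd_wait_query - Verify a WAIT_FOR_QUERY command: check the context
 * id and the guest pointer holding the query result.
 */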
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}
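
/**
 * vmw_cmd_dma - Verify a SURFACE_DMA command.
 *
 * Translates the guest pointer, looks up the host surface and patches the
 * command stream with the device surface id.
 */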
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	bo = &vmw_bo->base;
	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
					     cmd->dma.host.sid, &srf);
	if (ret) {
		DRM_ERROR("could not find surface\n");
		goto out_no_reloc;
	}

	/*
	 * Patch command stream with device SID.
	 */

	cmd->dma.host.sid = srf->res.id;
	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
	/*
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */
	vmw_surface_unreference(&srf);

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}
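
/**
 * vmw_cmd_draw - Verify a DRAW_PRIMITIVES command: check the context id
 * and the surface ids of all vertex declarations and index ranges.
 */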
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&decl->array.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&range->indexArray.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
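
/**
 * vmw_cmd_tex_state - Verify a SETTEXTURESTATE command: check the context
 * id and translate the surface id of every texture-bind state entry.
 */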
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	};

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&cur_state->value);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
	[cmd - SVGA_3D_CMD_BASE] = func
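
/*
 * Per-command verifier dispatch table, indexed by SVGA3D command id
 * relative to SVGA_3D_CMD_BASE.
 */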
static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check)
};
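
/**
 * vmw_cmd_check - Verify a single command in the submission buffer.
 *
 * Computes the command size, bounds-checks it against the remaining
 * buffer space and dispatches to the per-command verifier. On return,
 * *size holds the size of the verified command.
 */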
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;

	cmd_id = ((uint32_t *)buf)[0];
	if (cmd_id == SVGA_CMD_UPDATE) {
		*size = 5 << 2;
		return 0;
	}

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_err;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_err;

	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_err;

	return 0;
out_err:
	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}
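
/**
 * vmw_cmd_check_all - Run vmw_cmd_check on every command in the buffer,
 * failing if the commands do not add up exactly to the submission size.
 */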
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf, uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}
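
/**
 * vmw_apply_relocations - Patch guest pointers in the command stream with
 * the final placement of each DMA buffer: a framebuffer offset for buffers
 * in VRAM, or the GMR id otherwise.
 */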
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index];
		bo = validate->bo;
		if (bo->mem.mem_type == TTM_PL_VRAM) {
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
		} else
			reloc->location->gmrId = bo->mem.start;
	}
	vmw_free_relocations(sw_context);
}
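
/**
 * vmw_clear_validations - Remove all entries from the validation list,
 * dropping the buffer references taken when the list was built.
 */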
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry, *next;

	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 head) {
		list_del(&entry->head);
		vmw_dmabuf_validate_clear(entry->bo);
		ttm_bo_unref(&entry->bo);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);
}

static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
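
/**
 * vmw_execbuf_ioctl - Entry point for user-space command submission.
 *
 * Reserves FIFO space, copies the user command stream, verifies all
 * commands, reserves and validates the referenced buffers, applies
 * relocations, commits the commands and fences the submission.
 */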
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct drm_vmw_fence_rep fence_rep;
	struct drm_vmw_fence_rep __user *user_fence_rep;
	int ret;
	void *user_cmd;
	void *cmd;
	uint32_t sequence;
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_master *vmaster = vmw_master(file_priv->master);

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_no_cmd_mutex;
	}

	cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_unlock;
	}

	user_cmd = (void __user *)(unsigned long)arg->commands;
	ret = copy_from_user(cmd, user_cmd, arg->command_size);

	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		DRM_ERROR("Failed copying commands.\n");
		goto out_commit;
	}

	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cid_valid = false;
	sw_context->sid_valid = false;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;

	INIT_LIST_HEAD(&sw_context->validate_nodes);

	ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
	if (unlikely(ret != 0))
		goto out_err;
	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_apply_relocations(sw_context);

	if (arg->throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
				   arg->throttle_us);

		if (unlikely(ret != 0))
			goto out_err;
	}

	vmw_fifo_commit(dev_priv, arg->command_size);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);

	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
				    (void *)(unsigned long) sequence);
	vmw_clear_validations(sw_context);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync.
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	fence_rep.error = ret;
	fence_rep.fence_seq = (uint64_t) sequence;
	fence_rep.pad64 = 0;

	user_fence_rep = (struct drm_vmw_fence_rep __user *)
	    (unsigned long)arg->fence_rep;

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in.
	 */

	ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));

	vmw_kms_cursor_post_execbuf(dev_priv);
	ttm_read_unlock(&vmaster->lock);
	return 0;
out_err:
	vmw_free_relocations(sw_context);
	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
	vmw_clear_validations(sw_context);
out_commit:
	vmw_fifo_commit(dev_priv, 0);
out_unlock:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
out_no_cmd_mutex:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
v4.17
   1/**************************************************************************
   2 *
   3 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27#include <linux/sync_file.h>
  28
  29#include "vmwgfx_drv.h"
  30#include "vmwgfx_reg.h"
  31#include <drm/ttm/ttm_bo_api.h>
  32#include <drm/ttm/ttm_placement.h>
  33#include "vmwgfx_so.h"
  34#include "vmwgfx_binding.h"
  35
  36#define VMW_RES_HT_ORDER 12
  37
  38/**
  39 * enum vmw_resource_relocation_type - Relocation type for resources
  40 *
  41 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
  42 * command stream is replaced with the actual id after validation.
  43 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
  44 * with a NOP.
  45 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
  46 * after validation is -1, the command is replaced with a NOP. Otherwise no
  47 * action.
  48 */
  49enum vmw_resource_relocation_type {
  50	vmw_res_rel_normal,
  51	vmw_res_rel_nop,
  52	vmw_res_rel_cond_nop,
  53	vmw_res_rel_max
  54};
  55
  56/**
  57 * struct vmw_resource_relocation - Relocation info for resources
  58 *
  59 * @head: List head for the software context's relocation list.
  60 * @res: Non-ref-counted pointer to the resource.
  61 * @offset: Offset of single byte entries into the command buffer where the
  62 * id that needs fixup is located.
  63 * @rel_type: Type of relocation.
  64 */
  65struct vmw_resource_relocation {
  66	struct list_head head;
  67	const struct vmw_resource *res;
  68	u32 offset:29;
  69	enum vmw_resource_relocation_type rel_type:3;
  70};
  71
  72/**
  73 * struct vmw_resource_val_node - Validation info for resources
  74 *
  75 * @head: List head for the software context's resource list.
  76 * @hash: Hash entry for quick resouce to val_node lookup.
  77 * @res: Ref-counted pointer to the resource.
  78 * @switch_backup: Boolean whether to switch backup buffer on unreserve.
  79 * @new_backup: Refcounted pointer to the new backup buffer.
  80 * @staged_bindings: If @res is a context, tracks bindings set up during
  81 * the command batch. Otherwise NULL.
  82 * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
  83 * @first_usage: Set to true the first time the resource is referenced in
  84 * the command stream.
  85 * @switching_backup: The command stream provides a new backup buffer for a
  86 * resource.
  87 * @no_buffer_needed: This means @switching_backup is true on first buffer
  88 * reference. So resource reservation does not need to allocate a backup
  89 * buffer for the resource.
  90 */
  91struct vmw_resource_val_node {
  92	struct list_head head;
  93	struct drm_hash_item hash;
  94	struct vmw_resource *res;
  95	struct vmw_dma_buffer *new_backup;
  96	struct vmw_ctx_binding_state *staged_bindings;
  97	unsigned long new_backup_offset;
  98	u32 first_usage : 1;
  99	u32 switching_backup : 1;
 100	u32 no_buffer_needed : 1;
 101};
 102
 103/**
 104 * struct vmw_cmd_entry - Describe a command for the verifier
 105 *
 106 * @user_allow: Whether allowed from the execbuf ioctl.
 107 * @gb_disable: Whether disabled if guest-backed objects are available.
 108 * @gb_enable: Whether enabled iff guest-backed objects are available.
 109 */
 110struct vmw_cmd_entry {
 111	int (*func) (struct vmw_private *, struct vmw_sw_context *,
 112		     SVGA3dCmdHeader *);
 113	bool user_allow;
 114	bool gb_disable;
 115	bool gb_enable;
 116	const char *cmd_name;
 117};
 118
 119#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
 120	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
 121				       (_gb_disable), (_gb_enable), #_cmd}
 122
 123static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 124					struct vmw_sw_context *sw_context,
 125					struct vmw_resource *ctx);
 126static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 127				 struct vmw_sw_context *sw_context,
 128				 SVGAMobId *id,
 129				 struct vmw_dma_buffer **vmw_bo_p);
 130static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 131				   struct vmw_dma_buffer *vbo,
 132				   bool validate_as_mob,
 133				   uint32_t *p_val_node);
 134/**
 135 * vmw_ptr_diff - Compute the offset from a to b in bytes
 136 *
 137 * @a: A starting pointer.
 138 * @b: A pointer offset in the same address space.
 139 *
 140 * Returns: The offset in bytes between the two pointers.
 141 */
 142static size_t vmw_ptr_diff(void *a, void *b)
 143{
 144	return (unsigned long) b - (unsigned long) a;
 145}
 146
 147/**
 148 * vmw_resources_unreserve - unreserve resources previously reserved for
 149 * command submission.
 150 *
 151 * @sw_context: pointer to the software context
 152 * @backoff: Whether command submission failed.
 153 */
 154static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
 155				    bool backoff)
 156{
 157	struct vmw_resource_val_node *val;
 158	struct list_head *list = &sw_context->resource_list;
 159
 160	if (sw_context->dx_query_mob && !backoff)
 161		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
 162					  sw_context->dx_query_mob);
 163
 164	list_for_each_entry(val, list, head) {
 165		struct vmw_resource *res = val->res;
 166		bool switch_backup =
 167			(backoff) ? false : val->switching_backup;
 168
 169		/*
 170		 * Transfer staged context bindings to the
 171		 * persistent context binding tracker.
 172		 */
 173		if (unlikely(val->staged_bindings)) {
 174			if (!backoff) {
 175				vmw_binding_state_commit
 176					(vmw_context_binding_state(val->res),
 177					 val->staged_bindings);
 178			}
 179
 180			if (val->staged_bindings != sw_context->staged_bindings)
 181				vmw_binding_state_free(val->staged_bindings);
 182			else
 183				sw_context->staged_bindings_inuse = false;
 184			val->staged_bindings = NULL;
 185		}
 186		vmw_resource_unreserve(res, switch_backup, val->new_backup,
 187				       val->new_backup_offset);
 188		vmw_dmabuf_unreference(&val->new_backup);
 189	}
 190}
 191
 192/**
 193 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 194 * added to the validate list.
 195 *
 196 * @dev_priv: Pointer to the device private:
 197 * @sw_context: The validation context:
 198 * @node: The validation node holding this context.
 199 */
 200static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
 201				   struct vmw_sw_context *sw_context,
 202				   struct vmw_resource_val_node *node)
 203{
 204	int ret;
 205
 206	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
 207	if (unlikely(ret != 0))
 208		goto out_err;
 209
 210	if (!sw_context->staged_bindings) {
 211		sw_context->staged_bindings =
 212			vmw_binding_state_alloc(dev_priv);
 213		if (IS_ERR(sw_context->staged_bindings)) {
 214			DRM_ERROR("Failed to allocate context binding "
 215				  "information.\n");
 216			ret = PTR_ERR(sw_context->staged_bindings);
 217			sw_context->staged_bindings = NULL;
 218			goto out_err;
 219		}
 220	}
 221
 222	if (sw_context->staged_bindings_inuse) {
 223		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
 224		if (IS_ERR(node->staged_bindings)) {
 225			DRM_ERROR("Failed to allocate context binding "
 226				  "information.\n");
 227			ret = PTR_ERR(node->staged_bindings);
 228			node->staged_bindings = NULL;
 229			goto out_err;
 230		}
 231	} else {
 232		node->staged_bindings = sw_context->staged_bindings;
 233		sw_context->staged_bindings_inuse = true;
 234	}
 235
 236	return 0;
 237out_err:
 238	return ret;
 239}
 240
 241/**
 242 * vmw_resource_val_add - Add a resource to the software context's
 243 * resource list if it's not already on it.
 244 *
 245 * @sw_context: Pointer to the software context.
 246 * @res: Pointer to the resource.
 247 * @p_node On successful return points to a valid pointer to a
 248 * struct vmw_resource_val_node, if non-NULL on entry.
 249 */
 250static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
 251				struct vmw_resource *res,
 252				struct vmw_resource_val_node **p_node)
 253{
 254	struct vmw_private *dev_priv = res->dev_priv;
 255	struct vmw_resource_val_node *node;
 256	struct drm_hash_item *hash;
 257	int ret;
 258
 259	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
 260				    &hash) == 0)) {
 261		node = container_of(hash, struct vmw_resource_val_node, hash);
 262		node->first_usage = false;
 263		if (unlikely(p_node != NULL))
 264			*p_node = node;
 265		return 0;
 266	}
 267
 268	node = kzalloc(sizeof(*node), GFP_KERNEL);
 269	if (unlikely(!node)) {
 270		DRM_ERROR("Failed to allocate a resource validation "
 271			  "entry.\n");
 272		return -ENOMEM;
 273	}
 274
 275	node->hash.key = (unsigned long) res;
 276	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
 277	if (unlikely(ret != 0)) {
 278		DRM_ERROR("Failed to initialize a resource validation "
 279			  "entry.\n");
 280		kfree(node);
 281		return ret;
 282	}
 283	node->res = vmw_resource_reference(res);
 284	node->first_usage = true;
 285	if (unlikely(p_node != NULL))
 286		*p_node = node;
 287
 288	if (!dev_priv->has_mob) {
 289		list_add_tail(&node->head, &sw_context->resource_list);
 290		return 0;
 291	}
 292
 293	switch (vmw_res_type(res)) {
 294	case vmw_res_context:
 295	case vmw_res_dx_context:
 296		list_add(&node->head, &sw_context->ctx_resource_list);
 297		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
 298		break;
 299	case vmw_res_cotable:
 300		list_add_tail(&node->head, &sw_context->ctx_resource_list);
 301		break;
 302	default:
 303		list_add_tail(&node->head, &sw_context->resource_list);
 304		break;
 305	}
 306
 307	return ret;
 308}
 309
 310/**
 311 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 312 * to the validation list
 313 *
 314 * @sw_context: The software context holding the validation list.
 315 * @view: Pointer to the view resource.
 316 *
 317 * Returns 0 if success, negative error code otherwise.
 318 */
 319static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
 320				struct vmw_resource *view)
 321{
 322	int ret;
 323
 324	/*
 325	 * First add the resource the view is pointing to, otherwise
 326	 * it may be swapped out when the view is validated.
 327	 */
 328	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
 329	if (ret)
 330		return ret;
 331
 332	return vmw_resource_val_add(sw_context, view, NULL);
 333}
 334
 335/**
 336 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 337 * pointing to to the validation list.
 338 *
 339 * @sw_context: The software context holding the validation list.
 340 * @view_type: The view type to look up.
 341 * @id: view id of the view.
 342 *
 343 * The view is represented by a view id and the DX context it's created on,
 344 * or scheduled for creation on. If there is no DX context set, the function
 345 * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure.
 346 */
 347static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
 348			       enum vmw_view_type view_type, u32 id)
 349{
 350	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
 351	struct vmw_resource *view;
 352	int ret;
 353
 354	if (!ctx_node) {
 355		DRM_ERROR("DX Context not set.\n");
 356		return -EINVAL;
 357	}
 358
 359	view = vmw_view_lookup(sw_context->man, view_type, id);
 360	if (IS_ERR(view))
 361		return PTR_ERR(view);
 362
 363	ret = vmw_view_res_val_add(sw_context, view);
 364	vmw_resource_unreference(&view);
 365
 366	return ret;
 367}
 368
 369/**
 370 * vmw_resource_context_res_add - Put resources previously bound to a context on
 371 * the validation list
 372 *
 373 * @dev_priv: Pointer to a device private structure
 374 * @sw_context: Pointer to a software context used for this command submission
 375 * @ctx: Pointer to the context resource
 376 *
 377 * This function puts all resources that were previously bound to @ctx on
 378 * the resource validation list. This is part of the context state reemission
 379 */
 380static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 381					struct vmw_sw_context *sw_context,
 382					struct vmw_resource *ctx)
 383{
 384	struct list_head *binding_list;
 385	struct vmw_ctx_bindinfo *entry;
 386	int ret = 0;
 387	struct vmw_resource *res;
 388	u32 i;
 389
 390	/* Add all cotables to the validation list. */
 391	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
 392		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
 393			res = vmw_context_cotable(ctx, i);
 394			if (IS_ERR(res))
 395				continue;
 396
 397			ret = vmw_resource_val_add(sw_context, res, NULL);
 398			vmw_resource_unreference(&res);
 399			if (unlikely(ret != 0))
 400				return ret;
 401		}
 402	}
 403
 404
 405	/* Add all resources bound to the context to the validation list */
 406	mutex_lock(&dev_priv->binding_mutex);
 407	binding_list = vmw_context_binding_list(ctx);
 408
 409	list_for_each_entry(entry, binding_list, ctx_list) {
 410		/* entry->res is not refcounted */
 411		res = vmw_resource_reference_unless_doomed(entry->res);
 412		if (unlikely(res == NULL))
 413			continue;
 414
 415		if (vmw_res_type(entry->res) == vmw_res_view)
 416			ret = vmw_view_res_val_add(sw_context, entry->res);
 417		else
 418			ret = vmw_resource_val_add(sw_context, entry->res,
 419						   NULL);
 420		vmw_resource_unreference(&res);
 421		if (unlikely(ret != 0))
 422			break;
 423	}
 424
 425	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
 426		struct vmw_dma_buffer *dx_query_mob;
 427
 428		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
 429		if (dx_query_mob)
 430			ret = vmw_bo_to_validate_list(sw_context,
 431						      dx_query_mob,
 432						      true, NULL);
 433	}
 434
 435	mutex_unlock(&dev_priv->binding_mutex);
 436	return ret;
 437}
 438
 439/**
 440 * vmw_resource_relocation_add - Add a relocation to the relocation list
 441 *
 442 * @list: Pointer to head of relocation list.
 443 * @res: The resource.
 444 * @offset: Offset into the command buffer currently being parsed where the
 445 * id that needs fixup is located. Granularity is one byte.
 446 * @rel_type: Relocation type.
 447 */
 448static int vmw_resource_relocation_add(struct list_head *list,
 449				       const struct vmw_resource *res,
 450				       unsigned long offset,
 451				       enum vmw_resource_relocation_type
 452				       rel_type)
 453{
 454	struct vmw_resource_relocation *rel;
 455
 456	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
 457	if (unlikely(!rel)) {
 458		DRM_ERROR("Failed to allocate a resource relocation.\n");
 459		return -ENOMEM;
 460	}
 461
 462	rel->res = res;
 463	rel->offset = offset;
 464	rel->rel_type = rel_type;
 465	list_add_tail(&rel->head, list);
 466
 467	return 0;
 468}
 469
 470/**
 471 * vmw_resource_relocations_free - Free all relocations on a list
 472 *
 473 * @list: Pointer to the head of the relocation list.
 474 */
 475static void vmw_resource_relocations_free(struct list_head *list)
 476{
 477	struct vmw_resource_relocation *rel, *n;
 478
 479	list_for_each_entry_safe(rel, n, list, head) {
 480		list_del(&rel->head);
 481		kfree(rel);
 482	}
 483}
 484
 485/**
 486 * vmw_resource_relocations_apply - Apply all relocations on a list
 487 *
 488 * @cb: Pointer to the start of the command buffer bein patch. This need
 489 * not be the same buffer as the one being parsed when the relocation
 490 * list was built, but the contents must be the same modulo the
 491 * resource ids.
 492 * @list: Pointer to the head of the relocation list.
 493 */
 494static void vmw_resource_relocations_apply(uint32_t *cb,
 495					   struct list_head *list)
 496{
 497	struct vmw_resource_relocation *rel;
 498
 499	/* Validate the struct vmw_resource_relocation member size */
 500	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
 501	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
 502
 503	list_for_each_entry(rel, list, head) {
 504		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
 505		switch (rel->rel_type) {
 506		case vmw_res_rel_normal:
 507			*addr = rel->res->id;
 508			break;
 509		case vmw_res_rel_nop:
 510			*addr = SVGA_3D_CMD_NOP;
 511			break;
 512		default:
 513			if (rel->res->id == -1)
 514				*addr = SVGA_3D_CMD_NOP;
 515			break;
 516		}
 517	}
 518}
 519
 520static int vmw_cmd_invalid(struct vmw_private *dev_priv,
 521			   struct vmw_sw_context *sw_context,
 522			   SVGA3dCmdHeader *header)
 523{
 524	return -EINVAL;
 525}
 526
 527static int vmw_cmd_ok(struct vmw_private *dev_priv,
 528		      struct vmw_sw_context *sw_context,
 529		      SVGA3dCmdHeader *header)
 530{
 531	return 0;
 532}
 533
 534/**
 535 * vmw_bo_to_validate_list - add a bo to a validate list
 536 *
 537 * @sw_context: The software context used for this command submission batch.
 538 * @bo: The buffer object to add.
 539 * @validate_as_mob: Validate this buffer as a MOB.
 540 * @p_val_node: If non-NULL Will be updated with the validate node number
 541 * on return.
 542 *
 543 * Returns -EINVAL if the limit of number of buffer objects per command
 544 * submission is reached.
 545 */
 546static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 547				   struct vmw_dma_buffer *vbo,
 548				   bool validate_as_mob,
 549				   uint32_t *p_val_node)
 550{
 551	uint32_t val_node;
 552	struct vmw_validate_buffer *vval_buf;
 553	struct ttm_validate_buffer *val_buf;
 554	struct drm_hash_item *hash;
 555	int ret;
 556
 557	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
 558				    &hash) == 0)) {
 559		vval_buf = container_of(hash, struct vmw_validate_buffer,
 560					hash);
 561		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
 562			DRM_ERROR("Inconsistent buffer usage.\n");
 563			return -EINVAL;
 564		}
 565		val_buf = &vval_buf->base;
 566		val_node = vval_buf - sw_context->val_bufs;
 567	} else {
 568		val_node = sw_context->cur_val_buf;
 569		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
 570			DRM_ERROR("Max number of DMA buffers per submission "
 571				  "exceeded.\n");
 572			return -EINVAL;
 573		}
 574		vval_buf = &sw_context->val_bufs[val_node];
 575		vval_buf->hash.key = (unsigned long) vbo;
 576		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
 577		if (unlikely(ret != 0)) {
 578			DRM_ERROR("Failed to initialize a buffer validation "
 579				  "entry.\n");
 580			return ret;
 581		}
 582		++sw_context->cur_val_buf;
 583		val_buf = &vval_buf->base;
 584		val_buf->bo = ttm_bo_reference(&vbo->base);
 585		val_buf->shared = false;
 586		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 587		vval_buf->validate_as_mob = validate_as_mob;
 588	}
 589
 590	if (p_val_node)
 591		*p_val_node = val_node;
 592
 593	return 0;
 594}
 595
 596/**
 597 * vmw_resources_reserve - Reserve all resources on the sw_context's
 598 * resource list.
 599 *
 600 * @sw_context: Pointer to the software context.
 601 *
 602 * Note that since vmware's command submission currently is protected by
 603 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 604 * since only a single thread at once will attempt this.
 605 */
 606static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 607{
 608	struct vmw_resource_val_node *val;
 609	int ret = 0;
 610
 611	list_for_each_entry(val, &sw_context->resource_list, head) {
 612		struct vmw_resource *res = val->res;
 613
 614		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
 615		if (unlikely(ret != 0))
 616			return ret;
 617
 618		if (res->backup) {
 619			struct vmw_dma_buffer *vbo = res->backup;
 620
 621			ret = vmw_bo_to_validate_list
 622				(sw_context, vbo,
 623				 vmw_resource_needs_backup(res), NULL);
 624
 625			if (unlikely(ret != 0))
 626				return ret;
 627		}
 628	}
 629
 630	if (sw_context->dx_query_mob) {
 631		struct vmw_dma_buffer *expected_dx_query_mob;
 632
 633		expected_dx_query_mob =
 634			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
 635		if (expected_dx_query_mob &&
 636		    expected_dx_query_mob != sw_context->dx_query_mob) {
 637			ret = -EINVAL;
 638		}
 639	}
 640
 641	return ret;
 642}
 643
 644/**
 645 * vmw_resources_validate - Validate all resources on the sw_context's
 646 * resource list.
 647 *
 648 * @sw_context: Pointer to the software context.
 649 *
 650 * Before this function is called, all resource backup buffers must have
 651 * been validated.
 652 */
 653static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 654{
 655	struct vmw_resource_val_node *val;
 656	int ret;
 657
 658	list_for_each_entry(val, &sw_context->resource_list, head) {
 659		struct vmw_resource *res = val->res;
 660		struct vmw_dma_buffer *backup = res->backup;
 661
 662		ret = vmw_resource_validate(res);
 663		if (unlikely(ret != 0)) {
 664			if (ret != -ERESTARTSYS)
 665				DRM_ERROR("Failed to validate resource.\n");
 666			return ret;
 667		}
 668
 669		/* Check if the resource switched backup buffer */
 670		if (backup && res->backup && (backup != res->backup)) {
 671			struct vmw_dma_buffer *vbo = res->backup;
 672
 673			ret = vmw_bo_to_validate_list
 674				(sw_context, vbo,
 675				 vmw_resource_needs_backup(res), NULL);
 676			if (ret) {
 677				ttm_bo_unreserve(&vbo->base);
 678				return ret;
 679			}
 680		}
 681	}
 682	return 0;
 683}
 684
 685/**
 686 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 687 * relocation- and validation lists.
 688 *
 689 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 690 * @sw_context: Pointer to the software context.
 691 * @id_loc: Pointer to where the id that needs translation is located.
 692 * @res: Valid pointer to a struct vmw_resource.
 693 * @p_val: If non null, a pointer to the struct vmw_resource_validate_node
 694 * used for this resource is returned here.
 695 */
 696static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
 697				 struct vmw_sw_context *sw_context,
 698				 uint32_t *id_loc,
 699				 struct vmw_resource *res,
 700				 struct vmw_resource_val_node **p_val)
 701{
 702	int ret;
 703	struct vmw_resource_val_node *node;
 704
 705	*p_val = NULL;
 706	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
 707					  res,
 708					  vmw_ptr_diff(sw_context->buf_start,
 709						       id_loc),
 710					  vmw_res_rel_normal);
 711	if (unlikely(ret != 0))
 712		return ret;
 713
 714	ret = vmw_resource_val_add(sw_context, res, &node);
 715	if (unlikely(ret != 0))
 716		return ret;
 717
 718	if (p_val)
 719		*p_val = node;
 720
 721	return 0;
 722}
 723
 724
 725/**
 726 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 727 * on the resource validate list unless it's already there.
 728 *
 729 * @dev_priv: Pointer to a device private structure.
 730 * @sw_context: Pointer to the software context.
 731 * @res_type: Resource type.
 732 * @converter: User-space visisble type specific information.
 733 * @id_loc: Pointer to the location in the command buffer currently being
 734 * parsed from where the user-space resource id handle is located.
 735 * @p_val: Pointer to pointer to resource validalidation node. Populated
 736 * on exit.
 737 */
 738static int
 739vmw_cmd_res_check(struct vmw_private *dev_priv,
 740		  struct vmw_sw_context *sw_context,
 741		  enum vmw_res_type res_type,
 742		  const struct vmw_user_resource_conv *converter,
 743		  uint32_t *id_loc,
 744		  struct vmw_resource_val_node **p_val)
 745{
 746	struct vmw_res_cache_entry *rcache =
 747		&sw_context->res_cache[res_type];
 748	struct vmw_resource *res;
 749	struct vmw_resource_val_node *node;
 750	int ret;
 751
 752	if (*id_loc == SVGA3D_INVALID_ID) {
 753		if (p_val)
 754			*p_val = NULL;
 755		if (res_type == vmw_res_context) {
 756			DRM_ERROR("Illegal context invalid id.\n");
 757			return -EINVAL;
 758		}
 759		return 0;
 760	}
 761
 762	/*
 763	 * Fastpath in case of repeated commands referencing the same
 764	 * resource
 765	 */
 766
 767	if (likely(rcache->valid && *id_loc == rcache->handle)) {
 768		const struct vmw_resource *res = rcache->res;
 769
 770		rcache->node->first_usage = false;
 771		if (p_val)
 772			*p_val = rcache->node;
 773
 774		return vmw_resource_relocation_add
 775			(&sw_context->res_relocations, res,
 776			 vmw_ptr_diff(sw_context->buf_start, id_loc),
 777			 vmw_res_rel_normal);
 778	}
 779
 780	ret = vmw_user_resource_lookup_handle(dev_priv,
 781					      sw_context->fp->tfile,
 782					      *id_loc,
 783					      converter,
 784					      &res);
 785	if (unlikely(ret != 0)) {
 786		DRM_ERROR("Could not find or use resource 0x%08x.\n",
 787			  (unsigned) *id_loc);
 788		dump_stack();
 789		return ret;
 790	}
 791
 792	rcache->valid = true;
 793	rcache->res = res;
 794	rcache->handle = *id_loc;
 795
 796	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
 797				    res, &node);
 798	if (unlikely(ret != 0))
 799		goto out_no_reloc;
 800
 801	rcache->node = node;
 802	if (p_val)
 803		*p_val = node;
 804	vmw_resource_unreference(&res);
 805	return 0;
 806
 807out_no_reloc:
 808	BUG_ON(sw_context->error_resource != NULL);
 809	sw_context->error_resource = res;
 810
 811	return ret;
 812}
 813
 814/**
 815 * vmw_rebind_dx_query - Rebind DX query associated with the context
 816 *
 817 * @ctx_res: context the query belongs to
 818 *
 819 * This function assumes binding_mutex is held.
 820 */
 821static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
 822{
 823	struct vmw_private *dev_priv = ctx_res->dev_priv;
 824	struct vmw_dma_buffer *dx_query_mob;
 825	struct {
 826		SVGA3dCmdHeader header;
 827		SVGA3dCmdDXBindAllQuery body;
 828	} *cmd;
 829
 830
 831	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
 832
 833	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
 834		return 0;
 835
 836	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
 837
 838	if (cmd == NULL) {
 839		DRM_ERROR("Failed to rebind queries.\n");
 840		return -ENOMEM;
 841	}
 842
 843	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
 844	cmd->header.size = sizeof(cmd->body);
 845	cmd->body.cid = ctx_res->id;
 846	cmd->body.mobid = dx_query_mob->base.mem.start;
 847	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 848
 849	vmw_context_bind_dx_query(ctx_res, dx_query_mob);
 850
 851	return 0;
 852}
 853
 854/**
 855 * vmw_rebind_contexts - Rebind all resources previously bound to
 856 * referenced contexts.
 857 *
 858 * @sw_context: Pointer to the software context.
 859 *
 860 * Rebind context binding points that have been scrubbed because of eviction.
 861 */
 862static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
 863{
 864	struct vmw_resource_val_node *val;
 865	int ret;
 866
 867	list_for_each_entry(val, &sw_context->resource_list, head) {
 868		if (unlikely(!val->staged_bindings))
 869			break;
 870
 871		ret = vmw_binding_rebind_all
 872			(vmw_context_binding_state(val->res));
 873		if (unlikely(ret != 0)) {
 874			if (ret != -ERESTARTSYS)
 875				DRM_ERROR("Failed to rebind context.\n");
 876			return ret;
 877		}
 878
 879		ret = vmw_rebind_all_dx_query(val->res);
 880		if (ret != 0)
 881			return ret;
 882	}
 883
 884	return 0;
 885}
 886
 887/**
 888 * vmw_view_bindings_add - Add an array of view bindings to a context
 889 * binding state tracker.
 890 *
 891 * @sw_context: The execbuf state used for this command.
 892 * @view_type: View type for the bindings.
 893 * @binding_type: Binding type for the bindings.
 894 * @shader_slot: The shader slot to use for the bindings.
 895 * @view_ids: Array of view ids to be bound.
 896 * @num_views: Number of view ids in @view_ids.
 897 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 898 */
 899static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
 900				 enum vmw_view_type view_type,
 901				 enum vmw_ctx_binding_type binding_type,
 902				 uint32 shader_slot,
 903				 uint32 view_ids[], u32 num_views,
 904				 u32 first_slot)
 905{
 906	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
 907	struct vmw_cmdbuf_res_manager *man;
 908	u32 i;
 909	int ret;
 910
 911	if (!ctx_node) {
 912		DRM_ERROR("DX Context not set.\n");
 913		return -EINVAL;
 914	}
 915
 916	man = sw_context->man;
 917	for (i = 0; i < num_views; ++i) {
 918		struct vmw_ctx_bindinfo_view binding;
 919		struct vmw_resource *view = NULL;
 920
 921		if (view_ids[i] != SVGA3D_INVALID_ID) {
 922			view = vmw_view_lookup(man, view_type, view_ids[i]);
 923			if (IS_ERR(view)) {
 924				DRM_ERROR("View not found.\n");
 925				return PTR_ERR(view);
 926			}
 927
 928			ret = vmw_view_res_val_add(sw_context, view);
 929			if (ret) {
 930				DRM_ERROR("Could not add view to "
 931					  "validation list.\n");
 932				vmw_resource_unreference(&view);
 933				return ret;
 934			}
 935		}
 936		binding.bi.ctx = ctx_node->res;
 937		binding.bi.res = view;
 938		binding.bi.bt = binding_type;
 939		binding.shader_slot = shader_slot;
 940		binding.slot = first_slot + i;
 941		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
 942				shader_slot, binding.slot);
 943		if (view)
 944			vmw_resource_unreference(&view);
 945	}
 946
 947	return 0;
 948}
 949
 950/**
 951 * vmw_cmd_cid_check - Check a command header for valid context information.
 952 *
 953 * @dev_priv: Pointer to a device private structure.
 954 * @sw_context: Pointer to the software context.
 955 * @header: A command header with an embedded user-space context handle.
 956 *
 957 * Convenience function: Call vmw_cmd_res_check with the user-space context
 958 * handle embedded in @header.
 959 */
 960static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 961			     struct vmw_sw_context *sw_context,
 962			     SVGA3dCmdHeader *header)
 963{
 964	struct vmw_cid_cmd {
 965		SVGA3dCmdHeader header;
 966		uint32_t cid;
 967	} *cmd;
 968
 969	cmd = container_of(header, struct vmw_cid_cmd, header);
 970	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 971				 user_context_converter, &cmd->cid, NULL);
 972}
 973
 974static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 975					   struct vmw_sw_context *sw_context,
 976					   SVGA3dCmdHeader *header)
 977{
 978	struct vmw_sid_cmd {
 979		SVGA3dCmdHeader header;
 980		SVGA3dCmdSetRenderTarget body;
 981	} *cmd;
 982	struct vmw_resource_val_node *ctx_node;
 983	struct vmw_resource_val_node *res_node;
 984	int ret;
 985
 986	cmd = container_of(header, struct vmw_sid_cmd, header);
 987
 988	if (cmd->body.type >= SVGA3D_RT_MAX) {
 989		DRM_ERROR("Illegal render target type %u.\n",
 990			  (unsigned) cmd->body.type);
 991		return -EINVAL;
 992	}
 993
 994	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 995				user_context_converter, &cmd->body.cid,
 996				&ctx_node);
 997	if (unlikely(ret != 0))
 998		return ret;
 999
1000	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1001				user_surface_converter,
1002				&cmd->body.target.sid, &res_node);
1003	if (unlikely(ret != 0))
1004		return ret;
1005
1006	if (dev_priv->has_mob) {
1007		struct vmw_ctx_bindinfo_view binding;
1008
1009		binding.bi.ctx = ctx_node->res;
1010		binding.bi.res = res_node ? res_node->res : NULL;
1011		binding.bi.bt = vmw_ctx_binding_rt;
1012		binding.slot = cmd->body.type;
1013		vmw_binding_add(ctx_node->staged_bindings,
1014				&binding.bi, 0, binding.slot);
1015	}
1016
1017	return 0;
1018}
1019
1020static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
1021				      struct vmw_sw_context *sw_context,
1022				      SVGA3dCmdHeader *header)
1023{
1024	struct vmw_sid_cmd {
1025		SVGA3dCmdHeader header;
1026		SVGA3dCmdSurfaceCopy body;
1027	} *cmd;
1028	int ret;
1029
1030	cmd = container_of(header, struct vmw_sid_cmd, header);
1031
1032	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1033				user_surface_converter,
1034				&cmd->body.src.sid, NULL);
1035	if (ret)
1036		return ret;
1037
1038	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1039				 user_surface_converter,
1040				 &cmd->body.dest.sid, NULL);
1041}
1042
1043static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
1044				      struct vmw_sw_context *sw_context,
1045				      SVGA3dCmdHeader *header)
1046{
1047	struct {
1048		SVGA3dCmdHeader header;
1049		SVGA3dCmdDXBufferCopy body;
1050	} *cmd;
1051	int ret;
1052
1053	cmd = container_of(header, typeof(*cmd), header);
1054	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1055				user_surface_converter,
1056				&cmd->body.src, NULL);
1057	if (ret != 0)
1058		return ret;
1059
1060	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1061				 user_surface_converter,
1062				 &cmd->body.dest, NULL);
1063}
1064
1065static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
1066				   struct vmw_sw_context *sw_context,
1067				   SVGA3dCmdHeader *header)
1068{
1069	struct {
1070		SVGA3dCmdHeader header;
1071		SVGA3dCmdDXPredCopyRegion body;
1072	} *cmd;
1073	int ret;
1074
1075	cmd = container_of(header, typeof(*cmd), header);
1076	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1077				user_surface_converter,
1078				&cmd->body.srcSid, NULL);
1079	if (ret != 0)
1080		return ret;
1081
1082	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1083				 user_surface_converter,
1084				 &cmd->body.dstSid, NULL);
1085}
1086
1087static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
1088				     struct vmw_sw_context *sw_context,
1089				     SVGA3dCmdHeader *header)
1090{
1091	struct vmw_sid_cmd {
1092		SVGA3dCmdHeader header;
1093		SVGA3dCmdSurfaceStretchBlt body;
1094	} *cmd;
1095	int ret;
1096
1097	cmd = container_of(header, struct vmw_sid_cmd, header);
1098	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1099				user_surface_converter,
1100				&cmd->body.src.sid, NULL);
1101	if (unlikely(ret != 0))
1102		return ret;
1103	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1104				 user_surface_converter,
1105				 &cmd->body.dest.sid, NULL);
1106}
1107
1108static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
1109					 struct vmw_sw_context *sw_context,
1110					 SVGA3dCmdHeader *header)
1111{
1112	struct vmw_sid_cmd {
1113		SVGA3dCmdHeader header;
1114		SVGA3dCmdBlitSurfaceToScreen body;
1115	} *cmd;
1116
1117	cmd = container_of(header, struct vmw_sid_cmd, header);
1118
1119	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1120				 user_surface_converter,
1121				 &cmd->body.srcImage.sid, NULL);
1122}
1123
1124static int vmw_cmd_present_check(struct vmw_private *dev_priv,
1125				 struct vmw_sw_context *sw_context,
1126				 SVGA3dCmdHeader *header)
1127{
1128	struct vmw_sid_cmd {
1129		SVGA3dCmdHeader header;
1130		SVGA3dCmdPresent body;
1131	} *cmd;
1132
1133
1134	cmd = container_of(header, struct vmw_sid_cmd, header);
1135
1136	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1137				 user_surface_converter, &cmd->body.sid,
1138				 NULL);
1139}
1140
1141/**
1142 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1143 *
1144 * @dev_priv: The device private structure.
1145 * @new_query_bo: The new buffer holding query results.
1146 * @sw_context: The software context used for this command submission.
1147 *
1148 * This function checks whether @new_query_bo is suitable for holding
1149 * query results, and if another buffer currently is pinned for query
1150 * results. If so, the function prepares the state of @sw_context for
1151 * switching pinned buffers after successful submission of the current
1152 * command batch.
1153 */
1154static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1155				       struct vmw_dma_buffer *new_query_bo,
1156				       struct vmw_sw_context *sw_context)
1157{
1158	struct vmw_res_cache_entry *ctx_entry =
1159		&sw_context->res_cache[vmw_res_context];
1160	int ret;
1161
1162	BUG_ON(!ctx_entry->valid);
1163	sw_context->last_query_ctx = ctx_entry->res;
1164
1165	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1166
1167		if (unlikely(new_query_bo->base.num_pages > 4)) {
1168			DRM_ERROR("Query buffer too large.\n");
1169			return -EINVAL;
1170		}
1171
1172		if (unlikely(sw_context->cur_query_bo != NULL)) {
1173			sw_context->needs_post_query_barrier = true;
1174			ret = vmw_bo_to_validate_list(sw_context,
1175						      sw_context->cur_query_bo,
1176						      dev_priv->has_mob, NULL);
1177			if (unlikely(ret != 0))
1178				return ret;
1179		}
1180		sw_context->cur_query_bo = new_query_bo;
1181
1182		ret = vmw_bo_to_validate_list(sw_context,
1183					      dev_priv->dummy_query_bo,
1184					      dev_priv->has_mob, NULL);
1185		if (unlikely(ret != 0))
1186			return ret;
1187
1188	}
1189
1190	return 0;
1191}
1192
1193
1194/**
1195 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1196 *
1197 * @dev_priv: The device private structure.
1198 * @sw_context: The software context used for this command submission batch.
1199 *
1200 * This function will check if we're switching query buffers, and will then,
1201 * issue a dummy occlusion query wait used as a query barrier. When the fence
1202 * object following that query wait has signaled, we are sure that all
1203 * preceding queries have finished, and the old query buffer can be unpinned.
1204 * However, since both the new query buffer and the old one are fenced with
 1205 * that fence, we can do an asynchronous unpin now, and be sure that the
1206 * old query buffer won't be moved until the fence has signaled.
1207 *
 1208 * As mentioned above, both the new and the old query buffers need to be fenced
1209 * using a sequence emitted *after* calling this function.
1210 */
1211static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1212				     struct vmw_sw_context *sw_context)
1213{
1214	/*
1215	 * The validate list should still hold references to all
1216	 * contexts here.
1217	 */
1218
1219	if (sw_context->needs_post_query_barrier) {
1220		struct vmw_res_cache_entry *ctx_entry =
1221			&sw_context->res_cache[vmw_res_context];
1222		struct vmw_resource *ctx;
1223		int ret;
1224
1225		BUG_ON(!ctx_entry->valid);
1226		ctx = ctx_entry->res;
1227
1228		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
1229
1230		if (unlikely(ret != 0))
1231			DRM_ERROR("Out of fifo space for dummy query.\n");
1232	}
1233
1234	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1235		if (dev_priv->pinned_bo) {
1236			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
1237			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
1238		}
1239
1240		if (!sw_context->needs_post_query_barrier) {
1241			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
1242
1243			/*
1244			 * We pin also the dummy_query_bo buffer so that we
1245			 * don't need to validate it when emitting
1246			 * dummy queries in context destroy paths.
1247			 */
1248
1249			if (!dev_priv->dummy_query_bo_pinned) {
1250				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1251						    true);
1252				dev_priv->dummy_query_bo_pinned = true;
1253			}
1254
1255			BUG_ON(sw_context->last_query_ctx == NULL);
1256			dev_priv->query_cid = sw_context->last_query_ctx->id;
1257			dev_priv->query_cid_valid = true;
1258			dev_priv->pinned_bo =
1259				vmw_dmabuf_reference(sw_context->cur_query_bo);
1260		}
1261	}
1262}
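
The intended calling sequence for vmw_query_bo_switch_prepare() and
vmw_query_bo_switch_commit(), sketched under the assumption that validation,
submission and fencing happen in the surrounding execbuf path (the function
name here is hypothetical):

static int example_submit_with_query_switch(struct vmw_private *dev_priv,
					    struct vmw_sw_context *sw_context,
					    struct vmw_dma_buffer *new_query_bo)
{
	int ret;

	/* Record the pending buffer switch before the batch is submitted. */
	ret = vmw_query_bo_switch_prepare(dev_priv, new_query_bo, sw_context);
	if (ret != 0)
		return ret;

	/* ...validate buffers and submit the command batch here... */

	/* Emit the query barrier and update the pin bookkeeping. */
	vmw_query_bo_switch_commit(dev_priv, sw_context);

	/*
	 * Both the old and the new query buffer must now be fenced with a
	 * sequence emitted *after* the commit above.
	 */
	return 0;
}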
1263
1264/**
1265 * vmw_translate_mob_pointer - Prepare to translate a user-space buffer
1266 * handle to a MOB id.
1267 *
1268 * @dev_priv: Pointer to a device private structure.
1269 * @sw_context: The software context used for this command batch validation.
1270 * @id: Pointer to the user-space handle to be translated.
 1271 * @vmw_bo_p: Points to a location that, on successful return, will carry
1272 * a reference-counted pointer to the DMA buffer identified by the
1273 * user-space handle in @id.
1274 *
1275 * This function saves information needed to translate a user-space buffer
1276 * handle to a MOB id. The translation does not take place immediately, but
1277 * during a call to vmw_apply_relocations(). This function builds a relocation
1278 * list and a list of buffers to validate. The former needs to be freed using
1279 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 1280 * needs to be freed using vmw_clear_validations().
1281 */
1282static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1283				 struct vmw_sw_context *sw_context,
1284				 SVGAMobId *id,
1285				 struct vmw_dma_buffer **vmw_bo_p)
1286{
1287	struct vmw_dma_buffer *vmw_bo = NULL;
1288	uint32_t handle = *id;
1289	struct vmw_relocation *reloc;
1290	int ret;
1291
1292	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
1293				     NULL);
1294	if (unlikely(ret != 0)) {
1295		DRM_ERROR("Could not find or use MOB buffer.\n");
1296		ret = -EINVAL;
1297		goto out_no_reloc;
1298	}
1299
1300	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 1301		DRM_ERROR("Max number of relocations per submission"
 1302			  " exceeded.\n");
1303		ret = -EINVAL;
1304		goto out_no_reloc;
1305	}
1306
1307	reloc = &sw_context->relocs[sw_context->cur_reloc++];
1308	reloc->mob_loc = id;
1309	reloc->location = NULL;
1310
1311	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
1312	if (unlikely(ret != 0))
1313		goto out_no_reloc;
1314
1315	*vmw_bo_p = vmw_bo;
1316	return 0;
1317
1318out_no_reloc:
1319	vmw_dmabuf_unreference(&vmw_bo);
1320	*vmw_bo_p = NULL;
1321	return ret;
1322}
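
A typical caller resolves the handle, lets the relocation machinery rewrite
*id at apply time, and drops its local reference as soon as the validation
list holds one of its own; compare vmw_cmd_dx_bind_query() below. A sketch
(the function name is hypothetical):

static int example_use_mob(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGAMobId *mobid)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	/* Queues a relocation; *mobid is patched when relocations apply. */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, mobid, &vmw_bo);
	if (ret != 0)
		return ret;

	/* The validation list now holds its own reference. */
	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}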
1323
1324/**
1325 * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
1326 * handle to a valid SVGAGuestPtr
1327 *
1328 * @dev_priv: Pointer to a device private structure.
1329 * @sw_context: The software context used for this command batch validation.
1330 * @ptr: Pointer to the user-space handle to be translated.
 1331 * @vmw_bo_p: Points to a location that, on successful return, will carry
 1332 * a reference-counted pointer to the DMA buffer identified by the
 1333 * user-space handle in @ptr.
1334 *
1335 * This function saves information needed to translate a user-space buffer
1336 * handle to a valid SVGAGuestPtr. The translation does not take place
1337 * immediately, but during a call to vmw_apply_relocations().
1338 * This function builds a relocation list and a list of buffers to validate.
1339 * The former needs to be freed using either vmw_apply_relocations() or
1340 * vmw_free_relocations(). The latter needs to be freed using
 1341 * vmw_clear_validations().
1342 */
1343static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1344				   struct vmw_sw_context *sw_context,
1345				   SVGAGuestPtr *ptr,
1346				   struct vmw_dma_buffer **vmw_bo_p)
1347{
1348	struct vmw_dma_buffer *vmw_bo = NULL;
1349	uint32_t handle = ptr->gmrId;
1350	struct vmw_relocation *reloc;
1351	int ret;
1352
1353	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
1354				     NULL);
1355	if (unlikely(ret != 0)) {
1356		DRM_ERROR("Could not find or use GMR region.\n");
1357		ret = -EINVAL;
1358		goto out_no_reloc;
1359	}
1360
1361	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 1362		DRM_ERROR("Max number of relocations per submission"
 1363			  " exceeded.\n");
1364		ret = -EINVAL;
1365		goto out_no_reloc;
1366	}
1367
1368	reloc = &sw_context->relocs[sw_context->cur_reloc++];
1369	reloc->location = ptr;
1370
1371	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
1372	if (unlikely(ret != 0))
1373		goto out_no_reloc;
1374
1375	*vmw_bo_p = vmw_bo;
1376	return 0;
1377
1378out_no_reloc:
1379	vmw_dmabuf_unreference(&vmw_bo);
1380	*vmw_bo_p = NULL;
1381	return ret;
1382}
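
For quick reference, the two translation helpers differ only in the
relocation they record and the placement they validate for:

/*
 * vmw_translate_mob_ptr():   reloc->mob_loc = id, reloc->location = NULL,
 *                            buffer validated for MOB placement.
 * vmw_translate_guest_ptr(): reloc->location = ptr (full SVGAGuestPtr,
 *                            GMR id plus offset), buffer validated for
 *                            GMR placement (validate_as_mob == false).
 */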
1383
1384
1385
1386/**
1387 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
1388 *
1389 * @dev_priv: Pointer to a device private struct.
1390 * @sw_context: The software context used for this command submission.
1391 * @header: Pointer to the command header in the command stream.
1392 *
 1393 * This function adds the new query into the query COTABLE.
1394 */
1395static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1396				   struct vmw_sw_context *sw_context,
1397				   SVGA3dCmdHeader *header)
1398{
1399	struct vmw_dx_define_query_cmd {
1400		SVGA3dCmdHeader header;
1401		SVGA3dCmdDXDefineQuery q;
1402	} *cmd;
1403
1404	int    ret;
1405	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
1406	struct vmw_resource *cotable_res;
1407
1408
1409	if (ctx_node == NULL) {
1410		DRM_ERROR("DX Context not set for query.\n");
1411		return -EINVAL;
1412	}
1413
1414	cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
1415
1416	if (cmd->q.type <  SVGA3D_QUERYTYPE_MIN ||
1417	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
1418		return -EINVAL;
1419
1420	cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
1421	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
1422	vmw_resource_unreference(&cotable_res);
1423
1424	return ret;
1425}
1426
1427
1428
1429/**
1430 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
1431 *
1432 * @dev_priv: Pointer to a device private struct.
1433 * @sw_context: The software context used for this command submission.
1434 * @header: Pointer to the command header in the command stream.
1435 *
1436 * The query bind operation will eventually associate the query ID
1437 * with its backing MOB.  In this function, we take the user mode
1438 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
1439 * kernel mode equivalent.
1440 */
1441static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1442				 struct vmw_sw_context *sw_context,
1443				 SVGA3dCmdHeader *header)
1444{
1445	struct vmw_dx_bind_query_cmd {
1446		SVGA3dCmdHeader header;
1447		SVGA3dCmdDXBindQuery q;
1448	} *cmd;
1449
1450	struct vmw_dma_buffer *vmw_bo;
1451	int    ret;
1452
1453
1454	cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
1455
1456	/*
1457	 * Look up the buffer pointed to by q.mobid, put it on the relocation
1458	 * list so its kernel mode MOB ID can be filled in later
1459	 */
1460	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
1461				    &vmw_bo);
1462
1463	if (ret != 0)
1464		return ret;
1465
1466	sw_context->dx_query_mob = vmw_bo;
1467	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
1468
1469	vmw_dmabuf_unreference(&vmw_bo);
1470
1471	return ret;
1472}
1473
1474
1475
1476/**
 1477 * vmw_cmd_begin_gb_query - validate an SVGA_3D_CMD_BEGIN_GB_QUERY command.
1478 *
1479 * @dev_priv: Pointer to a device private struct.
1480 * @sw_context: The software context used for this command submission.
1481 * @header: Pointer to the command header in the command stream.
1482 */
1483static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1484				  struct vmw_sw_context *sw_context,
1485				  SVGA3dCmdHeader *header)
1486{
1487	struct vmw_begin_gb_query_cmd {
1488		SVGA3dCmdHeader header;
1489		SVGA3dCmdBeginGBQuery q;
1490	} *cmd;
1491
1492	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
1493			   header);
1494
1495	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1496				 user_context_converter, &cmd->q.cid,
1497				 NULL);
1498}
1499
1500/**
 1501 * vmw_cmd_begin_query - validate an SVGA_3D_CMD_BEGIN_QUERY command.
1502 *
1503 * @dev_priv: Pointer to a device private struct.
1504 * @sw_context: The software context used for this command submission.
1505 * @header: Pointer to the command header in the command stream.
1506 */
1507static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1508			       struct vmw_sw_context *sw_context,
1509			       SVGA3dCmdHeader *header)
1510{
1511	struct vmw_begin_query_cmd {
1512		SVGA3dCmdHeader header;
1513		SVGA3dCmdBeginQuery q;
1514	} *cmd;
1515
1516	cmd = container_of(header, struct vmw_begin_query_cmd,
1517			   header);
1518
1519	if (unlikely(dev_priv->has_mob)) {
1520		struct {
1521			SVGA3dCmdHeader header;
1522			SVGA3dCmdBeginGBQuery q;
1523		} gb_cmd;
1524
1525		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1526
1527		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1528		gb_cmd.header.size = cmd->header.size;
1529		gb_cmd.q.cid = cmd->q.cid;
1530		gb_cmd.q.type = cmd->q.type;
1531
1532		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1533		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1534	}
1535
1536	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1537				 user_context_converter, &cmd->q.cid,
1538				 NULL);
1539}
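
The in-place rewrite above relies on the legacy and guest-backed command
layouts having identical sizes, which the BUG_ON() asserts at runtime; the
same pattern recurs in the END and WAIT variants below. A compile-time
expression of the same invariant could look like this (a sketch, not in the
original source):

	BUILD_BUG_ON(sizeof(SVGA3dCmdBeginGBQuery) !=
		     sizeof(SVGA3dCmdBeginQuery));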
1540
1541/**
 1542 * vmw_cmd_end_gb_query - validate an SVGA_3D_CMD_END_GB_QUERY command.
1543 *
1544 * @dev_priv: Pointer to a device private struct.
1545 * @sw_context: The software context used for this command submission.
1546 * @header: Pointer to the command header in the command stream.
1547 */
1548static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1549				struct vmw_sw_context *sw_context,
1550				SVGA3dCmdHeader *header)
1551{
1552	struct vmw_dma_buffer *vmw_bo;
1553	struct vmw_query_cmd {
1554		SVGA3dCmdHeader header;
1555		SVGA3dCmdEndGBQuery q;
1556	} *cmd;
1557	int ret;
1558
1559	cmd = container_of(header, struct vmw_query_cmd, header);
1560	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1561	if (unlikely(ret != 0))
1562		return ret;
1563
1564	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1565				    &cmd->q.mobid,
1566				    &vmw_bo);
1567	if (unlikely(ret != 0))
1568		return ret;
1569
1570	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1571
1572	vmw_dmabuf_unreference(&vmw_bo);
1573	return ret;
1574}
1575
1576/**
 1577 * vmw_cmd_end_query - validate an SVGA_3D_CMD_END_QUERY command.
1578 *
1579 * @dev_priv: Pointer to a device private struct.
1580 * @sw_context: The software context used for this command submission.
1581 * @header: Pointer to the command header in the command stream.
1582 */
1583static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1584			     struct vmw_sw_context *sw_context,
1585			     SVGA3dCmdHeader *header)
1586{
1587	struct vmw_dma_buffer *vmw_bo;
1588	struct vmw_query_cmd {
1589		SVGA3dCmdHeader header;
1590		SVGA3dCmdEndQuery q;
1591	} *cmd;
1592	int ret;
1593
1594	cmd = container_of(header, struct vmw_query_cmd, header);
1595	if (dev_priv->has_mob) {
1596		struct {
1597			SVGA3dCmdHeader header;
1598			SVGA3dCmdEndGBQuery q;
1599		} gb_cmd;
1600
1601		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1602
1603		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1604		gb_cmd.header.size = cmd->header.size;
1605		gb_cmd.q.cid = cmd->q.cid;
1606		gb_cmd.q.type = cmd->q.type;
1607		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1608		gb_cmd.q.offset = cmd->q.guestResult.offset;
1609
1610		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1611		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1612	}
1613
1614	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1615	if (unlikely(ret != 0))
1616		return ret;
1617
1618	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1619				      &cmd->q.guestResult,
1620				      &vmw_bo);
1621	if (unlikely(ret != 0))
1622		return ret;
1623
1624	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1625
1626	vmw_dmabuf_unreference(&vmw_bo);
1627	return ret;
1628}
1629
1630/**
 1631 * vmw_cmd_wait_gb_query - validate an SVGA_3D_CMD_WAIT_GB_QUERY command.
1632 *
1633 * @dev_priv: Pointer to a device private struct.
1634 * @sw_context: The software context used for this command submission.
1635 * @header: Pointer to the command header in the command stream.
1636 */
1637static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1638				 struct vmw_sw_context *sw_context,
1639				 SVGA3dCmdHeader *header)
1640{
1641	struct vmw_dma_buffer *vmw_bo;
1642	struct vmw_query_cmd {
1643		SVGA3dCmdHeader header;
1644		SVGA3dCmdWaitForGBQuery q;
1645	} *cmd;
1646	int ret;
1647
1648	cmd = container_of(header, struct vmw_query_cmd, header);
1649	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1650	if (unlikely(ret != 0))
1651		return ret;
1652
1653	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1654				    &cmd->q.mobid,
1655				    &vmw_bo);
1656	if (unlikely(ret != 0))
1657		return ret;
1658
1659	vmw_dmabuf_unreference(&vmw_bo);
1660	return 0;
1661}
1662
1663/**
 1664 * vmw_cmd_wait_query - validate an SVGA_3D_CMD_WAIT_QUERY command.
1665 *
1666 * @dev_priv: Pointer to a device private struct.
1667 * @sw_context: The software context used for this command submission.
1668 * @header: Pointer to the command header in the command stream.
1669 */
1670static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1671			      struct vmw_sw_context *sw_context,
1672			      SVGA3dCmdHeader *header)
1673{
1674	struct vmw_dma_buffer *vmw_bo;
1675	struct vmw_query_cmd {
1676		SVGA3dCmdHeader header;
1677		SVGA3dCmdWaitForQuery q;
1678	} *cmd;
1679	int ret;
1680
1681	cmd = container_of(header, struct vmw_query_cmd, header);
1682	if (dev_priv->has_mob) {
1683		struct {
1684			SVGA3dCmdHeader header;
1685			SVGA3dCmdWaitForGBQuery q;
1686		} gb_cmd;
1687
1688		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1689
1690		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1691		gb_cmd.header.size = cmd->header.size;
1692		gb_cmd.q.cid = cmd->q.cid;
1693		gb_cmd.q.type = cmd->q.type;
1694		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1695		gb_cmd.q.offset = cmd->q.guestResult.offset;
1696
1697		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1698		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1699	}
1700
1701	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1702	if (unlikely(ret != 0))
1703		return ret;
1704
1705	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1706				      &cmd->q.guestResult,
1707				      &vmw_bo);
1708	if (unlikely(ret != 0))
1709		return ret;
1710
1711	vmw_dmabuf_unreference(&vmw_bo);
1712	return 0;
1713}
1714
1715static int vmw_cmd_dma(struct vmw_private *dev_priv,
1716		       struct vmw_sw_context *sw_context,
1717		       SVGA3dCmdHeader *header)
1718{
1719	struct vmw_dma_buffer *vmw_bo = NULL;
1720	struct vmw_surface *srf = NULL;
1721	struct vmw_dma_cmd {
1722		SVGA3dCmdHeader header;
1723		SVGA3dCmdSurfaceDMA dma;
1724	} *cmd;
1725	int ret;
1726	SVGA3dCmdSurfaceDMASuffix *suffix;
1727	uint32_t bo_size;
1728
1729	cmd = container_of(header, struct vmw_dma_cmd, header);
1730	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1731					       header->size - sizeof(*suffix));
1732
 1733	/* Make sure device and verifier stay in sync. */
1734	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1735		DRM_ERROR("Invalid DMA suffix size.\n");
1736		return -EINVAL;
1737	}
1738
1739	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1740				      &cmd->dma.guest.ptr,
1741				      &vmw_bo);
1742	if (unlikely(ret != 0))
1743		return ret;
1744
1745	/* Make sure DMA doesn't cross BO boundaries. */
1746	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1747	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1748		DRM_ERROR("Invalid DMA offset.\n");
1749		return -EINVAL;
1750	}
1751
1752	bo_size -= cmd->dma.guest.ptr.offset;
1753	if (unlikely(suffix->maximumOffset > bo_size))
1754		suffix->maximumOffset = bo_size;
1755
1756	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1757				user_surface_converter, &cmd->dma.host.sid,
1758				NULL);
1759	if (unlikely(ret != 0)) {
1760		if (unlikely(ret != -ERESTARTSYS))
 1761			DRM_ERROR("Could not find surface for DMA.\n");
1762		goto out_no_surface;
1763	}
1764
1765	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1766
1767	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1768			     header);
1769
1770out_no_surface:
1771	vmw_dmabuf_unreference(&vmw_bo);
1772	return ret;
1773}
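
A worked example of the clamping above, assuming a PAGE_SIZE of 4096:

	/*
	 * num_pages = 4                 -> bo_size = 16384
	 * dma.guest.ptr.offset = 4096   -> bo_size -= 4096 -> 12288
	 * suffix->maximumOffset = 20000 -> clamped down to 12288
	 *
	 * so the device is never asked to DMA past the end of the
	 * buffer object.
	 */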
1774
1775static int vmw_cmd_draw(struct vmw_private *dev_priv,
1776			struct vmw_sw_context *sw_context,
1777			SVGA3dCmdHeader *header)
1778{
1779	struct vmw_draw_cmd {
1780		SVGA3dCmdHeader header;
1781		SVGA3dCmdDrawPrimitives body;
1782	} *cmd;
1783	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1784		(unsigned long)header + sizeof(*cmd));
1785	SVGA3dPrimitiveRange *range;
1786	uint32_t i;
1787	uint32_t maxnum;
1788	int ret;
1789
1790	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1791	if (unlikely(ret != 0))
1792		return ret;
1793
1794	cmd = container_of(header, struct vmw_draw_cmd, header);
1795	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1796
1797	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1798		DRM_ERROR("Illegal number of vertex declarations.\n");
1799		return -EINVAL;
1800	}
1801
1802	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1803		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1804					user_surface_converter,
1805					&decl->array.surfaceId, NULL);
1806		if (unlikely(ret != 0))
1807			return ret;
1808	}
1809
1810	maxnum = (header->size - sizeof(cmd->body) -
1811		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1812	if (unlikely(cmd->body.numRanges > maxnum)) {
1813		DRM_ERROR("Illegal number of index ranges.\n");
1814		return -EINVAL;
1815	}
1816
1817	range = (SVGA3dPrimitiveRange *) decl;
1818	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1819		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1820					user_surface_converter,
1821					&range->indexArray.surfaceId, NULL);
1822		if (unlikely(ret != 0))
1823			return ret;
1824	}
1825	return 0;
1826}
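
Schematically, the variable-length stream layout parsed above is:

	/*
	 * [SVGA3dCmdHeader][SVGA3dCmdDrawPrimitives]
	 * [SVGA3dVertexDecl     * numVertexDecls]
	 * [SVGA3dPrimitiveRange * numRanges]
	 *
	 * header->size must cover everything after the header, hence:
	 *   numVertexDecls <= (size - sizeof(body)) / sizeof(*decl)
	 *   numRanges      <= (size - sizeof(body) -
	 *                      numVertexDecls * sizeof(*decl)) / sizeof(*range)
	 */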
1827
1828
1829static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1830			     struct vmw_sw_context *sw_context,
1831			     SVGA3dCmdHeader *header)
1832{
1833	struct vmw_tex_state_cmd {
1834		SVGA3dCmdHeader header;
1835		SVGA3dCmdSetTextureState state;
1836	} *cmd;
1837
1838	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1839	  ((unsigned long) header + header->size + sizeof(header));
1840	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1841		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1842	struct vmw_resource_val_node *ctx_node;
1843	struct vmw_resource_val_node *res_node;
1844	int ret;
1845
1846	cmd = container_of(header, struct vmw_tex_state_cmd,
1847			   header);
1848
1849	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1850				user_context_converter, &cmd->state.cid,
1851				&ctx_node);
1852	if (unlikely(ret != 0))
1853		return ret;
1854
1855	for (; cur_state < last_state; ++cur_state) {
1856		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1857			continue;
1858
1859		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1860			DRM_ERROR("Illegal texture/sampler unit %u.\n",
1861				  (unsigned) cur_state->stage);
1862			return -EINVAL;
1863		}
1864
1865		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1866					user_surface_converter,
1867					&cur_state->value, &res_node);
1868		if (unlikely(ret != 0))
1869			return ret;
1870
1871		if (dev_priv->has_mob) {
1872			struct vmw_ctx_bindinfo_tex binding;
1873
1874			binding.bi.ctx = ctx_node->res;
1875			binding.bi.res = res_node ? res_node->res : NULL;
1876			binding.bi.bt = vmw_ctx_binding_tex;
1877			binding.texture_stage = cur_state->stage;
1878			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
1879					0, binding.texture_stage);
1880		}
1881	}
1882
1883	return 0;
1884}
1885
1886static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1887				      struct vmw_sw_context *sw_context,
1888				      void *buf)
1889{
1890	struct vmw_dma_buffer *vmw_bo;
1891	int ret;
1892
1893	struct {
1894		uint32_t header;
1895		SVGAFifoCmdDefineGMRFB body;
1896	} *cmd = buf;
1897
1898	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1899				      &cmd->body.ptr,
1900				      &vmw_bo);
1901	if (unlikely(ret != 0))
1902		return ret;
1903
1904	vmw_dmabuf_unreference(&vmw_bo);
1905
1906	return ret;
1907}
1908
1909
1910/**
1911 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1912 * switching
1913 *
1914 * @dev_priv: Pointer to a device private struct.
1915 * @sw_context: The software context being used for this batch.
1916 * @val_node: The validation node representing the resource.
1917 * @buf_id: Pointer to the user-space backup buffer handle in the command
1918 * stream.
1919 * @backup_offset: Offset of backup into MOB.
1920 *
1921 * This function prepares for registering a switch of backup buffers
 1922 * in the resource metadata just prior to unreserving.
1924 */
1925static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1926				     struct vmw_sw_context *sw_context,
1927				     struct vmw_resource_val_node *val_node,
1928				     uint32_t *buf_id,
1929				     unsigned long backup_offset)
1930{
1931	struct vmw_dma_buffer *dma_buf;
1932	int ret;
1933
1934	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1935	if (ret)
1936		return ret;
1937
1938	val_node->switching_backup = true;
1939	if (val_node->first_usage)
1940		val_node->no_buffer_needed = true;
1941
1942	vmw_dmabuf_unreference(&val_node->new_backup);
1943	val_node->new_backup = dma_buf;
1944	val_node->new_backup_offset = backup_offset;
1945
1946	return 0;
1947}
1948
1949
1950/**
1951 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1952 *
1953 * @dev_priv: Pointer to a device private struct.
1954 * @sw_context: The software context being used for this batch.
1955 * @res_type: The resource type.
1956 * @converter: Information about user-space binding for this resource type.
1957 * @res_id: Pointer to the user-space resource handle in the command stream.
1958 * @buf_id: Pointer to the user-space backup buffer handle in the command
1959 * stream.
1960 * @backup_offset: Offset of backup into MOB.
1961 *
1962 * This function prepares for registering a switch of backup buffers
1963 * in the resource metadata just prior to unreserving. It's basically a wrapper
1964 * around vmw_cmd_res_switch_backup with a different interface.
1965 */
1966static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1967				 struct vmw_sw_context *sw_context,
1968				 enum vmw_res_type res_type,
1969				 const struct vmw_user_resource_conv
1970				 *converter,
1971				 uint32_t *res_id,
1972				 uint32_t *buf_id,
1973				 unsigned long backup_offset)
1974{
1975	struct vmw_resource_val_node *val_node;
1976	int ret;
1977
1978	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1979				converter, res_id, &val_node);
1980	if (ret)
1981		return ret;
1982
1983	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
1984					 buf_id, backup_offset);
1985}
1986
1987/**
1988 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1989 * command
1990 *
1991 * @dev_priv: Pointer to a device private struct.
1992 * @sw_context: The software context being used for this batch.
1993 * @header: Pointer to the command header in the command stream.
1994 */
1995static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1996				   struct vmw_sw_context *sw_context,
1997				   SVGA3dCmdHeader *header)
1998{
1999	struct vmw_bind_gb_surface_cmd {
2000		SVGA3dCmdHeader header;
2001		SVGA3dCmdBindGBSurface body;
2002	} *cmd;
2003
2004	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
2005
2006	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
2007				     user_surface_converter,
2008				     &cmd->body.sid, &cmd->body.mobid,
2009				     0);
2010}
2011
2012/**
2013 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
2014 * command
2015 *
2016 * @dev_priv: Pointer to a device private struct.
2017 * @sw_context: The software context being used for this batch.
2018 * @header: Pointer to the command header in the command stream.
2019 */
2020static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
2021				   struct vmw_sw_context *sw_context,
2022				   SVGA3dCmdHeader *header)
2023{
2024	struct vmw_gb_surface_cmd {
2025		SVGA3dCmdHeader header;
2026		SVGA3dCmdUpdateGBImage body;
2027	} *cmd;
2028
2029	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2030
2031	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2032				 user_surface_converter,
2033				 &cmd->body.image.sid, NULL);
2034}
2035
2036/**
2037 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
2038 * command
2039 *
2040 * @dev_priv: Pointer to a device private struct.
2041 * @sw_context: The software context being used for this batch.
2042 * @header: Pointer to the command header in the command stream.
2043 */
2044static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
2045				     struct vmw_sw_context *sw_context,
2046				     SVGA3dCmdHeader *header)
2047{
2048	struct vmw_gb_surface_cmd {
2049		SVGA3dCmdHeader header;
2050		SVGA3dCmdUpdateGBSurface body;
2051	} *cmd;
2052
2053	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2054
2055	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2056				 user_surface_converter,
2057				 &cmd->body.sid, NULL);
2058}
2059
2060/**
2061 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
2062 * command
2063 *
2064 * @dev_priv: Pointer to a device private struct.
2065 * @sw_context: The software context being used for this batch.
2066 * @header: Pointer to the command header in the command stream.
2067 */
2068static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
2069				     struct vmw_sw_context *sw_context,
2070				     SVGA3dCmdHeader *header)
2071{
2072	struct vmw_gb_surface_cmd {
2073		SVGA3dCmdHeader header;
2074		SVGA3dCmdReadbackGBImage body;
2075	} *cmd;
2076
2077	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2078
2079	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2080				 user_surface_converter,
2081				 &cmd->body.image.sid, NULL);
2082}
2083
2084/**
2085 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
2086 * command
2087 *
2088 * @dev_priv: Pointer to a device private struct.
2089 * @sw_context: The software context being used for this batch.
2090 * @header: Pointer to the command header in the command stream.
2091 */
2092static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
2093				       struct vmw_sw_context *sw_context,
2094				       SVGA3dCmdHeader *header)
2095{
2096	struct vmw_gb_surface_cmd {
2097		SVGA3dCmdHeader header;
2098		SVGA3dCmdReadbackGBSurface body;
2099	} *cmd;
2100
2101	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2102
2103	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2104				 user_surface_converter,
2105				 &cmd->body.sid, NULL);
2106}
2107
2108/**
2109 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
2110 * command
2111 *
2112 * @dev_priv: Pointer to a device private struct.
2113 * @sw_context: The software context being used for this batch.
2114 * @header: Pointer to the command header in the command stream.
2115 */
2116static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
2117				       struct vmw_sw_context *sw_context,
2118				       SVGA3dCmdHeader *header)
2119{
2120	struct vmw_gb_surface_cmd {
2121		SVGA3dCmdHeader header;
2122		SVGA3dCmdInvalidateGBImage body;
2123	} *cmd;
2124
2125	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2126
2127	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2128				 user_surface_converter,
2129				 &cmd->body.image.sid, NULL);
2130}
2131
2132/**
2133 * vmw_cmd_invalidate_gb_surface - Validate an
2134 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
2135 *
2136 * @dev_priv: Pointer to a device private struct.
2137 * @sw_context: The software context being used for this batch.
2138 * @header: Pointer to the command header in the command stream.
2139 */
2140static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
2141					 struct vmw_sw_context *sw_context,
2142					 SVGA3dCmdHeader *header)
2143{
2144	struct vmw_gb_surface_cmd {
2145		SVGA3dCmdHeader header;
2146		SVGA3dCmdInvalidateGBSurface body;
2147	} *cmd;
2148
2149	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2150
2151	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2152				 user_surface_converter,
2153				 &cmd->body.sid, NULL);
2154}
2155
2156
2157/**
2158 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
2159 * command
2160 *
2161 * @dev_priv: Pointer to a device private struct.
2162 * @sw_context: The software context being used for this batch.
2163 * @header: Pointer to the command header in the command stream.
2164 */
2165static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2166				 struct vmw_sw_context *sw_context,
2167				 SVGA3dCmdHeader *header)
2168{
2169	struct vmw_shader_define_cmd {
2170		SVGA3dCmdHeader header;
2171		SVGA3dCmdDefineShader body;
2172	} *cmd;
2173	int ret;
2174	size_t size;
2175	struct vmw_resource_val_node *val;
2176
2177	cmd = container_of(header, struct vmw_shader_define_cmd,
2178			   header);
2179
2180	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2181				user_context_converter, &cmd->body.cid,
2182				&val);
2183	if (unlikely(ret != 0))
2184		return ret;
2185
2186	if (unlikely(!dev_priv->has_mob))
2187		return 0;
2188
2189	size = cmd->header.size - sizeof(cmd->body);
2190	ret = vmw_compat_shader_add(dev_priv,
2191				    vmw_context_res_man(val->res),
2192				    cmd->body.shid, cmd + 1,
2193				    cmd->body.type, size,
2194				    &sw_context->staged_cmd_res);
2195	if (unlikely(ret != 0))
2196		return ret;
2197
2198	return vmw_resource_relocation_add(&sw_context->res_relocations,
2199					   NULL,
2200					   vmw_ptr_diff(sw_context->buf_start,
2201							&cmd->header.id),
2202					   vmw_res_rel_nop);
2203}
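
The closing relocation above is what retires the inline define; a note on the
mechanism (as far as can be told from this file, the details live in
vmw_apply_relocations()):

/*
 * A vmw_res_rel_nop relocation records the offset of the command id
 * (here &cmd->header.id) so that the relocation pass can turn the
 * command into a device no-op: its payload has already been absorbed
 * into the per-context managed shader store by vmw_compat_shader_add().
 * vmw_cmd_shader_destroy() below retires its command the same way.
 */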
2204
2205/**
2206 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
2207 * command
2208 *
2209 * @dev_priv: Pointer to a device private struct.
2210 * @sw_context: The software context being used for this batch.
2211 * @header: Pointer to the command header in the command stream.
2212 */
2213static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2214				  struct vmw_sw_context *sw_context,
2215				  SVGA3dCmdHeader *header)
2216{
2217	struct vmw_shader_destroy_cmd {
2218		SVGA3dCmdHeader header;
2219		SVGA3dCmdDestroyShader body;
2220	} *cmd;
2221	int ret;
2222	struct vmw_resource_val_node *val;
2223
2224	cmd = container_of(header, struct vmw_shader_destroy_cmd,
2225			   header);
2226
2227	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2228				user_context_converter, &cmd->body.cid,
2229				&val);
2230	if (unlikely(ret != 0))
2231		return ret;
2232
2233	if (unlikely(!dev_priv->has_mob))
2234		return 0;
2235
2236	ret = vmw_shader_remove(vmw_context_res_man(val->res),
2237				cmd->body.shid,
2238				cmd->body.type,
2239				&sw_context->staged_cmd_res);
2240	if (unlikely(ret != 0))
2241		return ret;
2242
2243	return vmw_resource_relocation_add(&sw_context->res_relocations,
2244					   NULL,
2245					   vmw_ptr_diff(sw_context->buf_start,
2246							&cmd->header.id),
2247					   vmw_res_rel_nop);
2248}
2249
2250/**
2251 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
2252 * command
2253 *
2254 * @dev_priv: Pointer to a device private struct.
2255 * @sw_context: The software context being used for this batch.
2256 * @header: Pointer to the command header in the command stream.
2257 */
2258static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
2259			      struct vmw_sw_context *sw_context,
2260			      SVGA3dCmdHeader *header)
2261{
2262	struct vmw_set_shader_cmd {
2263		SVGA3dCmdHeader header;
2264		SVGA3dCmdSetShader body;
2265	} *cmd;
2266	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
2267	struct vmw_ctx_bindinfo_shader binding;
2268	struct vmw_resource *res = NULL;
2269	int ret;
2270
2271	cmd = container_of(header, struct vmw_set_shader_cmd,
2272			   header);
2273
2274	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2275		DRM_ERROR("Illegal shader type %u.\n",
2276			  (unsigned) cmd->body.type);
2277		return -EINVAL;
2278	}
2279
2280	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2281				user_context_converter, &cmd->body.cid,
2282				&ctx_node);
2283	if (unlikely(ret != 0))
2284		return ret;
2285
2286	if (!dev_priv->has_mob)
2287		return 0;
2288
2289	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2290		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2291					cmd->body.shid,
2292					cmd->body.type);
2293
2294		if (!IS_ERR(res)) {
2295			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
2296						    &cmd->body.shid, res,
2297						    &res_node);
2298			vmw_resource_unreference(&res);
2299			if (unlikely(ret != 0))
2300				return ret;
2301		}
2302	}
2303
2304	if (!res_node) {
2305		ret = vmw_cmd_res_check(dev_priv, sw_context,
2306					vmw_res_shader,
2307					user_shader_converter,
2308					&cmd->body.shid, &res_node);
2309		if (unlikely(ret != 0))
2310			return ret;
2311	}
2312
2313	binding.bi.ctx = ctx_node->res;
2314	binding.bi.res = res_node ? res_node->res : NULL;
2315	binding.bi.bt = vmw_ctx_binding_shader;
2316	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2317	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2318			binding.shader_slot, 0);
2319	return 0;
2320}
2321
2322/**
2323 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2324 * command
2325 *
2326 * @dev_priv: Pointer to a device private struct.
2327 * @sw_context: The software context being used for this batch.
2328 * @header: Pointer to the command header in the command stream.
2329 */
2330static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2331				    struct vmw_sw_context *sw_context,
2332				    SVGA3dCmdHeader *header)
2333{
2334	struct vmw_set_shader_const_cmd {
2335		SVGA3dCmdHeader header;
2336		SVGA3dCmdSetShaderConst body;
2337	} *cmd;
2338	int ret;
2339
2340	cmd = container_of(header, struct vmw_set_shader_const_cmd,
2341			   header);
2342
2343	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2344				user_context_converter, &cmd->body.cid,
2345				NULL);
2346	if (unlikely(ret != 0))
2347		return ret;
2348
2349	if (dev_priv->has_mob)
2350		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2351
2352	return 0;
2353}
2354
2355/**
2356 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2357 * command
2358 *
2359 * @dev_priv: Pointer to a device private struct.
2360 * @sw_context: The software context being used for this batch.
2361 * @header: Pointer to the command header in the command stream.
2362 */
2363static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2364				  struct vmw_sw_context *sw_context,
2365				  SVGA3dCmdHeader *header)
2366{
2367	struct vmw_bind_gb_shader_cmd {
2368		SVGA3dCmdHeader header;
2369		SVGA3dCmdBindGBShader body;
2370	} *cmd;
2371
2372	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2373			   header);
2374
2375	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2376				     user_shader_converter,
2377				     &cmd->body.shid, &cmd->body.mobid,
2378				     cmd->body.offsetInBytes);
2379}
2380
2381/**
2382 * vmw_cmd_dx_set_single_constant_buffer - Validate an
2383 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2384 *
2385 * @dev_priv: Pointer to a device private struct.
2386 * @sw_context: The software context being used for this batch.
2387 * @header: Pointer to the command header in the command stream.
2388 */
2389static int
2390vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2391				      struct vmw_sw_context *sw_context,
2392				      SVGA3dCmdHeader *header)
2393{
2394	struct {
2395		SVGA3dCmdHeader header;
2396		SVGA3dCmdDXSetSingleConstantBuffer body;
2397	} *cmd;
2398	struct vmw_resource_val_node *res_node = NULL;
2399	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2400	struct vmw_ctx_bindinfo_cb binding;
2401	int ret;
2402
2403	if (unlikely(ctx_node == NULL)) {
2404		DRM_ERROR("DX Context not set.\n");
2405		return -EINVAL;
2406	}
2407
2408	cmd = container_of(header, typeof(*cmd), header);
2409	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2410				user_surface_converter,
2411				&cmd->body.sid, &res_node);
2412	if (unlikely(ret != 0))
2413		return ret;
2414
2415	binding.bi.ctx = ctx_node->res;
2416	binding.bi.res = res_node ? res_node->res : NULL;
2417	binding.bi.bt = vmw_ctx_binding_cb;
2418	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2419	binding.offset = cmd->body.offsetInBytes;
2420	binding.size = cmd->body.sizeInBytes;
2421	binding.slot = cmd->body.slot;
2422
2423	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2424	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2425		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2426			  (unsigned) cmd->body.type,
2427			  (unsigned) binding.slot);
2428		return -EINVAL;
2429	}
2430
2431	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2432			binding.shader_slot, binding.slot);
2433
2434	return 0;
2435}
2436
2437/**
2438 * vmw_cmd_dx_set_shader_res - Validate an
2439 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2440 *
2441 * @dev_priv: Pointer to a device private struct.
2442 * @sw_context: The software context being used for this batch.
2443 * @header: Pointer to the command header in the command stream.
2444 */
2445static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2446				     struct vmw_sw_context *sw_context,
2447				     SVGA3dCmdHeader *header)
2448{
2449	struct {
2450		SVGA3dCmdHeader header;
2451		SVGA3dCmdDXSetShaderResources body;
2452	} *cmd = container_of(header, typeof(*cmd), header);
2453	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2454		sizeof(SVGA3dShaderResourceViewId);
2455
2456	if ((u64) cmd->body.startView + (u64) num_sr_view >
2457	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2458	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2459		DRM_ERROR("Invalid shader binding.\n");
2460		return -EINVAL;
2461	}
2462
2463	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2464				     vmw_ctx_binding_sr,
2465				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2466				     (void *) &cmd[1], num_sr_view,
2467				     cmd->body.startView);
2468}
2469
2470/**
2471 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2472 * command
2473 *
2474 * @dev_priv: Pointer to a device private struct.
2475 * @sw_context: The software context being used for this batch.
2476 * @header: Pointer to the command header in the command stream.
2477 */
2478static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2479				 struct vmw_sw_context *sw_context,
2480				 SVGA3dCmdHeader *header)
2481{
2482	struct {
2483		SVGA3dCmdHeader header;
2484		SVGA3dCmdDXSetShader body;
2485	} *cmd;
2486	struct vmw_resource *res = NULL;
2487	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2488	struct vmw_ctx_bindinfo_shader binding;
2489	int ret = 0;
2490
2491	if (unlikely(ctx_node == NULL)) {
2492		DRM_ERROR("DX Context not set.\n");
2493		return -EINVAL;
2494	}
2495
2496	cmd = container_of(header, typeof(*cmd), header);
2497
2498	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2499		DRM_ERROR("Illegal shader type %u.\n",
2500			  (unsigned) cmd->body.type);
2501		return -EINVAL;
2502	}
2503
2504	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2505		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2506		if (IS_ERR(res)) {
2507			DRM_ERROR("Could not find shader for binding.\n");
2508			return PTR_ERR(res);
2509		}
2510
2511		ret = vmw_resource_val_add(sw_context, res, NULL);
2512		if (ret)
2513			goto out_unref;
2514	}
2515
2516	binding.bi.ctx = ctx_node->res;
2517	binding.bi.res = res;
2518	binding.bi.bt = vmw_ctx_binding_dx_shader;
2519	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2520
2521	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2522			binding.shader_slot, 0);
2523out_unref:
2524	if (res)
2525		vmw_resource_unreference(&res);
2526
2527	return ret;
2528}
2529
2530/**
2531 * vmw_cmd_dx_set_vertex_buffers - Validates an
2532 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2533 *
2534 * @dev_priv: Pointer to a device private struct.
2535 * @sw_context: The software context being used for this batch.
2536 * @header: Pointer to the command header in the command stream.
2537 */
2538static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2539					 struct vmw_sw_context *sw_context,
2540					 SVGA3dCmdHeader *header)
2541{
2542	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2543	struct vmw_ctx_bindinfo_vb binding;
2544	struct vmw_resource_val_node *res_node;
2545	struct {
2546		SVGA3dCmdHeader header;
2547		SVGA3dCmdDXSetVertexBuffers body;
2548		SVGA3dVertexBuffer buf[];
2549	} *cmd;
2550	int i, ret, num;
2551
2552	if (unlikely(ctx_node == NULL)) {
2553		DRM_ERROR("DX Context not set.\n");
2554		return -EINVAL;
2555	}
2556
2557	cmd = container_of(header, typeof(*cmd), header);
2558	num = (cmd->header.size - sizeof(cmd->body)) /
2559		sizeof(SVGA3dVertexBuffer);
2560	if ((u64)num + (u64)cmd->body.startBuffer >
2561	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2562		DRM_ERROR("Invalid number of vertex buffers.\n");
2563		return -EINVAL;
2564	}
2565
2566	for (i = 0; i < num; i++) {
2567		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2568					user_surface_converter,
2569					&cmd->buf[i].sid, &res_node);
2570		if (unlikely(ret != 0))
2571			return ret;
2572
2573		binding.bi.ctx = ctx_node->res;
2574		binding.bi.bt = vmw_ctx_binding_vb;
2575		binding.bi.res = ((res_node) ? res_node->res : NULL);
2576		binding.offset = cmd->buf[i].offset;
2577		binding.stride = cmd->buf[i].stride;
2578		binding.slot = i + cmd->body.startBuffer;
2579
2580		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2581				0, binding.slot);
2582	}
2583
2584	return 0;
2585}
2586
2587/**
 2588 * vmw_cmd_dx_set_index_buffer - Validate an
 2589 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2590 *
2591 * @dev_priv: Pointer to a device private struct.
2592 * @sw_context: The software context being used for this batch.
2593 * @header: Pointer to the command header in the command stream.
2594 */
2595static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2596				       struct vmw_sw_context *sw_context,
2597				       SVGA3dCmdHeader *header)
2598{
2599	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2600	struct vmw_ctx_bindinfo_ib binding;
2601	struct vmw_resource_val_node *res_node;
2602	struct {
2603		SVGA3dCmdHeader header;
2604		SVGA3dCmdDXSetIndexBuffer body;
2605	} *cmd;
2606	int ret;
2607
2608	if (unlikely(ctx_node == NULL)) {
2609		DRM_ERROR("DX Context not set.\n");
2610		return -EINVAL;
2611	}
2612
2613	cmd = container_of(header, typeof(*cmd), header);
2614	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2615				user_surface_converter,
2616				&cmd->body.sid, &res_node);
2617	if (unlikely(ret != 0))
2618		return ret;
2619
2620	binding.bi.ctx = ctx_node->res;
2621	binding.bi.res = ((res_node) ? res_node->res : NULL);
2622	binding.bi.bt = vmw_ctx_binding_ib;
2623	binding.offset = cmd->body.offset;
2624	binding.format = cmd->body.format;
2625
2626	vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2627
2628	return 0;
2629}
2630
2631/**
2632 * vmw_cmd_dx_set_rendertargets - Validate an
2633 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2634 *
2635 * @dev_priv: Pointer to a device private struct.
2636 * @sw_context: The software context being used for this batch.
2637 * @header: Pointer to the command header in the command stream.
2638 */
2639static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2640					struct vmw_sw_context *sw_context,
2641					SVGA3dCmdHeader *header)
2642{
2643	struct {
2644		SVGA3dCmdHeader header;
2645		SVGA3dCmdDXSetRenderTargets body;
2646	} *cmd = container_of(header, typeof(*cmd), header);
2647	int ret;
2648	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2649		sizeof(SVGA3dRenderTargetViewId);
2650
2651	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2652		DRM_ERROR("Invalid DX Rendertarget binding.\n");
2653		return -EINVAL;
2654	}
2655
2656	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2657				    vmw_ctx_binding_ds, 0,
2658				    &cmd->body.depthStencilViewId, 1, 0);
2659	if (ret)
2660		return ret;
2661
2662	return vmw_view_bindings_add(sw_context, vmw_view_rt,
2663				     vmw_ctx_binding_dx_rt, 0,
2664				     (void *)&cmd[1], num_rt_view, 0);
2665}
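/*
 * SVGA_3D_CMD_DX_SET_RENDERTARGETS is variable-length: the render-
 * target view ids are not members of SVGA3dCmdDXSetRenderTargets but
 * trail it directly in the stream, which is why the count is derived
 * from header.size and the id array is addressed as (void *)&cmd[1].
 * Roughly:
 *
 *	| header | body (depthStencilViewId) | rtViewId[0] .. rtViewId[n-1] |
 *	                                       ^-- &cmd[1]
 */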
2666
2667/**
2668 * vmw_cmd_dx_clear_rendertarget_view - Validate an
2669 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2670 *
2671 * @dev_priv: Pointer to a device private struct.
2672 * @sw_context: The software context being used for this batch.
2673 * @header: Pointer to the command header in the command stream.
2674 */
2675static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2676					      struct vmw_sw_context *sw_context,
2677					      SVGA3dCmdHeader *header)
2678{
2679	struct {
2680		SVGA3dCmdHeader header;
2681		SVGA3dCmdDXClearRenderTargetView body;
2682	} *cmd = container_of(header, typeof(*cmd), header);
2683
2684	return vmw_view_id_val_add(sw_context, vmw_view_rt,
2685				   cmd->body.renderTargetViewId);
2686}
2687
2688/**
2689 * vmw_cmd_dx_clear_depthstencil_view - Validate an
2690 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2691 *
2692 * @dev_priv: Pointer to a device private struct.
2693 * @sw_context: The software context being used for this batch.
2694 * @header: Pointer to the command header in the command stream.
2695 */
2696static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2697					      struct vmw_sw_context *sw_context,
2698					      SVGA3dCmdHeader *header)
2699{
2700	struct {
2701		SVGA3dCmdHeader header;
2702		SVGA3dCmdDXClearDepthStencilView body;
2703	} *cmd = container_of(header, typeof(*cmd), header);
2704
2705	return vmw_view_id_val_add(sw_context, vmw_view_ds,
2706				   cmd->body.depthStencilViewId);
2707}
2708
2709static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2710				  struct vmw_sw_context *sw_context,
2711				  SVGA3dCmdHeader *header)
2712{
2713	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2714	struct vmw_resource_val_node *srf_node;
2715	struct vmw_resource *res;
2716	enum vmw_view_type view_type;
2717	int ret;
2718	/*
2719	 * This is based on the fact that all affected define commands have
2720	 * the same initial command body layout.
2721	 */
2722	struct {
2723		SVGA3dCmdHeader header;
2724		uint32 defined_id;
2725		uint32 sid;
2726	} *cmd;
2727
2728	if (unlikely(ctx_node == NULL)) {
2729		DRM_ERROR("DX Context not set.\n");
2730		return -EINVAL;
2731	}
2732
2733	view_type = vmw_view_cmd_to_type(header->id);
2734	if (view_type == vmw_view_max)
2735		return -EINVAL;
2736	cmd = container_of(header, typeof(*cmd), header);
2737	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2738				user_surface_converter,
2739				&cmd->sid, &srf_node);
2740	if (unlikely(ret != 0))
2741		return ret;
2742
2743	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2744	ret = vmw_cotable_notify(res, cmd->defined_id);
2745	vmw_resource_unreference(&res);
2746	if (unlikely(ret != 0))
2747		return ret;
2748
2749	return vmw_view_add(sw_context->man,
2750			    ctx_node->res,
2751			    srf_node->res,
2752			    view_type,
2753			    cmd->defined_id,
2754			    header,
2755			    header->size + sizeof(*header),
2756			    &sw_context->staged_cmd_res);
2757}
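/*
 * A sketch of the command prefix the anonymous struct above relies on,
 * assuming the layouts in the device headers: every affected view
 * define command starts with
 *
 *	uint32 <view>Id;	// matched by defined_id
 *	uint32 sid;		// the backing surface
 *	...			// type-specific tail, not needed here
 *
 * so a single handler can validate the surface and notify the right
 * cotable for shader-resource, render-target and depth-stencil views
 * alike; vmw_view_cmd_to_type() recovers the view type from the
 * command id.
 */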
2758
2759/**
2760 * vmw_cmd_dx_set_so_targets - Validate an
2761 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2762 *
2763 * @dev_priv: Pointer to a device private struct.
2764 * @sw_context: The software context being used for this batch.
2765 * @header: Pointer to the command header in the command stream.
2766 */
2767static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2768				     struct vmw_sw_context *sw_context,
2769				     SVGA3dCmdHeader *header)
2770{
2771	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2772	struct vmw_ctx_bindinfo_so binding;
2773	struct vmw_resource_val_node *res_node;
2774	struct {
2775		SVGA3dCmdHeader header;
2776		SVGA3dCmdDXSetSOTargets body;
2777		SVGA3dSoTarget targets[];
2778	} *cmd;
2779	int i, ret, num;
2780
2781	if (unlikely(ctx_node == NULL)) {
2782		DRM_ERROR("DX Context not set.\n");
2783		return -EINVAL;
2784	}
2785
2786	cmd = container_of(header, typeof(*cmd), header);
2787	num = (cmd->header.size - sizeof(cmd->body)) /
2788		sizeof(SVGA3dSoTarget);
2789
2790	if (num > SVGA3D_DX_MAX_SOTARGETS) {
2791		DRM_ERROR("Invalid DX SO binding.\n");
2792		return -EINVAL;
2793	}
2794
2795	for (i = 0; i < num; i++) {
2796		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2797					user_surface_converter,
2798					&cmd->targets[i].sid, &res_node);
2799		if (unlikely(ret != 0))
2800			return ret;
2801
2802		binding.bi.ctx = ctx_node->res;
2803		binding.bi.res = ((res_node) ? res_node->res : NULL);
2804		binding.bi.bt = vmw_ctx_binding_so;
2805		binding.offset = cmd->targets[i].offset;
2806		binding.size = cmd->targets[i].sizeInBytes;
2807		binding.slot = i;
2808
2809		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2810				0, binding.slot);
2811	}
2812
2813	return 0;
2814}
2815
2816static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2817				struct vmw_sw_context *sw_context,
2818				SVGA3dCmdHeader *header)
2819{
2820	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2821	struct vmw_resource *res;
2822	/*
2823	 * This is based on the fact that all affected define commands have
2824	 * the same initial command body layout.
2825	 */
2826	struct {
2827		SVGA3dCmdHeader header;
2828		uint32 defined_id;
2829	} *cmd;
2830	enum vmw_so_type so_type;
2831	int ret;
2832
2833	if (unlikely(ctx_node == NULL)) {
2834		DRM_ERROR("DX Context not set.\n");
2835		return -EINVAL;
2836	}
2837
2838	so_type = vmw_so_cmd_to_type(header->id);
2839	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2840	cmd = container_of(header, typeof(*cmd), header);
2841	ret = vmw_cotable_notify(res, cmd->defined_id);
2842	vmw_resource_unreference(&res);
2843
2844	return ret;
2845}
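/*
 * As in the view define path above, a state-object define only has to
 * guarantee that the context's cotable can hold the new id:
 * vmw_cotable_notify() grows the backing table if defined_id lies
 * beyond its current capacity, before the device ever parses the
 * command.
 */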
2846
2847/**
2848 * vmw_cmd_dx_check_subresource - Validate an
2849 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2850 *
2851 * @dev_priv: Pointer to a device private struct.
2852 * @sw_context: The software context being used for this batch.
2853 * @header: Pointer to the command header in the command stream.
2854 */
2855static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2856					struct vmw_sw_context *sw_context,
2857					SVGA3dCmdHeader *header)
2858{
2859	struct {
2860		SVGA3dCmdHeader header;
2861		union {
2862			SVGA3dCmdDXReadbackSubResource r_body;
2863			SVGA3dCmdDXInvalidateSubResource i_body;
2864			SVGA3dCmdDXUpdateSubResource u_body;
2865			SVGA3dSurfaceId sid;
2866		};
2867	} *cmd;
2868
2869	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2870		     offsetof(typeof(*cmd), sid));
2871	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2872		     offsetof(typeof(*cmd), sid));
2873	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2874		     offsetof(typeof(*cmd), sid));
2875
2876	cmd = container_of(header, typeof(*cmd), header);
2877
2878	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2879				 user_surface_converter,
2880				 &cmd->sid, NULL);
2881}
2882
2883static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2884				struct vmw_sw_context *sw_context,
2885				SVGA3dCmdHeader *header)
2886{
2887	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2888
2889	if (unlikely(ctx_node == NULL)) {
2890		DRM_ERROR("DX Context not set.\n");
2891		return -EINVAL;
2892	}
2893
2894	return 0;
2895}
2896
2897/**
2898 * vmw_cmd_dx_view_remove - validate a view remove command and
2899 * schedule the view resource for removal.
2900 *
2901 * @dev_priv: Pointer to a device private struct.
2902 * @sw_context: The software context being used for this batch.
2903 * @header: Pointer to the command header in the command stream.
2904 *
2905 * Check that the view exists, and if it was not created using this
2906 * command batch, conditionally make this command a NOP.
2907 */
2908static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2909				  struct vmw_sw_context *sw_context,
2910				  SVGA3dCmdHeader *header)
2911{
2912	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2913	struct {
2914		SVGA3dCmdHeader header;
2915		union vmw_view_destroy body;
2916	} *cmd = container_of(header, typeof(*cmd), header);
2917	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2918	struct vmw_resource *view;
2919	int ret;
2920
2921	if (!ctx_node) {
2922		DRM_ERROR("DX Context not set.\n");
2923		return -EINVAL;
2924	}
2925
2926	ret = vmw_view_remove(sw_context->man,
2927			      cmd->body.view_id, view_type,
2928			      &sw_context->staged_cmd_res,
2929			      &view);
2930	if (ret || !view)
2931		return ret;
2932
2933	/*
2934	 * If the view wasn't created during this command batch, it might
2935	 * have been removed due to a context swapout, so add a
2936	 * relocation to conditionally make this command a NOP to avoid
2937	 * device errors.
2938	 */
2939	return vmw_resource_relocation_add(&sw_context->res_relocations,
2940					   view,
2941					   vmw_ptr_diff(sw_context->buf_start,
2942							&cmd->header.id),
2943					   vmw_res_rel_cond_nop);
2944}
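/*
 * The conditional NOP works by remembering where this command's id
 * word sits in the batch, as a byte offset from buf_start, since the
 * stream is copied before patching. If the view is already gone when
 * relocations are applied, the effect is roughly
 *
 *	header->id = SVGA_3D_CMD_NOP;	// device then skips the destroy
 *
 * (a sketch of the outcome; the actual patching is done by
 * vmw_resource_relocations_apply()).
 */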
2945
2946/**
2947 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2948 * command
2949 *
2950 * @dev_priv: Pointer to a device private struct.
2951 * @sw_context: The software context being used for this batch.
2952 * @header: Pointer to the command header in the command stream.
2953 */
2954static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2955				    struct vmw_sw_context *sw_context,
2956				    SVGA3dCmdHeader *header)
2957{
2958	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2959	struct vmw_resource *res;
2960	struct {
2961		SVGA3dCmdHeader header;
2962		SVGA3dCmdDXDefineShader body;
2963	} *cmd = container_of(header, typeof(*cmd), header);
2964	int ret;
2965
2966	if (!ctx_node) {
2967		DRM_ERROR("DX Context not set.\n");
2968		return -EINVAL;
2969	}
2970
2971	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2972	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2973	vmw_resource_unreference(&res);
2974	if (ret)
2975		return ret;
2976
2977	return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2978				 cmd->body.shaderId, cmd->body.type,
2979				 &sw_context->staged_cmd_res);
2980}
2981
2982/**
2983 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2984 * command
2985 *
2986 * @dev_priv: Pointer to a device private struct.
2987 * @sw_context: The software context being used for this batch.
2988 * @header: Pointer to the command header in the command stream.
2989 */
2990static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2991				     struct vmw_sw_context *sw_context,
2992				     SVGA3dCmdHeader *header)
2993{
2994	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2995	struct {
2996		SVGA3dCmdHeader header;
2997		SVGA3dCmdDXDestroyShader body;
2998	} *cmd = container_of(header, typeof(*cmd), header);
2999	int ret;
3000
3001	if (!ctx_node) {
3002		DRM_ERROR("DX Context not set.\n");
3003		return -EINVAL;
3004	}
3005
3006	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
3007				&sw_context->staged_cmd_res);
3008	if (ret)
3009		DRM_ERROR("Could not find shader to remove.\n");
3010
3011	return ret;
3012}
3013
3014/**
3015 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
3016 * command
3017 *
3018 * @dev_priv: Pointer to a device private struct.
3019 * @sw_context: The software context being used for this batch.
3020 * @header: Pointer to the command header in the command stream.
3021 */
3022static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
3023				  struct vmw_sw_context *sw_context,
3024				  SVGA3dCmdHeader *header)
3025{
3026	struct vmw_resource_val_node *ctx_node;
3027	struct vmw_resource_val_node *res_node;
3028	struct vmw_resource *res;
3029	struct {
3030		SVGA3dCmdHeader header;
3031		SVGA3dCmdDXBindShader body;
3032	} *cmd = container_of(header, typeof(*cmd), header);
3033	int ret;
3034
3035	if (cmd->body.cid != SVGA3D_INVALID_ID) {
3036		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
3037					user_context_converter,
3038					&cmd->body.cid, &ctx_node);
3039		if (ret)
3040			return ret;
3041	} else {
3042		ctx_node = sw_context->dx_ctx_node;
3043		if (!ctx_node) {
3044			DRM_ERROR("DX Context not set.\n");
3045			return -EINVAL;
3046		}
3047	}
3048
3049	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
3050				cmd->body.shid, 0);
3051	if (IS_ERR(res)) {
3052		DRM_ERROR("Could not find shader to bind.\n");
3053		return PTR_ERR(res);
3054	}
3055
3056	ret = vmw_resource_val_add(sw_context, res, &res_node);
3057	if (ret) {
3058		DRM_ERROR("Error creating resource validation node.\n");
3059		goto out_unref;
3060	}
3061
3062
3063	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
3064					&cmd->body.mobid,
3065					cmd->body.offsetInBytes);
3066out_unref:
3067	vmw_resource_unreference(&res);
3068
3069	return ret;
3070}
3071
3072/**
3073 * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
3074 *
3075 * @dev_priv: Pointer to a device private struct.
3076 * @sw_context: The software context being used for this batch.
3077 * @header: Pointer to the command header in the command stream.
3078 */
3079static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
3080			      struct vmw_sw_context *sw_context,
3081			      SVGA3dCmdHeader *header)
3082{
3083	struct {
3084		SVGA3dCmdHeader header;
3085		SVGA3dCmdDXGenMips body;
3086	} *cmd = container_of(header, typeof(*cmd), header);
3087
3088	return vmw_view_id_val_add(sw_context, vmw_view_sr,
3089				   cmd->body.shaderResourceViewId);
3090}
3091
3092/**
3093 * vmw_cmd_dx_transfer_from_buffer -
3094 * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
3095 *
3096 * @dev_priv: Pointer to a device private struct.
3097 * @sw_context: The software context being used for this batch.
3098 * @header: Pointer to the command header in the command stream.
3099 */
3100static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
3101					   struct vmw_sw_context *sw_context,
3102					   SVGA3dCmdHeader *header)
3103{
3104	struct {
3105		SVGA3dCmdHeader header;
3106		SVGA3dCmdDXTransferFromBuffer body;
3107	} *cmd = container_of(header, typeof(*cmd), header);
3108	int ret;
3109
3110	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3111				user_surface_converter,
3112				&cmd->body.srcSid, NULL);
3113	if (ret != 0)
3114		return ret;
3115
3116	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3117				 user_surface_converter,
3118				 &cmd->body.destSid, NULL);
3119}
3120
3121static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3122				struct vmw_sw_context *sw_context,
3123				void *buf, uint32_t *size)
3124{
3125	uint32_t size_remaining = *size;
3126	uint32_t cmd_id;
3127
3128	cmd_id = ((uint32_t *)buf)[0];
3129	switch (cmd_id) {
3130	case SVGA_CMD_UPDATE:
3131		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3132		break;
3133	case SVGA_CMD_DEFINE_GMRFB:
3134		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3135		break;
3136	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3137		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3138		break;
3139	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3140		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3141		break;
3142	default:
3143		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
3144		return -EINVAL;
3145	}
3146
3147	if (*size > size_remaining) {
3148		DRM_ERROR("Invalid SVGA command (size mismatch):"
3149			  " %u.\n", cmd_id);
3150		return -EINVAL;
3151	}
3152
3153	if (unlikely(!sw_context->kernel)) {
3154		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
3155		return -EPERM;
3156	}
3157
3158	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3159		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3160
3161	return 0;
3162}
3163
3164static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3165	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3166		    false, false, false),
3167	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3168		    false, false, false),
3169	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3170		    true, false, false),
3171	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3172		    true, false, false),
3173	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3174		    true, false, false),
3175	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3176		    false, false, false),
3177	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3178		    false, false, false),
3179	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3180		    true, false, false),
3181	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3182		    true, false, false),
3183	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3184		    true, false, false),
3185	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3186		    &vmw_cmd_set_render_target_check, true, false, false),
3187	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3188		    true, false, false),
3189	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3190		    true, false, false),
3191	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3192		    true, false, false),
3193	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3194		    true, false, false),
3195	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3196		    true, false, false),
3197	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3198		    true, false, false),
3199	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3200		    true, false, false),
3201	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3202		    false, false, false),
3203	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3204		    true, false, false),
3205	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3206		    true, false, false),
3207	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3208		    true, false, false),
3209	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3210		    true, false, false),
3211	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3212		    true, false, false),
3213	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3214		    true, false, false),
3215	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3216		    true, false, false),
3217	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3218		    true, false, false),
3219	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3220		    true, false, false),
3221	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3222		    true, false, false),
3223	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3224		    &vmw_cmd_blt_surf_screen_check, false, false, false),
3225	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3226		    false, false, false),
3227	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3228		    false, false, false),
3229	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3230		    false, false, false),
3231	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3232		    false, false, false),
3233	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3234		    false, false, false),
3235	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
3236		    false, false, false),
3237	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
3238		    false, false, false),
3239	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
3240		    false, false, false),
3241	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
3242		    false, false, false),
3243	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
3244		    false, false, false),
3245	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
3246		    false, false, false),
3247	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
3248		    false, false, false),
3249	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
3250		    false, false, false),
3251	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3252		    false, false, true),
3253	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3254		    false, false, true),
3255	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3256		    false, false, true),
3257	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3258		    false, false, true),
3259	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3260		    false, false, true),
3261	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3262		    false, false, true),
3263	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3264		    false, false, true),
3265	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3266		    false, false, true),
3267	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3268		    true, false, true),
3269	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3270		    false, false, true),
3271	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3272		    true, false, true),
3273	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3274		    &vmw_cmd_update_gb_surface, true, false, true),
3275	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3276		    &vmw_cmd_readback_gb_image, true, false, true),
3277	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3278		    &vmw_cmd_readback_gb_surface, true, false, true),
3279	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3280		    &vmw_cmd_invalidate_gb_image, true, false, true),
3281	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3282		    &vmw_cmd_invalidate_gb_surface, true, false, true),
3283	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3284		    false, false, true),
3285	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3286		    false, false, true),
3287	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3288		    false, false, true),
3289	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3290		    false, false, true),
3291	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3292		    false, false, true),
3293	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3294		    false, false, true),
3295	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3296		    true, false, true),
3297	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3298		    false, false, true),
3299	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3300		    false, false, false),
3301	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3302		    true, false, true),
3303	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3304		    true, false, true),
3305	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3306		    true, false, true),
3307	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3308		    true, false, true),
3309	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3310		    true, false, true),
3311	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3312		    false, false, true),
3313	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3314		    false, false, true),
3315	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3316		    false, false, true),
3317	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3318		    false, false, true),
3319	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3320		    false, false, true),
3321	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3322		    false, false, true),
3323	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3324		    false, false, true),
3325	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3326		    false, false, true),
3327	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3328		    false, false, true),
3329	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3330		    false, false, true),
3331	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3332		    true, false, true),
3333	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3334		    false, false, true),
3335	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3336		    false, false, true),
3337	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3338		    false, false, true),
3339	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3340		    false, false, true),
3341
3342	/*
3343	 * DX commands
3344	 */
3345	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3346		    false, false, true),
3347	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3348		    false, false, true),
3349	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3350		    false, false, true),
3351	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3352		    false, false, true),
3353	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3354		    false, false, true),
3355	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3356		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3357	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3358		    &vmw_cmd_dx_set_shader_res, true, false, true),
3359	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3360		    true, false, true),
3361	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3362		    true, false, true),
3363	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3364		    true, false, true),
3365	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3366		    true, false, true),
3367	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3368		    true, false, true),
3369	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3370		    &vmw_cmd_dx_cid_check, true, false, true),
3371	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3372		    true, false, true),
3373	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3374		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3375	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3376		    &vmw_cmd_dx_set_index_buffer, true, false, true),
3377	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3378		    &vmw_cmd_dx_set_rendertargets, true, false, true),
3379	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3380		    true, false, true),
3381	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3382		    &vmw_cmd_dx_cid_check, true, false, true),
3383	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3384		    &vmw_cmd_dx_cid_check, true, false, true),
3385	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3386		    true, false, true),
3387	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3388		    true, false, true),
3389	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3390		    true, false, true),
3391	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3392		    &vmw_cmd_dx_cid_check, true, false, true),
3393	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3394		    true, false, true),
3395	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3396		    true, false, true),
3397	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3398		    true, false, true),
3399	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3400		    true, false, true),
3401	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3402		    true, false, true),
3403	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3404		    true, false, true),
3405	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3406		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3407	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3408		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3409	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3410		    true, false, true),
3411	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3412		    true, false, true),
3413	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3414		    &vmw_cmd_dx_check_subresource, true, false, true),
3415	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3416		    &vmw_cmd_dx_check_subresource, true, false, true),
3417	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3418		    &vmw_cmd_dx_check_subresource, true, false, true),
3419	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3420		    &vmw_cmd_dx_view_define, true, false, true),
3421	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3422		    &vmw_cmd_dx_view_remove, true, false, true),
3423	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3424		    &vmw_cmd_dx_view_define, true, false, true),
3425	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3426		    &vmw_cmd_dx_view_remove, true, false, true),
3427	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3428		    &vmw_cmd_dx_view_define, true, false, true),
3429	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3430		    &vmw_cmd_dx_view_remove, true, false, true),
3431	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3432		    &vmw_cmd_dx_so_define, true, false, true),
3433	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3434		    &vmw_cmd_dx_cid_check, true, false, true),
3435	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3436		    &vmw_cmd_dx_so_define, true, false, true),
3437	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3438		    &vmw_cmd_dx_cid_check, true, false, true),
3439	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3440		    &vmw_cmd_dx_so_define, true, false, true),
3441	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3442		    &vmw_cmd_dx_cid_check, true, false, true),
3443	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3444		    &vmw_cmd_dx_so_define, true, false, true),
3445	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3446		    &vmw_cmd_dx_cid_check, true, false, true),
3447	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3448		    &vmw_cmd_dx_so_define, true, false, true),
3449	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3450		    &vmw_cmd_dx_cid_check, true, false, true),
3451	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3452		    &vmw_cmd_dx_define_shader, true, false, true),
3453	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3454		    &vmw_cmd_dx_destroy_shader, true, false, true),
3455	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3456		    &vmw_cmd_dx_bind_shader, true, false, true),
3457	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3458		    &vmw_cmd_dx_so_define, true, false, true),
3459	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3460		    &vmw_cmd_dx_cid_check, true, false, true),
3461	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3462		    true, false, true),
3463	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3464		    &vmw_cmd_dx_set_so_targets, true, false, true),
3465	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3466		    &vmw_cmd_dx_cid_check, true, false, true),
3467	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3468		    &vmw_cmd_dx_cid_check, true, false, true),
3469	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3470		    &vmw_cmd_buffer_copy_check, true, false, true),
3471	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3472		    &vmw_cmd_pred_copy_check, true, false, true),
3473	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3474		    &vmw_cmd_dx_transfer_from_buffer,
3475		    true, false, true),
3476};
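/*
 * The three booleans in each VMW_CMD_DEF() entry are, in order, the
 * user_allow, gb_disable and gb_enable flags that vmw_cmd_check()
 * below tests. For example
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
 *		    true, false, false)
 *
 * may be submitted from user-space and is accepted with or without
 * guest-backed objects, whereas a (false, false, true) entry is
 * kernel-only and legal only on guest-backed hardware.
 */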
3477
3478bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3479{
3480	u32 cmd_id = ((u32 *) buf)[0];
3481
3482	if (cmd_id >= SVGA_CMD_MAX) {
3483		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3484		const struct vmw_cmd_entry *entry;
3485
3486		*size = header->size + sizeof(SVGA3dCmdHeader);
3487		cmd_id = header->id;
3488		if (cmd_id >= SVGA_3D_CMD_MAX)
3489			return false;
3490
3491		cmd_id -= SVGA_3D_CMD_BASE;
3492		entry = &vmw_cmd_entries[cmd_id];
3493		*cmd = entry->cmd_name;
3494		return true;
3495	}
3496
3497	switch (cmd_id) {
3498	case SVGA_CMD_UPDATE:
3499		*cmd = "SVGA_CMD_UPDATE";
3500		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3501		break;
3502	case SVGA_CMD_DEFINE_GMRFB:
3503		*cmd = "SVGA_CMD_DEFINE_GMRFB";
3504		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3505		break;
3506	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3507		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3508		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3509		break;
3510	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3511		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3512		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3513		break;
3514	default:
3515		*cmd = "UNKNOWN";
3516		*size = 0;
3517		return false;
3518	}
3519
3520	return true;
3521}
3522
3523static int vmw_cmd_check(struct vmw_private *dev_priv,
3524			 struct vmw_sw_context *sw_context,
3525			 void *buf, uint32_t *size)
3526{
3527	uint32_t cmd_id;
3528	uint32_t size_remaining = *size;
3529	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3530	int ret;
3531	const struct vmw_cmd_entry *entry;
3532	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3533
3534	cmd_id = ((uint32_t *)buf)[0];
3535	/* Handle any non-3D commands. */
3536	if (unlikely(cmd_id < SVGA_CMD_MAX))
3537		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3538
3539
3540	cmd_id = header->id;
3541	*size = header->size + sizeof(SVGA3dCmdHeader);
3542
3543	cmd_id -= SVGA_3D_CMD_BASE;
3544	if (unlikely(*size > size_remaining))
3545		goto out_invalid;
3546
3547	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3548		goto out_invalid;
3549
3550	entry = &vmw_cmd_entries[cmd_id];
3551	if (unlikely(!entry->func))
3552		goto out_invalid;
3553
3554	if (unlikely(!entry->user_allow && !sw_context->kernel))
3555		goto out_privileged;
3556
3557	if (unlikely(entry->gb_disable && gb))
3558		goto out_old;
3559
3560	if (unlikely(entry->gb_enable && !gb))
3561		goto out_new;
3562
3563	ret = entry->func(dev_priv, sw_context, header);
3564	if (unlikely(ret != 0))
3565		goto out_invalid;
3566
3567	return 0;
3568out_invalid:
3569	DRM_ERROR("Invalid SVGA3D command: %d\n",
3570		  cmd_id + SVGA_3D_CMD_BASE);
3571	return -EINVAL;
3572out_privileged:
3573	DRM_ERROR("Privileged SVGA3D command: %d\n",
3574		  cmd_id + SVGA_3D_CMD_BASE);
3575	return -EPERM;
3576out_old:
3577	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3578		  cmd_id + SVGA_3D_CMD_BASE);
3579	return -EINVAL;
3580out_new:
3581	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3582		  cmd_id + SVGA_3D_CMD_BASE);
3583	return -EINVAL;
3584}
3585
3586static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3587			     struct vmw_sw_context *sw_context,
3588			     void *buf,
3589			     uint32_t size)
3590{
3591	int32_t cur_size = size;
3592	int ret;
3593
3594	sw_context->buf_start = buf;
3595
3596	while (cur_size > 0) {
3597		size = cur_size;
3598		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3599		if (unlikely(ret != 0))
3600			return ret;
3601		buf = (void *)((unsigned long) buf + size);
3602		cur_size -= size;
3603	}
3604
3605	if (unlikely(cur_size != 0)) {
3606		DRM_ERROR("Command verifier out of sync.\n");
3607		return -EINVAL;
3608	}
3609
3610	return 0;
3611}
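/*
 * The walk above treats the batch as a flat byte stream: each
 * vmw_cmd_check() call both validates one command and returns its
 * total size, which advances the cursor. Schematically:
 *
 *	| hdr0 payload0 | hdr1 payload1 | hdr2 payload2 |
 *	^ buf            ^ buf + size0    ...             cur_size -> 0
 *
 * cur_size is signed, so even if a bogus size slipped through, the
 * loop would terminate and the final "out of sync" test would catch
 * the mismatch; the per-command size_remaining check normally rejects
 * such a command first.
 */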
3612
3613static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3614{
3615	sw_context->cur_reloc = 0;
3616}
3617
3618static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3619{
3620	uint32_t i;
3621	struct vmw_relocation *reloc;
3622	struct ttm_validate_buffer *validate;
3623	struct ttm_buffer_object *bo;
3624
3625	for (i = 0; i < sw_context->cur_reloc; ++i) {
3626		reloc = &sw_context->relocs[i];
3627		validate = &sw_context->val_bufs[reloc->index].base;
3628		bo = validate->bo;
3629		switch (bo->mem.mem_type) {
3630		case TTM_PL_VRAM:
3631			reloc->location->offset += bo->offset;
3632			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3633			break;
3634		case VMW_PL_GMR:
3635			reloc->location->gmrId = bo->mem.start;
3636			break;
3637		case VMW_PL_MOB:
3638			*reloc->mob_loc = bo->mem.start;
3639			break;
3640		default:
3641			BUG();
3642		}
3643	}
3644	vmw_free_relocations(sw_context);
3645}
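/*
 * What gets patched depends on where each buffer finally validated:
 * VRAM is addressed as an offset within the special
 * SVGA_GMR_FRAMEBUFFER "GMR", while real GMRs and MOBs are addressed
 * by the id in bo->mem.start. For the VRAM case the net effect is
 *
 *	loc->gmrId   = SVGA_GMR_FRAMEBUFFER;
 *	loc->offset += bo->offset;
 *
 * i.e. the stream is rewritten as if user-space had known the final
 * placement in advance.
 */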
3646
3647/**
3648 * vmw_resource_list_unreference - Free up a resource list and unreference
3649 * all resources referenced by it.
3650 *
3651 * @list: The resource list.
3652 */
3653static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3654					  struct list_head *list)
3655{
3656	struct vmw_resource_val_node *val, *val_next;
3657
3658	/*
3659	 * Drop references to resources held during command submission.
3660	 */
3661
3662	list_for_each_entry_safe(val, val_next, list, head) {
3663		list_del_init(&val->head);
3664		vmw_resource_unreference(&val->res);
3665
3666		if (val->staged_bindings) {
3667			if (val->staged_bindings != sw_context->staged_bindings)
3668				vmw_binding_state_free(val->staged_bindings);
3669			else
3670				sw_context->staged_bindings_inuse = false;
3671			val->staged_bindings = NULL;
3672		}
3673
3674		kfree(val);
3675	}
3676}
3677
3678static void vmw_clear_validations(struct vmw_sw_context *sw_context)
3679{
3680	struct vmw_validate_buffer *entry, *next;
3681	struct vmw_resource_val_node *val;
3682
3683	/*
3684	 * Drop references to DMA buffers held during command submission.
3685	 */
3686	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
3687				 base.head) {
3688		list_del(&entry->base.head);
3689		ttm_bo_unref(&entry->base.bo);
3690		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
3691		sw_context->cur_val_buf--;
3692	}
3693	BUG_ON(sw_context->cur_val_buf != 0);
3694
3695	list_for_each_entry(val, &sw_context->resource_list, head)
3696		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
3697}
3698
3699int vmw_validate_single_buffer(struct vmw_private *dev_priv,
3700			       struct ttm_buffer_object *bo,
3701			       bool interruptible,
3702			       bool validate_as_mob)
3703{
3704	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
3705						  base);
3706	struct ttm_operation_ctx ctx = { interruptible, true };
3707	int ret;
3708
3709	if (vbo->pin_count > 0)
3710		return 0;
3711
3712	if (validate_as_mob)
3713		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
3714
3715	/**
3716	 * Put BO in VRAM if there is space, otherwise as a GMR.
3717	 * If there is no space in VRAM and GMR ids are all used up,
3718	 * start evicting GMRs to make room. If the DMA buffer can't be
3719	 * used as a GMR, this will return -ENOMEM.
3720	 */
3721
3722	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
3723	if (likely(ret == 0 || ret == -ERESTARTSYS))
3724		return ret;
3725
3726	/**
3727	 * If that failed, try VRAM again, this time evicting
3728	 * previous contents.
3729	 */
3730
3731	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
3732	return ret;
3733}
3734
3735static int vmw_validate_buffers(struct vmw_private *dev_priv,
3736				struct vmw_sw_context *sw_context)
3737{
3738	struct vmw_validate_buffer *entry;
3739	int ret;
3740
3741	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
3742		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
3743						 true,
3744						 entry->validate_as_mob);
3745		if (unlikely(ret != 0))
3746			return ret;
3747	}
3748	return 0;
3749}
3750
3751static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3752				 uint32_t size)
3753{
3754	if (likely(sw_context->cmd_bounce_size >= size))
3755		return 0;
3756
3757	if (sw_context->cmd_bounce_size == 0)
3758		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3759
3760	while (sw_context->cmd_bounce_size < size) {
3761		sw_context->cmd_bounce_size =
3762			PAGE_ALIGN(sw_context->cmd_bounce_size +
3763				   (sw_context->cmd_bounce_size >> 1));
3764	}
3765
3766	vfree(sw_context->cmd_bounce);
3767	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3768
3769	if (sw_context->cmd_bounce == NULL) {
3770		DRM_ERROR("Failed to allocate command bounce buffer.\n");
3771		sw_context->cmd_bounce_size = 0;
3772		return -ENOMEM;
3773	}
3774
3775	return 0;
3776}
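/*
 * The bounce buffer grows by roughly 1.5x per step, page-aligned, so
 * a workload whose batches creep up in size reallocates O(log n)
 * times rather than on every submission. Assuming a hypothetical
 * 32 KiB initial size, a 200 KiB batch would step through 48K, 72K,
 * 108K and 162K before settling on a single 243K vmalloc() (values
 * modulo page alignment).
 */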
3777
3778/**
3779 * vmw_execbuf_fence_commands - create and submit a command stream fence
3780 *
3781 * Creates a fence object and submits a command stream marker.
3782 * If this fails for some reason, we sync the fifo and return NULL.
3783 * It is then safe to fence buffers with a NULL pointer.
3784 *
3785 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3786 * user-space handle is created; otherwise no handle is created.
3787 */
3788
3789int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3790			       struct vmw_private *dev_priv,
3791			       struct vmw_fence_obj **p_fence,
3792			       uint32_t *p_handle)
3793{
3794	uint32_t sequence;
3795	int ret;
3796	bool synced = false;
3797
3798	/* p_handle implies file_priv. */
3799	BUG_ON(p_handle != NULL && file_priv == NULL);
3800
3801	ret = vmw_fifo_send_fence(dev_priv, &sequence);
3802	if (unlikely(ret != 0)) {
3803		DRM_ERROR("Fence submission error. Syncing.\n");
3804		synced = true;
3805	}
3806
3807	if (p_handle != NULL)
3808		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3809					    sequence, p_fence, p_handle);
3810	else
3811		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3812
3813	if (unlikely(ret != 0 && !synced)) {
3814		(void) vmw_fallback_wait(dev_priv, false, false,
3815					 sequence, false,
3816					 VMW_FENCE_WAIT_TIMEOUT);
3817		*p_fence = NULL;
3818	}
3819
3820	return 0;
3821}
3822
3823/**
3824 * vmw_execbuf_copy_fence_user - copy fence object information to
3825 * user-space.
3826 *
3827 * @dev_priv: Pointer to a vmw_private struct.
3828 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3829 * @ret: Return value from fence object creation.
3830 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
3831 * which the information should be copied.
3832 * @fence: Pointer to the fenc object.
3833 * @fence_handle: User-space fence handle.
3834 * @out_fence_fd: exported file descriptor for the fence.  -1 if not used
3835 * @sync_file:  Only used to clean up in case of an error in this function.
3836 *
3837 * This function copies fence information to user-space. If copying fails,
3838 * the user-space struct drm_vmw_fence_rep::error member is hopefully
3839 * left untouched, and if user-space has preloaded it with -EFAULT,
3840 * the error can then be detected there.
3841 * Also, if copying fails, user-space will be unable to signal the fence
3842 * object, so we wait for it immediately and then unreference the
3843 * user-space reference.
3844 */
3845void
3846vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3847			    struct vmw_fpriv *vmw_fp,
3848			    int ret,
3849			    struct drm_vmw_fence_rep __user *user_fence_rep,
3850			    struct vmw_fence_obj *fence,
3851			    uint32_t fence_handle,
3852			    int32_t out_fence_fd,
3853			    struct sync_file *sync_file)
3854{
3855	struct drm_vmw_fence_rep fence_rep;
3856
3857	if (user_fence_rep == NULL)
3858		return;
3859
3860	memset(&fence_rep, 0, sizeof(fence_rep));
3861
3862	fence_rep.error = ret;
3863	fence_rep.fd = out_fence_fd;
3864	if (ret == 0) {
3865		BUG_ON(fence == NULL);
3866
3867		fence_rep.handle = fence_handle;
3868		fence_rep.seqno = fence->base.seqno;
3869		vmw_update_seqno(dev_priv, &dev_priv->fifo);
3870		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3871	}
3872
3873	/*
3874	 * copy_to_user errors will be detected by user space not
3875	 * seeing fence_rep::error filled in. Typically
3876	 * user-space would have pre-set that member to -EFAULT.
3877	 */
3878	ret = copy_to_user(user_fence_rep, &fence_rep,
3879			   sizeof(fence_rep));
3880
3881	/*
3882	 * User-space lost the fence object. We need to sync
3883	 * and unreference the handle.
3884	 */
3885	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3886		if (sync_file)
3887			fput(sync_file->file);
3888
3889		if (fence_rep.fd != -1) {
3890			put_unused_fd(fence_rep.fd);
3891			fence_rep.fd = -1;
3892		}
3893
3894		ttm_ref_object_base_unref(vmw_fp->tfile,
3895					  fence_handle, TTM_REF_USAGE);
3896		DRM_ERROR("Fence copy error. Syncing.\n");
3897		(void) vmw_fence_obj_wait(fence, false, false,
3898					  VMW_FENCE_WAIT_TIMEOUT);
3899	}
3900}
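/*
 * The error-reporting scheme above assumes user-space primes the
 * error member before the ioctl, along these lines (a hypothetical
 * client sketch, not an excerpt from a real one):
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *	arg.fence_rep = (uintptr_t)&rep;
 *	drmCommandWriteRead(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (rep.error)		// still -EFAULT if the kernel copy failed
 *		fallback_sync();
 */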
3901
3902/**
3903 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
3904 * the fifo.
3905 *
3906 * @dev_priv: Pointer to a device private structure.
3907 * @kernel_commands: Pointer to the unpatched command batch.
3908 * @command_size: Size of the unpatched command batch.
3909 * @sw_context: Structure holding the relocation lists.
3910 *
3911 * Side effects: If this function returns 0, then the command batch
3912 * pointed to by @kernel_commands will have been modified.
3913 */
3914static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3915				   void *kernel_commands,
3916				   u32 command_size,
3917				   struct vmw_sw_context *sw_context)
3918{
3919	void *cmd;
3920
3921	if (sw_context->dx_ctx_node)
3922		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
3923					  sw_context->dx_ctx_node->res->id);
3924	else
3925		cmd = vmw_fifo_reserve(dev_priv, command_size);
3926	if (!cmd) {
3927		DRM_ERROR("Failed reserving fifo space for commands.\n");
3928		return -ENOMEM;
3929	}
3930
3931	vmw_apply_relocations(sw_context);
3932	memcpy(cmd, kernel_commands, command_size);
3933	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3934	vmw_resource_relocations_free(&sw_context->res_relocations);
3935	vmw_fifo_commit(dev_priv, command_size);
3936
3937	return 0;
3938}
3939
3940/**
3941 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
3942 * the command buffer manager.
3943 *
3944 * @dev_priv: Pointer to a device private structure.
3945 * @header: Opaque handle to the command buffer allocation.
3946 * @command_size: Size of the unpatched command batch.
3947 * @sw_context: Structure holding the relocation lists.
3948 *
3949 * Side effects: If this function returns 0, then the command buffer
3950 * represented by @header will have been modified.
3951 */
3952static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3953				     struct vmw_cmdbuf_header *header,
3954				     u32 command_size,
3955				     struct vmw_sw_context *sw_context)
3956{
3957	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
3958		  SVGA3D_INVALID_ID);
3959	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
3960				       id, false, header);
3961
3962	vmw_apply_relocations(sw_context);
3963	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3964	vmw_resource_relocations_free(&sw_context->res_relocations);
3965	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3966
3967	return 0;
3968}
3969
3970/**
3971 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3972 * submission using a command buffer.
3973 *
3974 * @dev_priv: Pointer to a device private structure.
3975 * @user_commands: User-space pointer to the commands to be submitted.
3976 * @command_size: Size of the unpatched command batch.
3977 * @header: Out parameter returning the opaque pointer to the command buffer.
3978 *
3979 * This function checks whether we can use the command buffer manager for
3980 * submission and if so, creates a command buffer of suitable size and
3981 * copies the user data into that buffer.
3982 *
3983 * On successful return, the function returns a pointer to the data in the
3984 * command buffer and *@header is set to non-NULL.
3985 * If command buffers could not be used, the function will return the value
3986 * of @kernel_commands on function call. That value may be NULL. In that case,
3987 * the value of *@header will be set to NULL.
3988 * If an error is encountered, the function will return a pointer error value.
3989 * If the function is interrupted by a signal while sleeping, it will return
3990 * -ERESTARTSYS cast to a pointer error value.
3991 */
3992static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3993				void __user *user_commands,
3994				void *kernel_commands,
3995				u32 command_size,
3996				struct vmw_cmdbuf_header **header)
3997{
3998	size_t cmdbuf_size;
3999	int ret;
4000
4001	*header = NULL;
4002	if (command_size > SVGA_CB_MAX_SIZE) {
4003		DRM_ERROR("Command buffer is too large.\n");
4004		return ERR_PTR(-EINVAL);
4005	}
4006
4007	if (!dev_priv->cman || kernel_commands)
4008		return kernel_commands;
4009
4010	/* If possible, add a little space for fencing. */
4011	cmdbuf_size = command_size + 512;
4012	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
4013	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
4014					   true, header);
4015	if (IS_ERR(kernel_commands))
4016		return kernel_commands;
4017
4018	ret = copy_from_user(kernel_commands, user_commands,
4019			     command_size);
4020	if (ret) {
4021		DRM_ERROR("Failed copying commands.\n");
4022		vmw_cmdbuf_header_free(*header);
4023		*header = NULL;
4024		return ERR_PTR(-EFAULT);
4025	}
4026
4027	return kernel_commands;
4028}
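/*
 * Return contract in practice: IS_ERR() marks a real failure, a
 * non-NULL *header means the batch now lives in a command buffer, and
 * a plain NULL merely echoes a NULL kernel_commands when no command
 * buffer manager exists; vmw_execbuf_process() then falls back to
 * copying the user batch into the bounce buffer.
 */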
4029
4030static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4031				   struct vmw_sw_context *sw_context,
4032				   uint32_t handle)
4033{
4034	struct vmw_resource_val_node *ctx_node;
4035	struct vmw_resource *res;
4036	int ret;
4037
4038	if (handle == SVGA3D_INVALID_ID)
4039		return 0;
4040
4041	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
4042					      handle, user_context_converter,
4043					      &res);
4044	if (unlikely(ret != 0)) {
4045		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
4046			  (unsigned) handle);
4047		return ret;
4048	}
4049
4050	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
4051	if (unlikely(ret != 0))
4052		goto out_err;
4053
4054	sw_context->dx_ctx_node = ctx_node;
4055	sw_context->man = vmw_context_res_man(res);
4056out_err:
4057	vmw_resource_unreference(&res);
4058	return ret;
4059}
4060
4061int vmw_execbuf_process(struct drm_file *file_priv,
4062			struct vmw_private *dev_priv,
4063			void __user *user_commands,
4064			void *kernel_commands,
4065			uint32_t command_size,
4066			uint64_t throttle_us,
4067			uint32_t dx_context_handle,
4068			struct drm_vmw_fence_rep __user *user_fence_rep,
4069			struct vmw_fence_obj **out_fence,
4070			uint32_t flags)
4071{
4072	struct vmw_sw_context *sw_context = &dev_priv->ctx;
4073	struct vmw_fence_obj *fence = NULL;
4074	struct vmw_resource *error_resource;
4075	struct list_head resource_list;
4076	struct vmw_cmdbuf_header *header;
4077	struct ww_acquire_ctx ticket;
4078	uint32_t handle;
4079	int ret;
4080	int32_t out_fence_fd = -1;
4081	struct sync_file *sync_file = NULL;
4082
4083
4084	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4085		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
4086		if (out_fence_fd < 0) {
4087			DRM_ERROR("Failed to get a fence file descriptor.\n");
4088			return out_fence_fd;
4089		}
4090	}
4091
4092	if (throttle_us) {
4093		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
4094				   throttle_us);
4095
4096		if (ret)
4097			goto out_free_fence_fd;
4098	}
4099
4100	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4101					     kernel_commands, command_size,
4102					     &header);
4103	if (IS_ERR(kernel_commands)) {
4104		ret = PTR_ERR(kernel_commands);
4105		goto out_free_fence_fd;
4106	}
4107
4108	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4109	if (ret) {
4110		ret = -ERESTARTSYS;
4111		goto out_free_header;
4112	}
4113
4114	sw_context->kernel = false;
4115	if (kernel_commands == NULL) {
4116		ret = vmw_resize_cmd_bounce(sw_context, command_size);
4117		if (unlikely(ret != 0))
4118			goto out_unlock;
4119
4120
4121		ret = copy_from_user(sw_context->cmd_bounce,
4122				     user_commands, command_size);
4123
4124		if (unlikely(ret != 0)) {
4125			ret = -EFAULT;
4126			DRM_ERROR("Failed copying commands.\n");
4127			goto out_unlock;
4128		}
4129		kernel_commands = sw_context->cmd_bounce;
4130	} else if (!header)
4131		sw_context->kernel = true;
4132
4133	sw_context->fp = vmw_fpriv(file_priv);
4134	sw_context->cur_reloc = 0;
4135	sw_context->cur_val_buf = 0;
4136	INIT_LIST_HEAD(&sw_context->resource_list);
4137	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
4138	sw_context->cur_query_bo = dev_priv->pinned_bo;
4139	sw_context->last_query_ctx = NULL;
4140	sw_context->needs_post_query_barrier = false;
4141	sw_context->dx_ctx_node = NULL;
4142	sw_context->dx_query_mob = NULL;
4143	sw_context->dx_query_ctx = NULL;
4144	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4145	INIT_LIST_HEAD(&sw_context->validate_nodes);
4146	INIT_LIST_HEAD(&sw_context->res_relocations);
4147	if (sw_context->staged_bindings)
4148		vmw_binding_state_reset(sw_context->staged_bindings);
4149
4150	if (!sw_context->res_ht_initialized) {
4151		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
4152		if (unlikely(ret != 0))
4153			goto out_unlock;
4154		sw_context->res_ht_initialized = true;
4155	}
4156	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4157	INIT_LIST_HEAD(&resource_list);
4158	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4159	if (unlikely(ret != 0)) {
4160		list_splice_init(&sw_context->ctx_resource_list,
4161				 &sw_context->resource_list);
4162		goto out_err_nores;
4163	}
4164
4165	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4166				command_size);
4167	/*
4168	 * Merge the resource lists before checking the return status
4169	 * from vmw_cmd_check_all so that all the open hash table entries will
4170	 * be handled properly even if vmw_cmd_check_all fails.
4171	 */
4172	list_splice_init(&sw_context->ctx_resource_list,
4173			 &sw_context->resource_list);
4174
4175	if (unlikely(ret != 0))
4176		goto out_err_nores;
4177
4178	ret = vmw_resources_reserve(sw_context);
4179	if (unlikely(ret != 0))
4180		goto out_err_nores;
4181
4182	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
4183				     true, NULL);
4184	if (unlikely(ret != 0))
4185		goto out_err_nores;
4186
4187	ret = vmw_validate_buffers(dev_priv, sw_context);
4188	if (unlikely(ret != 0))
4189		goto out_err;
4190
4191	ret = vmw_resources_validate(sw_context);
4192	if (unlikely(ret != 0))
4193		goto out_err;
4194
4195	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4196	if (unlikely(ret != 0)) {
4197		ret = -ERESTARTSYS;
4198		goto out_err;
4199	}
4200
4201	if (dev_priv->has_mob) {
4202		ret = vmw_rebind_contexts(sw_context);
4203		if (unlikely(ret != 0))
4204			goto out_unlock_binding;
4205	}
4206
4207	if (!header) {
4208		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4209					      command_size, sw_context);
4210	} else {
4211		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4212						sw_context);
4213		header = NULL;
4214	}
4215	mutex_unlock(&dev_priv->binding_mutex);
4216	if (ret)
4217		goto out_err;
4218
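	/*
	 * Commit any query buffer switch staged during command checking,
	 * then create a fence for this submission.
	 */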
4219	vmw_query_bo_switch_commit(dev_priv, sw_context);
4220	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
4221					 &fence,
4222					 (user_fence_rep) ? &handle : NULL);
4223	/*
4224	 * This error is harmless, because if fence submission fails,
4225	 * vmw_fifo_send_fence will sync. The error will be propagated to
4226	 * user-space in @user_fence_rep.
4227	 */
4228
4229	if (ret != 0)
4230		DRM_ERROR("Fence submission error. Syncing.\n");
4231
4232	vmw_resources_unreserve(sw_context, false);
4233
4234	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
4235				    (void *) fence);
4236
4237	if (unlikely(dev_priv->pinned_bo != NULL &&
4238		     !dev_priv->query_cid_valid))
4239		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
4240
4241	vmw_clear_validations(sw_context);
4242
4243	/*
4244	 * If anything fails here, give up trying to export the fence
4245	 * and do a sync instead, since user-space will not be able to
4246	 * sync the fence itself. This ensures we are still functionally
4247	 * correct.
4248	 */
4249	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4250
4251		sync_file = sync_file_create(&fence->base);
4252		if (!sync_file) {
4253			DRM_ERROR("Unable to create sync file for fence\n");
4254			put_unused_fd(out_fence_fd);
4255			out_fence_fd = -1;
4256
4257			(void) vmw_fence_obj_wait(fence, false, false,
4258						  VMW_FENCE_WAIT_TIMEOUT);
4259		} else {
4260			/* Link the fence with the FD created earlier */
4261			fd_install(out_fence_fd, sync_file->file);
4262		}
4263	}
4264
4265	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4266				    user_fence_rep, fence, handle,
4267				    out_fence_fd, sync_file);
4268
4269	/* Don't unreference when handing fence out */
4270	if (unlikely(out_fence != NULL)) {
4271		*out_fence = fence;
4272		fence = NULL;
4273	} else if (likely(fence != NULL)) {
4274		vmw_fence_obj_unreference(&fence);
4275	}
4276
4277	list_splice_init(&sw_context->resource_list, &resource_list);
4278	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4279	mutex_unlock(&dev_priv->cmdbuf_mutex);
4280
4281	/*
4282	 * Unreference resources outside of the cmdbuf_mutex to
4283	 * avoid deadlocks in resource destruction paths.
4284	 */
4285	vmw_resource_list_unreference(sw_context, &resource_list);
4286
4287	return 0;
4288
4289out_unlock_binding:
4290	mutex_unlock(&dev_priv->binding_mutex);
4291out_err:
4292	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
4293out_err_nores:
4294	vmw_resources_unreserve(sw_context, true);
4295	vmw_resource_relocations_free(&sw_context->res_relocations);
4296	vmw_free_relocations(sw_context);
4297	vmw_clear_validations(sw_context);
4298	if (unlikely(dev_priv->pinned_bo != NULL &&
4299		     !dev_priv->query_cid_valid))
4300		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4301out_unlock:
4302	list_splice_init(&sw_context->resource_list, &resource_list);
4303	error_resource = sw_context->error_resource;
4304	sw_context->error_resource = NULL;
4305	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4306	mutex_unlock(&dev_priv->cmdbuf_mutex);
4307
4308	/*
4309	 * Unreference resources outside of the cmdbuf_mutex to
4310	 * avoid deadlocks in resource destruction paths.
4311	 */
4312	vmw_resource_list_unreference(sw_context, &resource_list);
4313	if (unlikely(error_resource != NULL))
4314		vmw_resource_unreference(&error_resource);
4315out_free_header:
4316	if (header)
4317		vmw_cmdbuf_header_free(header);
4318out_free_fence_fd:
4319	if (out_fence_fd >= 0)
4320		put_unused_fd(out_fence_fd);
4321
4322	return ret;
4323}
4324
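The error paths above use the kernel's layered goto-unwind idiom: each successfully acquired resource gets a cleanup label, and a failure at step N jumps to the label that releases steps N-1..1 in reverse order. A minimal, self-contained sketch of the pattern (plain user-space C for brevity; all names are hypothetical):

	#include <stdio.h>

	static int demo_acquire(const char *what) { printf("acquire %s\n", what); return 0; }
	static void demo_release(const char *what) { printf("release %s\n", what); }

	static int demo_process(void)
	{
		int ret;

		ret = demo_acquire("lock");		/* like cmdbuf_mutex */
		if (ret)
			goto out;
		ret = demo_acquire("reservations");	/* like ttm_eu_reserve_buffers() */
		if (ret)
			goto out_unlock;
		ret = demo_acquire("bindings");		/* like binding_mutex */
		if (ret)
			goto out_unreserve;

		/* ... do the work ... */

		demo_release("bindings");
		demo_release("reservations");
		demo_release("lock");
		return 0;

	out_unreserve:
		demo_release("reservations");
	out_unlock:
		demo_release("lock");
	out:
		return ret;
	}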
4325/**
4326 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4327 *
4328 * @dev_priv: The device private structure.
4329 *
4330 * This function is called to idle the fifo and unpin the query buffer
4331 * when the normal way of doing so hits an error, which should be
4332 * extremely rare.
4333 */
4334static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4335{
4336	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
4337
4338	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4339	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4340	if (dev_priv->dummy_query_bo_pinned) {
4341		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4342		dev_priv->dummy_query_bo_pinned = false;
4343	}
4344}
4345
4346
4347/**
4348 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4349 * query bo.
4350 *
4351 * @dev_priv: The device private structure.
4352 * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
4353 * _after_ a query barrier that flushes all queries touching the current
4354 * buffer pointed to by @dev_priv->pinned_bo.
4355 *
4356 * This function should be used to unpin the pinned query bo, or as a
4357 * query barrier when we need to make sure that all queries have
4358 * finished before the next fifo command. (For example on hardware
4359 * context destruction, where the hardware may otherwise leak unfinished
4360 * queries.)
4361 *
4362 * This function does not return any failure codes, but makes attempts
4363 * to do safe unpinning in case of errors.
4364 *
4365 * The function will synchronize on the previous query barrier, and will
4366 * thus not finish until that barrier has executed.
4367 *
4368 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
4369 * before calling this function.
4370 */
4371void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4372				     struct vmw_fence_obj *fence)
4373{
4374	int ret = 0;
4375	struct list_head validate_list;
4376	struct ttm_validate_buffer pinned_val, query_val;
4377	struct vmw_fence_obj *lfence = NULL;
4378	struct ww_acquire_ctx ticket;
4379
4380	if (dev_priv->pinned_bo == NULL)
4381		goto out_unlock;
4382
4383	INIT_LIST_HEAD(&validate_list);
4384
4385	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
4386	pinned_val.shared = false;
4387	list_add_tail(&pinned_val.head, &validate_list);
4388
4389	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
4390	query_val.shared = false;
4391	list_add_tail(&query_val.head, &validate_list);
4392
4393	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
4394				     false, NULL);
4395	if (unlikely(ret != 0)) {
4396		vmw_execbuf_unpin_panic(dev_priv);
4397		goto out_no_reserve;
4398	}
4399
4400	if (dev_priv->query_cid_valid) {
4401		BUG_ON(fence != NULL);
4402		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
4403		if (unlikely(ret != 0)) {
4404			vmw_execbuf_unpin_panic(dev_priv);
4405			goto out_no_emit;
4406		}
4407		dev_priv->query_cid_valid = false;
4408	}
4409
4410	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4411	if (dev_priv->dummy_query_bo_pinned) {
4412		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4413		dev_priv->dummy_query_bo_pinned = false;
4414	}
4415	if (fence == NULL) {
4416		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4417						  NULL);
4418		fence = lfence;
4419	}
4420	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
4421	if (lfence != NULL)
4422		vmw_fence_obj_unreference(&lfence);
4423
4424	ttm_bo_unref(&query_val.bo);
4425	ttm_bo_unref(&pinned_val.bo);
4426	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4427out_unlock:
4428	return;
4429
4430out_no_emit:
4431	ttm_eu_backoff_reservation(&ticket, &validate_list);
4432out_no_reserve:
4433	ttm_bo_unref(&query_val.bo);
4434	ttm_bo_unref(&pinned_val.bo);
4435	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4436}
4437
4438/**
4439 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4440 * query bo.
4441 *
4442 * @dev_priv: The device private structure.
4443 *
4444 * This function should be used to unpin the pinned query bo, or as a
4445 * query barrier when we need to make sure that all queries have
4446 * finished before the next fifo command. (For example on hardware
4447 * context destruction, where the hardware may otherwise leak unfinished
4448 * queries.)
4449 *
4450 * This function does not return any failure codes, but makes attempts
4451 * to do safe unpinning in case of errors.
4452 *
4453 * The function will synchronize on the previous query barrier, and will
4454 * thus not finish until that barrier has executed.
4455 */
4456void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4457{
4458	mutex_lock(&dev_priv->cmdbuf_mutex);
4459	if (dev_priv->query_cid_valid)
4460		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4461	mutex_unlock(&dev_priv->cmdbuf_mutex);
4462}
4463
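The fence FD export in vmw_execbuf_process() above follows the generic sync_file pattern: reserve an FD before doing anything irreversible, create the sync_file once a fence exists, and fd_install() only on success, since an installed FD cannot be revoked. A hedged kernel-side sketch of that pattern (illustrative only, not driver code; the helper name is hypothetical):

	#include <linux/dma-fence.h>
	#include <linux/fcntl.h>
	#include <linux/file.h>
	#include <linux/sync_file.h>

	/* Export @fence to user-space as a sync_file FD; returns the FD
	 * or a negative errno. Illustrative sketch only. */
	static int example_export_fence_fd(struct dma_fence *fence)
	{
		struct sync_file *sync_file;
		int fd = get_unused_fd_flags(O_CLOEXEC);

		if (fd < 0)
			return fd;

		sync_file = sync_file_create(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);	/* point of no return */
		return fd;
	}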
4464int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
4465		      struct drm_file *file_priv, size_t size)
4466{
4467	struct vmw_private *dev_priv = vmw_priv(dev);
4468	struct drm_vmw_execbuf_arg arg;
4469	int ret;
4470	static const size_t copy_offset[] = {
4471		offsetof(struct drm_vmw_execbuf_arg, context_handle),
4472		sizeof(struct drm_vmw_execbuf_arg)};
4473	struct dma_fence *in_fence = NULL;
4474
4475	if (unlikely(size < copy_offset[0])) {
4476		DRM_ERROR("Invalid command size, ioctl %d\n",
4477			  DRM_VMW_EXECBUF);
4478		return -EINVAL;
4479	}
4480
4481	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
4482		return -EFAULT;
4483
4484	/*
4485	 * Extend the ioctl argument while maintaining backwards
4486	 * compatibility: we take different code paths depending on
4487	 * the value of arg.version.
4488	 */
4490
4491	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
4492		     arg.version == 0)) {
4493		DRM_ERROR("Incorrect execbuf version.\n");
4494		return -EINVAL;
4495	}
4496
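	/*
	 * Version 1 of the argument struct ended just before
	 * context_handle; later versions appended fields after it. Copy
	 * in only the tail that the caller's version actually provides.
	 */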
4497	if (arg.version > 1 &&
4498	    copy_from_user(&arg.context_handle,
4499			   (void __user *) (data + copy_offset[0]),
4500			   copy_offset[arg.version - 1] -
4501			   copy_offset[0]) != 0)
4502		return -EFAULT;
4503
4504	switch (arg.version) {
4505	case 1:
4506		arg.context_handle = (uint32_t) -1;
4507		break;
4508	case 2:
4509	default:
4510		break;
4511	}
4512
4513
4514	/* If imported a fence FD from elsewhere, then wait on it */
4515	if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4516		in_fence = sync_file_get_fence(arg.imported_fence_fd);
4517
4518		if (!in_fence) {
4519			DRM_ERROR("Cannot get imported fence\n");
4520			return -EINVAL;
4521		}
4522
4523		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
4524		if (ret)
4525			goto out;
4526	}
4527
4528	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4529	if (unlikely(ret != 0))
4530		return ret;
4531
4532	ret = vmw_execbuf_process(file_priv, dev_priv,
4533				  (void __user *)(unsigned long)arg.commands,
4534				  NULL, arg.command_size, arg.throttle_us,
4535				  arg.context_handle,
4536				  (void __user *)(unsigned long)arg.fence_rep,
4537				  NULL,
4538				  arg.flags);
4539	ttm_read_unlock(&dev_priv->reservation_sem);
4540	if (unlikely(ret != 0))
4541		goto out;
4542
4543	vmw_kms_cursor_post_execbuf(dev_priv);
4544
4545out:
4546	if (in_fence)
4547		dma_fence_put(in_fence);
4548	return ret;
4549}
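For completeness, a hedged sketch of how user-space might reach this ioctl through libdrm and wait on the exported fence FD. The struct, flag, and ioctl names come from vmwgfx_drm.h; the helper name is hypothetical, an open vmwgfx device FD is assumed, and error handling is trimmed. The exported FD comes back in the fence_rep's fd field, as set up by vmw_execbuf_copy_fence_user() in the submission path above:

	#include <poll.h>
	#include <string.h>
	#include <unistd.h>
	#include <xf86drm.h>
	#include "vmwgfx_drm.h"

	/* Submit @cmds of @size bytes on device @fd, then block until
	 * the GPU signals the exported fence. */
	static int submit_and_wait(int fd, void *cmds, __u32 size)
	{
		struct drm_vmw_fence_rep rep;
		struct drm_vmw_execbuf_arg arg;
		struct pollfd pfd;
		int ret;

		memset(&arg, 0, sizeof(arg));
		memset(&rep, 0, sizeof(rep));
		arg.commands = (unsigned long) cmds;
		arg.command_size = size;
		arg.version = DRM_VMW_EXECBUF_VERSION;
		arg.flags = DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD;
		arg.fence_rep = (unsigned long) &rep;

		ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
		if (ret)
			return ret;
		if (rep.error)
			return rep.error;

		/* A sync_file FD polls readable once its fence signals. */
		pfd.fd = rep.fd;
		pfd.events = POLLIN;
		ret = poll(&pfd, 1, -1);
		close(rep.fd);
		return ret < 0 ? -1 : 0;
	}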