v3.1
 
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}
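
/*
 * Reviewer annotation (not part of the kernel source): the return above
 * uses the GCC "x ? : y" extension and therefore yields capable()'s own
 * nonzero value for CAP_SYS_ADMIN holders, -EINVAL otherwise. Since every
 * caller in this file treats any nonzero return as failure, commands
 * routed to vmw_cmd_invalid() are rejected in both cases.
 */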

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
		return 0;

	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use context %u\n",
			  (unsigned) cmd->cid);
		return ret;
	}

	sw_context->last_cid = cmd->cid;
	sw_context->cid_valid = true;

	return 0;
}

static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     uint32_t *sid)
{
	if (*sid == SVGA3D_INVALID_ID)
		return 0;

	if (unlikely((!sw_context->sid_valid  ||
		      *sid != sw_context->last_sid))) {
		int real_id;
		int ret = vmw_surface_check(dev_priv, sw_context->tfile,
					    *sid, &real_id);

		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not find or use surface 0x%08x "
				  "address 0x%08lx\n",
				  (unsigned int) *sid,
				  (unsigned long) sid);
			return ret;
		}

		sw_context->last_sid = *sid;
		sw_context->sid_valid = true;
		*sid = real_id;
		sw_context->sid_translation = real_id;
	} else
		*sid = sw_context->sid_translation;

	return 0;
}
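
/*
 * Reviewer annotation (not part of the kernel source): vmw_cmd_cid_check()
 * and vmw_cmd_sid_check() keep a one-entry cache (last_cid/last_sid) so a
 * command stream that repeatedly touches the same context or surface pays
 * for only one lookup; vmw_cmd_sid_check() also rewrites *sid in place
 * with the device-visible id kept in sid_translation.
 */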

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
	return ret;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}

static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	uint32_t cur_validate_node;
	struct ttm_validate_buffer *val_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
	if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
		DRM_ERROR("Max number of DMA buffers per submission"
			  " exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc->index = cur_validate_node;
	if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
		val_buf = &sw_context->val_bufs[cur_validate_node];
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->new_sync_obj_arg = (void *) dev_priv;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		++sw_context->cur_val_buf;
	}
	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}
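
/*
 * Reviewer annotation (not part of the kernel source): a guest pointer in
 * the command stream names a GMR by user-space handle. The helper above
 * looks up the backing DMA buffer, records a relocation so the pointer can
 * be patched once the buffer has been validated and placed, and adds the
 * buffer to the validation list at most once, taking a TTM reference.
 */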

static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}


static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	bo = &vmw_bo->base;
	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
					     cmd->dma.host.sid, &srf);
	if (ret) {
		DRM_ERROR("could not find surface\n");
		goto out_no_reloc;
	}

	/**
	 * Patch command stream with device SID.
	 */

	cmd->dma.host.sid = srf->res.id;
	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */
	vmw_surface_unreference(&srf);

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&decl->array.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&range->indexArray.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
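
/*
 * Reviewer annotation (not part of the kernel source): vmw_cmd_draw()
 * derives the maximum admissible counts from header->size before trusting
 * numVertexDecls and numRanges, so a malformed command cannot make the
 * loops walk past the end of the submitted buffer; every referenced
 * surface id is then translated through vmw_cmd_sid_check().
 */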


static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	};

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&cur_state->value);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
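
/*
 * Reviewer annotation (not part of the kernel source): a SETTEXTURESTATE
 * command carries a variable-length array of SVGA3dTextureState entries;
 * last_state is computed from header->size, and only entries that bind a
 * texture (SVGA3D_TS_BIND_TEXTURE) need their surface id translated.
 */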

typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
	[cmd - SVGA_3D_CMD_BASE] = func
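
/*
 * Reviewer annotation (not part of the kernel source): VMW_CMD_DEF expands
 * to a C99 designated array initializer, so vmw_cmd_funcs[] below is
 * indexed directly by (command id - SVGA_3D_CMD_BASE). An id in range with
 * no explicit entry would leave a NULL pointer behind the range check in
 * vmw_cmd_check(), so the table must stay fully populated for every id
 * reachable from user space.
 */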

static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check)
};

static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;

	cmd_id = ((uint32_t *)buf)[0];
	if (cmd_id == SVGA_CMD_UPDATE) {
		*size = 5 << 2;
		return 0;
	}

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_err;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_err;

	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_err;

	return 0;
out_err:
	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}
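
/*
 * Reviewer annotation (not part of the kernel source): each 3D command's
 * size is taken from its own header but checked against the bytes left in
 * the submission (*size > size_remaining), and the id is range-checked
 * before indexing vmw_cmd_funcs[]; a failure in either check, or in the
 * per-command verifier, rejects the whole submission with -EINVAL.
 */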

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf, uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index];
		bo = validate->bo;
		if (bo->mem.mem_type == TTM_PL_VRAM) {
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
		} else
			reloc->location->gmrId = bo->mem.start;
	}
	vmw_free_relocations(sw_context);
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry, *next;

	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 head) {
		list_del(&entry->head);
		vmw_dmabuf_validate_clear(entry->bo);
		ttm_bo_unref(&entry->bo);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);
}

static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo)
{
	int ret;

	/**
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/**
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
	return ret;
}
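
/*
 * Reviewer annotation (not part of the kernel source): placement policy
 * for command-stream buffers is "VRAM or GMR first, then VRAM with
 * eviction". -ERESTARTSYS is passed straight up so the ioctl can be
 * restarted on signal delivery instead of forcing an eviction.
 */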

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct drm_vmw_fence_rep fence_rep;
	struct drm_vmw_fence_rep __user *user_fence_rep;
	int ret;
	void *user_cmd;
	void *cmd;
	uint32_t sequence;
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_master *vmaster = vmw_master(file_priv->master);

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_no_cmd_mutex;
	}

	cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_unlock;
	}

	user_cmd = (void __user *)(unsigned long)arg->commands;
	ret = copy_from_user(cmd, user_cmd, arg->command_size);

	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		DRM_ERROR("Failed copying commands.\n");
		goto out_commit;
	}

	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cid_valid = false;
	sw_context->sid_valid = false;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;

	INIT_LIST_HEAD(&sw_context->validate_nodes);

	ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
	if (unlikely(ret != 0))
		goto out_err;
	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_apply_relocations(sw_context);

	if (arg->throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
				   arg->throttle_us);

		if (unlikely(ret != 0))
			goto out_err;
	}

	vmw_fifo_commit(dev_priv, arg->command_size);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);

	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
				    (void *)(unsigned long) sequence);
	vmw_clear_validations(sw_context);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync.
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	fence_rep.error = ret;
	fence_rep.fence_seq = (uint64_t) sequence;
	fence_rep.pad64 = 0;

	user_fence_rep = (struct drm_vmw_fence_rep __user *)
	    (unsigned long)arg->fence_rep;

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in.
	 */

	ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));

	vmw_kms_cursor_post_execbuf(dev_priv);
	ttm_read_unlock(&vmaster->lock);
	return 0;
out_err:
	vmw_free_relocations(sw_context);
	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
	vmw_clear_validations(sw_context);
out_commit:
	vmw_fifo_commit(dev_priv, 0);
out_unlock:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
out_no_cmd_mutex:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
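
/*
 * Reviewer annotation (not part of the kernel source): the v3.1 execbuf
 * path is: take the master read lock and cmdbuf mutex, reserve FIFO space,
 * copy the user command stream straight into the FIFO, verify and patch it
 * in place, reserve and validate all referenced buffers, apply
 * relocations, optionally throttle, then commit and fence. Error paths
 * unwind in reverse order through the out_* labels.
 */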
v5.9
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/*
 * Helper macro to get dx_ctx_node if available otherwise print an error
 * message. This is for use in command verifier function where if dx_ctx_node
 * is not set then command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context)                                        \
({                                                                            \
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({            \
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__);    \
		__sw_context->dx_ctx_node;                                    \
	});                                                                   \
})

#define VMW_DECLARE_CMD_VAR(__var, __type)                                    \
	struct {                                                              \
		SVGA3dCmdHeader header;                                       \
		__type body;                                                  \
	} __var

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable), #_cmd}
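
/*
 * Reviewer annotation (not part of the kernel source): compared with the
 * v3.1 table of bare function pointers above, the v5.9 verifier dispatches
 * through struct vmw_cmd_entry, which also records whether a command may
 * come from user space and how guest-backed object support changes its
 * handling; the #_cmd stringification keeps a printable name for
 * diagnostics.
 */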

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node. Typically the binding manager
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

/**
 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
 * rcu-protected pointer to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		vmw_user_resource_noref_release();
		return 0;
	}

	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
					  dirty, (void **)&ctx_info,
					  &first_usage);
	vmw_user_resource_noref_release();
	if (ret)
		return ret;

	if (priv_size && first_usage) {
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
					      ctx_info);
		if (ret) {
			VMW_DEBUG_USER("Failed first usage context setup.\n");
			return ret;
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

/**
 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 * validation list if it's not already on it
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: Zero on success. Negative error code on failure.
 */
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_res_cache_entry *rcache;
	enum vmw_res_type res_type = vmw_res_type(res);
	void *ptr;
	int ret;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
					  &ptr, NULL);
	if (ret)
		return ret;

	vmw_execbuf_rcache_update(rcache, res, ptr);

	return 0;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
					    vmw_view_dirtying(view));
	if (ret)
		return ret;

	return vmw_execbuf_res_noctx_val_add(sw_context, view,
					     VMW_RES_DIRTY_NONE);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
 * to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	/* Add all cotables to the validation list. */
	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < cotable_max; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_SET);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_noctx_val_add
				(sw_context, entry->res,
				 vmw_binding_dirtying(entry->bt));
		if (unlikely(ret != 0))
			break;
	}

	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need not
 * be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}
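
/*
 * Reviewer annotation (not part of the kernel source): relocations are
 * applied by byte offset into the buffer being patched, which may differ
 * from the buffer that was parsed; vmw_res_rel_cond_nop lets a command
 * that refers to an already-destroyed resource (id == -1) be replaced with
 * SVGA_3D_CMD_NOP instead of failing the whole submission.
 */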

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being parsed
 * from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		res = vmw_user_resource_noref_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
		if (unlikely(ret != 0))
			return ret;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

	return 0;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
 760			return ret;
 761		}
 762
 763		ret = vmw_rebind_all_dx_query(val->ctx);
 764		if (ret != 0) {
 765			VMW_DEBUG_USER("Failed to rebind queries.\n");
 766			return ret;
 767		}
 768	}
 769
 770	return 0;
 771}
 772
 773/**
 774 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 775 * state tracker.
 776 *
 777 * @sw_context: The execbuf state used for this command.
 778 * @view_type: View type for the bindings.
 779 * @binding_type: Binding type for the bindings.
 780 * @shader_slot: The shader slot to user for the bindings.
 781 * @view_ids: Array of view ids to be bound.
 782 * @num_views: Number of view ids in @view_ids.
 783 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 784 */
 785static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
 786				 enum vmw_view_type view_type,
 787				 enum vmw_ctx_binding_type binding_type,
 788				 uint32 shader_slot,
 789				 uint32 view_ids[], u32 num_views,
 790				 u32 first_slot)
 791{
 792	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
 793	u32 i;
 794
 795	if (!ctx_node)
 796		return -EINVAL;
 797
 798	for (i = 0; i < num_views; ++i) {
 799		struct vmw_ctx_bindinfo_view binding;
 800		struct vmw_resource *view = NULL;
 801
 802		if (view_ids[i] != SVGA3D_INVALID_ID) {
 803			view = vmw_view_id_val_add(sw_context, view_type,
 804						   view_ids[i]);
 805			if (IS_ERR(view)) {
 806				VMW_DEBUG_USER("View not found.\n");
 807				return PTR_ERR(view);
 808			}
 809		}
 810		binding.bi.ctx = ctx_node->ctx;
 811		binding.bi.res = view;
 812		binding.bi.bt = binding_type;
 813		binding.shader_slot = shader_slot;
 814		binding.slot = first_slot + i;
 815		vmw_binding_add(ctx_node->staged, &binding.bi,
 816				shader_slot, binding.slot);
 817	}
 818
 819	return 0;
 820}
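/*
 * A hedged usage sketch (not extra driver code): callers such as the DX
 * set-shader-resources validator further down derive the view count from
 * the command size, range check it, and then hand the trailing id array
 * to vmw_view_bindings_add():
 *
 *	num = (header->size - sizeof(cmd->body)) /
 *		sizeof(SVGA3dShaderResourceViewId);
 *	ret = vmw_view_bindings_add(sw_context, vmw_view_sr,
 *				    vmw_ctx_binding_sr,
 *				    cmd->body.type - SVGA3D_SHADERTYPE_MIN,
 *				    (void *)&cmd[1], num, cmd->body.startView);
 */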
 821
 822/**
 823 * vmw_cmd_cid_check - Check a command header for valid context information.
 824 *
 825 * @dev_priv: Pointer to a device private structure.
 826 * @sw_context: Pointer to the software context.
 827 * @header: A command header with an embedded user-space context handle.
 828 *
 829 * Convenience function: Call vmw_cmd_res_check with the user-space context
 830 * handle embedded in @header.
 831 */
 832static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 833			     struct vmw_sw_context *sw_context,
 834			     SVGA3dCmdHeader *header)
 835{
 836	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
 837		container_of(header, typeof(*cmd), header);
 838
 839	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 840				 VMW_RES_DIRTY_SET, user_context_converter,
 841				 &cmd->body, NULL);
 842}
 843
 844/**
 845 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 846 * recently validated resource
 847 *
 848 * @sw_context: Pointer to the command submission context
 849 * @res: The resource
 850 *
 851 * The resource pointed to by @res needs to be present in the command submission
 852 * context's resource cache and hence must be the last resource of that type to be
 853 * processed by the validation code.
 854 *
 855 * Return: a pointer to the private metadata of the resource, or NULL if it
 856 * wasn't found
 857 */
 858static struct vmw_ctx_validation_info *
 859vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
 860			  struct vmw_resource *res)
 861{
 862	struct vmw_res_cache_entry *rcache =
 863		&sw_context->res_cache[vmw_res_type(res)];
 864
 865	if (rcache->valid && rcache->res == res)
 866		return rcache->private;
 867
 868	WARN_ON_ONCE(true);
 869	return NULL;
 870}
 871
 872static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 873					   struct vmw_sw_context *sw_context,
 874					   SVGA3dCmdHeader *header)
 875{
 876	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
 877	struct vmw_resource *ctx;
 878	struct vmw_resource *res;
 879	int ret;
 880
 881	cmd = container_of(header, typeof(*cmd), header);
 882
 883	if (cmd->body.type >= SVGA3D_RT_MAX) {
 884		VMW_DEBUG_USER("Illegal render target type %u.\n",
 885			       (unsigned int) cmd->body.type);
 886		return -EINVAL;
 887	}
 888
 889	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 890				VMW_RES_DIRTY_SET, user_context_converter,
 891				&cmd->body.cid, &ctx);
 892	if (unlikely(ret != 0))
 893		return ret;
 894
 895	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 896				VMW_RES_DIRTY_SET, user_surface_converter,
 897				&cmd->body.target.sid, &res);
 898	if (unlikely(ret))
 899		return ret;
 900
 901	if (dev_priv->has_mob) {
 902		struct vmw_ctx_bindinfo_view binding;
 903		struct vmw_ctx_validation_info *node;
 904
 905		node = vmw_execbuf_info_from_res(sw_context, ctx);
 906		if (!node)
 907			return -EINVAL;
 908
 909		binding.bi.ctx = ctx;
 910		binding.bi.res = res;
 911		binding.bi.bt = vmw_ctx_binding_rt;
 912		binding.slot = cmd->body.type;
 913		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
 914	}
 915
 916	return 0;
 917}
 918
 919static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
 920				      struct vmw_sw_context *sw_context,
 921				      SVGA3dCmdHeader *header)
 922{
 923	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
 924	int ret;
 925
 926	cmd = container_of(header, typeof(*cmd), header);
 927
 928	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 929				VMW_RES_DIRTY_NONE, user_surface_converter,
 930				&cmd->body.src.sid, NULL);
 931	if (ret)
 932		return ret;
 933
 934	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 935				 VMW_RES_DIRTY_SET, user_surface_converter,
 936				 &cmd->body.dest.sid, NULL);
 937}
 938
 939static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
 940				     struct vmw_sw_context *sw_context,
 941				     SVGA3dCmdHeader *header)
 942{
 943	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
 944	int ret;
 945
 946	cmd = container_of(header, typeof(*cmd), header);
 947	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 948				VMW_RES_DIRTY_NONE, user_surface_converter,
 949				&cmd->body.src, NULL);
 950	if (ret != 0)
 951		return ret;
 952
 953	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 954				 VMW_RES_DIRTY_SET, user_surface_converter,
 955				 &cmd->body.dest, NULL);
 956}
 957
 958static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
 959				   struct vmw_sw_context *sw_context,
 960				   SVGA3dCmdHeader *header)
 961{
 962	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
 963	int ret;
 964
 965	cmd = container_of(header, typeof(*cmd), header);
 966	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 967				VMW_RES_DIRTY_NONE, user_surface_converter,
 968				&cmd->body.srcSid, NULL);
 969	if (ret != 0)
 970		return ret;
 971
 972	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 973				 VMW_RES_DIRTY_SET, user_surface_converter,
 974				 &cmd->body.dstSid, NULL);
 975}
 976
 977static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
 978				     struct vmw_sw_context *sw_context,
 979				     SVGA3dCmdHeader *header)
 980{
 981	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
 982	int ret;
 983
 984	cmd = container_of(header, typeof(*cmd), header);
 985	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 986				VMW_RES_DIRTY_NONE, user_surface_converter,
 987				&cmd->body.src.sid, NULL);
 988	if (unlikely(ret != 0))
 989		return ret;
 990
 991	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 992				 VMW_RES_DIRTY_SET, user_surface_converter,
 993				 &cmd->body.dest.sid, NULL);
 994}
 995
 996static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
 997					 struct vmw_sw_context *sw_context,
 998					 SVGA3dCmdHeader *header)
 999{
1000	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
1001		container_of(header, typeof(*cmd), header);
1002
1003	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1004				 VMW_RES_DIRTY_NONE, user_surface_converter,
1005				 &cmd->body.srcImage.sid, NULL);
1006}
1007
1008static int vmw_cmd_present_check(struct vmw_private *dev_priv,
1009				 struct vmw_sw_context *sw_context,
1010				 SVGA3dCmdHeader *header)
1011{
1012	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
1013		container_of(header, typeof(*cmd), header);
1014
1015	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1016				 VMW_RES_DIRTY_NONE, user_surface_converter,
1017				 &cmd->body.sid, NULL);
1018}
1019
1020/**
1021 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1022 *
1023 * @dev_priv: The device private structure.
1024 * @new_query_bo: The new buffer holding query results.
1025 * @sw_context: The software context used for this command submission.
1026 *
1027 * This function checks whether @new_query_bo is suitable for holding query
1028 * results, and if another buffer currently is pinned for query results. If so,
1029 * the function prepares the state of @sw_context for switching pinned buffers
1030 * after successful submission of the current command batch.
1031 */
1032static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1033				       struct vmw_buffer_object *new_query_bo,
1034				       struct vmw_sw_context *sw_context)
1035{
1036	struct vmw_res_cache_entry *ctx_entry =
1037		&sw_context->res_cache[vmw_res_context];
1038	int ret;
1039
1040	BUG_ON(!ctx_entry->valid);
1041	sw_context->last_query_ctx = ctx_entry->res;
1042
1043	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1044
1045		if (unlikely(new_query_bo->base.num_pages > 4)) {
1046			VMW_DEBUG_USER("Query buffer too large.\n");
1047			return -EINVAL;
1048		}
1049
1050		if (unlikely(sw_context->cur_query_bo != NULL)) {
1051			sw_context->needs_post_query_barrier = true;
1052			ret = vmw_validation_add_bo(sw_context->ctx,
1053						    sw_context->cur_query_bo,
1054						    dev_priv->has_mob, false);
1055			if (unlikely(ret != 0))
1056				return ret;
1057		}
1058		sw_context->cur_query_bo = new_query_bo;
1059
1060		ret = vmw_validation_add_bo(sw_context->ctx,
1061					    dev_priv->dummy_query_bo,
1062					    dev_priv->has_mob, false);
1063		if (unlikely(ret != 0))
1064			return ret;
1065	}
1066
1067	return 0;
1068}
1069
1070/**
1071 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1072 *
1073 * @dev_priv: The device private structure.
1074 * @sw_context: The software context used for this command submission batch.
1075 *
1076 * This function will check if we're switching query buffers, and will then,
1077 * issue a dummy occlusion query wait used as a query barrier. When the fence
1078 * object following that query wait has signaled, we are sure that all preceding
1079 * queries have finished, and the old query buffer can be unpinned. However,
1080 * since both the new query buffer and the old one are fenced with that fence,
1081 * we can do an asynchronous unpin now, and be sure that the old query buffer
1082 * won't be moved until the fence has signaled.
1083 *
1084 * As mentioned above, both the new and old query buffers need to be fenced
1085 * using a sequence emitted *after* calling this function.
1086 */
1087static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1088				     struct vmw_sw_context *sw_context)
1089{
1090	/*
1091	 * The validate list should still hold references to all
1092	 * contexts here.
1093	 */
1094	if (sw_context->needs_post_query_barrier) {
1095		struct vmw_res_cache_entry *ctx_entry =
1096			&sw_context->res_cache[vmw_res_context];
1097		struct vmw_resource *ctx;
1098		int ret;
1099
1100		BUG_ON(!ctx_entry->valid);
1101		ctx = ctx_entry->res;
1102
1103		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
1104
1105		if (unlikely(ret != 0))
1106			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
1107	}
1108
1109	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1110		if (dev_priv->pinned_bo) {
1111			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
1112			vmw_bo_unreference(&dev_priv->pinned_bo);
1113		}
1114
1115		if (!sw_context->needs_post_query_barrier) {
1116			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
1117
1118			/*
1119			 * We pin also the dummy_query_bo buffer so that we
1120			 * don't need to validate it when emitting dummy queries
1121			 * in context destroy paths.
1122			 */
1123			if (!dev_priv->dummy_query_bo_pinned) {
1124				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1125						    true);
1126				dev_priv->dummy_query_bo_pinned = true;
1127			}
1128
1129			BUG_ON(sw_context->last_query_ctx == NULL);
1130			dev_priv->query_cid = sw_context->last_query_ctx->id;
1131			dev_priv->query_cid_valid = true;
1132			dev_priv->pinned_bo =
1133				vmw_bo_reference(sw_context->cur_query_bo);
1134		}
1135	}
1136}
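/*
 * Illustration of the pinned query buffer switch described above; a
 * rough sketch, assuming an old pinned buffer A and a new buffer B:
 *
 *	1. prepare: B != A, so A and the dummy query BO are added to the
 *	   validation list and needs_post_query_barrier is set.
 *	2. commit:  a dummy query wait is emitted as a barrier and A is
 *	   unpinned with a reserved (asynchronous) unpin.
 *	3. fence:   both A and B are fenced by the batch fence emitted
 *	   after this call, so A cannot move before every query that
 *	   could write to it has completed.
 */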
1137
1138/**
1139 * vmw_translate_mob_pointer - Prepare to translate a user-space buffer handle
1140 * to a MOB id.
1141 *
1142 * @dev_priv: Pointer to a device private structure.
1143 * @sw_context: The software context used for this command batch validation.
1144 * @id: Pointer to the user-space handle to be translated.
1145 * @vmw_bo_p: Points to a location that, on successful return, will carry a
1146 * non-reference-counted pointer to the buffer object identified by the
1147 * user-space handle in @id.
1148 *
1149 * This function saves information needed to translate a user-space buffer
1150 * handle to a MOB id. The translation does not take place immediately, but
1151 * during a call to vmw_apply_relocations().
1152 *
1153 * This function builds a relocation list and a list of buffers to validate. The
1154 * former needs to be freed using either vmw_apply_relocations() or
1155 * vmw_free_relocations(). The latter needs to be freed using
1156 * vmw_clear_validations.
1157 */
1158static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1159				 struct vmw_sw_context *sw_context,
1160				 SVGAMobId *id,
1161				 struct vmw_buffer_object **vmw_bo_p)
1162{
1163	struct vmw_buffer_object *vmw_bo;
1164	uint32_t handle = *id;
1165	struct vmw_relocation *reloc;
1166	int ret;
1167
1168	vmw_validation_preload_bo(sw_context->ctx);
1169	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
1170	if (IS_ERR(vmw_bo)) {
1171		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
1172		return PTR_ERR(vmw_bo);
1173	}
1174
1175	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
1176	vmw_user_bo_noref_release();
1177	if (unlikely(ret != 0))
1178		return ret;
1179
1180	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1181	if (!reloc)
1182		return -ENOMEM;
1183
1184	reloc->mob_loc = id;
1185	reloc->vbo = vmw_bo;
1186
1187	*vmw_bo_p = vmw_bo;
1188	list_add_tail(&reloc->head, &sw_context->bo_relocations);
1189
1190	return 0;
1191}
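/*
 * A minimal sketch of how the relocation recorded above is consumed
 * (an assumption about vmw_apply_relocations(), not its verbatim body):
 * after the buffer has been validated and placed, the user-space handle
 * still sitting in the command stream is overwritten with the
 * kernel-side MOB id:
 *
 *	list_for_each_entry(reloc, &sw_context->bo_relocations, head)
 *		if (reloc->mob_loc)
 *			*reloc->mob_loc = reloc->vbo->base.mem.start;
 */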
1192
1193/**
1194 * vmw_translate_guest_pointer - Prepare to translate a user-space buffer handle
1195 * to a valid SVGAGuestPtr
1196 *
1197 * @dev_priv: Pointer to a device private structure.
1198 * @sw_context: The software context used for this command batch validation.
1199 * @ptr: Pointer to the user-space handle to be translated.
1200 * @vmw_bo_p: Points to a location that, on successful return, will carry a
1201 * non-reference-counted pointer to the DMA buffer identified by the user-space
1202 * handle in @ptr.
1203 *
1204 * This function saves information needed to translate a user-space buffer
1205 * handle to a valid SVGAGuestPtr. The translation does not take place
1206 * immediately, but during a call to vmw_apply_relocations().
1207 *
1208 * This function builds a relocation list and a list of buffers to validate.
1209 * The former needs to be freed using either vmw_apply_relocations() or
1210 * vmw_free_relocations(). The latter needs to be freed using
1211 * vmw_clear_validations.
1212 */
1213static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1214				   struct vmw_sw_context *sw_context,
1215				   SVGAGuestPtr *ptr,
1216				   struct vmw_buffer_object **vmw_bo_p)
1217{
1218	struct vmw_buffer_object *vmw_bo;
1219	uint32_t handle = ptr->gmrId;
1220	struct vmw_relocation *reloc;
1221	int ret;
1222
1223	vmw_validation_preload_bo(sw_context->ctx);
1224	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
1225	if (IS_ERR(vmw_bo)) {
1226		VMW_DEBUG_USER("Could not find or use GMR region.\n");
1227		return PTR_ERR(vmw_bo);
1228	}
1229
1230	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
1231	vmw_user_bo_noref_release();
1232	if (unlikely(ret != 0))
1233		return ret;
1234
1235	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1236	if (!reloc)
1237		return -ENOMEM;
1238
1239	reloc->location = ptr;
1240	reloc->vbo = vmw_bo;
1241	*vmw_bo_p = vmw_bo;
1242	list_add_tail(&reloc->head, &sw_context->bo_relocations);
1243
1244	return 0;
1245}
1246
1247/**
1248 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
1249 *
1250 * @dev_priv: Pointer to a device private struct.
1251 * @sw_context: The software context used for this command submission.
1252 * @header: Pointer to the command header in the command stream.
1253 *
1254 * This function adds the new query into the query COTABLE
1255 */
1256static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1257				   struct vmw_sw_context *sw_context,
1258				   SVGA3dCmdHeader *header)
1259{
1260	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
1261	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
1262	struct vmw_resource *cotable_res;
1263	int ret;
1264
1265	if (!ctx_node)
1266		return -EINVAL;
1267
1268	cmd = container_of(header, typeof(*cmd), header);
1269
1270	if (cmd->body.type <  SVGA3D_QUERYTYPE_MIN ||
1271	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
1272		return -EINVAL;
1273
1274	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
1275	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
1276
1277	return ret;
1278}
1279
1280/**
1281 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
1282 *
1283 * @dev_priv: Pointer to a device private struct.
1284 * @sw_context: The software context used for this command submission.
1285 * @header: Pointer to the command header in the command stream.
1286 *
1287 * The query bind operation will eventually associate the query ID with its
1288 * backing MOB.  In this function, we take the user mode MOB ID and use
1289 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
1290 */
1291static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1292				 struct vmw_sw_context *sw_context,
1293				 SVGA3dCmdHeader *header)
1294{
1295	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
1296	struct vmw_buffer_object *vmw_bo;
1297	int ret;
1298
1299	cmd = container_of(header, typeof(*cmd), header);
1300
1301	/*
1302	 * Look up the buffer pointed to by q.mobid, put it on the relocation
1303	 * list so its kernel mode MOB ID can be filled in later
1304	 */
1305	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1306				    &vmw_bo);
1307
1308	if (ret != 0)
1309		return ret;
1310
1311	sw_context->dx_query_mob = vmw_bo;
1312	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
1313	return 0;
1314}
1315
1316/**
1317 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
1318 *
1319 * @dev_priv: Pointer to a device private struct.
1320 * @sw_context: The software context used for this command submission.
1321 * @header: Pointer to the command header in the command stream.
1322 */
1323static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1324				  struct vmw_sw_context *sw_context,
1325				  SVGA3dCmdHeader *header)
1326{
1327	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
1328		container_of(header, typeof(*cmd), header);
1329
1330	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1331				 VMW_RES_DIRTY_SET, user_context_converter,
1332				 &cmd->body.cid, NULL);
1333}
1334
1335/**
1336 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
1337 *
1338 * @dev_priv: Pointer to a device private struct.
1339 * @sw_context: The software context used for this command submission.
1340 * @header: Pointer to the command header in the command stream.
1341 */
1342static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1343			       struct vmw_sw_context *sw_context,
1344			       SVGA3dCmdHeader *header)
1345{
1346	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
1347		container_of(header, typeof(*cmd), header);
1348
1349	if (unlikely(dev_priv->has_mob)) {
1350		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);
1351
1352		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1353
1354		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1355		gb_cmd.header.size = cmd->header.size;
1356		gb_cmd.body.cid = cmd->body.cid;
1357		gb_cmd.body.type = cmd->body.type;
1358
1359		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1360		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1361	}
1362
1363	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1364				 VMW_RES_DIRTY_SET, user_context_converter,
1365				 &cmd->body.cid, NULL);
1366}
1367
1368/**
1369 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
1370 *
1371 * @dev_priv: Pointer to a device private struct.
1372 * @sw_context: The software context used for this command submission.
1373 * @header: Pointer to the command header in the command stream.
1374 */
1375static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1376				struct vmw_sw_context *sw_context,
1377				SVGA3dCmdHeader *header)
1378{
1379	struct vmw_buffer_object *vmw_bo;
1380	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
1381	int ret;
1382
1383	cmd = container_of(header, typeof(*cmd), header);
1384	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1385	if (unlikely(ret != 0))
1386		return ret;
1387
1388	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1389				    &vmw_bo);
1390	if (unlikely(ret != 0))
1391		return ret;
1392
1393	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1394
1395	return ret;
1396}
1397
1398/**
1399 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
1400 *
1401 * @dev_priv: Pointer to a device private struct.
1402 * @sw_context: The software context used for this command submission.
1403 * @header: Pointer to the command header in the command stream.
1404 */
1405static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1406			     struct vmw_sw_context *sw_context,
1407			     SVGA3dCmdHeader *header)
1408{
1409	struct vmw_buffer_object *vmw_bo;
1410	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
1411	int ret;
1412
1413	cmd = container_of(header, typeof(*cmd), header);
1414	if (dev_priv->has_mob) {
1415		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);
1416
1417		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1418
1419		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1420		gb_cmd.header.size = cmd->header.size;
1421		gb_cmd.body.cid = cmd->body.cid;
1422		gb_cmd.body.type = cmd->body.type;
1423		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1424		gb_cmd.body.offset = cmd->body.guestResult.offset;
1425
1426		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1427		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1428	}
1429
1430	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1431	if (unlikely(ret != 0))
1432		return ret;
1433
1434	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1435				      &cmd->body.guestResult, &vmw_bo);
1436	if (unlikely(ret != 0))
1437		return ret;
1438
1439	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1440
1441	return ret;
1442}
1443
1444/**
1445 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
1446 *
1447 * @dev_priv: Pointer to a device private struct.
1448 * @sw_context: The software context used for this command submission.
1449 * @header: Pointer to the command header in the command stream.
1450 */
1451static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1452				 struct vmw_sw_context *sw_context,
1453				 SVGA3dCmdHeader *header)
1454{
1455	struct vmw_buffer_object *vmw_bo;
1456	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
1457	int ret;
1458
1459	cmd = container_of(header, typeof(*cmd), header);
1460	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1461	if (unlikely(ret != 0))
1462		return ret;
1463
1464	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1465				    &vmw_bo);
1466	if (unlikely(ret != 0))
1467		return ret;
1468
1469	return 0;
1470}
1471
1472/**
1473 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
1474 *
1475 * @dev_priv: Pointer to a device private struct.
1476 * @sw_context: The software context used for this command submission.
1477 * @header: Pointer to the command header in the command stream.
1478 */
1479static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1480			      struct vmw_sw_context *sw_context,
1481			      SVGA3dCmdHeader *header)
1482{
1483	struct vmw_buffer_object *vmw_bo;
1484	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
1485	int ret;
1486
1487	cmd = container_of(header, typeof(*cmd), header);
1488	if (dev_priv->has_mob) {
1489		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);
1490
1491		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1492
1493		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1494		gb_cmd.header.size = cmd->header.size;
1495		gb_cmd.body.cid = cmd->body.cid;
1496		gb_cmd.body.type = cmd->body.type;
1497		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1498		gb_cmd.body.offset = cmd->body.guestResult.offset;
1499
1500		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1501		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1502	}
1503
1504	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1505	if (unlikely(ret != 0))
1506		return ret;
1507
1508	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1509				      &cmd->body.guestResult, &vmw_bo);
1510	if (unlikely(ret != 0))
1511		return ret;
1512
1513	return 0;
1514}
1515
1516static int vmw_cmd_dma(struct vmw_private *dev_priv,
1517		       struct vmw_sw_context *sw_context,
1518		       SVGA3dCmdHeader *header)
1519{
1520	struct vmw_buffer_object *vmw_bo = NULL;
1521	struct vmw_surface *srf = NULL;
1522	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
1523	int ret;
1524	SVGA3dCmdSurfaceDMASuffix *suffix;
1525	uint32_t bo_size;
1526	bool dirty;
1527
1528	cmd = container_of(header, typeof(*cmd), header);
1529	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
1530					       header->size - sizeof(*suffix));
1531
1532	/* Make sure device and verifier stay in sync. */
1533	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1534		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
1535		return -EINVAL;
1536	}
1537
1538	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1539				      &cmd->body.guest.ptr, &vmw_bo);
1540	if (unlikely(ret != 0))
1541		return ret;
1542
1543	/* Make sure DMA doesn't cross BO boundaries. */
1544	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1545	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
1546		VMW_DEBUG_USER("Invalid DMA offset.\n");
1547		return -EINVAL;
1548	}
1549
1550	bo_size -= cmd->body.guest.ptr.offset;
1551	if (unlikely(suffix->maximumOffset > bo_size))
1552		suffix->maximumOffset = bo_size;
1553
1554	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
1555		VMW_RES_DIRTY_SET : 0;
1556	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1557				dirty, user_surface_converter,
1558				&cmd->body.host.sid, NULL);
1559	if (unlikely(ret != 0)) {
1560		if (unlikely(ret != -ERESTARTSYS))
1561			VMW_DEBUG_USER("could not find surface for DMA.\n");
1562		return ret;
1563	}
1564
1565	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1566
1567	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);
1568
1569	return 0;
1570}
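/*
 * Command layout assumed by the suffix check above (sketch):
 *
 *	SVGA3dCmdHeader           header;  // header->size covers the rest
 *	SVGA3dCmdSurfaceDMA       body;    // guest ptr + host sid
 *	SVGA3dCopyBox             boxes[]; // variable number of copy boxes
 *	SVGA3dCmdSurfaceDMASuffix suffix;  // last sizeof(*suffix) bytes
 *
 * The suffix is found by counting back from the end of the command,
 * which is why suffix->suffixSize must match sizeof(*suffix) exactly,
 * and why maximumOffset is clamped to the validated BO size.
 */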
1571
1572static int vmw_cmd_draw(struct vmw_private *dev_priv,
1573			struct vmw_sw_context *sw_context,
1574			SVGA3dCmdHeader *header)
1575{
1576	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
1577	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1578		(unsigned long)header + sizeof(*cmd));
1579	SVGA3dPrimitiveRange *range;
1580	uint32_t i;
1581	uint32_t maxnum;
1582	int ret;
1583
1584	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1585	if (unlikely(ret != 0))
1586		return ret;
1587
1588	cmd = container_of(header, typeof(*cmd), header);
1589	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1590
1591	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1592		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
1593		return -EINVAL;
1594	}
1595
1596	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1597		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1598					VMW_RES_DIRTY_NONE,
1599					user_surface_converter,
1600					&decl->array.surfaceId, NULL);
1601		if (unlikely(ret != 0))
1602			return ret;
1603	}
1604
1605	maxnum = (header->size - sizeof(cmd->body) -
1606		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1607	if (unlikely(cmd->body.numRanges > maxnum)) {
1608		VMW_DEBUG_USER("Illegal number of index ranges.\n");
1609		return -EINVAL;
1610	}
1611
1612	range = (SVGA3dPrimitiveRange *) decl;
1613	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1614		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1615					VMW_RES_DIRTY_NONE,
1616					user_surface_converter,
1617					&range->indexArray.surfaceId, NULL);
1618		if (unlikely(ret != 0))
1619			return ret;
1620	}
1621	return 0;
1622}
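/*
 * Command layout assumed by the two bounds checks above (sketch):
 *
 *	SVGA3dCmdDrawPrimitives body;                 // numVertexDecls, numRanges
 *	SVGA3dVertexDecl        decl[numVertexDecls];
 *	SVGA3dPrimitiveRange    range[numRanges];
 *
 * maxnum is recomputed after the declarations so that an oversized
 * numVertexDecls cannot push the range walk past header->size.
 */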
1623
1624static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1625			     struct vmw_sw_context *sw_context,
1626			     SVGA3dCmdHeader *header)
1627{
1628	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
1629	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1630	  ((unsigned long) header + header->size + sizeof(header));
1631	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1632		((unsigned long) header + sizeof(*cmd));
1633	struct vmw_resource *ctx;
1634	struct vmw_resource *res;
1635	int ret;
1636
1637	cmd = container_of(header, typeof(*cmd), header);
1638
1639	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1640				VMW_RES_DIRTY_SET, user_context_converter,
1641				&cmd->body.cid, &ctx);
1642	if (unlikely(ret != 0))
1643		return ret;
1644
1645	for (; cur_state < last_state; ++cur_state) {
1646		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1647			continue;
1648
1649		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1650			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
1651				       (unsigned int) cur_state->stage);
1652			return -EINVAL;
1653		}
1654
1655		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1656					VMW_RES_DIRTY_NONE,
1657					user_surface_converter,
1658					&cur_state->value, &res);
1659		if (unlikely(ret != 0))
1660			return ret;
1661
1662		if (dev_priv->has_mob) {
1663			struct vmw_ctx_bindinfo_tex binding;
1664			struct vmw_ctx_validation_info *node;
1665
1666			node = vmw_execbuf_info_from_res(sw_context, ctx);
1667			if (!node)
1668				return -EINVAL;
1669
1670			binding.bi.ctx = ctx;
1671			binding.bi.res = res;
1672			binding.bi.bt = vmw_ctx_binding_tex;
1673			binding.texture_stage = cur_state->stage;
1674			vmw_binding_add(node->staged, &binding.bi, 0,
1675					binding.texture_stage);
1676		}
1677	}
1678
1679	return 0;
1680}
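/*
 * Command layout assumed by the state walk above (sketch):
 *
 *	SVGA3dCmdHeader          header;   // header->size = body + states
 *	SVGA3dCmdSetTextureState body;     // cid
 *	SVGA3dTextureState       states[]; // cur_state .. last_state
 *
 * Only SVGA3D_TS_BIND_TEXTURE entries reference a surface and thus need
 * validation and, with MOBs, a context binding.
 */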
1681
1682static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1683				      struct vmw_sw_context *sw_context,
1684				      void *buf)
1685{
1686	struct vmw_buffer_object *vmw_bo;
1687
1688	struct {
1689		uint32_t header;
1690		SVGAFifoCmdDefineGMRFB body;
1691	} *cmd = buf;
1692
1693	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
1694				       &vmw_bo);
1695}
1696
1697/**
1698 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1699 * switching
1700 *
1701 * @dev_priv: Pointer to a device private struct.
1702 * @sw_context: The software context being used for this batch.
1703 * @res: Pointer to the resource to switch backup buffers for.
1704 * @buf_id: Pointer to the user-space backup buffer handle in the command
1705 * stream.
1706 * @backup_offset: Offset of backup into MOB.
1707 *
1708 * This function prepares for registering a switch of backup buffers in the
1709 * resource metadata just prior to unreserving. It's basically a wrapper around
1710 * vmw_validation_res_switch_backup() with a different interface.
1711 */
1712static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1713				     struct vmw_sw_context *sw_context,
1714				     struct vmw_resource *res, uint32_t *buf_id,
1715				     unsigned long backup_offset)
1716{
1717	struct vmw_buffer_object *vbo;
1718	void *info;
1719	int ret;
1720
1721	info = vmw_execbuf_info_from_res(sw_context, res);
1722	if (!info)
1723		return -EINVAL;
1724
1725	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1726	if (ret)
1727		return ret;
1728
1729	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1730					 backup_offset);
1731	return 0;
1732}
1733
1734/**
1735 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1736 *
1737 * @dev_priv: Pointer to a device private struct.
1738 * @sw_context: The software context being used for this batch.
1739 * @res_type: The resource type.
1740 * @converter: Information about user-space binding for this resource type.
1741 * @res_id: Pointer to the user-space resource handle in the command stream.
1742 * @buf_id: Pointer to the user-space backup buffer handle in the command
1743 * stream.
1744 * @backup_offset: Offset of backup into MOB.
1745 *
1746 * This function prepares for registering a switch of backup buffers in the
1747 * resource metadata just prior to unreserving. It's basically a wrapper around
1748 * vmw_cmd_res_switch_backup with a different interface.
1749 */
1750static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1751				 struct vmw_sw_context *sw_context,
1752				 enum vmw_res_type res_type,
1753				 const struct vmw_user_resource_conv
1754				 *converter, uint32_t *res_id, uint32_t *buf_id,
1755				 unsigned long backup_offset)
1756{
1757	struct vmw_resource *res;
1758	int ret;
1759
1760	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1761				VMW_RES_DIRTY_NONE, converter, res_id, &res);
1762	if (ret)
1763		return ret;
1764
1765	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1766					 backup_offset);
1767}
1768
1769/**
1770 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1771 *
1772 * @dev_priv: Pointer to a device private struct.
1773 * @sw_context: The software context being used for this batch.
1774 * @header: Pointer to the command header in the command stream.
1775 */
1776static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1777				   struct vmw_sw_context *sw_context,
1778				   SVGA3dCmdHeader *header)
1779{
1780	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1781		container_of(header, typeof(*cmd), header);
1782
1783	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1784				     user_surface_converter, &cmd->body.sid,
1785				     &cmd->body.mobid, 0);
1786}
1787
1788/**
1789 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1790 *
1791 * @dev_priv: Pointer to a device private struct.
1792 * @sw_context: The software context being used for this batch.
1793 * @header: Pointer to the command header in the command stream.
1794 */
1795static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1796				   struct vmw_sw_context *sw_context,
1797				   SVGA3dCmdHeader *header)
1798{
1799	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1800		container_of(header, typeof(*cmd), header);
1801
1802	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1803				 VMW_RES_DIRTY_NONE, user_surface_converter,
1804				 &cmd->body.image.sid, NULL);
1805}
1806
1807/**
1808 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1809 *
1810 * @dev_priv: Pointer to a device private struct.
1811 * @sw_context: The software context being used for this batch.
1812 * @header: Pointer to the command header in the command stream.
1813 */
1814static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1815				     struct vmw_sw_context *sw_context,
1816				     SVGA3dCmdHeader *header)
1817{
1818	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1819		container_of(header, typeof(*cmd), header);
1820
1821	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1822				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1823				 &cmd->body.sid, NULL);
1824}
1825
1826/**
1827 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1828 *
1829 * @dev_priv: Pointer to a device private struct.
1830 * @sw_context: The software context being used for this batch.
1831 * @header: Pointer to the command header in the command stream.
1832 */
1833static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1834				     struct vmw_sw_context *sw_context,
1835				     SVGA3dCmdHeader *header)
1836{
1837	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1838		container_of(header, typeof(*cmd), header);
1839
1840	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1841				 VMW_RES_DIRTY_NONE, user_surface_converter,
1842				 &cmd->body.image.sid, NULL);
1843}
1844
1845/**
1846 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1847 * command
1848 *
1849 * @dev_priv: Pointer to a device private struct.
1850 * @sw_context: The software context being used for this batch.
1851 * @header: Pointer to the command header in the command stream.
1852 */
1853static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1854				       struct vmw_sw_context *sw_context,
1855				       SVGA3dCmdHeader *header)
1856{
1857	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1858		container_of(header, typeof(*cmd), header);
1859
1860	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1861				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1862				 &cmd->body.sid, NULL);
1863}
1864
1865/**
1866 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1867 * command
1868 *
1869 * @dev_priv: Pointer to a device private struct.
1870 * @sw_context: The software context being used for this batch.
1871 * @header: Pointer to the command header in the command stream.
1872 */
1873static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1874				       struct vmw_sw_context *sw_context,
1875				       SVGA3dCmdHeader *header)
1876{
1877	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1878		container_of(header, typeof(*cmd), header);
1879
1880	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1881				 VMW_RES_DIRTY_NONE, user_surface_converter,
1882				 &cmd->body.image.sid, NULL);
1883}
1884
1885/**
1886 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1887 * command
1888 *
1889 * @dev_priv: Pointer to a device private struct.
1890 * @sw_context: The software context being used for this batch.
1891 * @header: Pointer to the command header in the command stream.
1892 */
1893static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1894					 struct vmw_sw_context *sw_context,
1895					 SVGA3dCmdHeader *header)
1896{
1897	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1898		container_of(header, typeof(*cmd), header);
1899
1900	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1901				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1902				 &cmd->body.sid, NULL);
1903}
1904
1905/**
1906 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1907 *
1908 * @dev_priv: Pointer to a device private struct.
1909 * @sw_context: The software context being used for this batch.
1910 * @header: Pointer to the command header in the command stream.
1911 */
1912static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1913				 struct vmw_sw_context *sw_context,
1914				 SVGA3dCmdHeader *header)
1915{
1916	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1917	int ret;
1918	size_t size;
1919	struct vmw_resource *ctx;
1920
1921	cmd = container_of(header, typeof(*cmd), header);
1922
1923	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1924				VMW_RES_DIRTY_SET, user_context_converter,
1925				&cmd->body.cid, &ctx);
1926	if (unlikely(ret != 0))
1927		return ret;
1928
1929	if (unlikely(!dev_priv->has_mob))
1930		return 0;
1931
1932	size = cmd->header.size - sizeof(cmd->body);
1933	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1934				    cmd->body.shid, cmd + 1, cmd->body.type,
1935				    size, &sw_context->staged_cmd_res);
1936	if (unlikely(ret != 0))
1937		return ret;
1938
1939	return vmw_resource_relocation_add(sw_context, NULL,
1940					   vmw_ptr_diff(sw_context->buf_start,
1941							&cmd->header.id),
1942					   vmw_res_rel_nop);
1943}
1944
1945/**
1946 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1947 *
1948 * @dev_priv: Pointer to a device private struct.
1949 * @sw_context: The software context being used for this batch.
1950 * @header: Pointer to the command header in the command stream.
1951 */
1952static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1953				  struct vmw_sw_context *sw_context,
1954				  SVGA3dCmdHeader *header)
1955{
1956	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1957	int ret;
1958	struct vmw_resource *ctx;
1959
1960	cmd = container_of(header, typeof(*cmd), header);
1961
1962	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1963				VMW_RES_DIRTY_SET, user_context_converter,
1964				&cmd->body.cid, &ctx);
1965	if (unlikely(ret != 0))
1966		return ret;
1967
1968	if (unlikely(!dev_priv->has_mob))
1969		return 0;
1970
1971	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1972				cmd->body.type, &sw_context->staged_cmd_res);
1973	if (unlikely(ret != 0))
1974		return ret;
1975
1976	return vmw_resource_relocation_add(sw_context, NULL,
1977					   vmw_ptr_diff(sw_context->buf_start,
1978							&cmd->header.id),
1979					   vmw_res_rel_nop);
1980}
1981
1982/**
1983 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1984 *
1985 * @dev_priv: Pointer to a device private struct.
1986 * @sw_context: The software context being used for this batch.
1987 * @header: Pointer to the command header in the command stream.
1988 */
1989static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1990			      struct vmw_sw_context *sw_context,
1991			      SVGA3dCmdHeader *header)
1992{
1993	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1994	struct vmw_ctx_bindinfo_shader binding;
1995	struct vmw_resource *ctx, *res = NULL;
1996	struct vmw_ctx_validation_info *ctx_info;
1997	int ret;
1998
1999	cmd = container_of(header, typeof(*cmd), header);
2000
2001	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2002		VMW_DEBUG_USER("Illegal shader type %u.\n",
2003			       (unsigned int) cmd->body.type);
2004		return -EINVAL;
2005	}
2006
2007	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2008				VMW_RES_DIRTY_SET, user_context_converter,
2009				&cmd->body.cid, &ctx);
2010	if (unlikely(ret != 0))
2011		return ret;
2012
2013	if (!dev_priv->has_mob)
2014		return 0;
2015
2016	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2017		/*
2018		 * This is the compat shader path - Per device guest-backed
2019		 * shaders, but user-space thinks it's per context host-
2020		 * backed shaders.
2021		 */
2022		res = vmw_shader_lookup(vmw_context_res_man(ctx),
2023					cmd->body.shid, cmd->body.type);
2024		if (!IS_ERR(res)) {
2025			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2026							    VMW_RES_DIRTY_NONE);
2027			if (unlikely(ret != 0))
2028				return ret;
2029
2030			ret = vmw_resource_relocation_add
2031				(sw_context, res,
2032				 vmw_ptr_diff(sw_context->buf_start,
2033					      &cmd->body.shid),
2034				 vmw_res_rel_normal);
2035			if (unlikely(ret != 0))
2036				return ret;
2037		}
2038	}
2039
2040	if (IS_ERR_OR_NULL(res)) {
2041		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2042					VMW_RES_DIRTY_NONE,
2043					user_shader_converter, &cmd->body.shid,
2044					&res);
2045		if (unlikely(ret != 0))
2046			return ret;
2047	}
2048
2049	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2050	if (!ctx_info)
2051		return -EINVAL;
2052
2053	binding.bi.ctx = ctx;
2054	binding.bi.res = res;
2055	binding.bi.bt = vmw_ctx_binding_shader;
2056	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2057	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2058
2059	return 0;
2060}
2061
2062/**
2063 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2064 *
2065 * @dev_priv: Pointer to a device private struct.
2066 * @sw_context: The software context being used for this batch.
2067 * @header: Pointer to the command header in the command stream.
2068 */
2069static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2070				    struct vmw_sw_context *sw_context,
2071				    SVGA3dCmdHeader *header)
2072{
2073	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2074	int ret;
2075
2076	cmd = container_of(header, typeof(*cmd), header);
2077
2078	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2079				VMW_RES_DIRTY_SET, user_context_converter,
2080				&cmd->body.cid, NULL);
2081	if (unlikely(ret != 0))
2082		return ret;
2083
2084	if (dev_priv->has_mob)
2085		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2086
2087	return 0;
2088}
2089
2090/**
2091 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2092 *
2093 * @dev_priv: Pointer to a device private struct.
2094 * @sw_context: The software context being used for this batch.
2095 * @header: Pointer to the command header in the command stream.
2096 */
2097static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2098				  struct vmw_sw_context *sw_context,
2099				  SVGA3dCmdHeader *header)
2100{
2101	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2102		container_of(header, typeof(*cmd), header);
2103
2104	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2105				     user_shader_converter, &cmd->body.shid,
2106				     &cmd->body.mobid, cmd->body.offsetInBytes);
2107}
2108
2109/**
2110 * vmw_cmd_dx_set_single_constant_buffer - Validate
2111 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2112 *
2113 * @dev_priv: Pointer to a device private struct.
2114 * @sw_context: The software context being used for this batch.
2115 * @header: Pointer to the command header in the command stream.
2116 */
2117static int
2118vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2119				      struct vmw_sw_context *sw_context,
2120				      SVGA3dCmdHeader *header)
2121{
2122	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2123	SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
2124		SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;
2125
2126	struct vmw_resource *res = NULL;
2127	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2128	struct vmw_ctx_bindinfo_cb binding;
2129	int ret;
2130
2131	if (!ctx_node)
2132		return -EINVAL;
2133
2134	cmd = container_of(header, typeof(*cmd), header);
2135	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2136				VMW_RES_DIRTY_NONE, user_surface_converter,
2137				&cmd->body.sid, &res);
2138	if (unlikely(ret != 0))
2139		return ret;
2140
2141	binding.bi.ctx = ctx_node->ctx;
2142	binding.bi.res = res;
2143	binding.bi.bt = vmw_ctx_binding_cb;
2144	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2145	binding.offset = cmd->body.offsetInBytes;
2146	binding.size = cmd->body.sizeInBytes;
2147	binding.slot = cmd->body.slot;
2148
2149	if (binding.shader_slot >= max_shader_num ||
2150	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2151		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2152			       (unsigned int) cmd->body.type,
2153			       (unsigned int) binding.slot);
2154		return -EINVAL;
2155	}
2156
2157	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2158			binding.slot);
2159
2160	return 0;
2161}
2162
2163/**
2164 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2165 * command
2166 *
2167 * @dev_priv: Pointer to a device private struct.
2168 * @sw_context: The software context being used for this batch.
2169 * @header: Pointer to the command header in the command stream.
2170 */
2171static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2172				     struct vmw_sw_context *sw_context,
2173				     SVGA3dCmdHeader *header)
2174{
2175	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2176		container_of(header, typeof(*cmd), header);
2177	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
2178		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
2179
2180	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2181		sizeof(SVGA3dShaderResourceViewId);
2182
2183	if ((u64) cmd->body.startView + (u64) num_sr_view >
2184	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2185	    cmd->body.type >= max_allowed) {
2186		VMW_DEBUG_USER("Invalid shader binding.\n");
2187		return -EINVAL;
2188	}
2189
2190	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2191				     vmw_ctx_binding_sr,
2192				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2193				     (void *) &cmd[1], num_sr_view,
2194				     cmd->body.startView);
2195}
2196
2197/**
2198 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2199 *
2200 * @dev_priv: Pointer to a device private struct.
2201 * @sw_context: The software context being used for this batch.
2202 * @header: Pointer to the command header in the command stream.
2203 */
2204static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2205				 struct vmw_sw_context *sw_context,
2206				 SVGA3dCmdHeader *header)
2207{
2208	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2209	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
2210		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
2211	struct vmw_resource *res = NULL;
2212	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2213	struct vmw_ctx_bindinfo_shader binding;
2214	int ret = 0;
2215
2216	if (!ctx_node)
2217		return -EINVAL;
2218
2219	cmd = container_of(header, typeof(*cmd), header);
2220
2221	if (cmd->body.type >= max_allowed ||
2222	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
2223		VMW_DEBUG_USER("Illegal shader type %u.\n",
2224			       (unsigned int) cmd->body.type);
2225		return -EINVAL;
2226	}
2227
2228	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2229		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2230		if (IS_ERR(res)) {
2231			VMW_DEBUG_USER("Could not find shader for binding.\n");
2232			return PTR_ERR(res);
2233		}
2234
2235		ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2236						    VMW_RES_DIRTY_NONE);
2237		if (ret)
2238			return ret;
2239	}
2240
2241	binding.bi.ctx = ctx_node->ctx;
2242	binding.bi.res = res;
2243	binding.bi.bt = vmw_ctx_binding_dx_shader;
2244	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2245
2246	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2247
2248	return 0;
2249}
2250
2251/**
2252 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2253 * command
2254 *
2255 * @dev_priv: Pointer to a device private struct.
2256 * @sw_context: The software context being used for this batch.
2257 * @header: Pointer to the command header in the command stream.
2258 */
2259static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2260					 struct vmw_sw_context *sw_context,
2261					 SVGA3dCmdHeader *header)
2262{
2263	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2264	struct vmw_ctx_bindinfo_vb binding;
2265	struct vmw_resource *res;
2266	struct {
2267		SVGA3dCmdHeader header;
2268		SVGA3dCmdDXSetVertexBuffers body;
2269		SVGA3dVertexBuffer buf[];
2270	} *cmd;
2271	int i, ret, num;
2272
2273	if (!ctx_node)
2274		return -EINVAL;
2275
2276	cmd = container_of(header, typeof(*cmd), header);
2277	num = (cmd->header.size - sizeof(cmd->body)) /
2278		sizeof(SVGA3dVertexBuffer);
2279	if ((u64)num + (u64)cmd->body.startBuffer >
2280	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2281		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2282		return -EINVAL;
2283	}
2284
2285	for (i = 0; i < num; i++) {
2286		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2287					VMW_RES_DIRTY_NONE,
2288					user_surface_converter,
2289					&cmd->buf[i].sid, &res);
2290		if (unlikely(ret != 0))
2291			return ret;
2292
2293		binding.bi.ctx = ctx_node->ctx;
2294		binding.bi.bt = vmw_ctx_binding_vb;
2295		binding.bi.res = res;
2296		binding.offset = cmd->buf[i].offset;
2297		binding.stride = cmd->buf[i].stride;
2298		binding.slot = i + cmd->body.startBuffer;
2299
2300		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2301	}
2302
2303	return 0;
2304}
2305
2306/**
2307 * vmw_cmd_dx_set_index_buffer - Validate
2308 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2309 *
2310 * @dev_priv: Pointer to a device private struct.
2311 * @sw_context: The software context being used for this batch.
2312 * @header: Pointer to the command header in the command stream.
2313 */
2314static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2315				       struct vmw_sw_context *sw_context,
2316				       SVGA3dCmdHeader *header)
2317{
2318	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2319	struct vmw_ctx_bindinfo_ib binding;
2320	struct vmw_resource *res;
2321	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2322	int ret;
2323
2324	if (!ctx_node)
2325		return -EINVAL;
2326
2327	cmd = container_of(header, typeof(*cmd), header);
2328	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2329				VMW_RES_DIRTY_NONE, user_surface_converter,
2330				&cmd->body.sid, &res);
2331	if (unlikely(ret != 0))
2332		return ret;
2333
2334	binding.bi.ctx = ctx_node->ctx;
2335	binding.bi.res = res;
2336	binding.bi.bt = vmw_ctx_binding_ib;
2337	binding.offset = cmd->body.offset;
2338	binding.format = cmd->body.format;
2339
2340	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2341
2342	return 0;
2343}
2344
2345/**
2346 * vmw_cmd_dx_set_rendertarget - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2347 * command
2348 *
2349 * @dev_priv: Pointer to a device private struct.
2350 * @sw_context: The software context being used for this batch.
2351 * @header: Pointer to the command header in the command stream.
2352 */
2353static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2354					struct vmw_sw_context *sw_context,
2355					SVGA3dCmdHeader *header)
2356{
2357	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2358		container_of(header, typeof(*cmd), header);
2359	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2360		sizeof(SVGA3dRenderTargetViewId);
2361	int ret;
2362
2363	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2364		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2365		return -EINVAL;
2366	}
2367
2368	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2369				    0, &cmd->body.depthStencilViewId, 1, 0);
2370	if (ret)
2371		return ret;
2372
2373	return vmw_view_bindings_add(sw_context, vmw_view_rt,
2374				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2375				     num_rt_view, 0);
2376}
2377
2378/**
2379 * vmw_cmd_dx_clear_rendertarget_view - Validate
2380 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2381 *
2382 * @dev_priv: Pointer to a device private struct.
2383 * @sw_context: The software context being used for this batch.
2384 * @header: Pointer to the command header in the command stream.
2385 */
2386static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2387					      struct vmw_sw_context *sw_context,
2388					      SVGA3dCmdHeader *header)
2389{
2390	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2391		container_of(header, typeof(*cmd), header);
2392	struct vmw_resource *ret;
2393
2394	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
2395				  cmd->body.renderTargetViewId);
2396
2397	return PTR_ERR_OR_ZERO(ret);
2398}
2399
2400/**
2401 * vmw_cmd_dx_clear_depthstencil_view - Validate
2402 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2403 *
2404 * @dev_priv: Pointer to a device private struct.
2405 * @sw_context: The software context being used for this batch.
2406 * @header: Pointer to the command header in the command stream.
2407 */
2408static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2409					      struct vmw_sw_context *sw_context,
2410					      SVGA3dCmdHeader *header)
2411{
2412	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2413		container_of(header, typeof(*cmd), header);
2414	struct vmw_resource *ret;
2415
2416	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
2417				  cmd->body.depthStencilViewId);
2418
2419	return PTR_ERR_OR_ZERO(ret);
2420}
2421
2422static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2423				  struct vmw_sw_context *sw_context,
2424				  SVGA3dCmdHeader *header)
2425{
2426	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2427	struct vmw_resource *srf;
2428	struct vmw_resource *res;
2429	enum vmw_view_type view_type;
2430	int ret;
2431	/*
2432	 * This is based on the fact that all affected define commands have the
2433	 * same initial command body layout.
2434	 */
2435	struct {
2436		SVGA3dCmdHeader header;
2437		uint32 defined_id;
2438		uint32 sid;
2439	} *cmd;
2440
2441	if (!ctx_node)
2442		return -EINVAL;
2443
2444	view_type = vmw_view_cmd_to_type(header->id);
2445	if (view_type == vmw_view_max)
2446		return -EINVAL;
2447
2448	cmd = container_of(header, typeof(*cmd), header);
2449	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2450		VMW_DEBUG_USER("Invalid surface id.\n");
2451		return -EINVAL;
2452	}
2453	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2454				VMW_RES_DIRTY_NONE, user_surface_converter,
2455				&cmd->sid, &srf);
2456	if (unlikely(ret != 0))
2457		return ret;
2458
2459	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2460	ret = vmw_cotable_notify(res, cmd->defined_id);
2461	if (unlikely(ret != 0))
2462		return ret;
2463
2464	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2465			    cmd->defined_id, header,
2466			    header->size + sizeof(*header),
2467			    &sw_context->staged_cmd_res);
2468}
2469
2470/**
2471 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2472 *
2473 * @dev_priv: Pointer to a device private struct.
2474 * @sw_context: The software context being used for this batch.
2475 * @header: Pointer to the command header in the command stream.
2476 */
2477static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2478				     struct vmw_sw_context *sw_context,
2479				     SVGA3dCmdHeader *header)
2480{
2481	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2482	struct vmw_ctx_bindinfo_so_target binding;
2483	struct vmw_resource *res;
2484	struct {
2485		SVGA3dCmdHeader header;
2486		SVGA3dCmdDXSetSOTargets body;
2487		SVGA3dSoTarget targets[];
2488	} *cmd;
2489	int i, ret, num;
2490
2491	if (!ctx_node)
2492		return -EINVAL;
2493
2494	cmd = container_of(header, typeof(*cmd), header);
2495	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2496
2497	if (num > SVGA3D_DX_MAX_SOTARGETS) {
2498		VMW_DEBUG_USER("Invalid DX SO binding.\n");
2499		return -EINVAL;
2500	}
2501
2502	for (i = 0; i < num; i++) {
2503		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2504					VMW_RES_DIRTY_SET,
2505					user_surface_converter,
2506					&cmd->targets[i].sid, &res);
2507		if (unlikely(ret != 0))
2508			return ret;
2509
2510		binding.bi.ctx = ctx_node->ctx;
2511		binding.bi.res = res;
2512		binding.bi.bt = vmw_ctx_binding_so_target;
2513		binding.offset = cmd->targets[i].offset;
2514		binding.size = cmd->targets[i].sizeInBytes;
2515		binding.slot = i;
2516
2517		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2518	}
2519
2520	return 0;
2521}
2522
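/**
 * vmw_cmd_dx_so_define - Validate a DX state-object define command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Handles the element layout, blend-state, depth-stencil-state,
 * rasterizer-state and sampler-state define commands, which share the same
 * initial command body layout, by notifying the corresponding cotable of
 * the newly defined id.
 */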
2523static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2524				struct vmw_sw_context *sw_context,
2525				SVGA3dCmdHeader *header)
2526{
2527	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2528	struct vmw_resource *res;
2529	/*
2530	 * This is based on the fact that all affected define commands have
2531	 * the same initial command body layout.
2532	 */
2533	struct {
2534		SVGA3dCmdHeader header;
2535		uint32 defined_id;
2536	} *cmd;
2537	enum vmw_so_type so_type;
2538	int ret;
2539
2540	if (!ctx_node)
2541		return -EINVAL;
2542
2543	so_type = vmw_so_cmd_to_type(header->id);
2544	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2545	cmd = container_of(header, typeof(*cmd), header);
2546	ret = vmw_cotable_notify(res, cmd->defined_id);
2547
2548	return ret;
2549}
2550
2551/**
2552 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2553 * command
2554 *
2555 * @dev_priv: Pointer to a device private struct.
2556 * @sw_context: The software context being used for this batch.
2557 * @header: Pointer to the command header in the command stream.
2558 */
2559static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2560					struct vmw_sw_context *sw_context,
2561					SVGA3dCmdHeader *header)
2562{
2563	struct {
2564		SVGA3dCmdHeader header;
2565		union {
2566			SVGA3dCmdDXReadbackSubResource r_body;
2567			SVGA3dCmdDXInvalidateSubResource i_body;
2568			SVGA3dCmdDXUpdateSubResource u_body;
2569			SVGA3dSurfaceId sid;
2570		};
2571	} *cmd;
2572
2573	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2574		     offsetof(typeof(*cmd), sid));
2575	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2576		     offsetof(typeof(*cmd), sid));
2577	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2578		     offsetof(typeof(*cmd), sid));
2579
2580	cmd = container_of(header, typeof(*cmd), header);
2581	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2582				 VMW_RES_DIRTY_NONE, user_surface_converter,
2583				 &cmd->sid, NULL);
2584}
2585
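/**
 * vmw_cmd_dx_cid_check - Validate a command that needs only a DX context
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * The command is valid as long as a DX context is set for the batch; no
 * other part of the command body needs checking.
 */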
2586static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2587				struct vmw_sw_context *sw_context,
2588				SVGA3dCmdHeader *header)
2589{
2590	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2591
2592	if (!ctx_node)
2593		return -EINVAL;
2594
2595	return 0;
2596}
2597
2598/**
2599 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view
2600 * resource for removal.
2601 *
2602 * @dev_priv: Pointer to a device private struct.
2603 * @sw_context: The software context being used for this batch.
2604 * @header: Pointer to the command header in the command stream.
2605 *
2606 * Check that the view exists, and if it was not created using this command
2607 * batch, conditionally make this command a NOP.
2608 */
2609static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2610				  struct vmw_sw_context *sw_context,
2611				  SVGA3dCmdHeader *header)
2612{
2613	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2614	struct {
2615		SVGA3dCmdHeader header;
2616		union vmw_view_destroy body;
2617	} *cmd = container_of(header, typeof(*cmd), header);
2618	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2619	struct vmw_resource *view;
2620	int ret;
2621
2622	if (!ctx_node)
2623		return -EINVAL;
2624
2625	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2626			      &sw_context->staged_cmd_res, &view);
2627	if (ret || !view)
2628		return ret;
2629
2630	/*
2631	 * If the view wasn't created during this command batch, it might
2632	 * have been removed due to a context swapout, so add a
2633	 * relocation to conditionally make this command a NOP to avoid
2634	 * device errors.
2635	 */
2636	return vmw_resource_relocation_add(sw_context, view,
2637					   vmw_ptr_diff(sw_context->buf_start,
2638							&cmd->header.id),
2639					   vmw_res_rel_cond_nop);
2640}
2641
2642/**
2643 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2644 *
2645 * @dev_priv: Pointer to a device private struct.
2646 * @sw_context: The software context being used for this batch.
2647 * @header: Pointer to the command header in the command stream.
2648 */
2649static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2650				    struct vmw_sw_context *sw_context,
2651				    SVGA3dCmdHeader *header)
2652{
2653	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2654	struct vmw_resource *res;
2655	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2656		container_of(header, typeof(*cmd), header);
2657	int ret;
2658
2659	if (!ctx_node)
2660		return -EINVAL;
2661
2662	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2663	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2664	if (ret)
2665		return ret;
2666
2667	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2668				 cmd->body.shaderId, cmd->body.type,
2669				 &sw_context->staged_cmd_res);
2670}
2671
2672/**
2673 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2674 *
2675 * @dev_priv: Pointer to a device private struct.
2676 * @sw_context: The software context being used for this batch.
2677 * @header: Pointer to the command header in the command stream.
2678 */
2679static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2680				     struct vmw_sw_context *sw_context,
2681				     SVGA3dCmdHeader *header)
2682{
2683	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2684	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2685		container_of(header, typeof(*cmd), header);
2686	int ret;
2687
2688	if (!ctx_node)
2689		return -EINVAL;
2690
2691	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2692				&sw_context->staged_cmd_res);
2693
2694	return ret;
2695}
2696
2697/**
2698 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2699 *
2700 * @dev_priv: Pointer to a device private struct.
2701 * @sw_context: The software context being used for this batch.
2702 * @header: Pointer to the command header in the command stream.
2703 */
2704static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2705				  struct vmw_sw_context *sw_context,
2706				  SVGA3dCmdHeader *header)
2707{
2708	struct vmw_resource *ctx;
2709	struct vmw_resource *res;
2710	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2711		container_of(header, typeof(*cmd), header);
2712	int ret;
2713
2714	if (cmd->body.cid != SVGA3D_INVALID_ID) {
2715		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2716					VMW_RES_DIRTY_SET,
2717					user_context_converter, &cmd->body.cid,
2718					&ctx);
2719		if (ret)
2720			return ret;
2721	} else {
2722		struct vmw_ctx_validation_info *ctx_node =
2723			VMW_GET_CTX_NODE(sw_context);
2724
2725		if (!ctx_node)
2726			return -EINVAL;
2727
2728		ctx = ctx_node->ctx;
2729	}
2730
2731	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2732	if (IS_ERR(res)) {
2733		VMW_DEBUG_USER("Could not find shader to bind.\n");
2734		return PTR_ERR(res);
2735	}
2736
2737	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2738					    VMW_RES_DIRTY_NONE);
2739	if (ret) {
2740		VMW_DEBUG_USER("Error creating resource validation node.\n");
2741		return ret;
2742	}
2743
2744	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2745					 &cmd->body.mobid,
2746					 cmd->body.offsetInBytes);
2747}
2748
2749/**
2750 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2751 *
2752 * @dev_priv: Pointer to a device private struct.
2753 * @sw_context: The software context being used for this batch.
2754 * @header: Pointer to the command header in the command stream.
2755 */
2756static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2757			      struct vmw_sw_context *sw_context,
2758			      SVGA3dCmdHeader *header)
2759{
2760	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2761		container_of(header, typeof(*cmd), header);
2762	struct vmw_resource *ret;
2763
2764	ret = vmw_view_id_val_add(sw_context, vmw_view_sr,
2765				  cmd->body.shaderResourceViewId);
2766
2767	return PTR_ERR_OR_ZERO(ret);
2768}
2769
2770/**
2771 * vmw_cmd_dx_transfer_from_buffer - Validate
2772 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2773 *
2774 * @dev_priv: Pointer to a device private struct.
2775 * @sw_context: The software context being used for this batch.
2776 * @header: Pointer to the command header in the command stream.
2777 */
2778static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2779					   struct vmw_sw_context *sw_context,
2780					   SVGA3dCmdHeader *header)
2781{
2782	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2783		container_of(header, typeof(*cmd), header);
2784	int ret;
2785
2786	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2787				VMW_RES_DIRTY_NONE, user_surface_converter,
2788				&cmd->body.srcSid, NULL);
2789	if (ret != 0)
2790		return ret;
2791
2792	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2793				 VMW_RES_DIRTY_SET, user_surface_converter,
2794				 &cmd->body.destSid, NULL);
2795}
2796
2797/**
2798 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2799 *
2800 * @dev_priv: Pointer to a device private struct.
2801 * @sw_context: The software context being used for this batch.
2802 * @header: Pointer to the command header in the command stream.
2803 */
2804static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2805					   struct vmw_sw_context *sw_context,
2806					   SVGA3dCmdHeader *header)
2807{
2808	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2809		container_of(header, typeof(*cmd), header);
2810
2811	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2812		return -EINVAL;
2813
2814	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2815				 VMW_RES_DIRTY_SET, user_surface_converter,
2816				 &cmd->body.surface.sid, NULL);
2817}
2818
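/**
 * vmw_cmd_sm5 - Validate an SM5 command that needs no argument checking
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Used for commands such as SVGA_3D_CMD_DX_DISPATCH that are valid whenever
 * the device supports SM5 contexts.
 */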
2819static int vmw_cmd_sm5(struct vmw_private *dev_priv,
2820		       struct vmw_sw_context *sw_context,
2821		       SVGA3dCmdHeader *header)
2822{
2823	if (!has_sm5_context(dev_priv))
2824		return -EINVAL;
2825
2826	return 0;
2827}
2828
2829static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
2830				   struct vmw_sw_context *sw_context,
2831				   SVGA3dCmdHeader *header)
2832{
2833	if (!has_sm5_context(dev_priv))
2834		return -EINVAL;
2835
2836	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
2837}
2838
2839static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
2840				   struct vmw_sw_context *sw_context,
2841				   SVGA3dCmdHeader *header)
2842{
2843	if (!has_sm5_context(dev_priv))
2844		return -EINVAL;
2845
2846	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
2847}
2848
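/**
 * vmw_cmd_clear_uav_uint - Validate SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */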
2849static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
2850				  struct vmw_sw_context *sw_context,
2851				  SVGA3dCmdHeader *header)
2852{
2853	struct {
2854		SVGA3dCmdHeader header;
2855		SVGA3dCmdDXClearUAViewUint body;
2856	} *cmd = container_of(header, typeof(*cmd), header);
2857	struct vmw_resource *ret;
2858
2859	if (!has_sm5_context(dev_priv))
2860		return -EINVAL;
2861
2862	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2863				  cmd->body.uaViewId);
2864
2865	return PTR_ERR_OR_ZERO(ret);
2866}
2867
2868static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
2869				   struct vmw_sw_context *sw_context,
2870				   SVGA3dCmdHeader *header)
2871{
2872	struct {
2873		SVGA3dCmdHeader header;
2874		SVGA3dCmdDXClearUAViewFloat body;
2875	} *cmd = container_of(header, typeof(*cmd), header);
2876	struct vmw_resource *ret;
2877
2878	if (!has_sm5_context(dev_priv))
2879		return -EINVAL;
2880
2881	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2882				  cmd->body.uaViewId);
2883
2884	return PTR_ERR_OR_ZERO(ret);
2885}
2886
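/**
 * vmw_cmd_set_uav - Validate SVGA_3D_CMD_DX_SET_UA_VIEWS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * The number of views is inferred from the command size; each view id that
 * follows the fixed command body is validated and bound to the context.
 */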
2887static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
2888			   struct vmw_sw_context *sw_context,
2889			   SVGA3dCmdHeader *header)
2890{
2891	struct {
2892		SVGA3dCmdHeader header;
2893		SVGA3dCmdDXSetUAViews body;
2894	} *cmd = container_of(header, typeof(*cmd), header);
2895	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2896		sizeof(SVGA3dUAViewId);
2897	int ret;
2898
2899	if (!has_sm5_context(dev_priv))
2900		return -EINVAL;
2901
2902	if (num_uav > SVGA3D_MAX_UAVIEWS) {
2903		VMW_DEBUG_USER("Invalid UAV binding.\n");
2904		return -EINVAL;
2905	}
2906
2907	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2908				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
2909				    num_uav, 0);
2910	if (ret)
2911		return ret;
2912
2913	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
2914				  cmd->body.uavSpliceIndex);
2915
2916	return ret;
2917}
2918
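/**
 * vmw_cmd_set_cs_uav - Validate SVGA_3D_CMD_DX_SET_CS_UA_VIEWS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */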
2919static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
2920			      struct vmw_sw_context *sw_context,
2921			      SVGA3dCmdHeader *header)
2922{
2923	struct {
2924		SVGA3dCmdHeader header;
2925		SVGA3dCmdDXSetCSUAViews body;
2926	} *cmd = container_of(header, typeof(*cmd), header);
2927	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2928		sizeof(SVGA3dUAViewId);
2929	int ret;
2930
2931	if (!has_sm5_context(dev_priv))
2932		return -EINVAL;
2933
2934	if (num_uav > SVGA3D_MAX_UAVIEWS) {
2935		VMW_DEBUG_USER("Invalid UAV binding.\n");
2936		return -EINVAL;
2937	}
2938
2939	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2940				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
2941				    num_uav, 0);
2942	if (ret)
2943		return ret;
2944
2945	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
2946				  cmd->body.startIndex);
2947
2948	return ret;
2949}
2950
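/**
 * vmw_cmd_dx_define_streamoutput - Validate
 * SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */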
2951static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
2952					  struct vmw_sw_context *sw_context,
2953					  SVGA3dCmdHeader *header)
2954{
2955	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2956	struct vmw_resource *res;
2957	struct {
2958		SVGA3dCmdHeader header;
2959		SVGA3dCmdDXDefineStreamOutputWithMob body;
2960	} *cmd = container_of(header, typeof(*cmd), header);
2961	int ret;
2962
2963	if (!has_sm5_context(dev_priv))
2964		return -EINVAL;
2965
2966	if (!ctx_node) {
2967		DRM_ERROR("DX Context not set.\n");
2968		return -EINVAL;
2969	}
2970
2971	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
2972	ret = vmw_cotable_notify(res, cmd->body.soid);
2973	if (ret)
2974		return ret;
2975
2976	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
2977				       cmd->body.soid,
2978				       &sw_context->staged_cmd_res);
2979}
2980
2981static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
2982					   struct vmw_sw_context *sw_context,
2983					   SVGA3dCmdHeader *header)
2984{
2985	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2986	struct vmw_resource *res;
2987	struct {
2988		SVGA3dCmdHeader header;
2989		SVGA3dCmdDXDestroyStreamOutput body;
2990	} *cmd = container_of(header, typeof(*cmd), header);
2991
2992	if (!ctx_node) {
2993		DRM_ERROR("DX Context not set.\n");
2994		return -EINVAL;
2995	}
2996
2997	/*
2998	 * When the device does not support SM5, the streamoutput-with-mob
2999	 * commands are not available to user-space. Simply return in this case.
3000	 */
3001	if (!has_sm5_context(dev_priv))
3002		return 0;
3003
3004	/*
3005	 * On an SM5-capable device, if the lookup fails, user-space probably
3006	 * used the old streamoutput define command. Return without an error.
3007	 */
3008	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3009					 cmd->body.soid);
3010	if (IS_ERR(res))
3011		return 0;
3012
3013	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
3014					  &sw_context->staged_cmd_res);
3015}
3016
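/**
 * vmw_cmd_dx_bind_streamoutput - Validate SVGA_3D_CMD_DX_BIND_STREAMOUTPUT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */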
3017static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
3018					struct vmw_sw_context *sw_context,
3019					SVGA3dCmdHeader *header)
3020{
3021	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3022	struct vmw_resource *res;
3023	struct {
3024		SVGA3dCmdHeader header;
3025		SVGA3dCmdDXBindStreamOutput body;
3026	} *cmd = container_of(header, typeof(*cmd), header);
3027	int ret;
3028
3029	if (!has_sm5_context(dev_priv))
3030		return -EINVAL;
3031
3032	if (!ctx_node) {
3033		DRM_ERROR("DX Context not set.\n");
3034		return -EINVAL;
3035	}
3036
3037	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3038					 cmd->body.soid);
3039	if (IS_ERR(res)) {
3040		DRM_ERROR("Could not find streamoutput to bind.\n");
3041		return PTR_ERR(res);
3042	}
3043
3044	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
3045
3046	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
3047					    VMW_RES_DIRTY_NONE);
3048	if (ret) {
3049		DRM_ERROR("Error creating resource validation node.\n");
3050		return ret;
3051	}
3052
3053	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
3054					 &cmd->body.mobid,
3055					 cmd->body.offsetInBytes);
3056}
3057
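/**
 * vmw_cmd_dx_set_streamoutput - Validate SVGA_3D_CMD_DX_SET_STREAMOUTPUT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */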
3058static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
3059				       struct vmw_sw_context *sw_context,
3060				       SVGA3dCmdHeader *header)
3061{
3062	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3063	struct vmw_resource *res;
3064	struct vmw_ctx_bindinfo_so binding;
3065	struct {
3066		SVGA3dCmdHeader header;
3067		SVGA3dCmdDXSetStreamOutput body;
3068	} *cmd = container_of(header, typeof(*cmd), header);
3069	int ret;
3070
3071	if (!ctx_node) {
3072		DRM_ERROR("DX Context not set.\n");
3073		return -EINVAL;
3074	}
3075
3076	if (cmd->body.soid == SVGA3D_INVALID_ID)
3077		return 0;
3078
3079	/*
3080	 * When the device does not support SM5, the streamoutput-with-mob
3081	 * commands are not available to user-space. Simply return in this case.
3082	 */
3083	if (!has_sm5_context(dev_priv))
3084		return 0;
3085
3086	/*
3087	 * On an SM5-capable device, if the lookup fails, user-space probably
3088	 * used the old streamoutput define command. Return without an error.
3089	 */
3090	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3091					 cmd->body.soid);
3092	if (IS_ERR(res))
3093		return 0;
3095
3096	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
3097					    VMW_RES_DIRTY_NONE);
3098	if (ret) {
3099		DRM_ERROR("Error creating resource validation node.\n");
3100		return ret;
3101	}
3102
3103	binding.bi.ctx = ctx_node->ctx;
3104	binding.bi.res = res;
3105	binding.bi.bt = vmw_ctx_binding_so;
3106	binding.slot = 0; /* Only one SO set to context at a time. */
3107
3108	vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
3109			binding.slot);
3110
3111	return ret;
3112}
3113
3114static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
3115					      struct vmw_sw_context *sw_context,
3116					      SVGA3dCmdHeader *header)
3117{
3118	struct vmw_draw_indexed_instanced_indirect_cmd {
3119		SVGA3dCmdHeader header;
3120		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
3121	} *cmd = container_of(header, typeof(*cmd), header);
3122
3123	if (!has_sm5_context(dev_priv))
3124		return -EINVAL;
3125
3126	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3127				 VMW_RES_DIRTY_NONE, user_surface_converter,
3128				 &cmd->body.argsBufferSid, NULL);
3129}
3130
3131static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
3132				      struct vmw_sw_context *sw_context,
3133				      SVGA3dCmdHeader *header)
3134{
3135	struct vmw_draw_instanced_indirect_cmd {
3136		SVGA3dCmdHeader header;
3137		SVGA3dCmdDXDrawInstancedIndirect body;
3138	} *cmd = container_of(header, typeof(*cmd), header);
3139
3140	if (!has_sm5_context(dev_priv))
3141		return -EINVAL;
3142
3143	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3144				 VMW_RES_DIRTY_NONE, user_surface_converter,
3145				 &cmd->body.argsBufferSid, NULL);
3146}
3147
3148static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
3149				     struct vmw_sw_context *sw_context,
3150				     SVGA3dCmdHeader *header)
3151{
3152	struct vmw_dispatch_indirect_cmd {
3153		SVGA3dCmdHeader header;
3154		SVGA3dCmdDXDispatchIndirect body;
3155	} *cmd = container_of(header, typeof(*cmd), header);
3156
3157	if (!has_sm5_context(dev_priv))
3158		return -EINVAL;
3159
3160	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3161				 VMW_RES_DIRTY_NONE, user_surface_converter,
3162				 &cmd->body.argsBufferSid, NULL);
3163}
3164
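/**
 * vmw_cmd_check_not_3d - Validate a command from the 2D (fifo) command range
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: bytes remaining in the stream. Out: size of this command.
 *
 * Only a small set of 2D commands is accepted, and only from kernel
 * contexts; the GMRFB define command receives additional checking in
 * vmw_cmd_check_define_gmrfb().
 */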
3165static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3166				struct vmw_sw_context *sw_context,
3167				void *buf, uint32_t *size)
3168{
3169	uint32_t size_remaining = *size;
3170	uint32_t cmd_id;
3171
3172	cmd_id = ((uint32_t *)buf)[0];
3173	switch (cmd_id) {
3174	case SVGA_CMD_UPDATE:
3175		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3176		break;
3177	case SVGA_CMD_DEFINE_GMRFB:
3178		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3179		break;
3180	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3181		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3182		break;
3183	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3184		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3185		break;
3186	default:
3187		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
3188		return -EINVAL;
3189	}
3190
3191	if (*size > size_remaining) {
3192		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
3193			       cmd_id);
3194		return -EINVAL;
3195	}
3196
3197	if (unlikely(!sw_context->kernel)) {
3198		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
3199		return -EPERM;
3200	}
3201
3202	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3203		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3204
3205	return 0;
3206}
3207
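/*
 * Per-command dispatch table. The three booleans in each VMW_CMD_DEF entry
 * are, in order: user_allow (the command may come from user-space),
 * gb_disable (the command is disallowed on guest-backed devices) and
 * gb_enable (the command requires a guest-backed device). See
 * vmw_cmd_check() below for how they are enforced.
 */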
3208static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3209	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3210		    false, false, false),
3211	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3212		    false, false, false),
3213	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3214		    true, false, false),
3215	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3216		    true, false, false),
3217	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3218		    true, false, false),
3219	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3220		    false, false, false),
3221	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3222		    false, false, false),
3223	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3224		    true, false, false),
3225	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3226		    true, false, false),
3227	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3228		    true, false, false),
3229	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3230		    &vmw_cmd_set_render_target_check, true, false, false),
3231	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3232		    true, false, false),
3233	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3234		    true, false, false),
3235	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3236		    true, false, false),
3237	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3238		    true, false, false),
3239	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3240		    true, false, false),
3241	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3242		    true, false, false),
3243	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3244		    true, false, false),
3245	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3246		    false, false, false),
3247	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3248		    true, false, false),
3249	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3250		    true, false, false),
3251	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3252		    true, false, false),
3253	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3254		    true, false, false),
3255	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3256		    true, false, false),
3257	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3258		    true, false, false),
3259	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3260		    true, false, false),
3261	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3262		    true, false, false),
3263	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3264		    true, false, false),
3265	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3266		    true, false, false),
3267	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3268		    &vmw_cmd_blt_surf_screen_check, false, false, false),
3269	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3270		    false, false, false),
3271	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3272		    false, false, false),
3273	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3274		    false, false, false),
3275	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3276		    false, false, false),
3277	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3278		    false, false, false),
3279	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
3280		    false, false, false),
3281	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
3282		    false, false, false),
3283	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
3284	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
3285	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
3286	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
3287	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
3288	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
3289	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3290		    false, false, true),
3291	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3292		    false, false, true),
3293	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3294		    false, false, true),
3295	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3296		    false, false, true),
3297	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3298		    false, false, true),
3299	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3300		    false, false, true),
3301	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3302		    false, false, true),
3303	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3304		    false, false, true),
3305	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3306		    true, false, true),
3307	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3308		    false, false, true),
3309	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3310		    true, false, true),
3311	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3312		    &vmw_cmd_update_gb_surface, true, false, true),
3313	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3314		    &vmw_cmd_readback_gb_image, true, false, true),
3315	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3316		    &vmw_cmd_readback_gb_surface, true, false, true),
3317	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3318		    &vmw_cmd_invalidate_gb_image, true, false, true),
3319	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3320		    &vmw_cmd_invalidate_gb_surface, true, false, true),
3321	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3322		    false, false, true),
3323	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3324		    false, false, true),
3325	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3326		    false, false, true),
3327	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3328		    false, false, true),
3329	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3330		    false, false, true),
3331	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3332		    false, false, true),
3333	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3334		    true, false, true),
3335	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3336		    false, false, true),
3337	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3338		    false, false, false),
3339	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3340		    true, false, true),
3341	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3342		    true, false, true),
3343	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3344		    true, false, true),
3345	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3346		    true, false, true),
3347	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3348		    true, false, true),
3349	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3350		    false, false, true),
3351	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3352		    false, false, true),
3353	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3354		    false, false, true),
3355	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3356		    false, false, true),
3357	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3358		    false, false, true),
3359	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3360		    false, false, true),
3361	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3362		    false, false, true),
3363	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3364		    false, false, true),
3365	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3366		    false, false, true),
3367	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3368		    false, false, true),
3369	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3370		    true, false, true),
3371	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3372		    false, false, true),
3373	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3374		    false, false, true),
3375	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3376		    false, false, true),
3377	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3378		    false, false, true),
3379
3380	/* SM commands */
3381	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3382		    false, false, true),
3383	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3384		    false, false, true),
3385	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3386		    false, false, true),
3387	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3388		    false, false, true),
3389	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3390		    false, false, true),
3391	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3392		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3393	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3394		    &vmw_cmd_dx_set_shader_res, true, false, true),
3395	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3396		    true, false, true),
3397	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3398		    true, false, true),
3399	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3400		    true, false, true),
3401	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3402		    true, false, true),
3403	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3404		    true, false, true),
3405	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3406		    &vmw_cmd_dx_cid_check, true, false, true),
3407	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3408		    true, false, true),
3409	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3410		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3411	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3412		    &vmw_cmd_dx_set_index_buffer, true, false, true),
3413	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3414		    &vmw_cmd_dx_set_rendertargets, true, false, true),
3415	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3416		    true, false, true),
3417	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3418		    &vmw_cmd_dx_cid_check, true, false, true),
3419	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3420		    &vmw_cmd_dx_cid_check, true, false, true),
3421	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3422		    true, false, true),
3423	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3424		    true, false, true),
3425	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3426		    true, false, true),
3427	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3428		    &vmw_cmd_dx_cid_check, true, false, true),
3429	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3430		    true, false, true),
3431	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3432		    true, false, true),
3433	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3434		    true, false, true),
3435	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3436		    true, false, true),
3437	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3438		    true, false, true),
3439	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3440		    true, false, true),
3441	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3442		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3443	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3444		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3445	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3446		    true, false, true),
3447	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3448		    true, false, true),
3449	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3450		    &vmw_cmd_dx_check_subresource, true, false, true),
3451	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3452		    &vmw_cmd_dx_check_subresource, true, false, true),
3453	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3454		    &vmw_cmd_dx_check_subresource, true, false, true),
3455	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3456		    &vmw_cmd_dx_view_define, true, false, true),
3457	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3458		    &vmw_cmd_dx_view_remove, true, false, true),
3459	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3460		    &vmw_cmd_dx_view_define, true, false, true),
3461	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3462		    &vmw_cmd_dx_view_remove, true, false, true),
3463	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3464		    &vmw_cmd_dx_view_define, true, false, true),
3465	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3466		    &vmw_cmd_dx_view_remove, true, false, true),
3467	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3468		    &vmw_cmd_dx_so_define, true, false, true),
3469	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3470		    &vmw_cmd_dx_cid_check, true, false, true),
3471	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3472		    &vmw_cmd_dx_so_define, true, false, true),
3473	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3474		    &vmw_cmd_dx_cid_check, true, false, true),
3475	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3476		    &vmw_cmd_dx_so_define, true, false, true),
3477	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3478		    &vmw_cmd_dx_cid_check, true, false, true),
3479	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3480		    &vmw_cmd_dx_so_define, true, false, true),
3481	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3482		    &vmw_cmd_dx_cid_check, true, false, true),
3483	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3484		    &vmw_cmd_dx_so_define, true, false, true),
3485	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3486		    &vmw_cmd_dx_cid_check, true, false, true),
3487	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3488		    &vmw_cmd_dx_define_shader, true, false, true),
3489	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3490		    &vmw_cmd_dx_destroy_shader, true, false, true),
3491	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3492		    &vmw_cmd_dx_bind_shader, true, false, true),
3493	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3494		    &vmw_cmd_dx_so_define, true, false, true),
3495	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3496		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
3497	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
3498		    &vmw_cmd_dx_set_streamoutput, true, false, true),
3499	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3500		    &vmw_cmd_dx_set_so_targets, true, false, true),
3501	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3502		    &vmw_cmd_dx_cid_check, true, false, true),
3503	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3504		    &vmw_cmd_dx_cid_check, true, false, true),
3505	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3506		    &vmw_cmd_buffer_copy_check, true, false, true),
3507	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3508		    &vmw_cmd_pred_copy_check, true, false, true),
3509	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3510		    &vmw_cmd_dx_transfer_from_buffer,
3511		    true, false, true),
3512	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3513		    true, false, true),
3514
3515	/*
3516	 * SM5 commands
3517	 */
3518	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
3519		    true, false, true),
3520	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
3521		    true, false, true),
3522	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
3523		    true, false, true),
3524	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
3525		    &vmw_cmd_clear_uav_float, true, false, true),
3526	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
3527		    false, true),
3528	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
3529		    true),
3530	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
3531		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
3532	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
3533		    &vmw_cmd_instanced_indirect, true, false, true),
3534	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
3535	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
3536		    &vmw_cmd_dispatch_indirect, true, false, true),
3537	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
3538		    false, true),
3539	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
3540		    &vmw_cmd_sm5_view_define, true, false, true),
3541	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
3542		    &vmw_cmd_dx_define_streamoutput, true, false, true),
3543	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
3544		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
3545};
3546
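/**
 * vmw_cmd_describe - Look up the name and size of a command, for debugging
 *
 * @buf: Pointer to the command.
 * @size: Out parameter returning the size of the command in bytes.
 * @cmd: Out parameter returning the name of the command.
 *
 * Returns false if the command id is out of range or unknown.
 */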
3547bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3548{
3549	u32 cmd_id = ((u32 *) buf)[0];
3550
3551	if (cmd_id >= SVGA_CMD_MAX) {
3552		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3553		const struct vmw_cmd_entry *entry;
3554
3555		*size = header->size + sizeof(SVGA3dCmdHeader);
3556		cmd_id = header->id;
3557		if (cmd_id >= SVGA_3D_CMD_MAX)
3558			return false;
3559
3560		cmd_id -= SVGA_3D_CMD_BASE;
3561		entry = &vmw_cmd_entries[cmd_id];
3562		*cmd = entry->cmd_name;
3563		return true;
3564	}
3565
3566	switch (cmd_id) {
3567	case SVGA_CMD_UPDATE:
3568		*cmd = "SVGA_CMD_UPDATE";
3569		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3570		break;
3571	case SVGA_CMD_DEFINE_GMRFB:
3572		*cmd = "SVGA_CMD_DEFINE_GMRFB";
3573		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3574		break;
3575	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3576		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3577		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3578		break;
3579	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3580		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3581		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3582		break;
3583	default:
3584		*cmd = "UNKNOWN";
3585		*size = 0;
3586		return false;
3587	}
3588
3589	return true;
3590}
3591
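/**
 * vmw_cmd_check - Validate a single command in the command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command.
 * @size: In: bytes remaining in the stream. Out: size of this command.
 *
 * Looks the command up in vmw_cmd_entries[], enforces the per-command
 * user-space and guest-backed restrictions, and then calls the command's
 * validation function.
 */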
3592static int vmw_cmd_check(struct vmw_private *dev_priv,
3593			 struct vmw_sw_context *sw_context, void *buf,
3594			 uint32_t *size)
3595{
3596	uint32_t cmd_id;
3597	uint32_t size_remaining = *size;
3598	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3599	int ret;
3600	const struct vmw_cmd_entry *entry;
3601	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3602
3603	cmd_id = ((uint32_t *)buf)[0];
3604	/* Handle any non-3D commands. */
3605	if (unlikely(cmd_id < SVGA_CMD_MAX))
3606		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3607
3609	cmd_id = header->id;
3610	*size = header->size + sizeof(SVGA3dCmdHeader);
3611
3612	cmd_id -= SVGA_3D_CMD_BASE;
3613	if (unlikely(*size > size_remaining))
3614		goto out_invalid;
3615
3616	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3617		goto out_invalid;
3618
3619	entry = &vmw_cmd_entries[cmd_id];
3620	if (unlikely(!entry->func))
3621		goto out_invalid;
3622
3623	if (unlikely(!entry->user_allow && !sw_context->kernel))
3624		goto out_privileged;
3625
3626	if (unlikely(entry->gb_disable && gb))
3627		goto out_old;
3628
3629	if (unlikely(entry->gb_enable && !gb))
3630		goto out_new;
3631
3632	ret = entry->func(dev_priv, sw_context, header);
3633	if (unlikely(ret != 0)) {
3634		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3635			       cmd_id + SVGA_3D_CMD_BASE, ret);
3636		return ret;
3637	}
3638
3639	return 0;
3640out_invalid:
3641	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3642		       cmd_id + SVGA_3D_CMD_BASE);
3643	return -EINVAL;
3644out_privileged:
3645	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3646		       cmd_id + SVGA_3D_CMD_BASE);
3647	return -EPERM;
3648out_old:
3649	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3650		       cmd_id + SVGA_3D_CMD_BASE);
3651	return -EINVAL;
3652out_new:
3653	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3654		       cmd_id + SVGA_3D_CMD_BASE);
3655	return -EINVAL;
3656}
3657
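/**
 * vmw_cmd_check_all - Validate every command in a batch
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the start of the command batch.
 * @size: Size of the command batch in bytes.
 */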
3658static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3659			     struct vmw_sw_context *sw_context, void *buf,
3660			     uint32_t size)
3661{
3662	int32_t cur_size = size;
3663	int ret;
3664
3665	sw_context->buf_start = buf;
3666
3667	while (cur_size > 0) {
3668		size = cur_size;
3669		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3670		if (unlikely(ret != 0))
3671			return ret;
3672		buf = (void *)((unsigned long) buf + size);
3673		cur_size -= size;
3674	}
3675
3676	if (unlikely(cur_size != 0)) {
3677		VMW_DEBUG_USER("Command verifier out of sync.\n");
3678		return -EINVAL;
3679	}
3680
3681	return 0;
3682}
3683
3684static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3685{
3686	/* Memory is validation context memory, so no need to free it */
3687	INIT_LIST_HEAD(&sw_context->bo_relocations);
3688}
3689
3690static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3691{
3692	struct vmw_relocation *reloc;
3693	struct ttm_buffer_object *bo;
3694
3695	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3696		bo = &reloc->vbo->base;
3697		switch (bo->mem.mem_type) {
3698		case TTM_PL_VRAM:
3699			reloc->location->offset += bo->mem.start << PAGE_SHIFT;
3700			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3701			break;
3702		case VMW_PL_GMR:
3703			reloc->location->gmrId = bo->mem.start;
3704			break;
3705		case VMW_PL_MOB:
3706			*reloc->mob_loc = bo->mem.start;
3707			break;
3708		default:
3709			BUG();
3710		}
3711	}
3712	vmw_free_relocations(sw_context);
3713}
3714
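/**
 * vmw_resize_cmd_bounce - Ensure the command bounce buffer is large enough
 *
 * @sw_context: The software context holding the bounce buffer.
 * @size: Required buffer size in bytes.
 *
 * Grows the buffer by roughly 50% per step, starting from
 * VMWGFX_CMD_BOUNCE_INIT_SIZE. The old buffer contents are not preserved.
 */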
3715static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3716				 uint32_t size)
3717{
3718	if (likely(sw_context->cmd_bounce_size >= size))
3719		return 0;
3720
3721	if (sw_context->cmd_bounce_size == 0)
3722		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3723
3724	while (sw_context->cmd_bounce_size < size) {
3725		sw_context->cmd_bounce_size =
3726			PAGE_ALIGN(sw_context->cmd_bounce_size +
3727				   (sw_context->cmd_bounce_size >> 1));
3728	}
3729
3730	vfree(sw_context->cmd_bounce);
3731	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3732
3733	if (sw_context->cmd_bounce == NULL) {
3734		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3735		sw_context->cmd_bounce_size = 0;
3736		return -ENOMEM;
3737	}
3738
3739	return 0;
3740}
3741
3742/**
3743 * vmw_execbuf_fence_commands - create and submit a command stream fence
3744 *
3745 * Creates a fence object and submits a command stream marker.
3746 * If this fails for some reason, we sync the fifo and set *p_fence to NULL.
3747 * It is then safe to fence buffers with a NULL pointer.
3748 *
3749 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3750 * user-space handle is created for the fence; otherwise no handle is created.
3751 */
3752
3753int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3754			       struct vmw_private *dev_priv,
3755			       struct vmw_fence_obj **p_fence,
3756			       uint32_t *p_handle)
3757{
3758	uint32_t sequence;
3759	int ret;
3760	bool synced = false;
3761
3762	/* p_handle implies file_priv. */
3763	BUG_ON(p_handle != NULL && file_priv == NULL);
3764
3765	ret = vmw_fifo_send_fence(dev_priv, &sequence);
3766	if (unlikely(ret != 0)) {
3767		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3768		synced = true;
3769	}
3770
3771	if (p_handle != NULL)
3772		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3773					    sequence, p_fence, p_handle);
3774	else
3775		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3776
3777	if (unlikely(ret != 0 && !synced)) {
3778		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
3779					 false, VMW_FENCE_WAIT_TIMEOUT);
3780		*p_fence = NULL;
3781	}
3782
 
 
3783	return ret;
3784}
3785
3786/**
3787 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3788 *
3789 * @dev_priv: Pointer to a vmw_private struct.
3790 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3791 * @ret: Return value from fence object creation.
3792 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3793 * the information should be copied.
3794 * @fence: Pointer to the fence object.
3795 * @fence_handle: User-space fence handle.
3796 * @out_fence_fd: exported file descriptor for the fence.  -1 if not used
3797 * @sync_file:  Only used to clean up in case of an error in this function.
3798 *
3799 * This function copies fence information to user-space. If copying fails, the
3800 * user-space struct drm_vmw_fence_rep::error member is hopefully left
3801 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
3802 * will hopefully be detected.
3803 *
3804 * Also if copying fails, user-space will be unable to signal the fence object
3805 * so we wait for it immediately, and then unreference the user-space reference.
3806 */
3807void
3808vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3809			    struct vmw_fpriv *vmw_fp, int ret,
3810			    struct drm_vmw_fence_rep __user *user_fence_rep,
3811			    struct vmw_fence_obj *fence, uint32_t fence_handle,
3812			    int32_t out_fence_fd, struct sync_file *sync_file)
3813{
3814	struct drm_vmw_fence_rep fence_rep;
3815
3816	if (user_fence_rep == NULL)
3817		return;
3818
3819	memset(&fence_rep, 0, sizeof(fence_rep));
3820
3821	fence_rep.error = ret;
3822	fence_rep.fd = out_fence_fd;
3823	if (ret == 0) {
3824		BUG_ON(fence == NULL);
3825
3826		fence_rep.handle = fence_handle;
3827		fence_rep.seqno = fence->base.seqno;
3828		vmw_update_seqno(dev_priv, &dev_priv->fifo);
3829		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3830	}
3831
3832	/*
3833	 * copy_to_user errors will be detected by user space not seeing
3834	 * fence_rep::error filled in. Typically user-space would have pre-set
3835	 * that member to -EFAULT.
3836	 */
3837	ret = copy_to_user(user_fence_rep, &fence_rep,
3838			   sizeof(fence_rep));
3839
3840	/*
3841	 * User-space lost the fence object. We need to sync and unreference the
3842	 * handle.
3843	 */
3844	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3845		if (sync_file)
3846			fput(sync_file->file);
3847
3848		if (fence_rep.fd != -1) {
3849			put_unused_fd(fence_rep.fd);
3850			fence_rep.fd = -1;
3851		}
3852
3853		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
3854					  TTM_REF_USAGE);
3855		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3856		(void) vmw_fence_obj_wait(fence, false, false,
3857					  VMW_FENCE_WAIT_TIMEOUT);
3858	}
3859}
3860
3861/**
3862 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3863 *
3864 * @dev_priv: Pointer to a device private structure.
3865 * @kernel_commands: Pointer to the unpatched command batch.
3866 * @command_size: Size of the unpatched command batch.
3867 * @sw_context: Structure holding the relocation lists.
3868 *
3869 * Side effects: If this function returns 0, then the command batch pointed to
3870 * by @kernel_commands will have been modified.
3871 */
3872static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3873				   void *kernel_commands, u32 command_size,
3874				   struct vmw_sw_context *sw_context)
3875{
3876	void *cmd;
3877
3878	if (sw_context->dx_ctx_node)
3879		cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
3880					  sw_context->dx_ctx_node->ctx->id);
3881	else
3882		cmd = VMW_FIFO_RESERVE(dev_priv, command_size);
3883
3884	if (!cmd)
3885		return -ENOMEM;
3886
3887	vmw_apply_relocations(sw_context);
3888	memcpy(cmd, kernel_commands, command_size);
3889	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3890	vmw_resource_relocations_free(&sw_context->res_relocations);
3891	vmw_fifo_commit(dev_priv, command_size);
3892
3893	return 0;
3894}
3895
3896/**
3897 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3898 * command buffer manager.
3899 *
3900 * @dev_priv: Pointer to a device private structure.
3901 * @header: Opaque handle to the command buffer allocation.
3902 * @command_size: Size of the unpatched command batch.
3903 * @sw_context: Structure holding the relocation lists.
3904 *
3905 * Side effects: If this function returns 0, then the command buffer represented
3906 * by @header will have been modified.
3907 */
3908static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3909				     struct vmw_cmdbuf_header *header,
3910				     u32 command_size,
3911				     struct vmw_sw_context *sw_context)
3912{
3913	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3914		  SVGA3D_INVALID_ID);
3915	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3916				       header);
3917
3918	vmw_apply_relocations(sw_context);
3919	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3920	vmw_resource_relocations_free(&sw_context->res_relocations);
3921	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3922
3923	return 0;
3924}
3925
3926/**
3927 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3928 * submission using a command buffer.
3929 *
3930 * @dev_priv: Pointer to a device private structure.
3931 * @user_commands: User-space pointer to the commands to be submitted.
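 * @kernel_commands: Kernel-space pointer to the commands, or NULL if they
 * are to be copied from @user_commands.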
3932 * @command_size: Size of the unpatched command batch.
3933 * @header: Out parameter returning the opaque pointer to the command buffer.
3934 *
3935 * This function checks whether we can use the command buffer manager for
3936 * submission and if so, creates a command buffer of suitable size and copies
3937 * the user data into that buffer.
3938 *
3939 * On successful return, the function returns a pointer to the data in the
3940 * command buffer and *@header is set to non-NULL.
3941 *
3942 * If command buffers cannot be used, the function returns the value of
3943 * @kernel_commands as passed in. That value may be NULL. In that case, the
3944 * value of *@header will be set to NULL.
3945 *
3946 * If an error is encountered, the function will return a pointer error value.
3947 * If the function is interrupted by a signal while sleeping, it will return
3948 * -ERESTARTSYS cast to a pointer error value.
3949 */
3950static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3951				void __user *user_commands,
3952				void *kernel_commands, u32 command_size,
3953				struct vmw_cmdbuf_header **header)
3954{
3955	size_t cmdbuf_size;
3956	int ret;
3957
3958	*header = NULL;
3959	if (command_size > SVGA_CB_MAX_SIZE) {
3960		VMW_DEBUG_USER("Command buffer is too large.\n");
3961		return ERR_PTR(-EINVAL);
3962	}
3963
3964	if (!dev_priv->cman || kernel_commands)
3965		return kernel_commands;
3966
3967	/* If possible, add a little space for fencing. */
3968	cmdbuf_size = command_size + 512;
3969	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3970	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
3971					   header);
3972	if (IS_ERR(kernel_commands))
3973		return kernel_commands;
3974
3975	ret = copy_from_user(kernel_commands, user_commands, command_size);
3976	if (ret) {
3977		VMW_DEBUG_USER("Failed copying commands.\n");
3978		vmw_cmdbuf_header_free(*header);
3979		*header = NULL;
3980		return ERR_PTR(-EFAULT);
3981	}
3982
3983	return kernel_commands;
3984}
3985
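/**
 * vmw_execbuf_tie_context - Look up and validate the DX context of a batch
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID if the
 * batch has no DX context.
 */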
3986static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3987				   struct vmw_sw_context *sw_context,
3988				   uint32_t handle)
3989{
3990	struct vmw_resource *res;
3991	int ret;
3992	unsigned int size;
3993
3994	if (handle == SVGA3D_INVALID_ID)
3995		return 0;
3996
3997	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
3998	ret = vmw_validation_preload_res(sw_context->ctx, size);
3999	if (ret)
4000		return ret;
4001
4002	res = vmw_user_resource_noref_lookup_handle
4003		(dev_priv, sw_context->fp->tfile, handle,
4004		 user_context_converter);
4005	if (IS_ERR(res)) {
4006		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
4007			       (unsigned int) handle);
4008		return PTR_ERR(res);
4009	}
4010
4011	ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
4012	if (unlikely(ret != 0))
4013		return ret;
4014
4015	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
4016	sw_context->man = vmw_context_res_man(res);
4017
4018	return 0;
4019}
4020
4021int vmw_execbuf_process(struct drm_file *file_priv,
4022			struct vmw_private *dev_priv,
4023			void __user *user_commands, void *kernel_commands,
4024			uint32_t command_size, uint64_t throttle_us,
4025			uint32_t dx_context_handle,
4026			struct drm_vmw_fence_rep __user *user_fence_rep,
4027			struct vmw_fence_obj **out_fence, uint32_t flags)
4028{
4029	struct vmw_sw_context *sw_context = &dev_priv->ctx;
4030	struct vmw_fence_obj *fence = NULL;
4031	struct vmw_cmdbuf_header *header;
4032	uint32_t handle = 0;
4033	int ret;
4034	int32_t out_fence_fd = -1;
4035	struct sync_file *sync_file = NULL;
4036	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
4037
4038	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
4039
4040	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4041		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
4042		if (out_fence_fd < 0) {
4043			VMW_DEBUG_USER("Failed to get a fence fd.\n");
4044			return out_fence_fd;
4045		}
4046	}
4047
4048	if (throttle_us) {
4049		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
4050				   throttle_us);
4051
4052		if (ret)
4053			goto out_free_fence_fd;
4054	}
4055
4056	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4057					     kernel_commands, command_size,
4058					     &header);
4059	if (IS_ERR(kernel_commands)) {
4060		ret = PTR_ERR(kernel_commands);
4061		goto out_free_fence_fd;
4062	}
4063
4064	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4065	if (ret) {
4066		ret = -ERESTARTSYS;
4067		goto out_free_header;
4068	}
4069
4070	sw_context->kernel = false;
4071	if (kernel_commands == NULL) {
4072		ret = vmw_resize_cmd_bounce(sw_context, command_size);
4073		if (unlikely(ret != 0))
4074			goto out_unlock;
4075
4076		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
4077				     command_size);
4078		if (unlikely(ret != 0)) {
4079			ret = -EFAULT;
4080			VMW_DEBUG_USER("Failed copying commands.\n");
4081			goto out_unlock;
4082		}
4083
4084		kernel_commands = sw_context->cmd_bounce;
4085	} else if (!header) {
4086		sw_context->kernel = true;
4087	}
4088
4089	sw_context->fp = vmw_fpriv(file_priv);
4090	INIT_LIST_HEAD(&sw_context->ctx_list);
4091	sw_context->cur_query_bo = dev_priv->pinned_bo;
4092	sw_context->last_query_ctx = NULL;
4093	sw_context->needs_post_query_barrier = false;
4094	sw_context->dx_ctx_node = NULL;
4095	sw_context->dx_query_mob = NULL;
4096	sw_context->dx_query_ctx = NULL;
4097	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4098	INIT_LIST_HEAD(&sw_context->res_relocations);
4099	INIT_LIST_HEAD(&sw_context->bo_relocations);
4100
4101	if (sw_context->staged_bindings)
4102		vmw_binding_state_reset(sw_context->staged_bindings);
4103
4104	if (!sw_context->res_ht_initialized) {
4105		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
4106		if (unlikely(ret != 0))
4107			goto out_unlock;
4108
4109		sw_context->res_ht_initialized = true;
4110	}
4111
4112	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4113	sw_context->ctx = &val_ctx;
4114	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4115	if (unlikely(ret != 0))
4116		goto out_err_nores;
4117
4118	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4119				command_size);
4120	if (unlikely(ret != 0))
4121		goto out_err_nores;
4122
4123	ret = vmw_resources_reserve(sw_context);
4124	if (unlikely(ret != 0))
4125		goto out_err_nores;
4126
4127	ret = vmw_validation_bo_reserve(&val_ctx, true);
4128	if (unlikely(ret != 0))
4129		goto out_err_nores;
4130
4131	ret = vmw_validation_bo_validate(&val_ctx, true);
4132	if (unlikely(ret != 0))
4133		goto out_err;
4134
4135	ret = vmw_validation_res_validate(&val_ctx, true);
4136	if (unlikely(ret != 0))
4137		goto out_err;
4138
4139	vmw_validation_drop_ht(&val_ctx);
4140
4141	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4142	if (unlikely(ret != 0)) {
4143		ret = -ERESTARTSYS;
4144		goto out_err;
4145	}
4146
4147	if (dev_priv->has_mob) {
4148		ret = vmw_rebind_contexts(sw_context);
4149		if (unlikely(ret != 0))
4150			goto out_unlock_binding;
4151	}
4152
4153	if (!header) {
4154		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4155					      command_size, sw_context);
4156	} else {
4157		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4158						sw_context);
4159		header = NULL;
4160	}
4161	mutex_unlock(&dev_priv->binding_mutex);
4162	if (ret)
4163		goto out_err;
4164
4165	vmw_query_bo_switch_commit(dev_priv, sw_context);
4166	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
4167					 (user_fence_rep) ? &handle : NULL);
4168	/*
4169	 * This error is harmless, because if fence submission fails,
4170	 * vmw_fifo_send_fence will sync. The error will be propagated to
4171	 * user-space in @user_fence_rep.
4172	 */
4173	if (ret != 0)
4174		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
4175
4176	vmw_execbuf_bindings_commit(sw_context, false);
4177	vmw_bind_dx_query_mob(sw_context);
4178	vmw_validation_res_unreserve(&val_ctx, false);
4179
4180	vmw_validation_bo_fence(sw_context->ctx, fence);
4181
4182	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4183		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
4184
4185	/*
4186	 * If anything fails here, give up trying to export the fence and do a
4187	 * sync since the user mode will not be able to sync the fence itself.
4188	 * This ensures we are still functionally correct.
4189	 */
4190	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4191
4192		sync_file = sync_file_create(&fence->base);
4193		if (!sync_file) {
4194			VMW_DEBUG_USER("Sync file create failed for fence\n");
4195			put_unused_fd(out_fence_fd);
4196			out_fence_fd = -1;
4197
4198			(void) vmw_fence_obj_wait(fence, false, false,
4199						  VMW_FENCE_WAIT_TIMEOUT);
4200		} else {
4201			/* Link the fence with the FD created earlier */
4202			fd_install(out_fence_fd, sync_file->file);
4203		}
4204	}
4205
4206	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4207				    user_fence_rep, fence, handle, out_fence_fd,
4208				    sync_file);
4209
4210	/* Don't unreference when handing fence out */
4211	if (unlikely(out_fence != NULL)) {
4212		*out_fence = fence;
4213		fence = NULL;
4214	} else if (likely(fence != NULL)) {
4215		vmw_fence_obj_unreference(&fence);
4216	}
4217
4218	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4219	mutex_unlock(&dev_priv->cmdbuf_mutex);
4220
4221	/*
4222	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4223	 * in resource destruction paths.
4224	 */
4225	vmw_validation_unref_lists(&val_ctx);
4226
4227	return 0;
4228
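/*
 * Error unwind: each label below undoes progressively less of the setup
 * performed above, and control falls through from the most- to the
 * least-initialized stage.
 */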
4229out_unlock_binding:
4230	mutex_unlock(&dev_priv->binding_mutex);
4231out_err:
4232	vmw_validation_bo_backoff(&val_ctx);
4233out_err_nores:
4234	vmw_execbuf_bindings_commit(sw_context, true);
4235	vmw_validation_res_unreserve(&val_ctx, true);
4236	vmw_resource_relocations_free(&sw_context->res_relocations);
4237	vmw_free_relocations(sw_context);
4238	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4239		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4240out_unlock:
4241	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4242	vmw_validation_drop_ht(&val_ctx);
4243	WARN_ON(!list_empty(&sw_context->ctx_list));
4244	mutex_unlock(&dev_priv->cmdbuf_mutex);
4245
4246	/*
4247	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4248	 * in resource destruction paths.
4249	 */
4250	vmw_validation_unref_lists(&val_ctx);
4251out_free_header:
4252	if (header)
4253		vmw_cmdbuf_header_free(header);
4254out_free_fence_fd:
4255	if (out_fence_fd >= 0)
4256		put_unused_fd(out_fence_fd);
4257
4258	return ret;
4259}
4260
4261/**
4262 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4263 *
4264 * @dev_priv: The device private structure.
4265 *
4266 * This function is called to idle the fifo and unpin the query buffer if the
4267 * normal way to do this hits an error, which should typically be extremely
4268 * rare.
4269 */
4270static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4271{
4272	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
4273
4274	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4275	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4276	if (dev_priv->dummy_query_bo_pinned) {
4277		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4278		dev_priv->dummy_query_bo_pinned = false;
4279	}
4280}
4281
4282
4283/**
4284 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
4285 * bo.
4286 *
4287 * @dev_priv: The device private structure.
4288 * @fence: If non-NULL, should point to a struct vmw_fence_obj issued _after_ a
4289 * query barrier that flushes all queries touching the current buffer pointed to
4290 * by @dev_priv->pinned_bo.
4291 *
4292 * This function should be used to unpin the pinned query bo, or as a query
4293 * barrier when we need to make sure that all queries have finished before the
4294 * next fifo command, for example on hardware context destruction, where the
4295 * hardware may otherwise leak unfinished queries.
4296 *
4297 * This function does not return any failure codes, but makes an attempt to
4298 * unpin safely in case of errors.
4299 *
4300 * The function will synchronize on the previous query barrier, and will thus
4301 * not finish until that barrier has executed.
4302 *
4303 * The @dev_priv->cmdbuf_mutex must be held by the current thread when
4304 * calling this function.
4305 */
4306void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4307				     struct vmw_fence_obj *fence)
4308{
4309	int ret = 0;
4310	struct vmw_fence_obj *lfence = NULL;
4311	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
4312
4313	if (dev_priv->pinned_bo == NULL)
4314		goto out_unlock;
4315
4316	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
4317				    false);
4318	if (ret)
4319		goto out_no_reserve;
4320
4321	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
4322				    false);
4323	if (ret)
4324		goto out_no_reserve;
4325
4326	ret = vmw_validation_bo_reserve(&val_ctx, false);
4327	if (ret)
4328		goto out_no_reserve;
4329
4330	if (dev_priv->query_cid_valid) {
4331		BUG_ON(fence != NULL);
4332		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
4333		if (ret)
4334			goto out_no_emit;
4335		dev_priv->query_cid_valid = false;
4336	}
4337
4338	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4339	if (dev_priv->dummy_query_bo_pinned) {
4340		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4341		dev_priv->dummy_query_bo_pinned = false;
4342	}
4343	if (fence == NULL) {
4344		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4345						  NULL);
4346		fence = lfence;
4347	}
4348	vmw_validation_bo_fence(&val_ctx, fence);
4349	if (lfence != NULL)
4350		vmw_fence_obj_unreference(&lfence);
4351
4352	vmw_validation_unref_lists(&val_ctx);
4353	vmw_bo_unreference(&dev_priv->pinned_bo);
4354
4355out_unlock:
4356	return;
4357out_no_emit:
4358	vmw_validation_bo_backoff(&val_ctx);
4359out_no_reserve:
4360	vmw_validation_unref_lists(&val_ctx);
4361	vmw_execbuf_unpin_panic(dev_priv);
4362	vmw_bo_unreference(&dev_priv->pinned_bo);
4363}
4364
4365/**
4366 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
4367 *
4368 * @dev_priv: The device private structure.
4369 *
4370 * This function should be used to unpin the pinned query bo, or as a query
4371 * barrier when we need to make sure that all queries have finished before the
4372 * next fifo command, for example on hardware context destruction, where the
4373 * hardware may otherwise leak unfinished queries.
4374 *
4375 * This function does not return any failure codes, but makes an attempt to
4376 * unpin safely in case of errors.
4377 *
4378 * The function will synchronize on the previous query barrier, and will thus
4379 * not finish until that barrier has executed.
4380 */
4381void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4382{
4383	mutex_lock(&dev_priv->cmdbuf_mutex);
4384	if (dev_priv->query_cid_valid)
4385		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4386	mutex_unlock(&dev_priv->cmdbuf_mutex);
4387}
4388
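/**
 * vmw_execbuf_ioctl - Entry point for the execbuf ioctl.
 *
 * @dev: The DRM device.
 * @data: Pointer to a struct drm_vmw_execbuf_arg, copied in by core DRM.
 * @file_priv: DRM file-private structure of the caller.
 *
 * Validates the argument version, waits on an imported fence fd if one was
 * supplied, and hands the command batch to vmw_execbuf_process().
 */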
4389int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
4390		      struct drm_file *file_priv)
4391{
4392	struct vmw_private *dev_priv = vmw_priv(dev);
4393	struct drm_vmw_execbuf_arg *arg = data;
4394	int ret;
4395	struct dma_fence *in_fence = NULL;
4396
4397	/*
4398	 * Extend the ioctl argument while maintaining backwards compatibility:
4399	 * We take different code paths depending on the value of arg->version.
4400	 *
4401	 * Note: The ioctl argument is extended and zero-padded by core DRM.
4402	 */
4403	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
4404		     arg->version == 0)) {
4405		VMW_DEBUG_USER("Incorrect execbuf version.\n");
4406		return -EINVAL;
4407	}
4408
4409	switch (arg->version) {
4410	case 1:
4411		/* For v1, core DRM has extended and zero-padded the data */
4412		arg->context_handle = (uint32_t) -1;
4413		break;
4414	case 2:
4415	default:
4416		/* For v2 and later, core DRM has copied it correctly */
4417		break;
4418	}
4419
4420	/* If a fence FD was imported from elsewhere, wait on it */
4421	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4422		in_fence = sync_file_get_fence(arg->imported_fence_fd);
4423
4424		if (!in_fence) {
4425			VMW_DEBUG_USER("Cannot get imported fence\n");
4426			return -EINVAL;
4427		}
4428
4429		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
4430		if (ret)
4431			goto out;
4432	}
4433
4434	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4435	if (unlikely(ret != 0))
4436		goto out;	/* Drop the imported fence reference on error */
4437
4438	ret = vmw_execbuf_process(file_priv, dev_priv,
4439				  (void __user *)(unsigned long)arg->commands,
4440				  NULL, arg->command_size, arg->throttle_us,
4441				  arg->context_handle,
4442				  (void __user *)(unsigned long)arg->fence_rep,
4443				  NULL, arg->flags);
4444
4445	ttm_read_unlock(&dev_priv->reservation_sem);
4446	if (unlikely(ret != 0))
4447		goto out;
4448
4449	vmw_kms_cursor_post_execbuf(dev_priv);
4450
4451out:
4452	if (in_fence)
4453		dma_fence_put(in_fence);
4454	return ret;
4455}
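
/*
 * For reference, a user-space sketch of driving this ioctl through libdrm.
 * This is illustrative only: error handling is minimal, header paths may
 * vary, drm_fd is assumed to be an open vmwgfx device node, cmd_buffer and
 * cmd_size are hypothetical, and context_handle is set to ~0u
 * (SVGA3D_INVALID_ID) since the batch is not tied to a DX context:
 *
 *	#include <stdint.h>
 *	#include <xf86drm.h>
 *	#include "vmwgfx_drm.h"
 *
 *	struct drm_vmw_fence_rep fence_rep;
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (uintptr_t)cmd_buffer,
 *		.command_size = cmd_size,
 *		.throttle_us = 0,
 *		.fence_rep = (uintptr_t)&fence_rep,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *		.flags = 0,
 *		.context_handle = ~0u,
 *	};
 *	int ret = drmCommandWrite(drm_fd, DRM_VMW_EXECBUF,
 *				  &arg, sizeof(arg));
 */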