v3.1 (drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c)
  1/**************************************************************************
  2 *
  3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
  4 * All Rights Reserved.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27
 28#include "vmwgfx_drv.h"
 29#include "vmwgfx_reg.h"
 30#include "ttm/ttm_bo_api.h"
 31#include "ttm/ttm_placement.h"
 32
 33static int vmw_cmd_invalid(struct vmw_private *dev_priv,
 34			   struct vmw_sw_context *sw_context,
 35			   SVGA3dCmdHeader *header)
 36{
 37	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
 38}
 39
 40static int vmw_cmd_ok(struct vmw_private *dev_priv,
 41		      struct vmw_sw_context *sw_context,
 42		      SVGA3dCmdHeader *header)
 43{
 44	return 0;
 45}
 46
 47static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 48			     struct vmw_sw_context *sw_context,
 49			     SVGA3dCmdHeader *header)
 50{
 51	struct vmw_cid_cmd {
 52		SVGA3dCmdHeader header;
 53		__le32 cid;
 54	} *cmd;
 55	int ret;
 56
 57	cmd = container_of(header, struct vmw_cid_cmd, header);
 58	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
 59		return 0;
 60
 61	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
 62	if (unlikely(ret != 0)) {
 63		DRM_ERROR("Could not find or use context %u\n",
 64			  (unsigned) cmd->cid);
 65		return ret;
 66	}
 67
 68	sw_context->last_cid = cmd->cid;
 69	sw_context->cid_valid = true;
 70
 71	return 0;
 72}
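/*
 * A minimal sketch (not taken from this file; hdr_a/hdr_b are hypothetical
 * headers) of the single-entry context cache above: within one submission,
 * only the first command naming a given cid pays for vmw_context_check(),
 * repeats hit the cached last_cid.
 *
 *	sw_context->cid_valid = false;                   /* reset per submission */
 *	vmw_cmd_cid_check(dev_priv, sw_context, hdr_a);  /* cid 3: full check */
 *	vmw_cmd_cid_check(dev_priv, sw_context, hdr_b);  /* cid 3: cached, returns 0 */
 */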
 73
 74static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
 75			     struct vmw_sw_context *sw_context,
 76			     uint32_t *sid)
 77{
 78	if (*sid == SVGA3D_INVALID_ID)
 79		return 0;
 80
 81	if (unlikely((!sw_context->sid_valid  ||
 82		      *sid != sw_context->last_sid))) {
 83		int real_id;
 84		int ret = vmw_surface_check(dev_priv, sw_context->tfile,
 85					    *sid, &real_id);
 86
 87		if (unlikely(ret != 0)) {
 88			DRM_ERROR("Could not find or use surface 0x%08x "
 89				  "address 0x%08lx\n",
 90				  (unsigned int) *sid,
 91				  (unsigned long) sid);
 92			return ret;
 93		}
 94
 95		sw_context->last_sid = *sid;
 96		sw_context->sid_valid = true;
 97		*sid = real_id;
 98		sw_context->sid_translation = real_id;
 99	} else
100		*sid = sw_context->sid_translation;
101
102	return 0;
103}
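/*
 * Sketch (illustrative, not part of the original file): the surface check
 * patches the user-space surface handle in place with the device id and
 * caches the translation, so repeats of the same handle are rewritten
 * without another vmw_surface_check() lookup.
 *
 *	uint32_t sid = user_handle;     /* hypothetical user-space handle */
 *	ret = vmw_cmd_sid_check(dev_priv, sw_context, &sid);
 *	/* on success, sid == sw_context->sid_translation (the device id) */
 */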
104
105
106static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
107					   struct vmw_sw_context *sw_context,
108					   SVGA3dCmdHeader *header)
109{
110	struct vmw_sid_cmd {
111		SVGA3dCmdHeader header;
112		SVGA3dCmdSetRenderTarget body;
113	} *cmd;
114	int ret;
115
116	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
117	if (unlikely(ret != 0))
118		return ret;
119
120	cmd = container_of(header, struct vmw_sid_cmd, header);
121	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
122	return ret;
123}
124
125static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
126				      struct vmw_sw_context *sw_context,
127				      SVGA3dCmdHeader *header)
128{
129	struct vmw_sid_cmd {
130		SVGA3dCmdHeader header;
131		SVGA3dCmdSurfaceCopy body;
132	} *cmd;
133	int ret;
134
135	cmd = container_of(header, struct vmw_sid_cmd, header);
136	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
137	if (unlikely(ret != 0))
138		return ret;
139	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
140}
141
142static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
143				     struct vmw_sw_context *sw_context,
144				     SVGA3dCmdHeader *header)
145{
146	struct vmw_sid_cmd {
147		SVGA3dCmdHeader header;
148		SVGA3dCmdSurfaceStretchBlt body;
149	} *cmd;
150	int ret;
151
152	cmd = container_of(header, struct vmw_sid_cmd, header);
153	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
154	if (unlikely(ret != 0))
155		return ret;
156	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
157}
158
159static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
160					 struct vmw_sw_context *sw_context,
161					 SVGA3dCmdHeader *header)
162{
163	struct vmw_sid_cmd {
164		SVGA3dCmdHeader header;
165		SVGA3dCmdBlitSurfaceToScreen body;
166	} *cmd;
167
168	cmd = container_of(header, struct vmw_sid_cmd, header);
169	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
170}
171
172static int vmw_cmd_present_check(struct vmw_private *dev_priv,
173				 struct vmw_sw_context *sw_context,
174				 SVGA3dCmdHeader *header)
175{
176	struct vmw_sid_cmd {
177		SVGA3dCmdHeader header;
178		SVGA3dCmdPresent body;
179	} *cmd;
180
181	cmd = container_of(header, struct vmw_sid_cmd, header);
182	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
183}
184
185static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
186				   struct vmw_sw_context *sw_context,
187				   SVGAGuestPtr *ptr,
188				   struct vmw_dma_buffer **vmw_bo_p)
189{
190	struct vmw_dma_buffer *vmw_bo = NULL;
191	struct ttm_buffer_object *bo;
192	uint32_t handle = ptr->gmrId;
193	struct vmw_relocation *reloc;
194	uint32_t cur_validate_node;
195	struct ttm_validate_buffer *val_buf;
196	int ret;
197
198	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
199	if (unlikely(ret != 0)) {
200		DRM_ERROR("Could not find or use GMR region.\n");
201		return -EINVAL;
202	}
203	bo = &vmw_bo->base;
204
205	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 206		DRM_ERROR("Max number of relocations per submission"
207			  " exceeded\n");
208		ret = -EINVAL;
209		goto out_no_reloc;
210	}
211
212	reloc = &sw_context->relocs[sw_context->cur_reloc++];
213	reloc->location = ptr;
214
215	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
216	if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
217		DRM_ERROR("Max number of DMA buffers per submission"
218			  " exceeded.\n");
219		ret = -EINVAL;
220		goto out_no_reloc;
221	}
222
223	reloc->index = cur_validate_node;
224	if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
225		val_buf = &sw_context->val_bufs[cur_validate_node];
226		val_buf->bo = ttm_bo_reference(bo);
227		val_buf->new_sync_obj_arg = (void *) dev_priv;
228		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
229		++sw_context->cur_val_buf;
230	}
231	*vmw_bo_p = vmw_bo;
232	return 0;
233
234out_no_reloc:
235	vmw_dmabuf_unreference(&vmw_bo);
236	vmw_bo_p = NULL;
237	return ret;
238}
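/*
 * Typical caller pattern (sketch mirroring vmw_cmd_end_query() below): the
 * relocation and validate-list entry recorded here outlive the caller's
 * reference, so callers drop the returned buffer right away.
 *
 *	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
 *				      &cmd->q.guestResult, &vmw_bo);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	vmw_dmabuf_unreference(&vmw_bo);
 */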
239
240static int vmw_cmd_end_query(struct vmw_private *dev_priv,
241			     struct vmw_sw_context *sw_context,
242			     SVGA3dCmdHeader *header)
243{
244	struct vmw_dma_buffer *vmw_bo;
245	struct vmw_query_cmd {
246		SVGA3dCmdHeader header;
247		SVGA3dCmdEndQuery q;
248	} *cmd;
249	int ret;
250
251	cmd = container_of(header, struct vmw_query_cmd, header);
252	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
253	if (unlikely(ret != 0))
254		return ret;
255
256	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
257				      &cmd->q.guestResult,
258				      &vmw_bo);
259	if (unlikely(ret != 0))
260		return ret;
261
262	vmw_dmabuf_unreference(&vmw_bo);
263	return 0;
264}
265
266static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
267			      struct vmw_sw_context *sw_context,
268			      SVGA3dCmdHeader *header)
269{
270	struct vmw_dma_buffer *vmw_bo;
271	struct vmw_query_cmd {
272		SVGA3dCmdHeader header;
273		SVGA3dCmdWaitForQuery q;
274	} *cmd;
275	int ret;
276
277	cmd = container_of(header, struct vmw_query_cmd, header);
278	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
279	if (unlikely(ret != 0))
280		return ret;
281
282	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
283				      &cmd->q.guestResult,
284				      &vmw_bo);
285	if (unlikely(ret != 0))
286		return ret;
287
288	vmw_dmabuf_unreference(&vmw_bo);
289	return 0;
290}
291
292
293static int vmw_cmd_dma(struct vmw_private *dev_priv,
294		       struct vmw_sw_context *sw_context,
295		       SVGA3dCmdHeader *header)
296{
297	struct vmw_dma_buffer *vmw_bo = NULL;
298	struct ttm_buffer_object *bo;
299	struct vmw_surface *srf = NULL;
300	struct vmw_dma_cmd {
301		SVGA3dCmdHeader header;
302		SVGA3dCmdSurfaceDMA dma;
303	} *cmd;
304	int ret;
305
306	cmd = container_of(header, struct vmw_dma_cmd, header);
307	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
308				      &cmd->dma.guest.ptr,
309				      &vmw_bo);
310	if (unlikely(ret != 0))
311		return ret;
312
313	bo = &vmw_bo->base;
314	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
315					     cmd->dma.host.sid, &srf);
316	if (ret) {
317		DRM_ERROR("could not find surface\n");
318		goto out_no_reloc;
319	}
320
321	/**
322	 * Patch command stream with device SID.
323	 */
324
325	cmd->dma.host.sid = srf->res.id;
326	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
327	/**
328	 * FIXME: May deadlock here when called from the
329	 * command parsing code.
330	 */
331	vmw_surface_unreference(&srf);
332
333out_no_reloc:
334	vmw_dmabuf_unreference(&vmw_bo);
335	return ret;
336}
337
338static int vmw_cmd_draw(struct vmw_private *dev_priv,
339			struct vmw_sw_context *sw_context,
340			SVGA3dCmdHeader *header)
341{
342	struct vmw_draw_cmd {
343		SVGA3dCmdHeader header;
344		SVGA3dCmdDrawPrimitives body;
345	} *cmd;
346	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
347		(unsigned long)header + sizeof(*cmd));
348	SVGA3dPrimitiveRange *range;
349	uint32_t i;
350	uint32_t maxnum;
351	int ret;
352
353	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
354	if (unlikely(ret != 0))
355		return ret;
356
357	cmd = container_of(header, struct vmw_draw_cmd, header);
358	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
359
360	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
361		DRM_ERROR("Illegal number of vertex declarations.\n");
362		return -EINVAL;
363	}
364
365	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
366		ret = vmw_cmd_sid_check(dev_priv, sw_context,
367					&decl->array.surfaceId);
368		if (unlikely(ret != 0))
369			return ret;
370	}
371
372	maxnum = (header->size - sizeof(cmd->body) -
373		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
374	if (unlikely(cmd->body.numRanges > maxnum)) {
375		DRM_ERROR("Illegal number of index ranges.\n");
376		return -EINVAL;
377	}
378
379	range = (SVGA3dPrimitiveRange *) decl;
380	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
381		ret = vmw_cmd_sid_check(dev_priv, sw_context,
382					&range->indexArray.surfaceId);
383		if (unlikely(ret != 0))
384			return ret;
385	}
386	return 0;
387}
388
389
390static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
391			     struct vmw_sw_context *sw_context,
392			     SVGA3dCmdHeader *header)
393{
394	struct vmw_tex_state_cmd {
395		SVGA3dCmdHeader header;
396		SVGA3dCmdSetTextureState state;
397	};
398
399	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
400	  ((unsigned long) header + header->size + sizeof(header));
401	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
402		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
403	int ret;
404
405	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
406	if (unlikely(ret != 0))
407		return ret;
408
409	for (; cur_state < last_state; ++cur_state) {
410		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
411			continue;
412
413		ret = vmw_cmd_sid_check(dev_priv, sw_context,
414					&cur_state->value);
415		if (unlikely(ret != 0))
416			return ret;
417	}
418
419	return 0;
420}
421
422
423typedef int (*vmw_cmd_func) (struct vmw_private *,
424			     struct vmw_sw_context *,
425			     SVGA3dCmdHeader *);
426
427#define VMW_CMD_DEF(cmd, func) \
428	[cmd - SVGA_3D_CMD_BASE] = func
429
430static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
431	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
432	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
433	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
434	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
435	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
436	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
437	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
438	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
439	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
440	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
441	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
442		    &vmw_cmd_set_render_target_check),
443	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
444	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
445	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
446	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
447	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
448	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
449	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
450	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
451	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
452	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
453	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
454	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
455	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
456	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
457	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
458	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
459	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
460	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
461	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
462		    &vmw_cmd_blt_surf_screen_check)
463};
464
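/*
 * Dispatch sketch (usage as in vmw_cmd_check() below): VMW_CMD_DEF builds a
 * designated-initializer table indexed by the command id relative to
 * SVGA_3D_CMD_BASE, so a 3D command is verified with a single table lookup.
 *
 *	cmd_id = le32_to_cpu(header->id) - SVGA_3D_CMD_BASE;
 *	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
 */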
465static int vmw_cmd_check(struct vmw_private *dev_priv,
466			 struct vmw_sw_context *sw_context,
467			 void *buf, uint32_t *size)
468{
469	uint32_t cmd_id;
470	uint32_t size_remaining = *size;
471	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
472	int ret;
473
474	cmd_id = ((uint32_t *)buf)[0];
475	if (cmd_id == SVGA_CMD_UPDATE) {
476		*size = 5 << 2;
477		return 0;
478	}
479
480	cmd_id = le32_to_cpu(header->id);
481	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
482
483	cmd_id -= SVGA_3D_CMD_BASE;
484	if (unlikely(*size > size_remaining))
485		goto out_err;
486
487	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
488		goto out_err;
489
490	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
491	if (unlikely(ret != 0))
492		goto out_err;
493
494	return 0;
495out_err:
496	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
497		  cmd_id + SVGA_3D_CMD_BASE);
498	return -EINVAL;
499}
500
501static int vmw_cmd_check_all(struct vmw_private *dev_priv,
502			     struct vmw_sw_context *sw_context,
503			     void *buf, uint32_t size)
504{
505	int32_t cur_size = size;
506	int ret;
507
508	while (cur_size > 0) {
509		size = cur_size;
510		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
511		if (unlikely(ret != 0))
512			return ret;
513		buf = (void *)((unsigned long) buf + size);
514		cur_size -= size;
515	}
516
517	if (unlikely(cur_size != 0)) {
518		DRM_ERROR("Command verifier out of sync.\n");
519		return -EINVAL;
520	}
521
522	return 0;
523}
524
525static void vmw_free_relocations(struct vmw_sw_context *sw_context)
526{
527	sw_context->cur_reloc = 0;
528}
529
530static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
531{
532	uint32_t i;
533	struct vmw_relocation *reloc;
534	struct ttm_validate_buffer *validate;
535	struct ttm_buffer_object *bo;
536
537	for (i = 0; i < sw_context->cur_reloc; ++i) {
538		reloc = &sw_context->relocs[i];
539		validate = &sw_context->val_bufs[reloc->index];
540		bo = validate->bo;
541		if (bo->mem.mem_type == TTM_PL_VRAM) {
542			reloc->location->offset += bo->offset;
543			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
544		} else
545			reloc->location->gmrId = bo->mem.start;
546	}
547	vmw_free_relocations(sw_context);
548}
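/*
 * Relocation sketch: each SVGAGuestPtr recorded by vmw_translate_guest_ptr()
 * is patched to the buffer's final placement after validation. A buffer
 * validated into VRAM becomes an offset into the framebuffer "GMR":
 *
 *	reloc->location->gmrId   = SVGA_GMR_FRAMEBUFFER;
 *	reloc->location->offset += bo->offset;
 *
 * while a buffer bound as a real GMR simply gets gmrId = bo->mem.start.
 */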
549
550static void vmw_clear_validations(struct vmw_sw_context *sw_context)
551{
552	struct ttm_validate_buffer *entry, *next;
553
554	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
555				 head) {
556		list_del(&entry->head);
557		vmw_dmabuf_validate_clear(entry->bo);
558		ttm_bo_unref(&entry->bo);
559		sw_context->cur_val_buf--;
560	}
561	BUG_ON(sw_context->cur_val_buf != 0);
562}
563
564static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
565				      struct ttm_buffer_object *bo)
566{
567	int ret;
568
569	/**
570	 * Put BO in VRAM if there is space, otherwise as a GMR.
571	 * If there is no space in VRAM and GMR ids are all used up,
572	 * start evicting GMRs to make room. If the DMA buffer can't be
573	 * used as a GMR, this will return -ENOMEM.
574	 */
575
576	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
577	if (likely(ret == 0 || ret == -ERESTARTSYS))
578		return ret;
579
580	/**
581	 * If that failed, try VRAM again, this time evicting
582	 * previous contents.
583	 */
584
585	DRM_INFO("Falling through to VRAM.\n");
586	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
587	return ret;
588}
589
590
591static int vmw_validate_buffers(struct vmw_private *dev_priv,
592				struct vmw_sw_context *sw_context)
593{
594	struct ttm_validate_buffer *entry;
595	int ret;
596
597	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
598		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
599		if (unlikely(ret != 0))
600			return ret;
601	}
602	return 0;
603}
604
605int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
606		      struct drm_file *file_priv)
607{
608	struct vmw_private *dev_priv = vmw_priv(dev);
609	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
610	struct drm_vmw_fence_rep fence_rep;
611	struct drm_vmw_fence_rep __user *user_fence_rep;
612	int ret;
613	void *user_cmd;
614	void *cmd;
615	uint32_t sequence;
616	struct vmw_sw_context *sw_context = &dev_priv->ctx;
617	struct vmw_master *vmaster = vmw_master(file_priv->master);
618
619	ret = ttm_read_lock(&vmaster->lock, true);
620	if (unlikely(ret != 0))
621		return ret;
622
623	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
624	if (unlikely(ret != 0)) {
625		ret = -ERESTARTSYS;
626		goto out_no_cmd_mutex;
627	}
628
629	cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
630	if (unlikely(cmd == NULL)) {
631		DRM_ERROR("Failed reserving fifo space for commands.\n");
632		ret = -ENOMEM;
633		goto out_unlock;
634	}
635
636	user_cmd = (void __user *)(unsigned long)arg->commands;
637	ret = copy_from_user(cmd, user_cmd, arg->command_size);
638
639	if (unlikely(ret != 0)) {
640		ret = -EFAULT;
641		DRM_ERROR("Failed copying commands.\n");
642		goto out_commit;
643	}
644
645	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
646	sw_context->cid_valid = false;
647	sw_context->sid_valid = false;
648	sw_context->cur_reloc = 0;
649	sw_context->cur_val_buf = 0;
650
651	INIT_LIST_HEAD(&sw_context->validate_nodes);
652
653	ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
654	if (unlikely(ret != 0))
655		goto out_err;
656	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
657	if (unlikely(ret != 0))
658		goto out_err;
659
660	ret = vmw_validate_buffers(dev_priv, sw_context);
661	if (unlikely(ret != 0))
662		goto out_err;
663
664	vmw_apply_relocations(sw_context);
665
666	if (arg->throttle_us) {
667		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
668				   arg->throttle_us);
669
670		if (unlikely(ret != 0))
671			goto out_err;
672	}
673
674	vmw_fifo_commit(dev_priv, arg->command_size);
675
676	ret = vmw_fifo_send_fence(dev_priv, &sequence);
677
678	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
679				    (void *)(unsigned long) sequence);
680	vmw_clear_validations(sw_context);
681	mutex_unlock(&dev_priv->cmdbuf_mutex);
682
683	/*
684	 * This error is harmless, because if fence submission fails,
685	 * vmw_fifo_send_fence will sync.
686	 */
687
688	if (ret != 0)
689		DRM_ERROR("Fence submission error. Syncing.\n");
690
691	fence_rep.error = ret;
692	fence_rep.fence_seq = (uint64_t) sequence;
693	fence_rep.pad64 = 0;
694
695	user_fence_rep = (struct drm_vmw_fence_rep __user *)
696	    (unsigned long)arg->fence_rep;
697
698	/*
699	 * copy_to_user errors will be detected by user space not
700	 * seeing fence_rep::error filled in.
701	 */
702
703	ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));
704
705	vmw_kms_cursor_post_execbuf(dev_priv);
706	ttm_read_unlock(&vmaster->lock);
707	return 0;
708out_err:
709	vmw_free_relocations(sw_context);
710	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
711	vmw_clear_validations(sw_context);
712out_commit:
713	vmw_fifo_commit(dev_priv, 0);
714out_unlock:
715	mutex_unlock(&dev_priv->cmdbuf_mutex);
716out_no_cmd_mutex:
717	ttm_read_unlock(&vmaster->lock);
718	return ret;
719}
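/*
 * Hypothetical user-space sketch (not part of this file) of driving the
 * execbuf ioctl implemented above. The field names follow the arg->
 * accesses in vmw_execbuf_ioctl(); drmCommandWrite() and DRM_VMW_EXECBUF
 * come from libdrm and the vmwgfx UAPI header, and cmd_buf, cmd_bytes and
 * fence_rep are the caller's command buffer, its size and a
 * struct drm_vmw_fence_rep to receive the fence sequence.
 *
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands     = (unsigned long) cmd_buf,
 *		.command_size = cmd_bytes,
 *		.throttle_us  = 0,
 *		.fence_rep    = (unsigned long) &fence_rep,
 *	};
 *	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 */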
v3.5.6 (drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c)
   1/**************************************************************************
   2 *
   3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include "vmwgfx_drv.h"
  29#include "vmwgfx_reg.h"
  30#include "ttm/ttm_bo_api.h"
  31#include "ttm/ttm_placement.h"
  32
  33static int vmw_cmd_invalid(struct vmw_private *dev_priv,
  34			   struct vmw_sw_context *sw_context,
  35			   SVGA3dCmdHeader *header)
  36{
  37	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
  38}
  39
  40static int vmw_cmd_ok(struct vmw_private *dev_priv,
  41		      struct vmw_sw_context *sw_context,
  42		      SVGA3dCmdHeader *header)
  43{
  44	return 0;
  45}
  46
  47static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
  48					  struct vmw_resource **p_res)
  49{
  50	struct vmw_resource *res = *p_res;
  51
  52	if (list_empty(&res->validate_head)) {
  53		list_add_tail(&res->validate_head, &sw_context->resource_list);
  54		*p_res = NULL;
  55	} else
  56		vmw_resource_unreference(p_res);
  57}
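/*
 * Note on the reference handling above: the first time a resource is seen
 * in a submission its caller-held reference is donated to
 * sw_context->resource_list (and *p_res is cleared); if the resource is
 * already on the list, the extra reference is dropped instead.
 */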
  58
  59/**
  60 * vmw_bo_to_validate_list - add a bo to a validate list
  61 *
  62 * @sw_context: The software context used for this command submission batch.
  63 * @bo: The buffer object to add.
  64 * @fence_flags: Fence flags to be or'ed with any other fence flags for
  65 * this buffer on this submission batch.
  66 * @p_val_node: If non-NULL, will be updated with the validate node number
  67 * on return.
  68 *
  69 * Returns -EINVAL if the limit of number of buffer objects per command
  70 * submission is reached.
  71 */
  72static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
  73				   struct ttm_buffer_object *bo,
  74				   uint32_t fence_flags,
  75				   uint32_t *p_val_node)
  76{
  77	uint32_t val_node;
  78	struct ttm_validate_buffer *val_buf;
  79
  80	val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
  81
  82	if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
  83		DRM_ERROR("Max number of DMA buffers per submission"
  84			  " exceeded.\n");
  85		return -EINVAL;
  86	}
  87
  88	val_buf = &sw_context->val_bufs[val_node];
  89	if (unlikely(val_node == sw_context->cur_val_buf)) {
  90		val_buf->new_sync_obj_arg = NULL;
  91		val_buf->bo = ttm_bo_reference(bo);
  92		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
  93		++sw_context->cur_val_buf;
  94	}
  95
  96	val_buf->new_sync_obj_arg = (void *)
  97		((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
  98	sw_context->fence_flags |= fence_flags;
  99
 100	if (p_val_node)
 101		*p_val_node = val_node;
 102
 103	return 0;
 104}
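/*
 * Usage sketch (mirrors vmw_translate_guest_ptr() below): a buffer is added
 * to the validate list once per submission, and the returned node index is
 * what relocations refer back to:
 *
 *	ret = vmw_bo_to_validate_list(sw_context, bo,
 *				      DRM_VMW_FENCE_FLAG_EXEC, &reloc->index);
 */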
 105
 106static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 107			     struct vmw_sw_context *sw_context,
 108			     SVGA3dCmdHeader *header)
 109{
 110	struct vmw_resource *ctx;
 111
 112	struct vmw_cid_cmd {
 113		SVGA3dCmdHeader header;
 114		__le32 cid;
 115	} *cmd;
 116	int ret;
 117
 118	cmd = container_of(header, struct vmw_cid_cmd, header);
 119	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
 120		return 0;
 121
 122	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
 123				&ctx);
 124	if (unlikely(ret != 0)) {
 125		DRM_ERROR("Could not find or use context %u\n",
 126			  (unsigned) cmd->cid);
 127		return ret;
 128	}
 129
 130	sw_context->last_cid = cmd->cid;
 131	sw_context->cid_valid = true;
 132	sw_context->cur_ctx = ctx;
 133	vmw_resource_to_validate_list(sw_context, &ctx);
 134
 135	return 0;
 136}
 137
 138static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
 139			     struct vmw_sw_context *sw_context,
 140			     uint32_t *sid)
 141{
 142	struct vmw_surface *srf;
 143	int ret;
 144	struct vmw_resource *res;
 145
 146	if (*sid == SVGA3D_INVALID_ID)
 147		return 0;
 148
 149	if (likely((sw_context->sid_valid  &&
 150		      *sid == sw_context->last_sid))) {
 151		*sid = sw_context->sid_translation;
 152		return 0;
 153	}
 154
 155	ret = vmw_user_surface_lookup_handle(dev_priv,
 156					     sw_context->tfile,
 157					     *sid, &srf);
 158	if (unlikely(ret != 0)) {
 159		DRM_ERROR("Could not find or use surface 0x%08x "
 160			  "address 0x%08lx\n",
 161			  (unsigned int) *sid,
 162			  (unsigned long) sid);
 163		return ret;
 164	}
 165
 166	ret = vmw_surface_validate(dev_priv, srf);
 167	if (unlikely(ret != 0)) {
 168		if (ret != -ERESTARTSYS)
 169			DRM_ERROR("Could not validate surface.\n");
 170		vmw_surface_unreference(&srf);
 171		return ret;
 172	}
 173
 174	sw_context->last_sid = *sid;
 175	sw_context->sid_valid = true;
 176	sw_context->sid_translation = srf->res.id;
 177	*sid = sw_context->sid_translation;
 178
 179	res = &srf->res;
 180	vmw_resource_to_validate_list(sw_context, &res);
 181
 182	return 0;
 183}
 184
 185
 186static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 187					   struct vmw_sw_context *sw_context,
 188					   SVGA3dCmdHeader *header)
 189{
 190	struct vmw_sid_cmd {
 191		SVGA3dCmdHeader header;
 192		SVGA3dCmdSetRenderTarget body;
 193	} *cmd;
 194	int ret;
 195
 196	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
 197	if (unlikely(ret != 0))
 198		return ret;
 199
 200	cmd = container_of(header, struct vmw_sid_cmd, header);
 201	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
 202	return ret;
 203}
 204
 205static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
 206				      struct vmw_sw_context *sw_context,
 207				      SVGA3dCmdHeader *header)
 208{
 209	struct vmw_sid_cmd {
 210		SVGA3dCmdHeader header;
 211		SVGA3dCmdSurfaceCopy body;
 212	} *cmd;
 213	int ret;
 214
 215	cmd = container_of(header, struct vmw_sid_cmd, header);
 216	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
 217	if (unlikely(ret != 0))
 218		return ret;
 219	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
 220}
 221
 222static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
 223				     struct vmw_sw_context *sw_context,
 224				     SVGA3dCmdHeader *header)
 225{
 226	struct vmw_sid_cmd {
 227		SVGA3dCmdHeader header;
 228		SVGA3dCmdSurfaceStretchBlt body;
 229	} *cmd;
 230	int ret;
 231
 232	cmd = container_of(header, struct vmw_sid_cmd, header);
 233	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
 234	if (unlikely(ret != 0))
 235		return ret;
 236	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
 237}
 238
 239static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
 240					 struct vmw_sw_context *sw_context,
 241					 SVGA3dCmdHeader *header)
 242{
 243	struct vmw_sid_cmd {
 244		SVGA3dCmdHeader header;
 245		SVGA3dCmdBlitSurfaceToScreen body;
 246	} *cmd;
 247
 248	cmd = container_of(header, struct vmw_sid_cmd, header);
 249
 250	if (unlikely(!sw_context->kernel)) {
 251		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
 252		return -EPERM;
 253	}
 254
 255	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
 256}
 257
 258static int vmw_cmd_present_check(struct vmw_private *dev_priv,
 259				 struct vmw_sw_context *sw_context,
 260				 SVGA3dCmdHeader *header)
 261{
 262	struct vmw_sid_cmd {
 263		SVGA3dCmdHeader header;
 264		SVGA3dCmdPresent body;
 265	} *cmd;
 266
 267
 268	cmd = container_of(header, struct vmw_sid_cmd, header);
 269
 270	if (unlikely(!sw_context->kernel)) {
 271		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
 272		return -EPERM;
 273	}
 274
 275	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
 276}
 277
 278/**
 279 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 280 *
 281 * @dev_priv: The device private structure.
 282 * @cid: The hardware context for the next query.
 283 * @new_query_bo: The new buffer holding query results.
 284 * @sw_context: The software context used for this command submission.
 285 *
 286 * This function checks whether @new_query_bo is suitable for holding
 287 * query results, and if another buffer currently is pinned for query
 288 * results. If so, the function prepares the state of @sw_context for
 289 * switching pinned buffers after successful submission of the current
 290 * command batch. It also checks whether we're using a new query context.
 291 * In that case, it makes sure we emit a query barrier for the old
 292 * context before the current query buffer is fenced.
 293 */
 294static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 295				       uint32_t cid,
 296				       struct ttm_buffer_object *new_query_bo,
 297				       struct vmw_sw_context *sw_context)
 298{
 299	int ret;
 300	bool add_cid = false;
 301	uint32_t cid_to_add;
 302
 303	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 304
 305		if (unlikely(new_query_bo->num_pages > 4)) {
 306			DRM_ERROR("Query buffer too large.\n");
 307			return -EINVAL;
 308		}
 309
 310		if (unlikely(sw_context->cur_query_bo != NULL)) {
 311			BUG_ON(!sw_context->query_cid_valid);
 312			add_cid = true;
 313			cid_to_add = sw_context->cur_query_cid;
 314			ret = vmw_bo_to_validate_list(sw_context,
 315						      sw_context->cur_query_bo,
 316						      DRM_VMW_FENCE_FLAG_EXEC,
 317						      NULL);
 318			if (unlikely(ret != 0))
 319				return ret;
 320		}
 321		sw_context->cur_query_bo = new_query_bo;
 322
 323		ret = vmw_bo_to_validate_list(sw_context,
 324					      dev_priv->dummy_query_bo,
 325					      DRM_VMW_FENCE_FLAG_EXEC,
 326					      NULL);
 327		if (unlikely(ret != 0))
 328			return ret;
 329
 330	}
 331
 332	if (unlikely(cid != sw_context->cur_query_cid &&
 333		     sw_context->query_cid_valid)) {
 334		add_cid = true;
 335		cid_to_add = sw_context->cur_query_cid;
 336	}
 337
 338	sw_context->cur_query_cid = cid;
 339	sw_context->query_cid_valid = true;
 340
 341	if (add_cid) {
 342		struct vmw_resource *ctx = sw_context->cur_ctx;
 343
 344		if (list_empty(&ctx->query_head))
 345			list_add_tail(&ctx->query_head,
 346				      &sw_context->query_list);
 347		ret = vmw_bo_to_validate_list(sw_context,
 348					      dev_priv->dummy_query_bo,
 349					      DRM_VMW_FENCE_FLAG_EXEC,
 350					      NULL);
 351		if (unlikely(ret != 0))
 352			return ret;
 353	}
 354	return 0;
 355}
 356
 357
 358/**
 359 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 360 *
 361 * @dev_priv: The device private structure.
 362 * @sw_context: The software context used for this command submission batch.
 363 *
 364 * This function will check if we're switching query buffers, and will then,
 365 * if no other query waits are issued this command submission batch,
 366 * if no other query waits are issued in this command submission batch,
 367 * object following that query wait has signaled, we are sure that all
 368 * preceding queries have finished, and the old query buffer can be unpinned.
 369 * However, since both the new query buffer and the old one are fenced with
 370 * that fence, we can do an asynchronous unpin now, and be sure that the
 371 * old query buffer won't be moved until the fence has signaled.
 372 *
 373 * As mentioned above, both the new and the old query buffers need to be fenced
 374 * using a sequence emitted *after* calling this function.
 375 */
 376static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 377				     struct vmw_sw_context *sw_context)
 378{
 379
 380	struct vmw_resource *ctx, *next_ctx;
 381	int ret;
 382
 383	/*
 384	 * The validate list should still hold references to all
 385	 * contexts here.
 386	 */
 387
 388	list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
 389				 query_head) {
 390		list_del_init(&ctx->query_head);
 391
 392		BUG_ON(list_empty(&ctx->validate_head));
 393
 394		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
 395
 396		if (unlikely(ret != 0))
 397			DRM_ERROR("Out of fifo space for dummy query.\n");
 398	}
 399
 400	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
 401		if (dev_priv->pinned_bo) {
 402			vmw_bo_pin(dev_priv->pinned_bo, false);
 403			ttm_bo_unref(&dev_priv->pinned_bo);
 404		}
 405
 406		vmw_bo_pin(sw_context->cur_query_bo, true);
 407
 408		/*
 409		 * We pin also the dummy_query_bo buffer so that we
 410		 * don't need to validate it when emitting
 411		 * dummy queries in context destroy paths.
 412		 */
 413
 414		vmw_bo_pin(dev_priv->dummy_query_bo, true);
 415		dev_priv->dummy_query_bo_pinned = true;
 416
 417		dev_priv->query_cid = sw_context->cur_query_cid;
 418		dev_priv->pinned_bo =
 419			ttm_bo_reference(sw_context->cur_query_bo);
 420	}
 421}
 422
 423/**
 424 * vmw_query_switch_backoff - clear query barrier list
 425 * @sw_context: The sw context used for this submission batch.
 426 *
 427 * This function is used as part of an error path, where a previously
 428 * set up list of query barriers needs to be cleared.
 429 *
 430 */
 431static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
 432{
 433	struct list_head *list, *next;
 434
 435	list_for_each_safe(list, next, &sw_context->query_list) {
 436		list_del_init(list);
 437	}
 438}
 439
 440static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 441				   struct vmw_sw_context *sw_context,
 442				   SVGAGuestPtr *ptr,
 443				   struct vmw_dma_buffer **vmw_bo_p)
 444{
 445	struct vmw_dma_buffer *vmw_bo = NULL;
 446	struct ttm_buffer_object *bo;
 447	uint32_t handle = ptr->gmrId;
 448	struct vmw_relocation *reloc;
 449	int ret;
 450
 451	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
 452	if (unlikely(ret != 0)) {
 453		DRM_ERROR("Could not find or use GMR region.\n");
 454		return -EINVAL;
 455	}
 456	bo = &vmw_bo->base;
 457
 458	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 459		DRM_ERROR("Max number of relocations per submission"
 460			  " exceeded\n");
 461		ret = -EINVAL;
 462		goto out_no_reloc;
 463	}
 464
 465	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 466	reloc->location = ptr;
 467
 468	ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC,
 469				      &reloc->index);
 470	if (unlikely(ret != 0))
 471		goto out_no_reloc;
 472
 473	*vmw_bo_p = vmw_bo;
 474	return 0;
 475
 476out_no_reloc:
 477	vmw_dmabuf_unreference(&vmw_bo);
 478	vmw_bo_p = NULL;
 479	return ret;
 480}
 481
 482static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 483			     struct vmw_sw_context *sw_context,
 484			     SVGA3dCmdHeader *header)
 485{
 486	struct vmw_dma_buffer *vmw_bo;
 487	struct vmw_query_cmd {
 488		SVGA3dCmdHeader header;
 489		SVGA3dCmdEndQuery q;
 490	} *cmd;
 491	int ret;
 492
 493	cmd = container_of(header, struct vmw_query_cmd, header);
 494	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
 495	if (unlikely(ret != 0))
 496		return ret;
 497
 498	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
 499				      &cmd->q.guestResult,
 500				      &vmw_bo);
 501	if (unlikely(ret != 0))
 502		return ret;
 503
 504	ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
 505					  &vmw_bo->base, sw_context);
 506
 507	vmw_dmabuf_unreference(&vmw_bo);
 508	return ret;
 509}
 510
 511static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 512			      struct vmw_sw_context *sw_context,
 513			      SVGA3dCmdHeader *header)
 514{
 515	struct vmw_dma_buffer *vmw_bo;
 516	struct vmw_query_cmd {
 517		SVGA3dCmdHeader header;
 518		SVGA3dCmdWaitForQuery q;
 519	} *cmd;
 520	int ret;
 521	struct vmw_resource *ctx;
 522
 523	cmd = container_of(header, struct vmw_query_cmd, header);
 524	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
 525	if (unlikely(ret != 0))
 526		return ret;
 527
 528	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
 529				      &cmd->q.guestResult,
 530				      &vmw_bo);
 531	if (unlikely(ret != 0))
 532		return ret;
 533
 534	vmw_dmabuf_unreference(&vmw_bo);
 535
 536	/*
 537	 * This wait will act as a barrier for previous waits for this
 538	 * context.
 539	 */
 540
 541	ctx = sw_context->cur_ctx;
 542	if (!list_empty(&ctx->query_head))
 543		list_del_init(&ctx->query_head);
 544
 545	return 0;
 546}
 547
 548static int vmw_cmd_dma(struct vmw_private *dev_priv,
 549		       struct vmw_sw_context *sw_context,
 550		       SVGA3dCmdHeader *header)
 551{
 552	struct vmw_dma_buffer *vmw_bo = NULL;
 553	struct ttm_buffer_object *bo;
 554	struct vmw_surface *srf = NULL;
 555	struct vmw_dma_cmd {
 556		SVGA3dCmdHeader header;
 557		SVGA3dCmdSurfaceDMA dma;
 558	} *cmd;
 559	int ret;
 560	struct vmw_resource *res;
 561
 562	cmd = container_of(header, struct vmw_dma_cmd, header);
 563	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
 564				      &cmd->dma.guest.ptr,
 565				      &vmw_bo);
 566	if (unlikely(ret != 0))
 567		return ret;
 568
 569	bo = &vmw_bo->base;
 570	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
 571					     cmd->dma.host.sid, &srf);
 572	if (ret) {
 573		DRM_ERROR("could not find surface\n");
 574		goto out_no_reloc;
 575	}
 576
 577	ret = vmw_surface_validate(dev_priv, srf);
 578	if (unlikely(ret != 0)) {
 579		if (ret != -ERESTARTSYS)
 580			DRM_ERROR("Could not validate surface.\n");
 581		goto out_no_validate;
 582	}
 583
 584	/*
 585	 * Patch command stream with device SID.
 586	 */
 587	cmd->dma.host.sid = srf->res.id;
 588	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
 589
 590	vmw_dmabuf_unreference(&vmw_bo);
 591
 592	res = &srf->res;
 593	vmw_resource_to_validate_list(sw_context, &res);
 594
 595	return 0;
 596
 597out_no_validate:
 598	vmw_surface_unreference(&srf);
 599out_no_reloc:
 600	vmw_dmabuf_unreference(&vmw_bo);
 601	return ret;
 602}
 603
 604static int vmw_cmd_draw(struct vmw_private *dev_priv,
 605			struct vmw_sw_context *sw_context,
 606			SVGA3dCmdHeader *header)
 607{
 608	struct vmw_draw_cmd {
 609		SVGA3dCmdHeader header;
 610		SVGA3dCmdDrawPrimitives body;
 611	} *cmd;
 612	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
 613		(unsigned long)header + sizeof(*cmd));
 614	SVGA3dPrimitiveRange *range;
 615	uint32_t i;
 616	uint32_t maxnum;
 617	int ret;
 618
 619	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
 620	if (unlikely(ret != 0))
 621		return ret;
 622
 623	cmd = container_of(header, struct vmw_draw_cmd, header);
 624	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
 625
 626	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
 627		DRM_ERROR("Illegal number of vertex declarations.\n");
 628		return -EINVAL;
 629	}
 630
 631	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
 632		ret = vmw_cmd_sid_check(dev_priv, sw_context,
 633					&decl->array.surfaceId);
 634		if (unlikely(ret != 0))
 635			return ret;
 636	}
 637
 638	maxnum = (header->size - sizeof(cmd->body) -
 639		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
 640	if (unlikely(cmd->body.numRanges > maxnum)) {
 641		DRM_ERROR("Illegal number of index ranges.\n");
 642		return -EINVAL;
 643	}
 644
 645	range = (SVGA3dPrimitiveRange *) decl;
 646	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
 647		ret = vmw_cmd_sid_check(dev_priv, sw_context,
 648					&range->indexArray.surfaceId);
 649		if (unlikely(ret != 0))
 650			return ret;
 651	}
 652	return 0;
 653}
 654
 655
 656static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 657			     struct vmw_sw_context *sw_context,
 658			     SVGA3dCmdHeader *header)
 659{
 660	struct vmw_tex_state_cmd {
 661		SVGA3dCmdHeader header;
 662		SVGA3dCmdSetTextureState state;
 663	};
 664
 665	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
 666	  ((unsigned long) header + header->size + sizeof(header));
 667	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
 668		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
 669	int ret;
 670
 671	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
 672	if (unlikely(ret != 0))
 673		return ret;
 674
 675	for (; cur_state < last_state; ++cur_state) {
 676		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
 677			continue;
 678
 679		ret = vmw_cmd_sid_check(dev_priv, sw_context,
 680					&cur_state->value);
 681		if (unlikely(ret != 0))
 682			return ret;
 683	}
 684
 685	return 0;
 686}
 687
 688static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
 689				      struct vmw_sw_context *sw_context,
 690				      void *buf)
 691{
 692	struct vmw_dma_buffer *vmw_bo;
 693	int ret;
 694
 695	struct {
 696		uint32_t header;
 697		SVGAFifoCmdDefineGMRFB body;
 698	} *cmd = buf;
 699
 700	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
 701				      &cmd->body.ptr,
 702				      &vmw_bo);
 703	if (unlikely(ret != 0))
 704		return ret;
 705
 706	vmw_dmabuf_unreference(&vmw_bo);
 707
 708	return ret;
 709}
 710
 711static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 712				struct vmw_sw_context *sw_context,
 713				void *buf, uint32_t *size)
 714{
 715	uint32_t size_remaining = *size;
 716	uint32_t cmd_id;
 717
 718	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
 719	switch (cmd_id) {
 720	case SVGA_CMD_UPDATE:
 721		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
 722		break;
 723	case SVGA_CMD_DEFINE_GMRFB:
 724		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
 725		break;
 726	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
 727		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
 728		break;
 729	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
 730		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
 731		break;
 732	default:
 733		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
 734		return -EINVAL;
 735	}
 736
 737	if (*size > size_remaining) {
 738		DRM_ERROR("Invalid SVGA command (size mismatch):"
 739			  " %u.\n", cmd_id);
 740		return -EINVAL;
 741	}
 742
 743	if (unlikely(!sw_context->kernel)) {
 744		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
 745		return -EPERM;
 746	}
 747
 748	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
 749		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
 750
 751	return 0;
 752}
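/*
 * Layout sketch (illustrative): a non-3D fifo command is a bare 32-bit id
 * immediately followed by its body, which is why the sizes above are
 * computed as sizeof(uint32_t) + sizeof(body), e.g. for SVGA_CMD_UPDATE:
 *
 *	struct {
 *		uint32_t id;             /* SVGA_CMD_UPDATE */
 *		SVGAFifoCmdUpdate body;  /* x, y, width, height */
 *	} cmd;
 */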
 753
 754typedef int (*vmw_cmd_func) (struct vmw_private *,
 755			     struct vmw_sw_context *,
 756			     SVGA3dCmdHeader *);
 757
 758#define VMW_CMD_DEF(cmd, func) \
 759	[cmd - SVGA_3D_CMD_BASE] = func
 760
 761static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
 762	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
 763	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
 764	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
 765	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
 766	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
 767	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
 768	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
 769	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
 770	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
 771	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
 772	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
 773		    &vmw_cmd_set_render_target_check),
 774	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
 775	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
 776	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
 777	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
 778	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
 779	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
 780	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
 781	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
 782	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
 783	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
 784	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
 785	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
 786	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
 787	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
 788	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
 789	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
 790	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
 791	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
 792	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
 793		    &vmw_cmd_blt_surf_screen_check)
 794};
 795
 796static int vmw_cmd_check(struct vmw_private *dev_priv,
 797			 struct vmw_sw_context *sw_context,
 798			 void *buf, uint32_t *size)
 799{
 800	uint32_t cmd_id;
 801	uint32_t size_remaining = *size;
 802	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
 803	int ret;
 804
 805	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
 806	/* Handle any non-3D commands */
 807	if (unlikely(cmd_id < SVGA_CMD_MAX))
 808		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
 809
 810
 811	cmd_id = le32_to_cpu(header->id);
 812	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
 813
 814	cmd_id -= SVGA_3D_CMD_BASE;
 815	if (unlikely(*size > size_remaining))
 816		goto out_err;
 817
 818	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
 819		goto out_err;
 820
 821	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
 822	if (unlikely(ret != 0))
 823		goto out_err;
 824
 825	return 0;
 826out_err:
 827	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
 828		  cmd_id + SVGA_3D_CMD_BASE);
 829	return -EINVAL;
 830}
 831
 832static int vmw_cmd_check_all(struct vmw_private *dev_priv,
 833			     struct vmw_sw_context *sw_context,
 834			     void *buf,
 835			     uint32_t size)
 836{
 837	int32_t cur_size = size;
 838	int ret;
 839
 840	while (cur_size > 0) {
 841		size = cur_size;
 842		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
 843		if (unlikely(ret != 0))
 844			return ret;
 845		buf = (void *)((unsigned long) buf + size);
 846		cur_size -= size;
 847	}
 848
 849	if (unlikely(cur_size != 0)) {
 850		DRM_ERROR("Command verifier out of sync.\n");
 851		return -EINVAL;
 852	}
 853
 854	return 0;
 855}
 856
 857static void vmw_free_relocations(struct vmw_sw_context *sw_context)
 858{
 859	sw_context->cur_reloc = 0;
 860}
 861
 862static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
 863{
 864	uint32_t i;
 865	struct vmw_relocation *reloc;
 866	struct ttm_validate_buffer *validate;
 867	struct ttm_buffer_object *bo;
 868
 869	for (i = 0; i < sw_context->cur_reloc; ++i) {
 870		reloc = &sw_context->relocs[i];
 871		validate = &sw_context->val_bufs[reloc->index];
 872		bo = validate->bo;
 873		if (bo->mem.mem_type == TTM_PL_VRAM) {
 874			reloc->location->offset += bo->offset;
 875			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
 876		} else
 877			reloc->location->gmrId = bo->mem.start;
 878	}
 879	vmw_free_relocations(sw_context);
 880}
 881
 882static void vmw_clear_validations(struct vmw_sw_context *sw_context)
 883{
 884	struct ttm_validate_buffer *entry, *next;
 885	struct vmw_resource *res, *res_next;
 886
 887	/*
 888	 * Drop references to DMA buffers held during command submission.
 889	 */
 890	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
 891				 head) {
 892		list_del(&entry->head);
 893		vmw_dmabuf_validate_clear(entry->bo);
 894		ttm_bo_unref(&entry->bo);
 895		sw_context->cur_val_buf--;
 896	}
 897	BUG_ON(sw_context->cur_val_buf != 0);
 898
 899	/*
 900	 * Drop references to resources held during command submission.
 901	 */
 902	vmw_resource_unreserve(&sw_context->resource_list);
 903	list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
 904				 validate_head) {
 905		list_del_init(&res->validate_head);
 906		vmw_resource_unreference(&res);
 907	}
 908}
 909
 910static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 911				      struct ttm_buffer_object *bo)
 912{
 913	int ret;
 914
 915
 916	/*
 917	 * Don't validate pinned buffers.
 918	 */
 919
 920	if (bo == dev_priv->pinned_bo ||
 921	    (bo == dev_priv->dummy_query_bo &&
 922	     dev_priv->dummy_query_bo_pinned))
 923		return 0;
 924
 925	/**
 926	 * Put BO in VRAM if there is space, otherwise as a GMR.
 927	 * If there is no space in VRAM and GMR ids are all used up,
 928	 * start evicting GMRs to make room. If the DMA buffer can't be
 929	 * used as a GMR, this will return -ENOMEM.
 930	 */
 931
 932	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
 933	if (likely(ret == 0 || ret == -ERESTARTSYS))
 934		return ret;
 935
 936	/**
 937	 * If that failed, try VRAM again, this time evicting
 938	 * previous contents.
 939	 */
 940
 941	DRM_INFO("Falling through to VRAM.\n");
 942	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
 943	return ret;
 944}
 945
 946
 947static int vmw_validate_buffers(struct vmw_private *dev_priv,
 948				struct vmw_sw_context *sw_context)
 949{
 950	struct ttm_validate_buffer *entry;
 951	int ret;
 952
 953	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
 954		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
 955		if (unlikely(ret != 0))
 956			return ret;
 957	}
 958	return 0;
 959}
 960
 961static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
 962				 uint32_t size)
 963{
 964	if (likely(sw_context->cmd_bounce_size >= size))
 965		return 0;
 966
 967	if (sw_context->cmd_bounce_size == 0)
 968		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
 969
 970	while (sw_context->cmd_bounce_size < size) {
 971		sw_context->cmd_bounce_size =
 972			PAGE_ALIGN(sw_context->cmd_bounce_size +
 973				   (sw_context->cmd_bounce_size >> 1));
 974	}
 975
 976	if (sw_context->cmd_bounce != NULL)
 977		vfree(sw_context->cmd_bounce);
 978
 979	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
 980
 981	if (sw_context->cmd_bounce == NULL) {
 982		DRM_ERROR("Failed to allocate command bounce buffer.\n");
 983		sw_context->cmd_bounce_size = 0;
 984		return -ENOMEM;
 985	}
 986
 987	return 0;
 988}
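/*
 * Growth sketch: the bounce buffer grows by roughly 1.5x (page aligned)
 * until it covers the incoming command size. Assuming, say, an existing
 * 64 KiB buffer and a 200 KiB command stream, the size steps
 * 64K -> 96K -> 144K -> 216K and the buffer is then reallocated once with
 * vmalloc().
 */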
 989
 990/**
 991 * vmw_execbuf_fence_commands - create and submit a command stream fence
 992 *
 993 * Creates a fence object and submits a command stream marker.
 994 * If this fails for some reason, we sync the fifo and return NULL.
 995 * It is then safe to fence buffers with a NULL pointer.
 996 *
 997 * If @p_handle is not NULL, @file_priv must also not be NULL. Creates
 998 * a userspace handle if @p_handle is not NULL, otherwise not.
 999 */
1000
1001int vmw_execbuf_fence_commands(struct drm_file *file_priv,
1002			       struct vmw_private *dev_priv,
1003			       struct vmw_fence_obj **p_fence,
1004			       uint32_t *p_handle)
1005{
1006	uint32_t sequence;
1007	int ret;
1008	bool synced = false;
1009
1010	/* p_handle implies file_priv. */
1011	BUG_ON(p_handle != NULL && file_priv == NULL);
1012
1013	ret = vmw_fifo_send_fence(dev_priv, &sequence);
1014	if (unlikely(ret != 0)) {
1015		DRM_ERROR("Fence submission error. Syncing.\n");
1016		synced = true;
1017	}
1018
1019	if (p_handle != NULL)
1020		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
1021					    sequence,
1022					    DRM_VMW_FENCE_FLAG_EXEC,
1023					    p_fence, p_handle);
1024	else
1025		ret = vmw_fence_create(dev_priv->fman, sequence,
1026				       DRM_VMW_FENCE_FLAG_EXEC,
1027				       p_fence);
1028
1029	if (unlikely(ret != 0 && !synced)) {
1030		(void) vmw_fallback_wait(dev_priv, false, false,
1031					 sequence, false,
1032					 VMW_FENCE_WAIT_TIMEOUT);
1033		*p_fence = NULL;
1034	}
1035
1036	return 0;
1037}
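/*
 * Caller sketch (assumed; vmw_execbuf_process() below is the expected
 * caller): a user-space fence handle is only requested when the caller
 * supplied a fence_rep to copy it into.
 *
 *	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
 *					 user_fence_rep ? &handle : NULL);
 */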
1038
1039/**
1040 * vmw_execbuf_copy_fence_user - copy fence object information to
1041 * user-space.
1042 *
1043 * @dev_priv: Pointer to a vmw_private struct.
1044 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
1045 * @ret: Return value from fence object creation.
1046 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
1047 * which the information should be copied.
1048 * @fence: Pointer to the fence object.
1049 * @fence_handle: User-space fence handle.
1050 *
1051 * This function copies fence information to user-space. If copying fails,
1052 * the user-space struct drm_vmw_fence_rep::error member is hopefully
1053 * left untouched, and if it's preloaded with an -EFAULT by user-space,
1054 * the error will hopefully be detected.
1055 * Also if copying fails, user-space will be unable to signal the fence
1056 * object so we wait for it immediately, and then unreference the
1057 * user-space reference.
1058 */
1059void
1060vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
1061			    struct vmw_fpriv *vmw_fp,
1062			    int ret,
1063			    struct drm_vmw_fence_rep __user *user_fence_rep,
1064			    struct vmw_fence_obj *fence,
1065			    uint32_t fence_handle)
1066{
1067	struct drm_vmw_fence_rep fence_rep;
1068
1069	if (user_fence_rep == NULL)
1070		return;
1071
1072	memset(&fence_rep, 0, sizeof(fence_rep));
1073
1074	fence_rep.error = ret;
1075	if (ret == 0) {
1076		BUG_ON(fence == NULL);
1077
1078		fence_rep.handle = fence_handle;
1079		fence_rep.seqno = fence->seqno;
1080		vmw_update_seqno(dev_priv, &dev_priv->fifo);
1081		fence_rep.passed_seqno = dev_priv->last_read_seqno;
1082	}
1083
1084	/*
1085	 * copy_to_user errors will be detected by user space not
1086	 * seeing fence_rep::error filled in. Typically
1087	 * user-space would have pre-set that member to -EFAULT.
1088	 */
1089	ret = copy_to_user(user_fence_rep, &fence_rep,
1090			   sizeof(fence_rep));
1091
1092	/*
1093	 * User-space lost the fence object. We need to sync
1094	 * and unreference the handle.
1095	 */
1096	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
1097		ttm_ref_object_base_unref(vmw_fp->tfile,
1098					  fence_handle, TTM_REF_USAGE);
1099		DRM_ERROR("Fence copy error. Syncing.\n");
1100		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
1101					  false, false,
1102					  VMW_FENCE_WAIT_TIMEOUT);
1103	}
1104}
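
/*
 * Editorial user-space sketch of the -EFAULT convention described above.
 * The struct and field names follow vmwgfx_drm.h as used in this file;
 * drmCommandWriteRead() and DRM_VMW_EXECBUF are assumed to come from libdrm
 * and the vmwgfx uapi header, and fd, cmd_buf and cmd_size are placeholders:
 *
 *	struct drm_vmw_fence_rep fence_rep;
 *	struct drm_vmw_execbuf_arg arg;
 *	int ret;
 *
 *	memset(&fence_rep, 0, sizeof(fence_rep));
 *	fence_rep.error = -EFAULT;
 *
 * The preloaded -EFAULT survives a failed copy_to_user() in the kernel, so
 * user-space can detect a lost fence:
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.version = DRM_VMW_EXECBUF_VERSION;
 *	arg.commands = (unsigned long) cmd_buf;
 *	arg.command_size = cmd_size;
 *	arg.fence_rep = (unsigned long) &fence_rep;
 *
 *	ret = drmCommandWriteRead(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (ret == 0 && fence_rep.error == 0) {
 *		... fence_rep.handle refers to a usable fence object ...
 *	}
 */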
1105
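/*
 * vmw_execbuf_process - validate and submit a command stream.
 *
 * Editorial summary comment: exactly one of @user_commands and
 * @kernel_commands is expected to be non-NULL. A user-space stream is
 * copied through the bounce buffer; a kernel stream is used in place.
 * The stream is verified by vmw_cmd_check_all(), the referenced buffers
 * are reserved and validated, relocations are applied, and the commands
 * are copied to the fifo. On success the buffers are fenced, fence
 * information is optionally copied to @user_fence_rep, and the fence is
 * handed out through @out_fence if that pointer is non-NULL.
 */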
1106int vmw_execbuf_process(struct drm_file *file_priv,
1107			struct vmw_private *dev_priv,
1108			void __user *user_commands,
1109			void *kernel_commands,
1110			uint32_t command_size,
1111			uint64_t throttle_us,
1112			struct drm_vmw_fence_rep __user *user_fence_rep,
1113			struct vmw_fence_obj **out_fence)
1114{
1115	struct vmw_sw_context *sw_context = &dev_priv->ctx;
1116	struct vmw_fence_obj *fence = NULL;
1117	uint32_t handle;
1118	void *cmd;
1119	int ret;
1120
1121	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
1122	if (unlikely(ret != 0))
1123		return -ERESTARTSYS;
1124
1125	if (kernel_commands == NULL) {
1126		sw_context->kernel = false;
1127
1128		ret = vmw_resize_cmd_bounce(sw_context, command_size);
1129		if (unlikely(ret != 0))
1130			goto out_unlock;
1131
1132
1133		ret = copy_from_user(sw_context->cmd_bounce,
1134				     user_commands, command_size);
1135
1136		if (unlikely(ret != 0)) {
1137			ret = -EFAULT;
1138			DRM_ERROR("Failed copying commands.\n");
1139			goto out_unlock;
1140		}
1141		kernel_commands = sw_context->cmd_bounce;
1142	} else
1143		sw_context->kernel = true;
1144
1145	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
1146	sw_context->cid_valid = false;
1147	sw_context->sid_valid = false;
1148	sw_context->cur_reloc = 0;
1149	sw_context->cur_val_buf = 0;
1150	sw_context->fence_flags = 0;
1151	INIT_LIST_HEAD(&sw_context->query_list);
1152	INIT_LIST_HEAD(&sw_context->resource_list);
1153	sw_context->cur_query_bo = dev_priv->pinned_bo;
1154	sw_context->cur_query_cid = dev_priv->query_cid;
1155	sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);
1156
1157	INIT_LIST_HEAD(&sw_context->validate_nodes);
1158
1159	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
1160				command_size);
1161	if (unlikely(ret != 0))
1162		goto out_err;
1163
1164	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
1165	if (unlikely(ret != 0))
1166		goto out_err;
1167
1168	ret = vmw_validate_buffers(dev_priv, sw_context);
1169	if (unlikely(ret != 0))
1170		goto out_err;
1171
1172	vmw_apply_relocations(sw_context);
1173
1174	if (throttle_us) {
1175		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
1176				   throttle_us);
1177
1178		if (unlikely(ret != 0))
1179			goto out_throttle;
1180	}
1181
1182	cmd = vmw_fifo_reserve(dev_priv, command_size);
1183	if (unlikely(cmd == NULL)) {
1184		DRM_ERROR("Failed reserving fifo space for commands.\n");
1185		ret = -ENOMEM;
1186		goto out_throttle;
1187	}
1188
1189	memcpy(cmd, kernel_commands, command_size);
1190	vmw_fifo_commit(dev_priv, command_size);
1191
1192	vmw_query_bo_switch_commit(dev_priv, sw_context);
1193	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
1194					 &fence,
1195					 (user_fence_rep) ? &handle : NULL);
1196	/*
1197	 * This error is harmless, because if fence submission fails,
1198	 * vmw_fifo_send_fence will sync. The error will be propagated to
1199 * user-space in @user_fence_rep.
1200	 */
1201
1202	if (ret != 0)
1203		DRM_ERROR("Fence submission error. Syncing.\n");
1204
1205	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
1206				    (void *) fence);
1207
1208	vmw_clear_validations(sw_context);
1209	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
1210				    user_fence_rep, fence, handle);
1211
1212	/* Don't unreference when handing fence out */
1213	if (unlikely(out_fence != NULL)) {
1214		*out_fence = fence;
1215		fence = NULL;
1216	} else if (likely(fence != NULL)) {
1217		vmw_fence_obj_unreference(&fence);
1218	}
1219
1220	mutex_unlock(&dev_priv->cmdbuf_mutex);
1221	return 0;
1222
1223out_err:
1224	vmw_free_relocations(sw_context);
1225out_throttle:
1226	vmw_query_switch_backoff(sw_context);
1227	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
1228	vmw_clear_validations(sw_context);
1229out_unlock:
1230	mutex_unlock(&dev_priv->cmdbuf_mutex);
1231	return ret;
1232}
1233
1234/**
1235 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
1236 *
1237 * @dev_priv: The device private structure.
1238 *
1239 * This function is called to idle the fifo and unpin the query buffer
1240 * if the normal way to do this hits an error, which should typically be
1241 * extremely rare.
1242 */
1243static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
1244{
1245	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
1246
1247	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
1248	vmw_bo_pin(dev_priv->pinned_bo, false);
1249	vmw_bo_pin(dev_priv->dummy_query_bo, false);
1250	dev_priv->dummy_query_bo_pinned = false;
1251}
1252
1253
1254/**
1255 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
1256 * query bo.
1257 *
1258 * @dev_priv: The device private structure.
1259 * @only_on_cid_match: Only flush and unpin if the currently active query cid
1260 * matches @cid.
1261 * @cid: Optional context id to match.
1262 *
1263 * This function should be used to unpin the pinned query bo, or
1264 * as a query barrier when we need to make sure that all queries have
1265 * finished before the next fifo command. (For example on hardware
1266 * context destructions where the hardware may otherwise leak unfinished
1267 * queries).
1268 *
1269 * This function does not return any failure codes, but makes attempts
1270 * to do safe unpinning in case of errors.
1271 *
1272 * The function will synchronize on the previous query barrier, and will
1273 * thus not finish until that barrier has executed.
1274 */
1275void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
1276				   bool only_on_cid_match, uint32_t cid)
1277{
1278	int ret = 0;
1279	struct list_head validate_list;
1280	struct ttm_validate_buffer pinned_val, query_val;
1281	struct vmw_fence_obj *fence;
1282
1283	mutex_lock(&dev_priv->cmdbuf_mutex);
1284
1285	if (dev_priv->pinned_bo == NULL)
1286		goto out_unlock;
1287
1288	if (only_on_cid_match && cid != dev_priv->query_cid)
1289		goto out_unlock;
1290
1291	INIT_LIST_HEAD(&validate_list);
1292
1293	pinned_val.new_sync_obj_arg = (void *)(unsigned long)
1294		DRM_VMW_FENCE_FLAG_EXEC;
1295	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
1296	list_add_tail(&pinned_val.head, &validate_list);
1297
1298	query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
1299	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
1300	list_add_tail(&query_val.head, &validate_list);
1301
1302	do {
1303		ret = ttm_eu_reserve_buffers(&validate_list);
1304	} while (ret == -ERESTARTSYS);
1305
1306	if (unlikely(ret != 0)) {
1307		vmw_execbuf_unpin_panic(dev_priv);
1308		goto out_no_reserve;
1309	}
1310
1311	ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
1312	if (unlikely(ret != 0)) {
1313		vmw_execbuf_unpin_panic(dev_priv);
1314		goto out_no_emit;
1315	}
1316
1317	vmw_bo_pin(dev_priv->pinned_bo, false);
1318	vmw_bo_pin(dev_priv->dummy_query_bo, false);
1319	dev_priv->dummy_query_bo_pinned = false;
1320
1321	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1322	ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
1323
1324	ttm_bo_unref(&query_val.bo);
1325	ttm_bo_unref(&pinned_val.bo);
1326	ttm_bo_unref(&dev_priv->pinned_bo);
1327
1328out_unlock:
1329	mutex_unlock(&dev_priv->cmdbuf_mutex);
1330	return;
1331
1332out_no_emit:
1333	ttm_eu_backoff_reservation(&validate_list);
1334out_no_reserve:
1335	ttm_bo_unref(&query_val.bo);
1336	ttm_bo_unref(&pinned_val.bo);
1337	ttm_bo_unref(&dev_priv->pinned_bo);
1338	mutex_unlock(&dev_priv->cmdbuf_mutex);
1339}
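
/*
 * Editorial use sketch, not part of the original file: a context
 * destruction path can use vmw_execbuf_release_pinned_bo() as a query
 * barrier so that no pending query still references the context being
 * torn down. The wrapper below is hypothetical:
 *
 *	static void vmw_context_destroy_barrier(struct vmw_private *dev_priv,
 *						uint32_t cid)
 *	{
 *		vmw_execbuf_release_pinned_bo(dev_priv, true, cid);
 *	}
 *
 * With only_on_cid_match == true the flush and unpin only happen when the
 * currently pinned query bo was last used with @cid, which keeps the
 * barrier cheap for unrelated contexts.
 */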
1340
1341
1342int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
1343		      struct drm_file *file_priv)
1344{
1345	struct vmw_private *dev_priv = vmw_priv(dev);
1346	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
1347	struct vmw_master *vmaster = vmw_master(file_priv->master);
1348	int ret;
1349
1350	/*
1351	 * This will allow us to extend the ioctl argument while
1352	 * maintaining backwards compatibility:
1353	 * We take different code paths depending on the value of
1354	 * arg->version.
1355	 */
1356
1357	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
1358		DRM_ERROR("Incorrect execbuf version.\n");
1359		DRM_ERROR("You're running outdated experimental "
1360			  "vmwgfx user-space drivers.\n");
1361		return -EINVAL;
1362	}
1363
1364	ret = ttm_read_lock(&vmaster->lock, true);
1365	if (unlikely(ret != 0))
1366		return ret;
1367
1368	ret = vmw_execbuf_process(file_priv, dev_priv,
1369				  (void __user *)(unsigned long)arg->commands,
1370				  NULL, arg->command_size, arg->throttle_us,
1371				  (void __user *)(unsigned long)arg->fence_rep,
1372				  NULL);
1373
1374	if (unlikely(ret != 0))
1375		goto out_unlock;
1376
1377	vmw_kms_cursor_post_execbuf(dev_priv);
1378
1379out_unlock:
1380	ttm_read_unlock(&vmaster->lock);
1381	return ret;
1382}