/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12
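
/*
 * Editor's note: VMW_RES_HT_ORDER is the log2 size of the drm hash table
 * used below, so an order of 12 gives 2^12 = 4096 buckets. Entries are
 * keyed on the kernel pointer of the object being tracked.
 */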

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset into the command buffer, in units of 4-byte entries, of
 * the id that needs fixup.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: Whether to switch to a new backup buffer on unreserve;
 * set when the command stream provides a new backup buffer for the resource.
 * @no_buffer_needed: This means @switching_backup is true on first buffer
 * reference, so resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	u32 first_usage : 1;
	u32 switching_backup : 1;
	u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back handling the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable)}
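
/*
 * For illustration (a sketch; the table itself is outside this excerpt):
 * the VMW_CMD_DEF() initializer is meant for a dispatch table indexed by
 * command id, along the lines of
 *
 *	static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 *		VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
 *			    false, false, false),
 *	};
 *
 * so the verifier can resolve a command with a single array lookup on
 * (header->id - SVGA_3D_CMD_BASE).
 */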

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node);

/**
 * vmw_resources_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: pointer to the software context
 * @backoff: Whether command submission failed.
 */
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
				    bool backoff)
{
	struct vmw_resource_val_node *val;
	struct list_head *list = &sw_context->resource_list;

	if (sw_context->dx_query_mob && !backoff)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		bool switch_backup =
			(backoff) ? false : val->switching_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_binding_state_commit
					(vmw_context_binding_state(val->res),
					 val->staged_bindings);
			}

			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, switch_backup, val->new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource_val_node *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings =
			vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(node->staged_bindings);
			node->staged_bindings = NULL;
			goto out_err;
		}
	} else {
		node->staged_bindings = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	return 0;
out_err:
	return ret;
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: If non-NULL on entry, points to a valid pointer to a
 * struct vmw_resource_val_node on successful return.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	node->res = vmw_resource_reference(res);
	node->first_usage = true;
	if (unlikely(p_node != NULL))
		*p_node = node;

	if (!dev_priv->has_mob) {
		list_add_tail(&node->head, &sw_context->resource_list);
		return 0;
	}

	switch (vmw_res_type(res)) {
	case vmw_res_context:
	case vmw_res_dx_context:
		list_add(&node->head, &sw_context->ctx_resource_list);
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
		break;
	case vmw_res_cotable:
		list_add_tail(&node->head, &sw_context->ctx_resource_list);
		break;
	default:
		list_add_tail(&node->head, &sw_context->resource_list);
		break;
	}

	return ret;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 * to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise
	 * it may be swapped out when the view is validated.
	 */
	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
	if (ret)
		return ret;

	return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * returns -EINVAL. Otherwise it returns 0 on success and a negative error
 * code on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
			       enum vmw_view_type view_type, u32 id)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return PTR_ERR(view);

	ret = vmw_view_res_val_add(sw_context, view);
	vmw_resource_unreference(&view);

	return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_resource_val_add(sw_context, res, NULL);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		/* entry->res is not refcounted */
		res = vmw_resource_reference_unless_doomed(entry->res);
		if (unlikely(res == NULL))
			continue;

		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_resource_val_add(sw_context, entry->res,
						   NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_dma_buffer *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_bo_to_validate_list(sw_context,
						      dx_query_mob,
						      true, NULL);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head) {
		if (likely(rel->res != NULL))
			cb[rel->offset] = rel->res->id;
		else
			cb[rel->offset] = SVGA_3D_CMD_NOP;
	}
}
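
/*
 * Worked example: if a command's resource id sits at kernel address id_loc
 * within a submission that starts at buf_start, the relocation is recorded
 * with offset = id_loc - buf_start, a pointer difference in 32-bit words
 * since both are uint32_t pointers. Applying it then stores the final
 * device id at cb[offset] in whatever buffer the batch is replayed from;
 * for relocations recorded with a NULL resource, the 32-bit word at
 * cb[offset] is overwritten with SVGA_3D_CMD_NOP instead.
 */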

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) vbo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(&vbo->base);
		val_buf->shared = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}
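
/*
 * Note that the same res_ht hash table deduplicates both resources (in
 * vmw_resource_val_add()) and buffer objects here; in both cases the key
 * is the kernel pointer cast to unsigned long. For buffers, the validate
 * node number is recovered by pointer arithmetic on the val_bufs array
 * (vval_buf - sw_context->val_bufs).
 */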

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at a time will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret = 0;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (sw_context->dx_query_mob) {
		struct vmw_dma_buffer *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *backup = res->backup;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);
			if (ret) {
				ttm_bo_unreserve(&vbo->base);
				return ret;
			}
		}
	}
	return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 uint32_t *id_loc,
				 struct vmw_resource *res,
				 struct vmw_resource_val_node **p_val)
{
	int ret;
	struct vmw_resource_val_node *node;

	*p_val = NULL;
	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id_loc - sw_context->buf_start);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		return ret;

	if (p_val)
		*p_val = node;

	return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id_loc == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id_loc - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      *id_loc,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id_loc);
		dump_stack();
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id_loc;

	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
				    res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_dma_buffer *dx_query_mob;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindAllQuery body;
	} *cmd;

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);

	if (cmd == NULL) {
		DRM_ERROR("Failed to rebind queries.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		if (unlikely(!val->staged_bindings))
			break;

		ret = vmw_binding_rebind_all
			(vmw_context_binding_state(val->res));
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->res);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_cmdbuf_res_manager *man;
	u32 i;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	man = sw_context->man;
	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_lookup(man, view_type, view_ids[i]);
			if (IS_ERR(view)) {
				DRM_ERROR("View not found.\n");
				return PTR_ERR(view);
			}

			ret = vmw_view_res_val_add(sw_context, view);
			if (ret) {
				DRM_ERROR("Could not add view to "
					  "validation list.\n");
				vmw_resource_unreference(&view);
				return ret;
			}
		}
		binding.bi.ctx = ctx_node->res;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				shader_slot, binding.slot);
		if (view)
			vmw_resource_unreference(&view);
	}

	return 0;
}
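
/*
 * Illustrative sketch (member names such as body.startView are assumptions,
 * not taken from this excerpt): a verifier for a DX set-shader-resources
 * style command, whose view id array trails the fixed-size body, could call
 * the helper above as
 *
 *	num_views = (header->size - sizeof(cmd->body)) / sizeof(uint32);
 *	ret = vmw_view_bindings_add(sw_context, vmw_view_sr,
 *				    vmw_ctx_binding_sr, shader_slot,
 *				    (uint32 *) &cmd[1], num_views,
 *				    cmd->body.startView);
 */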

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		uint32_t cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		DRM_ERROR("Illegal render target type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = res_node ? res_node->res : NULL;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(ctx_node->staged_bindings,
				&binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBufferCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXPredCopyRegion body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_dma_buffer *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;

	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and if so,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We also pin the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_dmabuf_reference(sw_context->cur_query_bo);
		}
	}
}
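
/*
 * Note on the calling sequence of the two functions above:
 * vmw_query_bo_switch_prepare() runs while the command stream is parsed
 * and only records the intent to switch; vmw_query_bo_switch_commit()
 * runs after the batch has been submitted, emits the dummy query barrier
 * if one is needed, and re-pins. The caller must then fence both the old
 * and the new query buffer with a sequence emitted after the commit.
 */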

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
				     NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission "
			  "exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
				     NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission "
			  "exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false,
				      &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}
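
/*
 * The two translators above share struct vmw_relocation but fill it in
 * differently: vmw_translate_mob_ptr() sets ->mob_loc and clears
 * ->location, while vmw_translate_guest_ptr() sets ->location. This is
 * what lets the later relocation-apply step tell whether to patch in a
 * MOB id or a full SVGAGuestPtr for the buffer at ->index.
 */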

/**
 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_dx_define_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineQuery q;
	} *cmd;
	int ret;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *cotable_res;

	if (ctx_node == NULL) {
		DRM_ERROR("DX Context not set for query.\n");
		return -EINVAL;
	}

	cmd = container_of(header, struct vmw_dx_define_query_cmd, header);

	if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
	vmw_resource_unreference(&cotable_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID
 * with its backing MOB. In this function, we take the user mode
 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
 * kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dx_bind_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindQuery q;
	} *cmd;
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
				    &vmw_bo);
	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}
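
/*
 * Note on the pattern above (also used by the end-query and wait-query
 * checkers below): on guest-backed hardware a legacy command is rewritten
 * in place into its guest-backed equivalent. This is only safe because the
 * two command structs are the same size, which the BUG_ON asserts, so the
 * memcpy() can overwrite the command in the command buffer before
 * re-dispatching to the GB handler.
 */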

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("Could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}
1721
1722static int vmw_cmd_draw(struct vmw_private *dev_priv,
1723			struct vmw_sw_context *sw_context,
1724			SVGA3dCmdHeader *header)
1725{
1726	struct vmw_draw_cmd {
1727		SVGA3dCmdHeader header;
1728		SVGA3dCmdDrawPrimitives body;
1729	} *cmd;
1730	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1731		(unsigned long)header + sizeof(*cmd));
1732	SVGA3dPrimitiveRange *range;
1733	uint32_t i;
1734	uint32_t maxnum;
1735	int ret;
1736
1737	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1738	if (unlikely(ret != 0))
1739		return ret;
1740
1741	cmd = container_of(header, struct vmw_draw_cmd, header);
1742	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1743
1744	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1745		DRM_ERROR("Illegal number of vertex declarations.\n");
1746		return -EINVAL;
1747	}
1748
1749	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1750		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1751					user_surface_converter,
1752					&decl->array.surfaceId, NULL);
1753		if (unlikely(ret != 0))
1754			return ret;
1755	}
1756
1757	maxnum = (header->size - sizeof(cmd->body) -
1758		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1759	if (unlikely(cmd->body.numRanges > maxnum)) {
1760		DRM_ERROR("Illegal number of index ranges.\n");
1761		return -EINVAL;
1762	}
1763
1764	range = (SVGA3dPrimitiveRange *) decl;
1765	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1766		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1767					user_surface_converter,
1768					&range->indexArray.surfaceId, NULL);
1769		if (unlikely(ret != 0))
1770			return ret;
1771	}
1772	return 0;
1773}
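
/*
 * Illustrative sketch (not part of the driver): bounds-checking two
 * consecutive variable-length arrays in one command body, as vmw_cmd_draw()
 * does for vertex declarations followed by primitive ranges. Deriving the
 * maximum element count from the remaining byte count, instead of
 * multiplying the untrusted counts, sidesteps integer overflow. The types
 * are hypothetical stand-ins; body_size >= fixed_size is assumed verified.
 */
#include <stdint.h>

struct decl  { uint32_t surface_id; };
struct range { uint32_t surface_id; };

static int check_counts(uint32_t body_size, uint32_t fixed_size,
			uint32_t num_decls, uint32_t num_ranges)
{
	uint32_t avail = body_size - fixed_size;

	if (num_decls > avail / sizeof(struct decl))
		return -1;	/* declarations would read past the body */

	avail -= num_decls * sizeof(struct decl);
	if (num_ranges > avail / sizeof(struct range))
		return -1;	/* ranges would read past the body */

	return 0;
}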
1774
1775
1776static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1777			     struct vmw_sw_context *sw_context,
1778			     SVGA3dCmdHeader *header)
1779{
1780	struct vmw_tex_state_cmd {
1781		SVGA3dCmdHeader header;
1782		SVGA3dCmdSetTextureState state;
1783	} *cmd;
1784
1785	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1786	  ((unsigned long) header + header->size + sizeof(*header));
1787	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1788		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1789	struct vmw_resource_val_node *ctx_node;
1790	struct vmw_resource_val_node *res_node;
1791	int ret;
1792
1793	cmd = container_of(header, struct vmw_tex_state_cmd,
1794			   header);
1795
1796	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1797				user_context_converter, &cmd->state.cid,
1798				&ctx_node);
1799	if (unlikely(ret != 0))
1800		return ret;
1801
1802	for (; cur_state < last_state; ++cur_state) {
1803		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1804			continue;
1805
1806		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1807			DRM_ERROR("Illegal texture/sampler unit %u.\n",
1808				  (unsigned) cur_state->stage);
1809			return -EINVAL;
1810		}
1811
1812		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1813					user_surface_converter,
1814					&cur_state->value, &res_node);
1815		if (unlikely(ret != 0))
1816			return ret;
1817
1818		if (dev_priv->has_mob) {
1819			struct vmw_ctx_bindinfo_tex binding;
1820
1821			binding.bi.ctx = ctx_node->res;
1822			binding.bi.res = res_node ? res_node->res : NULL;
1823			binding.bi.bt = vmw_ctx_binding_tex;
1824			binding.texture_stage = cur_state->stage;
1825			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
1826					0, binding.texture_stage);
1827		}
1828	}
1829
1830	return 0;
1831}
1832
1833static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1834				      struct vmw_sw_context *sw_context,
1835				      void *buf)
1836{
1837	struct vmw_dma_buffer *vmw_bo;
1838	int ret;
1839
1840	struct {
1841		uint32_t header;
1842		SVGAFifoCmdDefineGMRFB body;
1843	} *cmd = buf;
1844
1845	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1846				      &cmd->body.ptr,
1847				      &vmw_bo);
1848	if (unlikely(ret != 0))
1849		return ret;
1850
1851	vmw_dmabuf_unreference(&vmw_bo);
1852
1853	return ret;
1854}
1855
1856
1857/**
1858 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1859 * switching
1860 *
1861 * @dev_priv: Pointer to a device private struct.
1862 * @sw_context: The software context being used for this batch.
1863 * @val_node: The validation node representing the resource.
1864 * @buf_id: Pointer to the user-space backup buffer handle in the command
1865 * stream.
1866 * @backup_offset: Offset of backup into MOB.
1867 *
1868 * This function prepares for registering a switch of backup buffers
1869 * in the resource metadata just prior to unreserving. Callers that have
1870 * only a resource handle should use vmw_cmd_switch_backup() instead.
1871 */
1872static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1873				     struct vmw_sw_context *sw_context,
1874				     struct vmw_resource_val_node *val_node,
1875				     uint32_t *buf_id,
1876				     unsigned long backup_offset)
1877{
1878	struct vmw_dma_buffer *dma_buf;
1879	int ret;
1880
1881	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1882	if (ret)
1883		return ret;
1884
1885	val_node->switching_backup = true;
1886	if (val_node->first_usage)
1887		val_node->no_buffer_needed = true;
1888
1889	vmw_dmabuf_unreference(&val_node->new_backup);
1890	val_node->new_backup = dma_buf;
1891	val_node->new_backup_offset = backup_offset;
1892
1893	return 0;
1894}
1895
1896
1897/**
1898 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1899 *
1900 * @dev_priv: Pointer to a device private struct.
1901 * @sw_context: The software context being used for this batch.
1902 * @res_type: The resource type.
1903 * @converter: Information about user-space binding for this resource type.
1904 * @res_id: Pointer to the user-space resource handle in the command stream.
1905 * @buf_id: Pointer to the user-space backup buffer handle in the command
1906 * stream.
1907 * @backup_offset: Offset of backup into MOB.
1908 *
1909 * This function prepares for registering a switch of backup buffers
1910 * in the resource metadata just prior to unreserving. It's basically a wrapper
1911 * around vmw_cmd_res_switch_backup with a different interface.
1912 */
1913static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1914				 struct vmw_sw_context *sw_context,
1915				 enum vmw_res_type res_type,
1916				 const struct vmw_user_resource_conv
1917				 *converter,
1918				 uint32_t *res_id,
1919				 uint32_t *buf_id,
1920				 unsigned long backup_offset)
1921{
1922	struct vmw_resource_val_node *val_node;
1923	int ret;
1924
1925	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1926				converter, res_id, &val_node);
1927	if (ret)
1928		return ret;
1929
1930	return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
1931					 buf_id, backup_offset);
1932}
1933
1934/**
1935 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1936 * command
1937 *
1938 * @dev_priv: Pointer to a device private struct.
1939 * @sw_context: The software context being used for this batch.
1940 * @header: Pointer to the command header in the command stream.
1941 */
1942static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1943				   struct vmw_sw_context *sw_context,
1944				   SVGA3dCmdHeader *header)
1945{
1946	struct vmw_bind_gb_surface_cmd {
1947		SVGA3dCmdHeader header;
1948		SVGA3dCmdBindGBSurface body;
1949	} *cmd;
1950
1951	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1952
1953	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1954				     user_surface_converter,
1955				     &cmd->body.sid, &cmd->body.mobid,
1956				     0);
1957}
1958
1959/**
1960 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1961 * command
1962 *
1963 * @dev_priv: Pointer to a device private struct.
1964 * @sw_context: The software context being used for this batch.
1965 * @header: Pointer to the command header in the command stream.
1966 */
1967static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1968				   struct vmw_sw_context *sw_context,
1969				   SVGA3dCmdHeader *header)
1970{
1971	struct vmw_gb_surface_cmd {
1972		SVGA3dCmdHeader header;
1973		SVGA3dCmdUpdateGBImage body;
1974	} *cmd;
1975
1976	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1977
1978	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1979				 user_surface_converter,
1980				 &cmd->body.image.sid, NULL);
1981}
1982
1983/**
1984 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1985 * command
1986 *
1987 * @dev_priv: Pointer to a device private struct.
1988 * @sw_context: The software context being used for this batch.
1989 * @header: Pointer to the command header in the command stream.
1990 */
1991static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1992				     struct vmw_sw_context *sw_context,
1993				     SVGA3dCmdHeader *header)
1994{
1995	struct vmw_gb_surface_cmd {
1996		SVGA3dCmdHeader header;
1997		SVGA3dCmdUpdateGBSurface body;
1998	} *cmd;
1999
2000	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2001
2002	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2003				 user_surface_converter,
2004				 &cmd->body.sid, NULL);
2005}
2006
2007/**
2008 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
2009 * command
2010 *
2011 * @dev_priv: Pointer to a device private struct.
2012 * @sw_context: The software context being used for this batch.
2013 * @header: Pointer to the command header in the command stream.
2014 */
2015static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
2016				     struct vmw_sw_context *sw_context,
2017				     SVGA3dCmdHeader *header)
2018{
2019	struct vmw_gb_surface_cmd {
2020		SVGA3dCmdHeader header;
2021		SVGA3dCmdReadbackGBImage body;
2022	} *cmd;
2023
2024	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2025
2026	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2027				 user_surface_converter,
2028				 &cmd->body.image.sid, NULL);
2029}
2030
2031/**
2032 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
2033 * command
2034 *
2035 * @dev_priv: Pointer to a device private struct.
2036 * @sw_context: The software context being used for this batch.
2037 * @header: Pointer to the command header in the command stream.
2038 */
2039static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
2040				       struct vmw_sw_context *sw_context,
2041				       SVGA3dCmdHeader *header)
2042{
2043	struct vmw_gb_surface_cmd {
2044		SVGA3dCmdHeader header;
2045		SVGA3dCmdReadbackGBSurface body;
2046	} *cmd;
2047
2048	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2049
2050	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2051				 user_surface_converter,
2052				 &cmd->body.sid, NULL);
2053}
2054
2055/**
2056 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
2057 * command
2058 *
2059 * @dev_priv: Pointer to a device private struct.
2060 * @sw_context: The software context being used for this batch.
2061 * @header: Pointer to the command header in the command stream.
2062 */
2063static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
2064				       struct vmw_sw_context *sw_context,
2065				       SVGA3dCmdHeader *header)
2066{
2067	struct vmw_gb_surface_cmd {
2068		SVGA3dCmdHeader header;
2069		SVGA3dCmdInvalidateGBImage body;
2070	} *cmd;
2071
2072	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2073
2074	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2075				 user_surface_converter,
2076				 &cmd->body.image.sid, NULL);
2077}
2078
2079/**
2080 * vmw_cmd_invalidate_gb_surface - Validate an
2081 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
2082 *
2083 * @dev_priv: Pointer to a device private struct.
2084 * @sw_context: The software context being used for this batch.
2085 * @header: Pointer to the command header in the command stream.
2086 */
2087static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
2088					 struct vmw_sw_context *sw_context,
2089					 SVGA3dCmdHeader *header)
2090{
2091	struct vmw_gb_surface_cmd {
2092		SVGA3dCmdHeader header;
2093		SVGA3dCmdInvalidateGBSurface body;
2094	} *cmd;
2095
2096	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2097
2098	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2099				 user_surface_converter,
2100				 &cmd->body.sid, NULL);
2101}
2102
2103
2104/**
2105 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
2106 * command
2107 *
2108 * @dev_priv: Pointer to a device private struct.
2109 * @sw_context: The software context being used for this batch.
2110 * @header: Pointer to the command header in the command stream.
2111 */
2112static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2113				 struct vmw_sw_context *sw_context,
2114				 SVGA3dCmdHeader *header)
2115{
2116	struct vmw_shader_define_cmd {
2117		SVGA3dCmdHeader header;
2118		SVGA3dCmdDefineShader body;
2119	} *cmd;
2120	int ret;
2121	size_t size;
2122	struct vmw_resource_val_node *val;
2123
2124	cmd = container_of(header, struct vmw_shader_define_cmd,
2125			   header);
2126
2127	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2128				user_context_converter, &cmd->body.cid,
2129				&val);
2130	if (unlikely(ret != 0))
2131		return ret;
2132
2133	if (unlikely(!dev_priv->has_mob))
2134		return 0;
2135
2136	size = cmd->header.size - sizeof(cmd->body);
2137	ret = vmw_compat_shader_add(dev_priv,
2138				    vmw_context_res_man(val->res),
2139				    cmd->body.shid, cmd + 1,
2140				    cmd->body.type, size,
2141				    &sw_context->staged_cmd_res);
2142	if (unlikely(ret != 0))
2143		return ret;
2144
2145	return vmw_resource_relocation_add(&sw_context->res_relocations,
2146					   NULL, &cmd->header.id -
2147					   sw_context->buf_start);
2150}
2151
2152/**
2153 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
2154 * command
2155 *
2156 * @dev_priv: Pointer to a device private struct.
2157 * @sw_context: The software context being used for this batch.
2158 * @header: Pointer to the command header in the command stream.
2159 */
2160static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2161				  struct vmw_sw_context *sw_context,
2162				  SVGA3dCmdHeader *header)
2163{
2164	struct vmw_shader_destroy_cmd {
2165		SVGA3dCmdHeader header;
2166		SVGA3dCmdDestroyShader body;
2167	} *cmd;
2168	int ret;
2169	struct vmw_resource_val_node *val;
2170
2171	cmd = container_of(header, struct vmw_shader_destroy_cmd,
2172			   header);
2173
2174	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2175				user_context_converter, &cmd->body.cid,
2176				&val);
2177	if (unlikely(ret != 0))
2178		return ret;
2179
2180	if (unlikely(!dev_priv->has_mob))
2181		return 0;
2182
2183	ret = vmw_shader_remove(vmw_context_res_man(val->res),
2184				cmd->body.shid,
2185				cmd->body.type,
2186				&sw_context->staged_cmd_res);
2187	if (unlikely(ret != 0))
2188		return ret;
2189
2190	return vmw_resource_relocation_add(&sw_context->res_relocations,
2191					   NULL, &cmd->header.id -
2192					   sw_context->buf_start);
2195}
2196
2197/**
2198 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
2199 * command
2200 *
2201 * @dev_priv: Pointer to a device private struct.
2202 * @sw_context: The software context being used for this batch.
2203 * @header: Pointer to the command header in the command stream.
2204 */
2205static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
2206			      struct vmw_sw_context *sw_context,
2207			      SVGA3dCmdHeader *header)
2208{
2209	struct vmw_set_shader_cmd {
2210		SVGA3dCmdHeader header;
2211		SVGA3dCmdSetShader body;
2212	} *cmd;
2213	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
2214	struct vmw_ctx_bindinfo_shader binding;
2215	struct vmw_resource *res = NULL;
2216	int ret;
2217
2218	cmd = container_of(header, struct vmw_set_shader_cmd,
2219			   header);
2220
2221	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2222		DRM_ERROR("Illegal shader type %u.\n",
2223			  (unsigned) cmd->body.type);
2224		return -EINVAL;
2225	}
2226
2227	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2228				user_context_converter, &cmd->body.cid,
2229				&ctx_node);
2230	if (unlikely(ret != 0))
2231		return ret;
2232
2233	if (!dev_priv->has_mob)
2234		return 0;
2235
2236	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2237		res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2238					cmd->body.shid,
2239					cmd->body.type);
2240
2241		if (!IS_ERR(res)) {
2242			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
2243						    &cmd->body.shid, res,
2244						    &res_node);
2245			vmw_resource_unreference(&res);
2246			if (unlikely(ret != 0))
2247				return ret;
2248		}
2249	}
2250
2251	if (!res_node) {
2252		ret = vmw_cmd_res_check(dev_priv, sw_context,
2253					vmw_res_shader,
2254					user_shader_converter,
2255					&cmd->body.shid, &res_node);
2256		if (unlikely(ret != 0))
2257			return ret;
2258	}
2259
2260	binding.bi.ctx = ctx_node->res;
2261	binding.bi.res = res_node ? res_node->res : NULL;
2262	binding.bi.bt = vmw_ctx_binding_shader;
2263	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2264	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2265			binding.shader_slot, 0);
2266	return 0;
2267}
2268
2269/**
2270 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2271 * command
2272 *
2273 * @dev_priv: Pointer to a device private struct.
2274 * @sw_context: The software context being used for this batch.
2275 * @header: Pointer to the command header in the command stream.
2276 */
2277static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2278				    struct vmw_sw_context *sw_context,
2279				    SVGA3dCmdHeader *header)
2280{
2281	struct vmw_set_shader_const_cmd {
2282		SVGA3dCmdHeader header;
2283		SVGA3dCmdSetShaderConst body;
2284	} *cmd;
2285	int ret;
2286
2287	cmd = container_of(header, struct vmw_set_shader_const_cmd,
2288			   header);
2289
2290	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2291				user_context_converter, &cmd->body.cid,
2292				NULL);
2293	if (unlikely(ret != 0))
2294		return ret;
2295
2296	if (dev_priv->has_mob)
2297		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2298
2299	return 0;
2300}
2301
2302/**
2303 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2304 * command
2305 *
2306 * @dev_priv: Pointer to a device private struct.
2307 * @sw_context: The software context being used for this batch.
2308 * @header: Pointer to the command header in the command stream.
2309 */
2310static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2311				  struct vmw_sw_context *sw_context,
2312				  SVGA3dCmdHeader *header)
2313{
2314	struct vmw_bind_gb_shader_cmd {
2315		SVGA3dCmdHeader header;
2316		SVGA3dCmdBindGBShader body;
2317	} *cmd;
2318
2319	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2320			   header);
2321
2322	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2323				     user_shader_converter,
2324				     &cmd->body.shid, &cmd->body.mobid,
2325				     cmd->body.offsetInBytes);
2326}
2327
2328/**
2329 * vmw_cmd_dx_set_single_constant_buffer - Validate an
2330 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2331 *
2332 * @dev_priv: Pointer to a device private struct.
2333 * @sw_context: The software context being used for this batch.
2334 * @header: Pointer to the command header in the command stream.
2335 */
2336static int
2337vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2338				      struct vmw_sw_context *sw_context,
2339				      SVGA3dCmdHeader *header)
2340{
2341	struct {
2342		SVGA3dCmdHeader header;
2343		SVGA3dCmdDXSetSingleConstantBuffer body;
2344	} *cmd;
2345	struct vmw_resource_val_node *res_node = NULL;
2346	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2347	struct vmw_ctx_bindinfo_cb binding;
2348	int ret;
2349
2350	if (unlikely(ctx_node == NULL)) {
2351		DRM_ERROR("DX Context not set.\n");
2352		return -EINVAL;
2353	}
2354
2355	cmd = container_of(header, typeof(*cmd), header);
2356	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2357				user_surface_converter,
2358				&cmd->body.sid, &res_node);
2359	if (unlikely(ret != 0))
2360		return ret;
2361
2362	binding.bi.ctx = ctx_node->res;
2363	binding.bi.res = res_node ? res_node->res : NULL;
2364	binding.bi.bt = vmw_ctx_binding_cb;
2365	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2366	binding.offset = cmd->body.offsetInBytes;
2367	binding.size = cmd->body.sizeInBytes;
2368	binding.slot = cmd->body.slot;
2369
2370	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2371	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2372		DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2373			  (unsigned) cmd->body.type,
2374			  (unsigned) binding.slot);
2375		return -EINVAL;
2376	}
2377
2378	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2379			binding.shader_slot, binding.slot);
2380
2381	return 0;
2382}
2383
2384/**
2385 * vmw_cmd_dx_set_shader_res - Validate an
2386 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2387 *
2388 * @dev_priv: Pointer to a device private struct.
2389 * @sw_context: The software context being used for this batch.
2390 * @header: Pointer to the command header in the command stream.
2391 */
2392static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2393				     struct vmw_sw_context *sw_context,
2394				     SVGA3dCmdHeader *header)
2395{
2396	struct {
2397		SVGA3dCmdHeader header;
2398		SVGA3dCmdDXSetShaderResources body;
2399	} *cmd = container_of(header, typeof(*cmd), header);
2400	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2401		sizeof(SVGA3dShaderResourceViewId);
2402
2403	if ((u64) cmd->body.startView + (u64) num_sr_view >
2404	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2405	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2406		DRM_ERROR("Invalid shader binding.\n");
2407		return -EINVAL;
2408	}
2409
2410	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2411				     vmw_ctx_binding_sr,
2412				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2413				     (void *) &cmd[1], num_sr_view,
2414				     cmd->body.startView);
2415}
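
/*
 * Illustrative sketch (not part of the driver): the widen-then-compare
 * overflow guard used above. Adding two untrusted 32-bit values in 64-bit
 * arithmetic before comparing against the limit makes wrap-around
 * impossible, so start + count cannot masquerade as a small value.
 */
#include <stdbool.h>
#include <stdint.h>

static bool range_ok(uint32_t start, uint32_t count, uint32_t limit)
{
	return (uint64_t)start + (uint64_t)count <= (uint64_t)limit;
}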
2416
2417/**
2418 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2419 * command
2420 *
2421 * @dev_priv: Pointer to a device private struct.
2422 * @sw_context: The software context being used for this batch.
2423 * @header: Pointer to the command header in the command stream.
2424 */
2425static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2426				 struct vmw_sw_context *sw_context,
2427				 SVGA3dCmdHeader *header)
2428{
2429	struct {
2430		SVGA3dCmdHeader header;
2431		SVGA3dCmdDXSetShader body;
2432	} *cmd;
2433	struct vmw_resource *res = NULL;
2434	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2435	struct vmw_ctx_bindinfo_shader binding;
2436	int ret = 0;
2437
2438	if (unlikely(ctx_node == NULL)) {
2439		DRM_ERROR("DX Context not set.\n");
2440		return -EINVAL;
2441	}
2442
2443	cmd = container_of(header, typeof(*cmd), header);
2444
2445	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2446		DRM_ERROR("Illegal shader type %u.\n",
2447			  (unsigned) cmd->body.type);
2448		return -EINVAL;
2449	}
2450
2451	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2452		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2453		if (IS_ERR(res)) {
2454			DRM_ERROR("Could not find shader for binding.\n");
2455			return PTR_ERR(res);
2456		}
2457
2458		ret = vmw_resource_val_add(sw_context, res, NULL);
2459		if (ret)
2460			goto out_unref;
2461	}
2462
2463	binding.bi.ctx = ctx_node->res;
2464	binding.bi.res = res;
2465	binding.bi.bt = vmw_ctx_binding_dx_shader;
2466	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2467
2468	vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2469			binding.shader_slot, 0);
2470out_unref:
2471	if (res)
2472		vmw_resource_unreference(&res);
2473
2474	return ret;
2475}
2476
2477/**
2478 * vmw_cmd_dx_set_vertex_buffers - Validate an
2479 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2480 *
2481 * @dev_priv: Pointer to a device private struct.
2482 * @sw_context: The software context being used for this batch.
2483 * @header: Pointer to the command header in the command stream.
2484 */
2485static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2486					 struct vmw_sw_context *sw_context,
2487					 SVGA3dCmdHeader *header)
2488{
2489	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2490	struct vmw_ctx_bindinfo_vb binding;
2491	struct vmw_resource_val_node *res_node;
2492	struct {
2493		SVGA3dCmdHeader header;
2494		SVGA3dCmdDXSetVertexBuffers body;
2495		SVGA3dVertexBuffer buf[];
2496	} *cmd;
2497	int i, ret, num;
2498
2499	if (unlikely(ctx_node == NULL)) {
2500		DRM_ERROR("DX Context not set.\n");
2501		return -EINVAL;
2502	}
2503
2504	cmd = container_of(header, typeof(*cmd), header);
2505	num = (cmd->header.size - sizeof(cmd->body)) /
2506		sizeof(SVGA3dVertexBuffer);
2507	if ((u64)num + (u64)cmd->body.startBuffer >
2508	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2509		DRM_ERROR("Invalid number of vertex buffers.\n");
2510		return -EINVAL;
2511	}
2512
2513	for (i = 0; i < num; i++) {
2514		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2515					user_surface_converter,
2516					&cmd->buf[i].sid, &res_node);
2517		if (unlikely(ret != 0))
2518			return ret;
2519
2520		binding.bi.ctx = ctx_node->res;
2521		binding.bi.bt = vmw_ctx_binding_vb;
2522		binding.bi.res = ((res_node) ? res_node->res : NULL);
2523		binding.offset = cmd->buf[i].offset;
2524		binding.stride = cmd->buf[i].stride;
2525		binding.slot = i + cmd->body.startBuffer;
2526
2527		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2528				0, binding.slot);
2529	}
2530
2531	return 0;
2532}
2533
2534/**
2535 * vmw_cmd_dx_set_index_buffer - Validate an
2536 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2537 *
2538 * @dev_priv: Pointer to a device private struct.
2539 * @sw_context: The software context being used for this batch.
2540 * @header: Pointer to the command header in the command stream.
2541 */
2542static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2543				       struct vmw_sw_context *sw_context,
2544				       SVGA3dCmdHeader *header)
2545{
2546	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2547	struct vmw_ctx_bindinfo_ib binding;
2548	struct vmw_resource_val_node *res_node;
2549	struct {
2550		SVGA3dCmdHeader header;
2551		SVGA3dCmdDXSetIndexBuffer body;
2552	} *cmd;
2553	int ret;
2554
2555	if (unlikely(ctx_node == NULL)) {
2556		DRM_ERROR("DX Context not set.\n");
2557		return -EINVAL;
2558	}
2559
2560	cmd = container_of(header, typeof(*cmd), header);
2561	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2562				user_surface_converter,
2563				&cmd->body.sid, &res_node);
2564	if (unlikely(ret != 0))
2565		return ret;
2566
2567	binding.bi.ctx = ctx_node->res;
2568	binding.bi.res = ((res_node) ? res_node->res : NULL);
2569	binding.bi.bt = vmw_ctx_binding_ib;
2570	binding.offset = cmd->body.offset;
2571	binding.format = cmd->body.format;
2572
2573	vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2574
2575	return 0;
2576}
2577
2578/**
2579 * vmw_cmd_dx_set_rendertargets - Validate an
2580 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2581 *
2582 * @dev_priv: Pointer to a device private struct.
2583 * @sw_context: The software context being used for this batch.
2584 * @header: Pointer to the command header in the command stream.
2585 */
2586static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2587					struct vmw_sw_context *sw_context,
2588					SVGA3dCmdHeader *header)
2589{
2590	struct {
2591		SVGA3dCmdHeader header;
2592		SVGA3dCmdDXSetRenderTargets body;
2593	} *cmd = container_of(header, typeof(*cmd), header);
2594	int ret;
2595	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2596		sizeof(SVGA3dRenderTargetViewId);
2597
2598	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2599		DRM_ERROR("Invalid DX Rendertarget binding.\n");
2600		return -EINVAL;
2601	}
2602
2603	ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2604				    vmw_ctx_binding_ds, 0,
2605				    &cmd->body.depthStencilViewId, 1, 0);
2606	if (ret)
2607		return ret;
2608
2609	return vmw_view_bindings_add(sw_context, vmw_view_rt,
2610				     vmw_ctx_binding_dx_rt, 0,
2611				     (void *)&cmd[1], num_rt_view, 0);
2612}
2613
2614/**
2615 * vmw_cmd_dx_clear_rendertarget_view - Validate an
2616 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2617 *
2618 * @dev_priv: Pointer to a device private struct.
2619 * @sw_context: The software context being used for this batch.
2620 * @header: Pointer to the command header in the command stream.
2621 */
2622static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2623					      struct vmw_sw_context *sw_context,
2624					      SVGA3dCmdHeader *header)
2625{
2626	struct {
2627		SVGA3dCmdHeader header;
2628		SVGA3dCmdDXClearRenderTargetView body;
2629	} *cmd = container_of(header, typeof(*cmd), header);
2630
2631	return vmw_view_id_val_add(sw_context, vmw_view_rt,
2632				   cmd->body.renderTargetViewId);
2633}
2634
2635/**
2636 * vmw_cmd_dx_clear_depthstencil_view - Validate an
2637 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2638 *
2639 * @dev_priv: Pointer to a device private struct.
2640 * @sw_context: The software context being used for this batch.
2641 * @header: Pointer to the command header in the command stream.
2642 */
2643static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2644					      struct vmw_sw_context *sw_context,
2645					      SVGA3dCmdHeader *header)
2646{
2647	struct {
2648		SVGA3dCmdHeader header;
2649		SVGA3dCmdDXClearDepthStencilView body;
2650	} *cmd = container_of(header, typeof(*cmd), header);
2651
2652	return vmw_view_id_val_add(sw_context, vmw_view_ds,
2653				   cmd->body.depthStencilViewId);
2654}
2655
2656static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2657				  struct vmw_sw_context *sw_context,
2658				  SVGA3dCmdHeader *header)
2659{
2660	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2661	struct vmw_resource_val_node *srf_node;
2662	struct vmw_resource *res;
2663	enum vmw_view_type view_type;
2664	int ret;
2665	/*
2666	 * This is based on the fact that all affected define commands have
2667	 * the same initial command body layout.
2668	 */
2669	struct {
2670		SVGA3dCmdHeader header;
2671		uint32 defined_id;
2672		uint32 sid;
2673	} *cmd;
2674
2675	if (unlikely(ctx_node == NULL)) {
2676		DRM_ERROR("DX Context not set.\n");
2677		return -EINVAL;
2678	}
2679
2680	view_type = vmw_view_cmd_to_type(header->id);
2681	cmd = container_of(header, typeof(*cmd), header);
2682	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2683				user_surface_converter,
2684				&cmd->sid, &srf_node);
2685	if (unlikely(ret != 0))
2686		return ret;
2687
2688	res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2689	ret = vmw_cotable_notify(res, cmd->defined_id);
2690	vmw_resource_unreference(&res);
2691	if (unlikely(ret != 0))
2692		return ret;
2693
2694	return vmw_view_add(sw_context->man,
2695			    ctx_node->res,
2696			    srf_node->res,
2697			    view_type,
2698			    cmd->defined_id,
2699			    header,
2700			    header->size + sizeof(*header),
2701			    &sw_context->staged_cmd_res);
2702}
2703
2704/**
2705 * vmw_cmd_dx_set_so_targets - Validate an
2706 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2707 *
2708 * @dev_priv: Pointer to a device private struct.
2709 * @sw_context: The software context being used for this batch.
2710 * @header: Pointer to the command header in the command stream.
2711 */
2712static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2713				     struct vmw_sw_context *sw_context,
2714				     SVGA3dCmdHeader *header)
2715{
2716	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2717	struct vmw_ctx_bindinfo_so binding;
2718	struct vmw_resource_val_node *res_node;
2719	struct {
2720		SVGA3dCmdHeader header;
2721		SVGA3dCmdDXSetSOTargets body;
2722		SVGA3dSoTarget targets[];
2723	} *cmd;
2724	int i, ret, num;
2725
2726	if (unlikely(ctx_node == NULL)) {
2727		DRM_ERROR("DX Context not set.\n");
2728		return -EINVAL;
2729	}
2730
2731	cmd = container_of(header, typeof(*cmd), header);
2732	num = (cmd->header.size - sizeof(cmd->body)) /
2733		sizeof(SVGA3dSoTarget);
2734
2735	if (num > SVGA3D_DX_MAX_SOTARGETS) {
2736		DRM_ERROR("Invalid DX SO binding.\n");
2737		return -EINVAL;
2738	}
2739
2740	for (i = 0; i < num; i++) {
2741		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2742					user_surface_converter,
2743					&cmd->targets[i].sid, &res_node);
2744		if (unlikely(ret != 0))
2745			return ret;
2746
2747		binding.bi.ctx = ctx_node->res;
2748		binding.bi.res = ((res_node) ? res_node->res : NULL);
2749		binding.bi.bt = vmw_ctx_binding_so;
2750		binding.offset = cmd->targets[i].offset;
2751		binding.size = cmd->targets[i].sizeInBytes;
2752		binding.slot = i;
2753
2754		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2755				0, binding.slot);
2756	}
2757
2758	return 0;
2759}
2760
2761static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2762				struct vmw_sw_context *sw_context,
2763				SVGA3dCmdHeader *header)
2764{
2765	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2766	struct vmw_resource *res;
2767	/*
2768	 * This is based on the fact that all affected define commands have
2769	 * the same initial command body layout.
2770	 */
2771	struct {
2772		SVGA3dCmdHeader header;
2773		uint32 defined_id;
2774	} *cmd;
2775	enum vmw_so_type so_type;
2776	int ret;
2777
2778	if (unlikely(ctx_node == NULL)) {
2779		DRM_ERROR("DX Context not set.\n");
2780		return -EINVAL;
2781	}
2782
2783	so_type = vmw_so_cmd_to_type(header->id);
2784	res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2785	cmd = container_of(header, typeof(*cmd), header);
2786	ret = vmw_cotable_notify(res, cmd->defined_id);
2787	vmw_resource_unreference(&res);
2788
2789	return ret;
2790}
2791
2792/**
2793 * vmw_cmd_dx_check_subresource - Validate an
2794 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2795 *
2796 * @dev_priv: Pointer to a device private struct.
2797 * @sw_context: The software context being used for this batch.
2798 * @header: Pointer to the command header in the command stream.
2799 */
2800static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2801					struct vmw_sw_context *sw_context,
2802					SVGA3dCmdHeader *header)
2803{
2804	struct {
2805		SVGA3dCmdHeader header;
2806		union {
2807			SVGA3dCmdDXReadbackSubResource r_body;
2808			SVGA3dCmdDXInvalidateSubResource i_body;
2809			SVGA3dCmdDXUpdateSubResource u_body;
2810			SVGA3dSurfaceId sid;
2811		};
2812	} *cmd;
2813
2814	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2815		     offsetof(typeof(*cmd), sid));
2816	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2817		     offsetof(typeof(*cmd), sid));
2818	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2819		     offsetof(typeof(*cmd), sid));
2820
2821	cmd = container_of(header, typeof(*cmd), header);
2822
2823	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2824				 user_surface_converter,
2825				 &cmd->sid, NULL);
2826}
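
/*
 * Illustrative sketch (not part of the driver): the union trick used by
 * vmw_cmd_dx_check_subresource() above. Several command bodies all begin
 * with a surface id, so one handler overlays them in a union and statically
 * asserts that the id sits at the same offset in every variant, letting a
 * single &cmd->sid access serve all of them. The types are hypothetical.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct readback   { uint32_t sid; uint32_t subresource; };
struct invalidate { uint32_t sid; };

union any_body {
	struct readback   r;
	struct invalidate i;
	uint32_t sid;			/* common leading member */
};

static_assert(offsetof(union any_body, r.sid) == offsetof(union any_body, sid),
	      "sid must lead the readback variant");
static_assert(offsetof(union any_body, i.sid) == offsetof(union any_body, sid),
	      "sid must lead the invalidate variant");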
2827
2828static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2829				struct vmw_sw_context *sw_context,
2830				SVGA3dCmdHeader *header)
2831{
2832	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2833
2834	if (unlikely(ctx_node == NULL)) {
2835		DRM_ERROR("DX Context not set.\n");
2836		return -EINVAL;
2837	}
2838
2839	return 0;
2840}
2841
2842/**
2843 * vmw_cmd_dx_view_remove - validate a view remove command and
2844 * schedule the view resource for removal.
2845 *
2846 * @dev_priv: Pointer to a device private struct.
2847 * @sw_context: The software context being used for this batch.
2848 * @header: Pointer to the command header in the command stream.
2849 *
2850 * Check that the view exists, and if it was not created using this
2851 * command batch, make sure it's validated (present in the device) so that
2852 * the remove command will not confuse the device.
2853 */
2854static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2855				  struct vmw_sw_context *sw_context,
2856				  SVGA3dCmdHeader *header)
2857{
2858	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2859	struct {
2860		SVGA3dCmdHeader header;
2861		union vmw_view_destroy body;
2862	} *cmd = container_of(header, typeof(*cmd), header);
2863	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2864	struct vmw_resource *view;
2865	int ret;
2866
2867	if (!ctx_node) {
2868		DRM_ERROR("DX Context not set.\n");
2869		return -EINVAL;
2870	}
2871
2872	ret = vmw_view_remove(sw_context->man,
2873			      cmd->body.view_id, view_type,
2874			      &sw_context->staged_cmd_res,
2875			      &view);
2876	if (ret || !view)
2877		return ret;
2878
2879	/*
2880	 * Add view to the validate list iff it was not created using this
2881	 * command batch.
2882	 */
2883	return vmw_view_res_val_add(sw_context, view);
2884}
2885
2886/**
2887 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2888 * command
2889 *
2890 * @dev_priv: Pointer to a device private struct.
2891 * @sw_context: The software context being used for this batch.
2892 * @header: Pointer to the command header in the command stream.
2893 */
2894static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2895				    struct vmw_sw_context *sw_context,
2896				    SVGA3dCmdHeader *header)
2897{
2898	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2899	struct vmw_resource *res;
2900	struct {
2901		SVGA3dCmdHeader header;
2902		SVGA3dCmdDXDefineShader body;
2903	} *cmd = container_of(header, typeof(*cmd), header);
2904	int ret;
2905
2906	if (!ctx_node) {
2907		DRM_ERROR("DX Context not set.\n");
2908		return -EINVAL;
2909	}
2910
2911	res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2912	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2913	vmw_resource_unreference(&res);
2914	if (ret)
2915		return ret;
2916
2917	return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2918				 cmd->body.shaderId, cmd->body.type,
2919				 &sw_context->staged_cmd_res);
2920}
2921
2922/**
2923 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2924 * command
2925 *
2926 * @dev_priv: Pointer to a device private struct.
2927 * @sw_context: The software context being used for this batch.
2928 * @header: Pointer to the command header in the command stream.
2929 */
2930static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2931				     struct vmw_sw_context *sw_context,
2932				     SVGA3dCmdHeader *header)
2933{
2934	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2935	struct {
2936		SVGA3dCmdHeader header;
2937		SVGA3dCmdDXDestroyShader body;
2938	} *cmd = container_of(header, typeof(*cmd), header);
2939	int ret;
2940
2941	if (!ctx_node) {
2942		DRM_ERROR("DX Context not set.\n");
2943		return -EINVAL;
2944	}
2945
2946	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2947				&sw_context->staged_cmd_res);
2948	if (ret)
2949		DRM_ERROR("Could not find shader to remove.\n");
2950
2951	return ret;
2952}
2953
2954/**
2955 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
2956 * command
2957 *
2958 * @dev_priv: Pointer to a device private struct.
2959 * @sw_context: The software context being used for this batch.
2960 * @header: Pointer to the command header in the command stream.
2961 */
2962static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2963				  struct vmw_sw_context *sw_context,
2964				  SVGA3dCmdHeader *header)
2965{
2966	struct vmw_resource_val_node *ctx_node;
2967	struct vmw_resource_val_node *res_node;
2968	struct vmw_resource *res;
2969	struct {
2970		SVGA3dCmdHeader header;
2971		SVGA3dCmdDXBindShader body;
2972	} *cmd = container_of(header, typeof(*cmd), header);
2973	int ret;
2974
2975	if (cmd->body.cid != SVGA3D_INVALID_ID) {
2976		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2977					user_context_converter,
2978					&cmd->body.cid, &ctx_node);
2979		if (ret)
2980			return ret;
2981	} else {
2982		ctx_node = sw_context->dx_ctx_node;
2983		if (!ctx_node) {
2984			DRM_ERROR("DX Context not set.\n");
2985			return -EINVAL;
2986		}
2987	}
2988
2989	res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2990				cmd->body.shid, 0);
2991	if (IS_ERR(res)) {
2992		DRM_ERROR("Could not find shader to bind.\n");
2993		return PTR_ERR(res);
2994	}
2995
2996	ret = vmw_resource_val_add(sw_context, res, &res_node);
2997	if (ret) {
2998		DRM_ERROR("Error creating resource validation node.\n");
2999		goto out_unref;
3000	}
3001
3002
3003	ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
3004					&cmd->body.mobid,
3005					cmd->body.offsetInBytes);
3006out_unref:
3007	vmw_resource_unreference(&res);
3008
3009	return ret;
3010}
3011
3012/**
3013 * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
3014 *
3015 * @dev_priv: Pointer to a device private struct.
3016 * @sw_context: The software context being used for this batch.
3017 * @header: Pointer to the command header in the command stream.
3018 */
3019static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
3020			      struct vmw_sw_context *sw_context,
3021			      SVGA3dCmdHeader *header)
3022{
3023	struct {
3024		SVGA3dCmdHeader header;
3025		SVGA3dCmdDXGenMips body;
3026	} *cmd = container_of(header, typeof(*cmd), header);
3027
3028	return vmw_view_id_val_add(sw_context, vmw_view_sr,
3029				   cmd->body.shaderResourceViewId);
3030}
3031
3032static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3033				struct vmw_sw_context *sw_context,
3034				void *buf, uint32_t *size)
3035{
3036	uint32_t size_remaining = *size;
3037	uint32_t cmd_id;
3038
3039	cmd_id = ((uint32_t *)buf)[0];
3040	switch (cmd_id) {
3041	case SVGA_CMD_UPDATE:
3042		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3043		break;
3044	case SVGA_CMD_DEFINE_GMRFB:
3045		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3046		break;
3047	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3048		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3049		break;
3050	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3051		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3052		break;
3053	default:
3054		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
3055		return -EINVAL;
3056	}
3057
3058	if (*size > size_remaining) {
3059		DRM_ERROR("Invalid SVGA command (size mismatch):"
3060			  " %u.\n", cmd_id);
3061		return -EINVAL;
3062	}
3063
3064	if (unlikely(!sw_context->kernel)) {
3065		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
3066		return -EPERM;
3067	}
3068
3069	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3070		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3071
3072	return 0;
3073}
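
/*
 * Illustrative sketch (not part of the driver): the pattern behind
 * vmw_cmd_check_not_3d() for fixed-size 2D commands -- map each command id
 * to its known payload size, then refuse any command that claims more bytes
 * than actually remain in the submission. Ids and sizes are hypothetical.
 */
#include <stdint.h>

static int check_2d_cmd(uint32_t cmd_id, uint32_t remaining, uint32_t *size)
{
	switch (cmd_id) {
	case 1:				/* e.g. an UPDATE-style command */
		*size = sizeof(uint32_t) + 16;
		break;
	case 2:				/* e.g. a DEFINE-style command */
		*size = sizeof(uint32_t) + 24;
		break;
	default:
		return -1;		/* unknown command id */
	}

	/* A command may never extend past the bytes actually submitted. */
	return *size > remaining ? -1 : 0;
}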
3074
3075static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3076	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3077		    false, false, false),
3078	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3079		    false, false, false),
3080	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3081		    true, false, false),
3082	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3083		    true, false, false),
3084	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3085		    true, false, false),
3086	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3087		    false, false, false),
3088	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3089		    false, false, false),
3090	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3091		    true, false, false),
3092	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3093		    true, false, false),
3094	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3095		    true, false, false),
3096	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3097		    &vmw_cmd_set_render_target_check, true, false, false),
3098	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3099		    true, false, false),
3100	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3101		    true, false, false),
3102	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3103		    true, false, false),
3104	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3105		    true, false, false),
3106	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3107		    true, false, false),
3108	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3109		    true, false, false),
3110	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3111		    true, false, false),
3112	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3113		    false, false, false),
3114	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3115		    true, false, false),
3116	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3117		    true, false, false),
3118	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3119		    true, false, false),
3120	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3121		    true, false, false),
3122	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3123		    true, false, false),
3124	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3125		    true, false, false),
3126	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3127		    true, false, false),
3128	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3129		    true, false, false),
3130	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3131		    true, false, false),
3132	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3133		    true, false, false),
3134	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3135		    &vmw_cmd_blt_surf_screen_check, false, false, false),
3136	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3137		    false, false, false),
3138	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3139		    false, false, false),
3140	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3141		    false, false, false),
3142	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3143		    false, false, false),
3144	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3145		    false, false, false),
3146	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
3147		    false, false, false),
3148	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
3149		    false, false, false),
3150	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
3151		    false, false, false),
3152	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
3153		    false, false, false),
3154	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
3155		    false, false, false),
3156	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
3157		    false, false, false),
3158	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
3159		    false, false, false),
3160	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
3161		    false, false, false),
3162	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3163		    false, false, true),
3164	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3165		    false, false, true),
3166	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3167		    false, false, true),
3168	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3169		    false, false, true),
3170	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3171		    false, false, true),
3172	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3173		    false, false, true),
3174	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3175		    false, false, true),
3176	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3177		    false, false, true),
3178	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3179		    true, false, true),
3180	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3181		    false, false, true),
3182	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3183		    true, false, true),
3184	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3185		    &vmw_cmd_update_gb_surface, true, false, true),
3186	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3187		    &vmw_cmd_readback_gb_image, true, false, true),
3188	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3189		    &vmw_cmd_readback_gb_surface, true, false, true),
3190	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3191		    &vmw_cmd_invalidate_gb_image, true, false, true),
3192	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3193		    &vmw_cmd_invalidate_gb_surface, true, false, true),
3194	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3195		    false, false, true),
3196	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3197		    false, false, true),
3198	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3199		    false, false, true),
3200	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3201		    false, false, true),
3202	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3203		    false, false, true),
3204	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3205		    false, false, true),
3206	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3207		    true, false, true),
3208	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3209		    false, false, true),
3210	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3211		    false, false, false),
3212	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3213		    true, false, true),
3214	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3215		    true, false, true),
3216	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3217		    true, false, true),
3218	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3219		    true, false, true),
3220	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3221		    false, false, true),
3222	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3223		    false, false, true),
3224	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3225		    false, false, true),
3226	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3227		    false, false, true),
3228	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3229		    false, false, true),
3230	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3231		    false, false, true),
3232	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3233		    false, false, true),
3234	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3235		    false, false, true),
3236	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3237		    false, false, true),
3238	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3239		    false, false, true),
3240	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3241		    true, false, true),
3242	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3243		    false, false, true),
3244	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3245		    false, false, true),
3246	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3247		    false, false, true),
3248	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3249		    false, false, true),
3250
3251	/*
3252	 * DX commands
3253	 */
3254	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3255		    false, false, true),
3256	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3257		    false, false, true),
3258	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3259		    false, false, true),
3260	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3261		    false, false, true),
3262	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3263		    false, false, true),
3264	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3265		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3266	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3267		    &vmw_cmd_dx_set_shader_res, true, false, true),
3268	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3269		    true, false, true),
3270	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3271		    true, false, true),
3272	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3273		    true, false, true),
3274	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3275		    true, false, true),
3276	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3277		    true, false, true),
3278	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3279		    &vmw_cmd_dx_cid_check, true, false, true),
3280	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3281		    true, false, true),
3282	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3283		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3284	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3285		    &vmw_cmd_dx_set_index_buffer, true, false, true),
3286	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3287		    &vmw_cmd_dx_set_rendertargets, true, false, true),
3288	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3289		    true, false, true),
3290	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3291		    &vmw_cmd_dx_cid_check, true, false, true),
3292	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3293		    &vmw_cmd_dx_cid_check, true, false, true),
3294	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3295		    true, false, true),
3296	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3297		    true, false, true),
3298	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3299		    true, false, true),
3300	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3301		    &vmw_cmd_dx_cid_check, true, false, true),
3302	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3303		    true, false, true),
3304	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3305		    true, false, true),
3306	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3307		    true, false, true),
3308	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3309		    true, false, true),
3310	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3311		    true, false, true),
3312	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3313		    true, false, true),
3314	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3315		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3316	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3317		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3318	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3319		    true, false, true),
3320	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3321		    true, false, true),
3322	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3323		    &vmw_cmd_dx_check_subresource, true, false, true),
3324	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3325		    &vmw_cmd_dx_check_subresource, true, false, true),
3326	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3327		    &vmw_cmd_dx_check_subresource, true, false, true),
3328	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3329		    &vmw_cmd_dx_view_define, true, false, true),
3330	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3331		    &vmw_cmd_dx_view_remove, true, false, true),
3332	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3333		    &vmw_cmd_dx_view_define, true, false, true),
3334	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3335		    &vmw_cmd_dx_view_remove, true, false, true),
3336	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3337		    &vmw_cmd_dx_view_define, true, false, true),
3338	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3339		    &vmw_cmd_dx_view_remove, true, false, true),
3340	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3341		    &vmw_cmd_dx_so_define, true, false, true),
3342	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3343		    &vmw_cmd_dx_cid_check, true, false, true),
3344	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3345		    &vmw_cmd_dx_so_define, true, false, true),
3346	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3347		    &vmw_cmd_dx_cid_check, true, false, true),
3348	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3349		    &vmw_cmd_dx_so_define, true, false, true),
3350	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3351		    &vmw_cmd_dx_cid_check, true, false, true),
3352	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3353		    &vmw_cmd_dx_so_define, true, false, true),
3354	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3355		    &vmw_cmd_dx_cid_check, true, false, true),
3356	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3357		    &vmw_cmd_dx_so_define, true, false, true),
3358	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3359		    &vmw_cmd_dx_cid_check, true, false, true),
3360	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3361		    &vmw_cmd_dx_define_shader, true, false, true),
3362	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3363		    &vmw_cmd_dx_destroy_shader, true, false, true),
3364	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3365		    &vmw_cmd_dx_bind_shader, true, false, true),
3366	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3367		    &vmw_cmd_dx_so_define, true, false, true),
3368	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3369		    &vmw_cmd_dx_cid_check, true, false, true),
3370	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3371		    true, false, true),
3372	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3373		    &vmw_cmd_dx_set_so_targets, true, false, true),
3374	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3375		    &vmw_cmd_dx_cid_check, true, false, true),
3376	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3377		    &vmw_cmd_dx_cid_check, true, false, true),
3378	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3379		    &vmw_cmd_buffer_copy_check, true, false, true),
3380	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3381		    &vmw_cmd_pred_copy_check, true, false, true),
3382};
3383
3384static int vmw_cmd_check(struct vmw_private *dev_priv,
3385			 struct vmw_sw_context *sw_context,
3386			 void *buf, uint32_t *size)
3387{
3388	uint32_t cmd_id;
3389	uint32_t size_remaining = *size;
3390	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3391	int ret;
3392	const struct vmw_cmd_entry *entry;
3393	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3394
3395	cmd_id = ((uint32_t *)buf)[0];
3396	/* Handle any non-3D commands */
3397	if (unlikely(cmd_id < SVGA_CMD_MAX))
3398		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3399
3400
3401	cmd_id = header->id;
3402	*size = header->size + sizeof(SVGA3dCmdHeader);
3403
3404	cmd_id -= SVGA_3D_CMD_BASE;
3405	if (unlikely(*size > size_remaining))
3406		goto out_invalid;
3407
3408	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3409		goto out_invalid;
3410
3411	entry = &vmw_cmd_entries[cmd_id];
3412	if (unlikely(!entry->func))
3413		goto out_invalid;
3414
3415	if (unlikely(!entry->user_allow && !sw_context->kernel))
3416		goto out_privileged;
3417
3418	if (unlikely(entry->gb_disable && gb))
3419		goto out_old;
3420
3421	if (unlikely(entry->gb_enable && !gb))
3422		goto out_new;
3423
3424	ret = entry->func(dev_priv, sw_context, header);
3425	if (unlikely(ret != 0))
3426		goto out_invalid;
3427
3428	return 0;
3429out_invalid:
3430	DRM_ERROR("Invalid SVGA3D command: %d\n",
3431		  cmd_id + SVGA_3D_CMD_BASE);
3432	return -EINVAL;
3433out_privileged:
3434	DRM_ERROR("Privileged SVGA3D command: %d\n",
3435		  cmd_id + SVGA_3D_CMD_BASE);
3436	return -EPERM;
3437out_old:
3438	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3439		  cmd_id + SVGA_3D_CMD_BASE);
3440	return -EINVAL;
3441out_new:
3442	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3443		  cmd_id + SVGA_3D_CMD_BASE);
3444	return -EINVAL;
3445}
3446
3447static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3448			     struct vmw_sw_context *sw_context,
3449			     void *buf,
3450			     uint32_t size)
3451{
3452	int32_t cur_size = size;
3453	int ret;
3454
3455	sw_context->buf_start = buf;
3456
3457	while (cur_size > 0) {
3458		size = cur_size;
3459		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3460		if (unlikely(ret != 0))
3461			return ret;
3462		buf = (void *)((unsigned long) buf + size);
3463		cur_size -= size;
3464	}
3465
3466	if (unlikely(cur_size != 0)) {
3467		DRM_ERROR("Command verifier out of sync.\n");
3468		return -EINVAL;
3469	}
3470
3471	return 0;
3472}
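/*
 * Illustrative sketch (hypothetical names and types, not part of this
 * file): the walk-and-dispatch pattern of vmw_cmd_check_all() and
 * vmw_cmd_check() above, reduced to a self-contained user-space example.
 */
#include <stdint.h>
#include <stddef.h>

struct hdr {
	uint32_t id;	/* command id, indexes the verifier table */
	uint32_t size;	/* body size in bytes, excluding the header */
};

typedef int (*check_fn)(const void *body, uint32_t body_size);

static int check_stream(const uint8_t *buf, size_t len,
			const check_fn *table, size_t table_len)
{
	while (len >= sizeof(struct hdr)) {
		const struct hdr *h = (const struct hdr *)buf;
		size_t total = sizeof(*h) + h->size;

		/* Reject commands that overrun the stream or the table,
		 * mirroring the size_remaining and cmd_id checks above. */
		if (total > len || h->id >= table_len || !table[h->id])
			return -1;
		if (table[h->id](buf + sizeof(*h), h->size) != 0)
			return -1;
		buf += total;
		len -= total;
	}
	return len == 0 ? 0 : -1;
}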
3473
3474static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3475{
3476	sw_context->cur_reloc = 0;
3477}
3478
3479static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3480{
3481	uint32_t i;
3482	struct vmw_relocation *reloc;
3483	struct ttm_validate_buffer *validate;
3484	struct ttm_buffer_object *bo;
3485
3486	for (i = 0; i < sw_context->cur_reloc; ++i) {
3487		reloc = &sw_context->relocs[i];
3488		validate = &sw_context->val_bufs[reloc->index].base;
3489		bo = validate->bo;
3490		switch (bo->mem.mem_type) {
3491		case TTM_PL_VRAM:
3492			reloc->location->offset += bo->offset;
3493			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3494			break;
3495		case VMW_PL_GMR:
3496			reloc->location->gmrId = bo->mem.start;
3497			break;
3498		case VMW_PL_MOB:
3499			*reloc->mob_loc = bo->mem.start;
3500			break;
3501		default:
3502			BUG();
3503		}
3504	}
3505	vmw_free_relocations(sw_context);
3506}
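/*
 * Illustrative sketch (hypothetical names, not from the driver) of the
 * deferred-fixup pattern used by vmw_apply_relocations() above: each
 * relocation records a slot inside the command buffer, and once final
 * buffer placement is known every recorded slot is rewritten in one pass.
 */
#include <stdint.h>

enum placement { PLACE_VRAM, PLACE_GMR };

struct reloc {
	uint32_t *where;	/* slot to patch in the command buffer */
	enum placement place;	/* where the buffer finally ended up */
	uint32_t vram_offset;	/* valid when place == PLACE_VRAM */
	uint32_t gmr_id;	/* valid when place == PLACE_GMR */
};

static void apply_relocs(struct reloc *r, unsigned int n)
{
	for (; n--; ++r)
		*r->where = (r->place == PLACE_VRAM) ? r->vram_offset
						     : r->gmr_id;
}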
3507
3508/**
3509 * vmw_resource_list_unreference - Free up a resource list and unreference
3510 * all resources referenced by it.
3511 *
3512 * @list: The resource list.
3513 */
3514static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3515					  struct list_head *list)
3516{
3517	struct vmw_resource_val_node *val, *val_next;
3518
3519	/*
3520	 * Drop references to resources held during command submission.
3521	 */
3522
3523	list_for_each_entry_safe(val, val_next, list, head) {
3524		list_del_init(&val->head);
3525		vmw_resource_unreference(&val->res);
3526
3527		if (val->staged_bindings) {
3528			if (val->staged_bindings != sw_context->staged_bindings)
3529				vmw_binding_state_free(val->staged_bindings);
3530			else
3531				sw_context->staged_bindings_inuse = false;
3532			val->staged_bindings = NULL;
3533		}
3534
3535		kfree(val);
3536	}
3537}
3538
3539static void vmw_clear_validations(struct vmw_sw_context *sw_context)
3540{
3541	struct vmw_validate_buffer *entry, *next;
3542	struct vmw_resource_val_node *val;
3543
3544	/*
3545	 * Drop references to DMA buffers held during command submission.
3546	 */
3547	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
3548				 base.head) {
3549		list_del(&entry->base.head);
3550		ttm_bo_unref(&entry->base.bo);
3551		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
3552		sw_context->cur_val_buf--;
3553	}
3554	BUG_ON(sw_context->cur_val_buf != 0);
3555
3556	list_for_each_entry(val, &sw_context->resource_list, head)
3557		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
3558}
3559
3560int vmw_validate_single_buffer(struct vmw_private *dev_priv,
3561			       struct ttm_buffer_object *bo,
3562			       bool interruptible,
3563			       bool validate_as_mob)
3564{
3565	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
3566						  base);
3567	int ret;
3568
3569	if (vbo->pin_count > 0)
3570		return 0;
3571
3572	if (validate_as_mob)
3573		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
3574				       false);
3575
3576	/*
3577	 * Put BO in VRAM if there is space, otherwise as a GMR.
3578	 * If there is no space in VRAM and GMR ids are all used up,
3579	 * start evicting GMRs to make room. If the DMA buffer can't be
3580	 * used as a GMR, this will return -ENOMEM.
3581	 */
3582
3583	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
3584			      false);
3585	if (likely(ret == 0 || ret == -ERESTARTSYS))
3586		return ret;
3587
3588	/*
3589	 * If that failed, try VRAM again, this time evicting
3590	 * previous contents.
3591	 */
3592
3593	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
3594	return ret;
3595}
3596
3597static int vmw_validate_buffers(struct vmw_private *dev_priv,
3598				struct vmw_sw_context *sw_context)
3599{
3600	struct vmw_validate_buffer *entry;
3601	int ret;
3602
3603	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
3604		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
3605						 true,
3606						 entry->validate_as_mob);
3607		if (unlikely(ret != 0))
3608			return ret;
3609	}
3610	return 0;
3611}
3612
3613static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3614				 uint32_t size)
3615{
3616	if (likely(sw_context->cmd_bounce_size >= size))
3617		return 0;
3618
3619	if (sw_context->cmd_bounce_size == 0)
3620		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3621
3622	while (sw_context->cmd_bounce_size < size) {
3623		sw_context->cmd_bounce_size =
3624			PAGE_ALIGN(sw_context->cmd_bounce_size +
3625				   (sw_context->cmd_bounce_size >> 1));
3626	}
3627
3628	if (sw_context->cmd_bounce != NULL)
3629		vfree(sw_context->cmd_bounce);
3630
3631	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3632
3633	if (sw_context->cmd_bounce == NULL) {
3634		DRM_ERROR("Failed to allocate command bounce buffer.\n");
3635		sw_context->cmd_bounce_size = 0;
3636		return -ENOMEM;
3637	}
3638
3639	return 0;
3640}
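/*
 * Worked example for vmw_resize_cmd_bounce() above: each round grows the
 * bounce size by roughly 1.5x and page-aligns the result.  Assuming a
 * 32 KiB initial size (the usual VMWGFX_CMD_BOUNCE_INIT_SIZE) and 4 KiB
 * pages, a 200 KiB request grows the buffer
 * 32K -> 48K -> 72K -> 108K -> 164K -> 248K, at which point the loop
 * stops because 248K >= 200K.
 */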
3641
3642/**
3643 * vmw_execbuf_fence_commands - create and submit a command stream fence
3644 *
3645 * Creates a fence object and submits a command stream marker.
3646 * If this fails for some reason, we sync the fifo and set *@p_fence to
3647 * NULL. It is then safe to fence buffers with a NULL pointer.
3648 *
3649 * Creates a user-space handle if @p_handle is non-NULL; in that case
3650 * @file_priv must also be non-NULL.
3651 */
3652
3653int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3654			       struct vmw_private *dev_priv,
3655			       struct vmw_fence_obj **p_fence,
3656			       uint32_t *p_handle)
3657{
3658	uint32_t sequence;
3659	int ret;
3660	bool synced = false;
3661
3662	/* p_handle implies file_priv. */
3663	BUG_ON(p_handle != NULL && file_priv == NULL);
3664
3665	ret = vmw_fifo_send_fence(dev_priv, &sequence);
3666	if (unlikely(ret != 0)) {
3667		DRM_ERROR("Fence submission error. Syncing.\n");
3668		synced = true;
3669	}
3670
3671	if (p_handle != NULL)
3672		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3673					    sequence, p_fence, p_handle);
3674	else
3675		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3676
3677	if (unlikely(ret != 0 && !synced)) {
3678		(void) vmw_fallback_wait(dev_priv, false, false,
3679					 sequence, false,
3680					 VMW_FENCE_WAIT_TIMEOUT);
3681		*p_fence = NULL;
3682	}
3683
3684	return 0;
3685}
3686
3687/**
3688 * vmw_execbuf_copy_fence_user - copy fence object information to
3689 * user-space.
3690 *
3691 * @dev_priv: Pointer to a vmw_private struct.
3692 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3693 * @ret: Return value from fence object creation.
3694 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
3695 * which the information should be copied.
3696 * @fence: Pointer to the fence object.
3697 * @fence_handle: User-space fence handle.
3698 *
3699 * This function copies fence information to user-space. If copying fails,
3700 * the user-space struct drm_vmw_fence_rep::error member is left
3701 * untouched; if user-space has preloaded it with -EFAULT, the failed
3702 * copy can be detected there.
3703 * Also, since a failed copy leaves user-space unable to signal the fence
3704 * object, we wait for the fence immediately and then unreference the
3705 * user-space reference.
3706 */
3707void
3708vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3709			    struct vmw_fpriv *vmw_fp,
3710			    int ret,
3711			    struct drm_vmw_fence_rep __user *user_fence_rep,
3712			    struct vmw_fence_obj *fence,
3713			    uint32_t fence_handle)
3714{
3715	struct drm_vmw_fence_rep fence_rep;
3716
3717	if (user_fence_rep == NULL)
3718		return;
3719
3720	memset(&fence_rep, 0, sizeof(fence_rep));
3721
3722	fence_rep.error = ret;
3723	if (ret == 0) {
3724		BUG_ON(fence == NULL);
3725
3726		fence_rep.handle = fence_handle;
3727		fence_rep.seqno = fence->base.seqno;
3728		vmw_update_seqno(dev_priv, &dev_priv->fifo);
3729		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3730	}
3731
3732	/*
3733	 * copy_to_user errors will be detected by user space not
3734	 * seeing fence_rep::error filled in. Typically
3735	 * user-space would have pre-set that member to -EFAULT.
3736	 */
3737	ret = copy_to_user(user_fence_rep, &fence_rep,
3738			   sizeof(fence_rep));
3739
3740	/*
3741	 * User-space lost the fence object. We need to sync
3742	 * and unreference the handle.
3743	 */
3744	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3745		ttm_ref_object_base_unref(vmw_fp->tfile,
3746					  fence_handle, TTM_REF_USAGE);
3747		DRM_ERROR("Fence copy error. Syncing.\n");
3748		(void) vmw_fence_obj_wait(fence, false, false,
3749					  VMW_FENCE_WAIT_TIMEOUT);
3750	}
3751}
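/*
 * Hypothetical user-space sketch of the convention the comment in
 * vmw_execbuf_copy_fence_user() above relies on: preload
 * drm_vmw_fence_rep::error with -EFAULT before the ioctl, so a failed
 * copy_to_user() in the kernel (which leaves the struct untouched) is
 * detectable afterwards.  Struct and ioctl names as in vmwgfx_drm.h and
 * libdrm; the helper itself is illustrative.
 */
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

static int submit_and_check_fence(int fd, struct drm_vmw_execbuf_arg *arg)
{
	struct drm_vmw_fence_rep fence_rep;

	memset(&fence_rep, 0, sizeof(fence_rep));
	fence_rep.error = -EFAULT;	/* sentinel: stays put on copy failure */
	arg->fence_rep = (uintptr_t)&fence_rep;

	if (drmIoctl(fd, DRM_IOCTL_VMW_EXECBUF, arg) != 0)
		return -1;		/* the ioctl itself failed */

	/* -EFAULT still present => the kernel could not write the fence
	 * info back and has already waited and dropped the handle. */
	return fence_rep.error;
}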
3752
3753/**
3754 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
3755 * the fifo.
3756 *
3757 * @dev_priv: Pointer to a device private structure.
3758 * @kernel_commands: Pointer to the unpatched command batch.
3759 * @command_size: Size of the unpatched command batch.
3760 * @sw_context: Structure holding the relocation lists.
3761 *
3762 * Side effects: If this function returns 0, then the command batch
3763 * pointed to by @kernel_commands will have been modified.
3764 */
3765static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3766				   void *kernel_commands,
3767				   u32 command_size,
3768				   struct vmw_sw_context *sw_context)
3769{
3770	void *cmd;
3771
3772	if (sw_context->dx_ctx_node)
3773		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
3774					  sw_context->dx_ctx_node->res->id);
3775	else
3776		cmd = vmw_fifo_reserve(dev_priv, command_size);
3777	if (!cmd) {
3778		DRM_ERROR("Failed reserving fifo space for commands.\n");
3779		return -ENOMEM;
3780	}
3781
3782	vmw_apply_relocations(sw_context);
3783	memcpy(cmd, kernel_commands, command_size);
3784	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3785	vmw_resource_relocations_free(&sw_context->res_relocations);
3786	vmw_fifo_commit(dev_priv, command_size);
3787
3788	return 0;
3789}
3790
3791/**
3792 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
3793 * the command buffer manager.
3794 *
3795 * @dev_priv: Pointer to a device private structure.
3796 * @header: Opaque handle to the command buffer allocation.
3797 * @command_size: Size of the unpatched command batch.
3798 * @sw_context: Structure holding the relocation lists.
3799 *
3800 * Side effects: If this function returns 0, then the command buffer
3801 * represented by @header will have been modified.
3802 */
3803static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3804				     struct vmw_cmdbuf_header *header,
3805				     u32 command_size,
3806				     struct vmw_sw_context *sw_context)
3807{
3808	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
3809		  SVGA3D_INVALID_ID);
3810	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
3811				       id, false, header);
3812
3813	vmw_apply_relocations(sw_context);
3814	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3815	vmw_resource_relocations_free(&sw_context->res_relocations);
3816	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3817
3818	return 0;
3819}
3820
3821/**
3822 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3823 * submission using a command buffer.
3824 *
3825 * @dev_priv: Pointer to a device private structure.
3826 * @user_commands: User-space pointer to the commands to be submitted.
3827 * @command_size: Size of the unpatched command batch.
3828 * @header: Out parameter returning the opaque pointer to the command buffer.
3829 *
3830 * This function checks whether we can use the command buffer manager for
3831 * submission and if so, creates a command buffer of suitable size and
3832 * copies the user data into that buffer.
3833 *
3834 * On successful return, the function returns a pointer to the data in the
3835 * command buffer and *@header is set to non-NULL.
3836 * If command buffers could not be used, the function will return the value
3837 * of @kernel_commands on function call. That value may be NULL. In that case,
3838 * the value of *@header will be set to NULL.
3839 * If an error is encountered, the function will return a pointer error value.
3840 * If the function is interrupted by a signal while sleeping, it will return
3841 * -ERESTARTSYS cast to a pointer error value.
3842 */
3843static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3844				void __user *user_commands,
3845				void *kernel_commands,
3846				u32 command_size,
3847				struct vmw_cmdbuf_header **header)
3848{
3849	size_t cmdbuf_size;
3850	int ret;
3851
3852	*header = NULL;
3853	if (!dev_priv->cman || kernel_commands)
3854		return kernel_commands;
3855
3856	if (command_size > SVGA_CB_MAX_SIZE) {
3857		DRM_ERROR("Command buffer is too large.\n");
3858		return ERR_PTR(-EINVAL);
3859	}
3860
3861	/* If possible, add a little space for fencing. */
3862	cmdbuf_size = command_size + 512;
3863	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3864	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
3865					   true, header);
3866	if (IS_ERR(kernel_commands))
3867		return kernel_commands;
3868
3869	ret = copy_from_user(kernel_commands, user_commands,
3870			     command_size);
3871	if (ret) {
3872		DRM_ERROR("Failed copying commands.\n");
3873		vmw_cmdbuf_header_free(*header);
3874		*header = NULL;
3875		return ERR_PTR(-EFAULT);
3876	}
3877
3878	return kernel_commands;
3879}
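/*
 * Note on the sizing above: the extra 512 bytes give the kernel headroom
 * to append a fence command to the same buffer, but the total is still
 * clamped to SVGA_CB_MAX_SIZE.  A batch already within 512 bytes of the
 * maximum therefore just loses that headroom; it does not fail.
 */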
3880
3881static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3882				   struct vmw_sw_context *sw_context,
3883				   uint32_t handle)
3884{
3885	struct vmw_resource_val_node *ctx_node;
3886	struct vmw_resource *res;
3887	int ret;
3888
3889	if (handle == SVGA3D_INVALID_ID)
3890		return 0;
3891
3892	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
3893					      handle, user_context_converter,
3894					      &res);
3895	if (unlikely(ret != 0)) {
3896		DRM_ERROR("Could not find or user DX context 0x%08x.\n",
3897			  (unsigned) handle);
3898		return ret;
3899	}
3900
3901	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
3902	if (unlikely(ret != 0))
3903		goto out_err;
3904
3905	sw_context->dx_ctx_node = ctx_node;
3906	sw_context->man = vmw_context_res_man(res);
3907out_err:
3908	vmw_resource_unreference(&res);
3909	return ret;
3910}
3911
3912int vmw_execbuf_process(struct drm_file *file_priv,
3913			struct vmw_private *dev_priv,
3914			void __user *user_commands,
3915			void *kernel_commands,
3916			uint32_t command_size,
3917			uint64_t throttle_us,
3918			uint32_t dx_context_handle,
3919			struct drm_vmw_fence_rep __user *user_fence_rep,
3920			struct vmw_fence_obj **out_fence)
3921{
3922	struct vmw_sw_context *sw_context = &dev_priv->ctx;
3923	struct vmw_fence_obj *fence = NULL;
3924	struct vmw_resource *error_resource;
3925	struct list_head resource_list;
3926	struct vmw_cmdbuf_header *header;
3927	struct ww_acquire_ctx ticket;
3928	uint32_t handle;
3929	int ret;
3930
3931	if (throttle_us) {
3932		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
3933				   throttle_us);
3934
3935		if (ret)
3936			return ret;
3937	}
3938
3939	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
3940					     kernel_commands, command_size,
3941					     &header);
3942	if (IS_ERR(kernel_commands))
3943		return PTR_ERR(kernel_commands);
3944
3945	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
3946	if (ret) {
3947		ret = -ERESTARTSYS;
3948		goto out_free_header;
3949	}
3950
3951	sw_context->kernel = false;
3952	if (kernel_commands == NULL) {
3953		ret = vmw_resize_cmd_bounce(sw_context, command_size);
3954		if (unlikely(ret != 0))
3955			goto out_unlock;
3956
3957
3958		ret = copy_from_user(sw_context->cmd_bounce,
3959				     user_commands, command_size);
3960
3961		if (unlikely(ret != 0)) {
3962			ret = -EFAULT;
3963			DRM_ERROR("Failed copying commands.\n");
3964			goto out_unlock;
3965		}
3966		kernel_commands = sw_context->cmd_bounce;
3967	} else if (!header)
3968		sw_context->kernel = true;
3969
3970	sw_context->fp = vmw_fpriv(file_priv);
3971	sw_context->cur_reloc = 0;
3972	sw_context->cur_val_buf = 0;
3973	INIT_LIST_HEAD(&sw_context->resource_list);
3974	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
3975	sw_context->cur_query_bo = dev_priv->pinned_bo;
3976	sw_context->last_query_ctx = NULL;
3977	sw_context->needs_post_query_barrier = false;
3978	sw_context->dx_ctx_node = NULL;
3979	sw_context->dx_query_mob = NULL;
3980	sw_context->dx_query_ctx = NULL;
3981	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
3982	INIT_LIST_HEAD(&sw_context->validate_nodes);
3983	INIT_LIST_HEAD(&sw_context->res_relocations);
3984	if (sw_context->staged_bindings)
3985		vmw_binding_state_reset(sw_context->staged_bindings);
3986
3987	if (!sw_context->res_ht_initialized) {
3988		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
3989		if (unlikely(ret != 0))
3990			goto out_unlock;
3991		sw_context->res_ht_initialized = true;
3992	}
3993	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
3994	INIT_LIST_HEAD(&resource_list);
3995	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
3996	if (unlikely(ret != 0)) {
3997		list_splice_init(&sw_context->ctx_resource_list,
3998				 &sw_context->resource_list);
3999		goto out_err_nores;
4000	}
4001
4002	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4003				command_size);
4004	/*
4005	 * Merge the resource lists before checking the return status
4006	 * from vmw_cmd_check_all so that all the open hash table entries will
4007	 * be handled properly even if vmw_cmd_check_all fails.
4008	 */
4009	list_splice_init(&sw_context->ctx_resource_list,
4010			 &sw_context->resource_list);
4011
4012	if (unlikely(ret != 0))
4013		goto out_err_nores;
4014
4015	ret = vmw_resources_reserve(sw_context);
4016	if (unlikely(ret != 0))
4017		goto out_err_nores;
4018
4019	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
4020				     true, NULL);
4021	if (unlikely(ret != 0))
4022		goto out_err_nores;
4023
4024	ret = vmw_validate_buffers(dev_priv, sw_context);
4025	if (unlikely(ret != 0))
4026		goto out_err;
4027
4028	ret = vmw_resources_validate(sw_context);
4029	if (unlikely(ret != 0))
4030		goto out_err;
4031
4032	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4033	if (unlikely(ret != 0)) {
4034		ret = -ERESTARTSYS;
4035		goto out_err;
4036	}
4037
4038	if (dev_priv->has_mob) {
4039		ret = vmw_rebind_contexts(sw_context);
4040		if (unlikely(ret != 0))
4041			goto out_unlock_binding;
4042	}
4043
4044	if (!header) {
4045		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4046					      command_size, sw_context);
4047	} else {
4048		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4049						sw_context);
4050		header = NULL;
4051	}
4052	mutex_unlock(&dev_priv->binding_mutex);
4053	if (ret)
4054		goto out_err;
4055
4056	vmw_query_bo_switch_commit(dev_priv, sw_context);
4057	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
4058					 &fence,
4059					 (user_fence_rep) ? &handle : NULL);
4060	/*
4061	 * This error is harmless, because if fence submission fails,
4062	 * vmw_fifo_send_fence will sync. The error will be propagated to
4063	 * user-space in @user_fence_rep
4064	 */
4065
4066	if (ret != 0)
4067		DRM_ERROR("Fence submission error. Syncing.\n");
4068
4069	vmw_resources_unreserve(sw_context, false);
4070
4071	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
4072				    (void *) fence);
4073
4074	if (unlikely(dev_priv->pinned_bo != NULL &&
4075		     !dev_priv->query_cid_valid))
4076		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
4077
4078	vmw_clear_validations(sw_context);
4079	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4080				    user_fence_rep, fence, handle);
4081
4082	/* Don't unreference when handing fence out */
4083	if (unlikely(out_fence != NULL)) {
4084		*out_fence = fence;
4085		fence = NULL;
4086	} else if (likely(fence != NULL)) {
4087		vmw_fence_obj_unreference(&fence);
4088	}
4089
4090	list_splice_init(&sw_context->resource_list, &resource_list);
4091	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4092	mutex_unlock(&dev_priv->cmdbuf_mutex);
4093
4094	/*
4095	 * Unreference resources outside of the cmdbuf_mutex to
4096	 * avoid deadlocks in resource destruction paths.
4097	 */
4098	vmw_resource_list_unreference(sw_context, &resource_list);
4099
4100	return 0;
4101
4102out_unlock_binding:
4103	mutex_unlock(&dev_priv->binding_mutex);
4104out_err:
4105	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
4106out_err_nores:
4107	vmw_resources_unreserve(sw_context, true);
4108	vmw_resource_relocations_free(&sw_context->res_relocations);
4109	vmw_free_relocations(sw_context);
4110	vmw_clear_validations(sw_context);
4111	if (unlikely(dev_priv->pinned_bo != NULL &&
4112		     !dev_priv->query_cid_valid))
4113		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4114out_unlock:
4115	list_splice_init(&sw_context->resource_list, &resource_list);
4116	error_resource = sw_context->error_resource;
4117	sw_context->error_resource = NULL;
4118	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4119	mutex_unlock(&dev_priv->cmdbuf_mutex);
4120
4121	/*
4122	 * Unreference resources outside of the cmdbuf_mutex to
4123	 * avoid deadlocks in resource destruction paths.
4124	 */
4125	vmw_resource_list_unreference(sw_context, &resource_list);
4126	if (unlikely(error_resource != NULL))
4127		vmw_resource_unreference(&error_resource);
4128out_free_header:
4129	if (header)
4130		vmw_cmdbuf_header_free(header);
4131
4132	return ret;
4133}
4134
4135/**
4136 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4137 *
4138 * @dev_priv: The device private structure.
4139 *
4140 * This function is called to idle the fifo and unpin the query buffer
4141 * if the normal way to do this hits an error, which should typically be
4142 * extremely rare.
4143 */
4144static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4145{
4146	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
4147
4148	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4149	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4150	if (dev_priv->dummy_query_bo_pinned) {
4151		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4152		dev_priv->dummy_query_bo_pinned = false;
4153	}
4154}
4155
4156
4157/**
4158 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4159 * query bo.
4160 *
4161 * @dev_priv: The device private structure.
4162 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
4163 * _after_ a query barrier that flushes all queries touching the current
4164 * buffer pointed to by @dev_priv->pinned_bo
4165 *
4166 * This function should be used to unpin the pinned query bo, or
4167 * as a query barrier when we need to make sure that all queries have
4168 * finished before the next fifo command. (For example on hardware
4169 * context destructions where the hardware may otherwise leak unfinished
4170 * queries).
4171 *
4172 * This function does not return any failure codes, but makes attempts
4173 * to unpin safely in case of errors.
4174 *
4175 * The function will synchronize on the previous query barrier, and will
4176 * thus not finish until that barrier has executed.
4177 *
4178 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
4179 * before calling this function.
4180 */
4181void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4182				     struct vmw_fence_obj *fence)
4183{
4184	int ret = 0;
4185	struct list_head validate_list;
4186	struct ttm_validate_buffer pinned_val, query_val;
4187	struct vmw_fence_obj *lfence = NULL;
4188	struct ww_acquire_ctx ticket;
4189
4190	if (dev_priv->pinned_bo == NULL)
4191		goto out_unlock;
4192
4193	INIT_LIST_HEAD(&validate_list);
4194
4195	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
4196	pinned_val.shared = false;
4197	list_add_tail(&pinned_val.head, &validate_list);
4198
4199	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
4200	query_val.shared = false;
4201	list_add_tail(&query_val.head, &validate_list);
4202
4203	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
4204				     false, NULL);
4205	if (unlikely(ret != 0)) {
4206		vmw_execbuf_unpin_panic(dev_priv);
4207		goto out_no_reserve;
4208	}
4209
4210	if (dev_priv->query_cid_valid) {
4211		BUG_ON(fence != NULL);
4212		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
4213		if (unlikely(ret != 0)) {
4214			vmw_execbuf_unpin_panic(dev_priv);
4215			goto out_no_emit;
4216		}
4217		dev_priv->query_cid_valid = false;
4218	}
4219
4220	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4221	if (dev_priv->dummy_query_bo_pinned) {
4222		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4223		dev_priv->dummy_query_bo_pinned = false;
4224	}
4225	if (fence == NULL) {
4226		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4227						  NULL);
4228		fence = lfence;
4229	}
4230	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
4231	if (lfence != NULL)
4232		vmw_fence_obj_unreference(&lfence);
4233
4234	ttm_bo_unref(&query_val.bo);
4235	ttm_bo_unref(&pinned_val.bo);
4236	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4237	DRM_INFO("Dummy query bo pin count: %d\n",
4238		 dev_priv->dummy_query_bo->pin_count);
4239
4240out_unlock:
4241	return;
4242
4243out_no_emit:
4244	ttm_eu_backoff_reservation(&ticket, &validate_list);
4245out_no_reserve:
4246	ttm_bo_unref(&query_val.bo);
4247	ttm_bo_unref(&pinned_val.bo);
4248	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4249}
4250
4251/**
4252 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4253 * query bo.
4254 *
4255 * @dev_priv: The device private structure.
4256 *
4257 * This function should be used to unpin the pinned query bo, or
4258 * as a query barrier when we need to make sure that all queries have
4259 * finished before the next fifo command. (For example on hardware
4260 * context destructions where the hardware may otherwise leak unfinished
4261 * queries).
4262 *
4263 * This function does not return any failure codes, but makes attempts
4264 * to unpin safely in case of errors.
4265 *
4266 * The function will synchronize on the previous query barrier, and will
4267 * thus not finish until that barrier has executed.
4268 */
4269void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4270{
4271	mutex_lock(&dev_priv->cmdbuf_mutex);
4272	if (dev_priv->query_cid_valid)
4273		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4274	mutex_unlock(&dev_priv->cmdbuf_mutex);
4275}
4276
4277int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
4278		      struct drm_file *file_priv, size_t size)
4279{
4280	struct vmw_private *dev_priv = vmw_priv(dev);
4281	struct drm_vmw_execbuf_arg arg;
4282	int ret;
4283	static const size_t copy_offset[] = {
4284		offsetof(struct drm_vmw_execbuf_arg, context_handle),
4285		sizeof(struct drm_vmw_execbuf_arg)};
4286
4287	if (unlikely(size < copy_offset[0])) {
4288		DRM_ERROR("Invalid command size, ioctl %d\n",
4289			  DRM_VMW_EXECBUF);
4290		return -EINVAL;
4291	}
4292
4293	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
4294		return -EFAULT;
4295
4296	/*
4297	 * Extend the ioctl argument while
4298	 * maintaining backwards compatibility:
4299	 * We take different code paths depending on the value of
4300	 * arg.version.
4301	 */
4302
4303	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
4304		     arg.version == 0)) {
4305		DRM_ERROR("Incorrect execbuf version.\n");
4306		return -EINVAL;
4307	}
4308
4309	if (arg.version > 1 &&
4310	    copy_from_user(&arg.context_handle,
4311			   (void __user *) (data + copy_offset[0]),
4312			   copy_offset[arg.version - 1] -
4313			   copy_offset[0]) != 0)
4314		return -EFAULT;
4315
4316	switch (arg.version) {
4317	case 1:
4318		arg.context_handle = (uint32_t) -1;
4319		break;
4320	case 2:
4321		if (arg.pad64 != 0) {
4322			DRM_ERROR("Unused IOCTL data not set to zero.\n");
4323			return -EINVAL;
4324		}
4325		break;
4326	default:
4327		break;
4328	}
4329
4330	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4331	if (unlikely(ret != 0))
4332		return ret;
4333
4334	ret = vmw_execbuf_process(file_priv, dev_priv,
4335				  (void __user *)(unsigned long)arg.commands,
4336				  NULL, arg.command_size, arg.throttle_us,
4337				  arg.context_handle,
4338				  (void __user *)(unsigned long)arg.fence_rep,
4339				  NULL);
4340	ttm_read_unlock(&dev_priv->reservation_sem);
4341	if (unlikely(ret != 0))
4342		return ret;
4343
4344	vmw_kms_cursor_post_execbuf(dev_priv);
4345
4346	return 0;
4347}
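/*
 * Illustrative sketch (hypothetical structs, not the real ABI) of the
 * versioned-argument scheme in vmw_execbuf_ioctl() above: a size table
 * records how many bytes each argument version supplies, so members can
 * be appended for new versions without breaking old user space.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct arg_v1 { uint32_t version; uint32_t command_size; };
struct arg_v2 { uint32_t version; uint32_t command_size;
		uint32_t context_handle; uint32_t pad; };

static int parse_versioned_arg(const void *user, size_t user_size,
			       struct arg_v2 *out)
{
	static const size_t copy_size[] = {
		sizeof(struct arg_v1),	/* version 1 ends here */
		sizeof(struct arg_v2),	/* version 2 appends context_handle */
	};

	if (user_size < copy_size[0])
		return -1;
	memset(out, 0, sizeof(*out));
	memcpy(out, user, copy_size[0]);	/* common prefix first */
	if (out->version == 0 || out->version > 2)
		return -1;
	if (out->version >= 2 && user_size >= copy_size[1])
		memcpy(out, user, copy_size[1]);	/* then the extension */
	else
		out->context_handle = (uint32_t)-1;	/* version 1 default */
	return 0;
}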
v3.5.6
  27
  28#include "vmwgfx_drv.h"
  29#include "vmwgfx_reg.h"
  30#include "ttm/ttm_bo_api.h"
  31#include "ttm/ttm_placement.h"
  32
  33static int vmw_cmd_invalid(struct vmw_private *dev_priv,
  34			   struct vmw_sw_context *sw_context,
  35			   SVGA3dCmdHeader *header)
  36{
  37	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
  38}
  39
  40static int vmw_cmd_ok(struct vmw_private *dev_priv,
  41		      struct vmw_sw_context *sw_context,
  42		      SVGA3dCmdHeader *header)
  43{
  44	return 0;
  45}
  46
  47static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
  48					  struct vmw_resource **p_res)
  49{
  50	struct vmw_resource *res = *p_res;
  51
  52	if (list_empty(&res->validate_head)) {
  53		list_add_tail(&res->validate_head, &sw_context->resource_list);
  54		*p_res = NULL;
  55	} else
  56		vmw_resource_unreference(p_res);
  57}
  58
  59/**
  60 * vmw_bo_to_validate_list - add a bo to a validate list
  61 *
  62 * @sw_context: The software context used for this command submission batch.
  63 * @bo: The buffer object to add.
  64 * @fence_flags: Fence flags to be or'ed with any other fence flags for
  65 * this buffer on this submission batch.
  66 * @p_val_node: If non-NULL Will be updated with the validate node number
  67 * on return.
  68 *
  69 * Returns -EINVAL if the limit of number of buffer objects per command
  70 * submission is reached.
  71 */
  72static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
  73				   struct ttm_buffer_object *bo,
  74				   uint32_t fence_flags,
  75				   uint32_t *p_val_node)
  76{
  77	uint32_t val_node;
  78	struct ttm_validate_buffer *val_buf;
  79
  80	val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
  81
  82	if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
  83		DRM_ERROR("Max number of DMA buffers per submission"
  84			  " exceeded.\n");
  85		return -EINVAL;
  86	}
  87
  88	val_buf = &sw_context->val_bufs[val_node];
  89	if (unlikely(val_node == sw_context->cur_val_buf)) {
  90		val_buf->new_sync_obj_arg = NULL;
  91		val_buf->bo = ttm_bo_reference(bo);
  92		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
  93		++sw_context->cur_val_buf;
  94	}
  95
  96	val_buf->new_sync_obj_arg = (void *)
  97		((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
  98	sw_context->fence_flags |= fence_flags;
  99
 100	if (p_val_node)
 101		*p_val_node = val_node;
 102
 103	return 0;
 104}
 105
 106static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 107			     struct vmw_sw_context *sw_context,
 108			     SVGA3dCmdHeader *header)
 109{
 110	struct vmw_resource *ctx;
 111
 112	struct vmw_cid_cmd {
 113		SVGA3dCmdHeader header;
 114		__le32 cid;
 115	} *cmd;
 116	int ret;
 117
 118	cmd = container_of(header, struct vmw_cid_cmd, header);
 119	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
 120		return 0;
 121
 122	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
 123				&ctx);
 124	if (unlikely(ret != 0)) {
 125		DRM_ERROR("Could not find or use context %u\n",
 126			  (unsigned) cmd->cid);
 127		return ret;
 128	}
 129
 130	sw_context->last_cid = cmd->cid;
 131	sw_context->cid_valid = true;
 132	sw_context->cur_ctx = ctx;
 133	vmw_resource_to_validate_list(sw_context, &ctx);
 134
 135	return 0;
 136}
 137
 138static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
 139			     struct vmw_sw_context *sw_context,
 140			     uint32_t *sid)
 141{
 142	struct vmw_surface *srf;
 143	int ret;
 144	struct vmw_resource *res;
 145
 146	if (*sid == SVGA3D_INVALID_ID)
 147		return 0;
 148
 149	if (likely((sw_context->sid_valid  &&
 150		      *sid == sw_context->last_sid))) {
 151		*sid = sw_context->sid_translation;
 152		return 0;
 153	}
 154
 155	ret = vmw_user_surface_lookup_handle(dev_priv,
 156					     sw_context->tfile,
 157					     *sid, &srf);
 158	if (unlikely(ret != 0)) {
 159		DRM_ERROR("Could ot find or use surface 0x%08x "
 160			  "address 0x%08lx\n",
 161			  (unsigned int) *sid,
 162			  (unsigned long) sid);
 163		return ret;
 164	}
 165
 166	ret = vmw_surface_validate(dev_priv, srf);
 167	if (unlikely(ret != 0)) {
 168		if (ret != -ERESTARTSYS)
 169			DRM_ERROR("Could not validate surface.\n");
 170		vmw_surface_unreference(&srf);
 171		return ret;
 172	}
 173
 174	sw_context->last_sid = *sid;
 175	sw_context->sid_valid = true;
 176	sw_context->sid_translation = srf->res.id;
 177	*sid = sw_context->sid_translation;
 178
 179	res = &srf->res;
 180	vmw_resource_to_validate_list(sw_context, &res);
 181
 182	return 0;
 183}
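/*
 * Illustrative sketch (hypothetical names) of the one-entry cache used
 * by vmw_cmd_sid_check() above: command streams tend to repeat the same
 * surface id many times in a row, so remembering the last successful
 * translation skips a handle lookup per command.
 */
#include <stdint.h>
#include <stdbool.h>

struct sid_cache {
	bool valid;
	uint32_t last_user_sid;	/* id as written by user space */
	uint32_t translation;	/* device-visible resource id */
};

static int translate_sid(struct sid_cache *c, uint32_t *sid,
			 int (*lookup)(uint32_t user_sid, uint32_t *dev_sid))
{
	uint32_t dev_sid;

	if (c->valid && *sid == c->last_user_sid) {
		*sid = c->translation;		/* fast path: cache hit */
		return 0;
	}
	if (lookup(*sid, &dev_sid) != 0)
		return -1;
	c->last_user_sid = *sid;
	c->translation = dev_sid;
	c->valid = true;
	*sid = dev_sid;
	return 0;
}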
 184
 185
 186static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 187					   struct vmw_sw_context *sw_context,
 188					   SVGA3dCmdHeader *header)
 189{
 190	struct vmw_sid_cmd {
 191		SVGA3dCmdHeader header;
 192		SVGA3dCmdSetRenderTarget body;
 193	} *cmd;
 194	int ret;
 195
 196	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
 197	if (unlikely(ret != 0))
 198		return ret;
 199
 200	cmd = container_of(header, struct vmw_sid_cmd, header);
 201	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
 202	return ret;
 203}
 204
 205static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
 206				      struct vmw_sw_context *sw_context,
 207				      SVGA3dCmdHeader *header)
 208{
 209	struct vmw_sid_cmd {
 210		SVGA3dCmdHeader header;
 211		SVGA3dCmdSurfaceCopy body;
 212	} *cmd;
 213	int ret;
 214
 215	cmd = container_of(header, struct vmw_sid_cmd, header);
 216	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
 217	if (unlikely(ret != 0))
 218		return ret;
 219	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
 220}
 221
 222static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
 223				     struct vmw_sw_context *sw_context,
 224				     SVGA3dCmdHeader *header)
 225{
 226	struct vmw_sid_cmd {
 227		SVGA3dCmdHeader header;
 228		SVGA3dCmdSurfaceStretchBlt body;
 229	} *cmd;
 230	int ret;
 231
 232	cmd = container_of(header, struct vmw_sid_cmd, header);
 233	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
 234	if (unlikely(ret != 0))
 235		return ret;
 236	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
 237}
 238
 239static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
 240					 struct vmw_sw_context *sw_context,
 241					 SVGA3dCmdHeader *header)
 242{
 243	struct vmw_sid_cmd {
 244		SVGA3dCmdHeader header;
 245		SVGA3dCmdBlitSurfaceToScreen body;
 246	} *cmd;
 247
 248	cmd = container_of(header, struct vmw_sid_cmd, header);
 249
 250	if (unlikely(!sw_context->kernel)) {
 251		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
 252		return -EPERM;
 253	}
 254
 255	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
 256}
 257
 258static int vmw_cmd_present_check(struct vmw_private *dev_priv,
 259				 struct vmw_sw_context *sw_context,
 260				 SVGA3dCmdHeader *header)
 261{
 262	struct vmw_sid_cmd {
 263		SVGA3dCmdHeader header;
 264		SVGA3dCmdPresent body;
 265	} *cmd;
 266
 267
 268	cmd = container_of(header, struct vmw_sid_cmd, header);
 269
 270	if (unlikely(!sw_context->kernel)) {
 271		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
 272		return -EPERM;
 273	}
 274
 275	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
 276}
 277
 278/**
 279 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 280 *
 281 * @dev_priv: The device private structure.
 282 * @cid: The hardware context for the next query.
 283 * @new_query_bo: The new buffer holding query results.
 284 * @sw_context: The software context used for this command submission.
 285 *
 286 * This function checks whether @new_query_bo is suitable for holding
 287 * query results, and if another buffer currently is pinned for query
 288 * results. If so, the function prepares the state of @sw_context for
 289 * switching pinned buffers after successful submission of the current
 290 * command batch. It also checks whether we're using a new query context.
 291 * In that case, it makes sure we emit a query barrier for the old
 292 * context before the current query buffer is fenced.
 293 */
 294static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 295				       uint32_t cid,
 296				       struct ttm_buffer_object *new_query_bo,
 297				       struct vmw_sw_context *sw_context)
 298{
 299	int ret;
 300	bool add_cid = false;
 301	uint32_t cid_to_add;
 302
 303	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 304
 305		if (unlikely(new_query_bo->num_pages > 4)) {
 306			DRM_ERROR("Query buffer too large.\n");
 307			return -EINVAL;
 308		}
 309
 310		if (unlikely(sw_context->cur_query_bo != NULL)) {
 311			BUG_ON(!sw_context->query_cid_valid);
 312			add_cid = true;
 313			cid_to_add = sw_context->cur_query_cid;
 314			ret = vmw_bo_to_validate_list(sw_context,
 315						      sw_context->cur_query_bo,
 316						      DRM_VMW_FENCE_FLAG_EXEC,
 317						      NULL);
 318			if (unlikely(ret != 0))
 319				return ret;
 320		}
 321		sw_context->cur_query_bo = new_query_bo;
 322
 323		ret = vmw_bo_to_validate_list(sw_context,
 324					      dev_priv->dummy_query_bo,
 325					      DRM_VMW_FENCE_FLAG_EXEC,
 326					      NULL);
 327		if (unlikely(ret != 0))
 328			return ret;
 329
 330	}
 331
 332	if (unlikely(cid != sw_context->cur_query_cid &&
 333		     sw_context->query_cid_valid)) {
 334		add_cid = true;
 335		cid_to_add = sw_context->cur_query_cid;
 336	}
 337
 338	sw_context->cur_query_cid = cid;
 339	sw_context->query_cid_valid = true;
 340
 341	if (add_cid) {
 342		struct vmw_resource *ctx = sw_context->cur_ctx;
 343
 344		if (list_empty(&ctx->query_head))
 345			list_add_tail(&ctx->query_head,
 346				      &sw_context->query_list);
 347		ret = vmw_bo_to_validate_list(sw_context,
 348					      dev_priv->dummy_query_bo,
 349					      DRM_VMW_FENCE_FLAG_EXEC,
 350					      NULL);
 351		if (unlikely(ret != 0))
 352			return ret;
 353	}
 354	return 0;
 355}
 356
 357
 358/**
 359 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 360 *
 361 * @dev_priv: The device private structure.
 362 * @sw_context: The software context used for this command submission batch.
 363 *
 364 * This function will check if we're switching query buffers, and will then,
 365 * if no other query waits are issued this command submission batch,
 366 * issue a dummy occlusion query wait used as a query barrier. When the fence
 367 * object following that query wait has signaled, we are sure that all
 368 * preceding queries have finished, and the old query buffer can be unpinned.
 369 * However, since both the new query buffer and the old one are fenced with
 370 * that fence, we can do an asynchronous unpin now, and be sure that the
 371 * old query buffer won't be moved until the fence has signaled.
 372 *
 373 * As mentioned above, both the new - and old query buffers need to be fenced
 374 * using a sequence emitted *after* calling this function.
 375 */
 376static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 377				     struct vmw_sw_context *sw_context)
 378{
 379
 380	struct vmw_resource *ctx, *next_ctx;
 381	int ret;
 382
 383	/*
 384	 * The validate list should still hold references to all
 385	 * contexts here.
 386	 */
 387
 388	list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
 389				 query_head) {
 390		list_del_init(&ctx->query_head);
 391
 392		BUG_ON(list_empty(&ctx->validate_head));
 393
 394		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
 395
 396		if (unlikely(ret != 0))
 397			DRM_ERROR("Out of fifo space for dummy query.\n");
 398	}
 399
 400	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
 401		if (dev_priv->pinned_bo) {
 402			vmw_bo_pin(dev_priv->pinned_bo, false);
 403			ttm_bo_unref(&dev_priv->pinned_bo);
 404		}
 405
 406		vmw_bo_pin(sw_context->cur_query_bo, true);
 407
 408		/*
 409		 * We pin also the dummy_query_bo buffer so that we
 410		 * don't need to validate it when emitting
 411		 * dummy queries in context destroy paths.
 412		 */
 413
 414		vmw_bo_pin(dev_priv->dummy_query_bo, true);
 415		dev_priv->dummy_query_bo_pinned = true;
 416
 417		dev_priv->query_cid = sw_context->cur_query_cid;
 418		dev_priv->pinned_bo =
 419			ttm_bo_reference(sw_context->cur_query_bo);
 420	}
 421}
 422
 423/**
 424 * vmw_query_switch_backoff - clear query barrier list
 425 * @sw_context: The sw context used for this submission batch.
 426 *
 427 * This function is used as part of an error path, where a previously
 428 * set up list of query barriers needs to be cleared.
 429 *
 430 */
 431static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
 432{
 433	struct list_head *list, *next;
 434
 435	list_for_each_safe(list, next, &sw_context->query_list) {
 436		list_del_init(list);
 437	}
 438}
 439
 440static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 441				   struct vmw_sw_context *sw_context,
 442				   SVGAGuestPtr *ptr,
 443				   struct vmw_dma_buffer **vmw_bo_p)
 444{
 445	struct vmw_dma_buffer *vmw_bo = NULL;
 446	struct ttm_buffer_object *bo;
 447	uint32_t handle = ptr->gmrId;
 448	struct vmw_relocation *reloc;
 449	int ret;
 450
 451	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
 452	if (unlikely(ret != 0)) {
 453		DRM_ERROR("Could not find or use GMR region.\n");
 454		return -EINVAL;
 455	}
 456	bo = &vmw_bo->base;
 457
 458	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 459		DRM_ERROR("Max number relocations per submission"
 460			  " exceeded\n");
 461		ret = -EINVAL;
 462		goto out_no_reloc;
 463	}
 464
 465	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 466	reloc->location = ptr;
 467
 468	ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC,
 469				      &reloc->index);
 470	if (unlikely(ret != 0))
 471		goto out_no_reloc;
 472
 473	*vmw_bo_p = vmw_bo;
 474	return 0;
 475
 476out_no_reloc:
 477	vmw_dmabuf_unreference(&vmw_bo);
 478	*vmw_bo_p = NULL;
 479	return ret;
 480}
 481
 482static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 483			     struct vmw_sw_context *sw_context,
 484			     SVGA3dCmdHeader *header)
 485{
 486	struct vmw_dma_buffer *vmw_bo;
 487	struct vmw_query_cmd {
 488		SVGA3dCmdHeader header;
 489		SVGA3dCmdEndQuery q;
 490	} *cmd;
 491	int ret;
 492
 493	cmd = container_of(header, struct vmw_query_cmd, header);
 494	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
 495	if (unlikely(ret != 0))
 496		return ret;
 497
 498	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
 499				      &cmd->q.guestResult,
 500				      &vmw_bo);
 501	if (unlikely(ret != 0))
 502		return ret;
 503
 504	ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
 505					  &vmw_bo->base, sw_context);
 506
 507	vmw_dmabuf_unreference(&vmw_bo);
 508	return ret;
 509}
 510
 511static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 512			      struct vmw_sw_context *sw_context,
 513			      SVGA3dCmdHeader *header)
 514{
 515	struct vmw_dma_buffer *vmw_bo;
 516	struct vmw_query_cmd {
 517		SVGA3dCmdHeader header;
 518		SVGA3dCmdWaitForQuery q;
 519	} *cmd;
 520	int ret;
 521	struct vmw_resource *ctx;
 522
 523	cmd = container_of(header, struct vmw_query_cmd, header);
 524	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
 525	if (unlikely(ret != 0))
 526		return ret;
 527
 528	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
 529				      &cmd->q.guestResult,
 530				      &vmw_bo);
 531	if (unlikely(ret != 0))
 532		return ret;
 533
 534	vmw_dmabuf_unreference(&vmw_bo);
 535
 536	/*
 537	 * This wait will act as a barrier for previous waits for this
 538	 * context.
 539	 */
 540
 541	ctx = sw_context->cur_ctx;
 542	if (!list_empty(&ctx->query_head))
 543		list_del_init(&ctx->query_head);
 544
 545	return 0;
 546}
 547
 548static int vmw_cmd_dma(struct vmw_private *dev_priv,
 549		       struct vmw_sw_context *sw_context,
 550		       SVGA3dCmdHeader *header)
 551{
 552	struct vmw_dma_buffer *vmw_bo = NULL;
 553	struct ttm_buffer_object *bo;
 554	struct vmw_surface *srf = NULL;
 555	struct vmw_dma_cmd {
 556		SVGA3dCmdHeader header;
 557		SVGA3dCmdSurfaceDMA dma;
 558	} *cmd;
 559	int ret;
 560	struct vmw_resource *res;
 561
 562	cmd = container_of(header, struct vmw_dma_cmd, header);
 563	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
 564				      &cmd->dma.guest.ptr,
 565				      &vmw_bo);
 566	if (unlikely(ret != 0))
 567		return ret;
 568
 569	bo = &vmw_bo->base;
 570	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
 571					     cmd->dma.host.sid, &srf);
 572	if (ret) {
 573		DRM_ERROR("could not find surface\n");
 574		goto out_no_reloc;
 575	}
 576
 577	ret = vmw_surface_validate(dev_priv, srf);
 578	if (unlikely(ret != 0)) {
 579		if (ret != -ERESTARTSYS)
 580			DRM_ERROR("Culd not validate surface.\n");
 581		goto out_no_validate;
 582	}
 583
 584	/*
 585	 * Patch command stream with device SID.
 586	 */
 587	cmd->dma.host.sid = srf->res.id;
 588	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
 589
 590	vmw_dmabuf_unreference(&vmw_bo);
 591
 592	res = &srf->res;
 593	vmw_resource_to_validate_list(sw_context, &res);
 594
 595	return 0;
 596
 597out_no_validate:
 598	vmw_surface_unreference(&srf);
 599out_no_reloc:
 600	vmw_dmabuf_unreference(&vmw_bo);
 601	return ret;
 602}
 603
 604static int vmw_cmd_draw(struct vmw_private *dev_priv,
 605			struct vmw_sw_context *sw_context,
 606			SVGA3dCmdHeader *header)
 607{
 608	struct vmw_draw_cmd {
 609		SVGA3dCmdHeader header;
 610		SVGA3dCmdDrawPrimitives body;
 611	} *cmd;
 612	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
 613		(unsigned long)header + sizeof(*cmd));
 614	SVGA3dPrimitiveRange *range;
 615	uint32_t i;
 616	uint32_t maxnum;
 617	int ret;
 618
 619	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
 620	if (unlikely(ret != 0))
 621		return ret;
 622
 623	cmd = container_of(header, struct vmw_draw_cmd, header);
 624	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
 625
 626	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
 627		DRM_ERROR("Illegal number of vertex declarations.\n");
 628		return -EINVAL;
 629	}
 630
 631	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
 632		ret = vmw_cmd_sid_check(dev_priv, sw_context,
 633					&decl->array.surfaceId);
 634		if (unlikely(ret != 0))
 635			return ret;
 636	}
 637
 638	maxnum = (header->size - sizeof(cmd->body) -
 639		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
 640	if (unlikely(cmd->body.numRanges > maxnum)) {
 641		DRM_ERROR("Illegal number of index ranges.\n");
 642		return -EINVAL;
 643	}
 644
 645	range = (SVGA3dPrimitiveRange *) decl;
 646	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
 647		ret = vmw_cmd_sid_check(dev_priv, sw_context,
 648					&range->indexArray.surfaceId);
 649		if (unlikely(ret != 0))
 650			return ret;
 651	}
 652	return 0;
 653}
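/*
 * Worked example for the bounds checks in vmw_cmd_draw() above
 * (hypothetical sizes): with header->size = 120, sizeof(body) = 16,
 * sizeof(SVGA3dVertexDecl) = 20 and sizeof(SVGA3dPrimitiveRange) = 24,
 * at most (120 - 16) / 20 = 5 declarations fit.  If numVertexDecls is 2,
 * the ranges that follow may occupy 120 - 16 - 2 * 20 = 64 bytes, i.e.
 * at most 64 / 24 = 2 ranges.  Larger counts would let the loops read
 * past the end of the submitted command.
 */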
 654
 655
 656static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 657			     struct vmw_sw_context *sw_context,
 658			     SVGA3dCmdHeader *header)
 659{
 660	struct vmw_tex_state_cmd {
 661		SVGA3dCmdHeader header;
 662		SVGA3dCmdSetTextureState state;
 663	};
 664
 665	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
 666	  ((unsigned long) header + header->size + sizeof(header));
 667	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
 668		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
 669	int ret;
 670
 671	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
 672	if (unlikely(ret != 0))
 673		return ret;
 674
 675	for (; cur_state < last_state; ++cur_state) {
 676		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
 677			continue;
 678
 679		ret = vmw_cmd_sid_check(dev_priv, sw_context,
 680					&cur_state->value);
 681		if (unlikely(ret != 0))
 682			return ret;
 683	}
 684
 685	return 0;
 686}
 687
 688static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
 689				      struct vmw_sw_context *sw_context,
 690				      void *buf)
 691{
 692	struct vmw_dma_buffer *vmw_bo;
 693	int ret;
 694
 695	struct {
 696		uint32_t header;
 697		SVGAFifoCmdDefineGMRFB body;
 698	} *cmd = buf;
 699
 700	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
 701				      &cmd->body.ptr,
 702				      &vmw_bo);
 703	if (unlikely(ret != 0))
 704		return ret;
 705
 706	vmw_dmabuf_unreference(&vmw_bo);
 707
 708	return ret;
 709}
 710
 711static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
 712				struct vmw_sw_context *sw_context,
 713				void *buf, uint32_t *size)
 714{
 715	uint32_t size_remaining = *size;
 716	uint32_t cmd_id;
 717
 718	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
 719	switch (cmd_id) {
 720	case SVGA_CMD_UPDATE:
 721		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
 722		break;
 723	case SVGA_CMD_DEFINE_GMRFB:
 724		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
 725		break;
 726	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
 727		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
 728		break;
 729	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
 730		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
 731		break;
 732	default:
 733		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
 734		return -EINVAL;
 735	}
 736
 737	if (*size > size_remaining) {
 738		DRM_ERROR("Invalid SVGA command (size mismatch):"
 739			  " %u.\n", cmd_id);
 740		return -EINVAL;
 741	}
 742
 743	if (unlikely(!sw_context->kernel)) {
 744		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
 745		return -EPERM;
 746	}
 747
 748	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
 749		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
 750
 751	return 0;
 752}
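/*
 * Editor's sketch (hypothetical user-space encoding, not from the file):
 * 2D SVGA FIFO commands have no SVGA3dCmdHeader; they are a single
 * 32-bit command id immediately followed by a fixed-size body, which is
 * why every size above is sizeof(uint32_t) + sizeof(body). For example:
 *
 *	uint32_t *cmd = buf;
 *	SVGAFifoCmdUpdate *body = (SVGAFifoCmdUpdate *)&cmd[1];
 *
 *	cmd[0] = SVGA_CMD_UPDATE;
 *	body->x = 0;
 *	body->y = 0;
 *	body->width = 640;
 *	body->height = 480;
 */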
 753
 754typedef int (*vmw_cmd_func) (struct vmw_private *,
 755			     struct vmw_sw_context *,
 756			     SVGA3dCmdHeader *);
 757
 758#define VMW_CMD_DEF(cmd, func) \
 759	[cmd - SVGA_3D_CMD_BASE] = func
 760
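/*
 * Editor's note: VMW_CMD_DEF relies on C99 designated array initializers,
 * so the table below is indexed by (command id - SVGA_3D_CMD_BASE). For
 * example,
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma)
 *
 * expands to
 *
 *	[SVGA_3D_CMD_SURFACE_DMA - SVGA_3D_CMD_BASE] = &vmw_cmd_dma
 *
 * and any slot not listed is implicitly initialized to NULL.
 */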
 761static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
 762	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
 763	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
 764	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
 765	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
 766	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
 767	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
 768	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
 769	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
 770	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
 771	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
 772	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
 773		    &vmw_cmd_set_render_target_check),
 774	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
 775	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
 776	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
 777	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
 778	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
 779	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
 780	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
 781	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
 782	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
 783	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
 784	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
 785	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
 786	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
 787	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
 788	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
 789	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
 790	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
 791	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
 
 792	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
 793		    &vmw_cmd_blt_surf_screen_check)
 794};
 795
 796static int vmw_cmd_check(struct vmw_private *dev_priv,
 797			 struct vmw_sw_context *sw_context,
 798			 void *buf, uint32_t *size)
 799{
 800	uint32_t cmd_id;
 801	uint32_t size_remaining = *size;
 802	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
 803	int ret;
 804
 805	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
 806	/* Handle any non-3D commands. */
 807	if (unlikely(cmd_id < SVGA_CMD_MAX))
 808		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
 809
 810
 811	cmd_id = le32_to_cpu(header->id);
 812	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
 813
 814	cmd_id -= SVGA_3D_CMD_BASE;
 815	if (unlikely(*size > size_remaining))
 816		goto out_err;
 817
 818	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
 819		goto out_err;
 820
 821	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
 822	if (unlikely(ret != 0))
 823		goto out_err;
 824
 825	return 0;
 826out_err:
 827	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
 828		  cmd_id + SVGA_3D_CMD_BASE);
 829	return -EINVAL;
 830}
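/*
 * Editor's note: the first dword alone is enough to tell the two command
 * families apart: 2D FIFO command ids are small (below SVGA_CMD_MAX),
 * while 3D command ids start at SVGA_3D_CMD_BASE (1040), so anything
 * below SVGA_CMD_MAX is routed to vmw_cmd_check_not_3d() and everything
 * else is interpreted as an SVGA3dCmdHeader.
 */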
 831
 832static int vmw_cmd_check_all(struct vmw_private *dev_priv,
 833			     struct vmw_sw_context *sw_context,
 834			     void *buf,
 835			     uint32_t size)
 836{
 837	int32_t cur_size = size;
 838	int ret;
 839
 840	while (cur_size > 0) {
 841		size = cur_size;
 842		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
 843		if (unlikely(ret != 0))
 844			return ret;
 845		buf = (void *)((unsigned long) buf + size);
 846		cur_size -= size;
 847	}
 848
 849	if (unlikely(cur_size != 0)) {
 850		DRM_ERROR("Command verifier out of sync.\n");
 851		return -EINVAL;
 852	}
 853
 854	return 0;
 855}
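/*
 * Editor's sketch: vmw_cmd_check_all() is a streaming parser. Each
 * vmw_cmd_check() call rewrites *size to the full length of the command
 * it just verified, and the loop then advances
 *
 *	buf      += size;
 *	cur_size -= size;
 *
 * (the code spells the pointer arithmetic via unsigned long). Because
 * vmw_cmd_check() rejects any command whose declared size exceeds the
 * bytes remaining, cur_size cannot go negative; the trailing check is
 * purely defensive.
 */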
 856
 857static void vmw_free_relocations(struct vmw_sw_context *sw_context)
 858{
 859	sw_context->cur_reloc = 0;
 860}
 861
 862static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
 863{
 864	uint32_t i;
 865	struct vmw_relocation *reloc;
 866	struct ttm_validate_buffer *validate;
 867	struct ttm_buffer_object *bo;
 868
 869	for (i = 0; i < sw_context->cur_reloc; ++i) {
 870		reloc = &sw_context->relocs[i];
 871		validate = &sw_context->val_bufs[reloc->index];
 872		bo = validate->bo;
 873		if (bo->mem.mem_type == TTM_PL_VRAM) {
 874			reloc->location->offset += bo->offset;
 875			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
 876		} else
 877			reloc->location->gmrId = bo->mem.start;
 878	}
 879	vmw_free_relocations(sw_context);
 880}
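/*
 * Editor's note: each relocation records the address of an SVGAGuestPtr
 * inside the command stream whose final value depends on where its
 * buffer object ended up after validation:
 *
 *	VRAM:	loc->gmrId   = SVGA_GMR_FRAMEBUFFER;
 *		loc->offset += bo->offset;	- byte offset into VRAM
 *	GMR:	loc->gmrId   = bo->mem.start;	- the GMR id; the offset
 *						  supplied by user-space
 *						  is kept as-is
 */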
 881
 882static void vmw_clear_validations(struct vmw_sw_context *sw_context)
 883{
 884	struct ttm_validate_buffer *entry, *next;
 885	struct vmw_resource *res, *res_next;
 886
 887	/*
 888	 * Drop references to DMA buffers held during command submission.
 889	 */
 890	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
 891				 head) {
 892		list_del(&entry->head);
 893		vmw_dmabuf_validate_clear(entry->bo);
 894		ttm_bo_unref(&entry->bo);
 895		sw_context->cur_val_buf--;
 896	}
 897	BUG_ON(sw_context->cur_val_buf != 0);
 898
 899	/*
 900	 * Drop references to resources held during command submission.
 901	 */
 902	vmw_resource_unreserve(&sw_context->resource_list);
 903	list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
 904				 validate_head) {
 905		list_del_init(&res->validate_head);
 906		vmw_resource_unreference(&res);
 907	}
 908}
 909
 910static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 911				      struct ttm_buffer_object *bo)
 912{
 913	int ret;
 914
 915
 916	/*
 917	 * Don't validate pinned buffers.
 918	 */
 919
 920	if (bo == dev_priv->pinned_bo ||
 921	    (bo == dev_priv->dummy_query_bo &&
 922	     dev_priv->dummy_query_bo_pinned))
 923		return 0;
 924
 925	/*
 926	 * Put the BO in VRAM if there is space, otherwise set it up as a GMR.
 927	 * If there is no space in VRAM and GMR ids are all used up,
 928	 * start evicting GMRs to make room. If the DMA buffer can't be
 929	 * used as a GMR, this will return -ENOMEM.
 930	 */
 931
 932	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
 933	if (likely(ret == 0 || ret == -ERESTARTSYS))
 934		return ret;
 935
 936	/*
 937	 * If that failed, try VRAM again, this time evicting
 938	 * previous contents.
 939	 */
 940
 941	DRM_INFO("Falling through to VRAM.\n");
 942	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
 943	return ret;
 944}
 945
 946
 947static int vmw_validate_buffers(struct vmw_private *dev_priv,
 948				struct vmw_sw_context *sw_context)
 949{
 950	struct ttm_validate_buffer *entry;
 951	int ret;
 952
 953	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
 954		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
 955		if (unlikely(ret != 0))
 956			return ret;
 957	}
 958	return 0;
 959}
 960
 961static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
 962				 uint32_t size)
 963{
 964	if (likely(sw_context->cmd_bounce_size >= size))
 965		return 0;
 966
 967	if (sw_context->cmd_bounce_size == 0)
 968		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
 969
 970	while (sw_context->cmd_bounce_size < size) {
 971		sw_context->cmd_bounce_size =
 972			PAGE_ALIGN(sw_context->cmd_bounce_size +
 973				   (sw_context->cmd_bounce_size >> 1));
 974	}
 975
 976	if (sw_context->cmd_bounce != NULL)
 977		vfree(sw_context->cmd_bounce);
 978
 979	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
 980
 981	if (sw_context->cmd_bounce == NULL) {
 982		DRM_ERROR("Failed to allocate command bounce buffer.\n");
 983		sw_context->cmd_bounce_size = 0;
 984		return -ENOMEM;
 985	}
 986
 987	return 0;
 988}
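/*
 * Editor's note: the bounce buffer grows geometrically, each step being
 * 1.5x the previous size rounded up to a whole page. Assuming 4 KiB
 * pages and a 32 KiB VMWGFX_CMD_BOUNCE_INIT_SIZE, the progression is
 *
 *	32768 -> 49152 -> 73728 -> 110592 -> ...
 *
 * so slowly growing submissions cause only O(log n) reallocations. The
 * old contents are not preserved; the caller re-copies the complete
 * command stream into the buffer after every resize.
 */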
 989
 990/**
 991 * vmw_execbuf_fence_commands - create and submit a command stream fence
 992 *
 993 * Creates a fence object and submits a command stream marker.
 994 * If this fails for some reason, we sync the fifo and return NULL.
 995 * It is then safe to fence buffers with a NULL pointer.
 996 *
 997 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
 998 * user-space fence handle is created; otherwise no handle is created.
 999 */
1000
1001int vmw_execbuf_fence_commands(struct drm_file *file_priv,
1002			       struct vmw_private *dev_priv,
1003			       struct vmw_fence_obj **p_fence,
1004			       uint32_t *p_handle)
1005{
1006	uint32_t sequence;
1007	int ret;
1008	bool synced = false;
1009
1010	/* p_handle implies file_priv. */
1011	BUG_ON(p_handle != NULL && file_priv == NULL);
1012
1013	ret = vmw_fifo_send_fence(dev_priv, &sequence);
1014	if (unlikely(ret != 0)) {
1015		DRM_ERROR("Fence submission error. Syncing.\n");
1016		synced = true;
1017	}
1018
1019	if (p_handle != NULL)
1020		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
1021					    sequence,
1022					    DRM_VMW_FENCE_FLAG_EXEC,
1023					    p_fence, p_handle);
1024	else
1025		ret = vmw_fence_create(dev_priv->fman, sequence,
1026				       DRM_VMW_FENCE_FLAG_EXEC,
1027				       p_fence);
1028
1029	if (unlikely(ret != 0 && !synced)) {
1030		(void) vmw_fallback_wait(dev_priv, false, false,
1031					 sequence, false,
1032					 VMW_FENCE_WAIT_TIMEOUT);
1033		*p_fence = NULL;
1034	}
1035
1036	return 0;
1037}
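/*
 * Editor's sketch: a kernel-internal caller that needs no user-space
 * handle passes NULL for @p_handle (and may pass NULL for @file_priv),
 * as vmw_execbuf_release_pinned_bo() below does:
 *
 *	struct vmw_fence_obj *fence;
 *
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *	...
 *	if (fence != NULL)
 *		vmw_fence_obj_unreference(&fence);
 *
 * A NULL *p_fence after the call means the fifo was synced instead, and
 * fencing buffers with a NULL pointer is then safe by design.
 */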
1038
1039/**
1040 * vmw_execbuf_copy_fence_user - copy fence object information to
1041 * user-space.
1042 *
1043 * @dev_priv: Pointer to a vmw_private struct.
1044 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
1045 * @ret: Return value from fence object creation.
1046 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
1047 * which the information should be copied.
1048 * @fence: Pointer to the fence object.
1049 * @fence_handle: User-space fence handle.
1050 *
1051 * This function copies fence information to user-space. If copying fails,
1052 * the user-space struct drm_vmw_fence_rep::error member is left
1053 * untouched; if user-space has preloaded it with -EFAULT, the copy
1054 * failure can then be detected there.
1055 * Also, if copying fails, user-space will be unable to signal the fence
1056 * object, so we wait for it immediately and then unreference the
1057 * user-space reference.
1058 */
1059void
1060vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
1061			    struct vmw_fpriv *vmw_fp,
1062			    int ret,
1063			    struct drm_vmw_fence_rep __user *user_fence_rep,
1064			    struct vmw_fence_obj *fence,
1065			    uint32_t fence_handle)
1066{
1067	struct drm_vmw_fence_rep fence_rep;
1068
1069	if (user_fence_rep == NULL)
1070		return;
1071
1072	memset(&fence_rep, 0, sizeof(fence_rep));
1073
1074	fence_rep.error = ret;
1075	if (ret == 0) {
1076		BUG_ON(fence == NULL);
1077
1078		fence_rep.handle = fence_handle;
1079		fence_rep.seqno = fence->seqno;
1080		vmw_update_seqno(dev_priv, &dev_priv->fifo);
1081		fence_rep.passed_seqno = dev_priv->last_read_seqno;
1082	}
1083
1084	/*
1085	 * copy_to_user errors will be detected by user space not
1086	 * seeing fence_rep::error filled in. Typically
1087	 * user-space would have pre-set that member to -EFAULT.
1088	 */
1089	ret = copy_to_user(user_fence_rep, &fence_rep,
1090			   sizeof(fence_rep));
1091
1092	/*
1093	 * User-space lost the fence object. We need to sync
1094	 * and unreference the handle.
1095	 */
1096	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
1097		ttm_ref_object_base_unref(vmw_fp->tfile,
1098					  fence_handle, TTM_REF_USAGE);
1099		DRM_ERROR("Fence copy error. Syncing.\n");
1100		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
1101					  false, false,
1102					  VMW_FENCE_WAIT_TIMEOUT);
1103	}
1104}
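/*
 * Editor's sketch of the user-space side of the convention above
 * (hypothetical snippet, not from any particular user-space driver):
 * the caller pre-loads the error member with -EFAULT so that a failed
 * copy_to_user() in the kernel remains observable:
 *
 *	struct drm_vmw_fence_rep rep;
 *	struct drm_vmw_execbuf_arg arg;
 *
 *	rep.error = -EFAULT;
 *	arg.fence_rep = (unsigned long)&rep;
 *	... fill in the remaining arg fields ...
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (rep.error != 0) {
 *		... no usable fence handle was returned ...
 *	}
 */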
1105
1106int vmw_execbuf_process(struct drm_file *file_priv,
1107			struct vmw_private *dev_priv,
1108			void __user *user_commands,
1109			void *kernel_commands,
1110			uint32_t command_size,
1111			uint64_t throttle_us,
1112			struct drm_vmw_fence_rep __user *user_fence_rep,
1113			struct vmw_fence_obj **out_fence)
1114{
1115	struct vmw_sw_context *sw_context = &dev_priv->ctx;
1116	struct vmw_fence_obj *fence = NULL;
1117	uint32_t handle;
1118	void *cmd;
1119	int ret;
1120
1121	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
1122	if (unlikely(ret != 0))
1123		return -ERESTARTSYS;
1124
1125	if (kernel_commands == NULL) {
1126		sw_context->kernel = false;
1127
1128		ret = vmw_resize_cmd_bounce(sw_context, command_size);
1129		if (unlikely(ret != 0))
1130			goto out_unlock;
1131
1132
1133		ret = copy_from_user(sw_context->cmd_bounce,
1134				     user_commands, command_size);
1135
1136		if (unlikely(ret != 0)) {
1137			ret = -EFAULT;
1138			DRM_ERROR("Failed copying commands.\n");
1139			goto out_unlock;
1140		}
1141		kernel_commands = sw_context->cmd_bounce;
1142	} else
1143		sw_context->kernel = true;
1144
1145	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
1146	sw_context->cid_valid = false;
1147	sw_context->sid_valid = false;
1148	sw_context->cur_reloc = 0;
1149	sw_context->cur_val_buf = 0;
1150	sw_context->fence_flags = 0;
1151	INIT_LIST_HEAD(&sw_context->query_list);
1152	INIT_LIST_HEAD(&sw_context->resource_list);
1153	sw_context->cur_query_bo = dev_priv->pinned_bo;
1154	sw_context->cur_query_cid = dev_priv->query_cid;
1155	sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);
1156
1157	INIT_LIST_HEAD(&sw_context->validate_nodes);
1158
1159	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
1160				command_size);
1161	if (unlikely(ret != 0))
1162		goto out_err;
1163
1164	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
1165	if (unlikely(ret != 0))
1166		goto out_err;
1167
1168	ret = vmw_validate_buffers(dev_priv, sw_context);
1169	if (unlikely(ret != 0))
1170		goto out_err;
1171
1172	vmw_apply_relocations(sw_context);
1173
1174	if (throttle_us) {
1175		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
1176				   throttle_us);
1177
1178		if (unlikely(ret != 0))
1179			goto out_throttle;
1180	}
1181
1182	cmd = vmw_fifo_reserve(dev_priv, command_size);
1183	if (unlikely(cmd == NULL)) {
1184		DRM_ERROR("Failed reserving fifo space for commands.\n");
1185		ret = -ENOMEM;
1186		goto out_throttle;
1187	}
1188
1189	memcpy(cmd, kernel_commands, command_size);
1190	vmw_fifo_commit(dev_priv, command_size);
1191
1192	vmw_query_bo_switch_commit(dev_priv, sw_context);
1193	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
1194					 &fence,
1195					 (user_fence_rep) ? &handle : NULL);
1196	/*
1197	 * This error is harmless, because if fence submission fails,
1198	 * vmw_fifo_send_fence will sync. The error will be propagated to
1199	 * user-space in @user_fence_rep.
1200	 */
1201
1202	if (ret != 0)
1203		DRM_ERROR("Fence submission error. Syncing.\n");
1204
1205	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
1206				    (void *) fence);
1207
1208	vmw_clear_validations(sw_context);
1209	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
1210				    user_fence_rep, fence, handle);
1211
1212	/* Don't unreference when handing fence out */
1213	if (unlikely(out_fence != NULL)) {
1214		*out_fence = fence;
1215		fence = NULL;
1216	} else if (likely(fence != NULL)) {
1217		vmw_fence_obj_unreference(&fence);
1218	}
1219
1220	mutex_unlock(&dev_priv->cmdbuf_mutex);
1221	return 0;
1222
1223out_err:
1224	vmw_free_relocations(sw_context);
1225out_throttle:
1226	vmw_query_switch_backoff(sw_context);
1227	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
1228	vmw_clear_validations(sw_context);
1229out_unlock:
1230	mutex_unlock(&dev_priv->cmdbuf_mutex);
1231	return ret;
1232}
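/*
 * Editor's summary of the submission path above:
 *
 * 1. Copy the command stream into the kernel bounce buffer (user
 *    submissions only; in-kernel callers pass kernel_commands directly).
 * 2. vmw_cmd_check_all() verifies every command and builds the
 *    relocation and validation lists as a side effect.
 * 3. All buffer objects are reserved and validated, after which
 *    vmw_apply_relocations() patches the guest pointers.
 * 4. After optional throttling, the commands are copied into the fifo.
 * 5. The submission is fenced, fence information is copied to
 *    user-space, and all validation references are dropped.
 *
 * Any failure unwinds in reverse order through the out_* labels.
 */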
1233
1234/**
1235 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
1236 *
1237 * @dev_priv: The device private structure.
1238 *
1239 * This function is called to idle the fifo and unpin the query buffer
1240 * if the normal way to do this hits an error, which should typically be
1241 * extremely rare.
1242 */
1243static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
1244{
1245	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
1246
1247	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
1248	vmw_bo_pin(dev_priv->pinned_bo, false);
1249	vmw_bo_pin(dev_priv->dummy_query_bo, false);
1250	dev_priv->dummy_query_bo_pinned = false;
1251}
1252
1253
1254/**
1255 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
1256 * query bo.
1257 *
1258 * @dev_priv: The device private structure.
1259 * @only_on_cid_match: Only flush and unpin if the current active query cid
1260 * matches @cid.
1261 * @cid: Optional context id to match.
1262 *
1263 * This function should be used to unpin the pinned query bo, or
1264 * as a query barrier when we need to make sure that all queries have
1265 * finished before the next fifo command. (For example on hardware
1266 * context destructions where the hardware may otherwise leak unfinished
1267 * queries).
1268 *
1269 * This function does not return any failure codes, but makes attempts
1270 * to unpin safely in case of errors.
1271 *
1272 * The function will synchronize on the previous query barrier, and will
1273 * thus not finish until that barrier has executed.
1274 */
1275void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
1276				   bool only_on_cid_match, uint32_t cid)
1277{
1278	int ret = 0;
1279	struct list_head validate_list;
1280	struct ttm_validate_buffer pinned_val, query_val;
1281	struct vmw_fence_obj *fence;
1282
1283	mutex_lock(&dev_priv->cmdbuf_mutex);
1284
1285	if (dev_priv->pinned_bo == NULL)
1286		goto out_unlock;
1287
1288	if (only_on_cid_match && cid != dev_priv->query_cid)
1289		goto out_unlock;
1290
1291	INIT_LIST_HEAD(&validate_list);
1292
1293	pinned_val.new_sync_obj_arg = (void *)(unsigned long)
1294		DRM_VMW_FENCE_FLAG_EXEC;
1295	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
1296	list_add_tail(&pinned_val.head, &validate_list);
1297
1298	query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
1299	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
1300	list_add_tail(&query_val.head, &validate_list);
1301
1302	do {
1303		ret = ttm_eu_reserve_buffers(&validate_list);
1304	} while (ret == -ERESTARTSYS);
1305
1306	if (unlikely(ret != 0)) {
1307		vmw_execbuf_unpin_panic(dev_priv);
1308		goto out_no_reserve;
1309	}
1310
1311	ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
1312	if (unlikely(ret != 0)) {
1313		vmw_execbuf_unpin_panic(dev_priv);
1314		goto out_no_emit;
1315	}
1316
1317	vmw_bo_pin(dev_priv->pinned_bo, false);
1318	vmw_bo_pin(dev_priv->dummy_query_bo, false);
1319	dev_priv->dummy_query_bo_pinned = false;
1320
1321	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1322	ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
1323
1324	ttm_bo_unref(&query_val.bo);
1325	ttm_bo_unref(&pinned_val.bo);
1326	ttm_bo_unref(&dev_priv->pinned_bo);
1327
1328out_unlock:
1329	mutex_unlock(&dev_priv->cmdbuf_mutex);
1330	return;
1331
1332out_no_emit:
1333	ttm_eu_backoff_reservation(&validate_list);
1334out_no_reserve:
1335	ttm_bo_unref(&query_val.bo);
1336	ttm_bo_unref(&pinned_val.bo);
1337	ttm_bo_unref(&dev_priv->pinned_bo);
1338	mutex_unlock(&dev_priv->cmdbuf_mutex);
1339}
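/*
 * Editor's sketch: a typical caller is hardware context destruction,
 * which must flush pending queries for that context first (hypothetical
 * call site):
 *
 *	vmw_execbuf_release_pinned_bo(dev_priv, true, ctx_id);
 *
 * whereas device-wide teardown passes only_on_cid_match = false, in
 * which case @cid is ignored.
 */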
1340
1341
1342int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
1343		      struct drm_file *file_priv)
1344{
1345	struct vmw_private *dev_priv = vmw_priv(dev);
1346	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
1347	struct vmw_master *vmaster = vmw_master(file_priv->master);
1348	int ret;
1349
1350	/*
1351	 * This will allow us to extend the ioctl argument while
1352	 * maintaining backwards compatibility:
1353	 * We take different code paths depending on the value of
1354	 * arg->version.
1355	 */
1356
1357	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
1358		DRM_ERROR("Incorrect execbuf version.\n");
1359		DRM_ERROR("You're running outdated experimental "
1360			  "vmwgfx user-space drivers.\n");
1361		return -EINVAL;
1362	}
1363
1364	ret = ttm_read_lock(&vmaster->lock, true);
1365	if (unlikely(ret != 0))
1366		return ret;
1367
1368	ret = vmw_execbuf_process(file_priv, dev_priv,
1369				  (void __user *)(unsigned long)arg->commands,
1370				  NULL, arg->command_size, arg->throttle_us,
1371				  (void __user *)(unsigned long)arg->fence_rep,
1372				  NULL);
1373
1374	if (unlikely(ret != 0))
1375		goto out_unlock;
1376
1377	vmw_kms_cursor_post_execbuf(dev_priv);
1378
1379out_unlock:
1380	ttm_read_unlock(&vmaster->lock);
1381	return ret;
1382}