// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_mksstat.h"
#include "vmwgfx_so.h"

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

#include <linux/sync_file.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>

/*
 * Helper macro to get dx_ctx_node if available; otherwise print an error
 * message. This is for use in command verifier functions where, if
 * dx_ctx_node is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context)                                        \
({                                                                            \
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({            \
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__);    \
		__sw_context->dx_ctx_node;                                    \
	});                                                                   \
})

#define VMW_DECLARE_CMD_VAR(__var, __type)                                    \
	struct {                                                              \
		SVGA3dCmdHeader header;                                       \
		__type body;                                                  \
	} __var
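
/*
 * Example (illustrative sketch, not part of the driver): how the two
 * helpers above typically combine in a DX command verifier. The command
 * type SVGA3dCmdDXExample used here is a hypothetical placeholder.
 *
 *	static int vmw_cmd_dx_example(struct vmw_private *dev_priv,
 *				      struct vmw_sw_context *sw_context,
 *				      SVGA3dCmdHeader *header)
 *	{
 *		VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXExample) =
 *			container_of(header, typeof(*cmd), header);
 *		struct vmw_ctx_validation_info *ctx_node =
 *			VMW_GET_CTX_NODE(sw_context);
 *
 *		if (!ctx_node)
 *			return -EINVAL;
 *
 *		... validate cmd->body against the DX context ...
 *		return 0;
 *	}
 */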

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_bo *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};
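
/*
 * Layout note (sketch of the reasoning): @offset and @rel_type above share a
 * single 32-bit word, so offsets are limited to 2^29 - 1 bytes and at most
 * eight relocation types can exist. vmw_resource_relocations_apply() below
 * enforces both limits at compile time:
 *
 *	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
 *	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
 */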

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable), #_cmd}

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_bo **vmw_bo_p);
/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}
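
/*
 * Example: callers use vmw_ptr_diff() to record where in the command buffer
 * an id needs fixup, as in vmw_cmd_res_check() further below:
 *
 *	vmw_resource_relocation_add(sw_context, res,
 *				    vmw_ptr_diff(sw_context->buf_start, id_loc),
 *				    vmw_res_rel_normal);
 */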

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added
 * to the validation list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - Calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node, typically the binding manager's
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

enum vmw_val_add_flags {
	vmw_val_add_flag_none  =      0,
	vmw_val_add_flag_noctx = 1 << 0,
};

/**
 * vmw_execbuf_res_val_add - Add a resource to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 * @flags: Specifies whether to use the context or not.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   u32 dirty,
				   u32 flags)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	if ((flags & vmw_val_add_flag_noctx) != 0) {
		ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
						  (void **)&ctx_info, NULL);
		if (ret)
			return ret;

	} else {
		priv_size = vmw_execbuf_res_size(dev_priv, res_type);
		ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
						  dirty, (void **)&ctx_info,
						  &first_usage);
		if (ret)
			return ret;

		if (priv_size && first_usage) {
			ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
						      ctx_info);
			if (ret) {
				VMW_DEBUG_USER("Failed first usage context setup.\n");
				return ret;
			}
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}
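
/*
 * Example: a typical internal caller adds an already looked-up resource
 * without triggering context setup, as in vmw_resource_context_res_add()
 * below:
 *
 *	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
 *				      vmw_val_add_flag_noctx);
 */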

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
				      vmw_view_dirtying(view), vmw_val_add_flag_noctx);
	if (ret)
		return ret;

	return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
				       vmw_val_add_flag_noctx);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	/* Add all cotables to the validation list. */
	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < cotable_max; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR_OR_NULL(res))
				continue;

			ret = vmw_execbuf_res_val_add(sw_context, res,
						      VMW_RES_DIRTY_SET,
						      vmw_val_add_flag_noctx);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_val_add(sw_context, entry->res,
						      vmw_binding_dirtying(entry->bt),
						      vmw_val_add_flag_noctx);
		if (unlikely(ret != 0))
			break;
	}

	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_bo *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob) {
			vmw_bo_placement_set(dx_query_mob,
					     VMW_BO_DOMAIN_MOB,
					     VMW_BO_DOMAIN_MOB);
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob);
		}
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}
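
/*
 * Worked example (sketch): a relocation recorded with offset 16 and type
 * vmw_res_rel_normal patches the 32-bit word at byte 16 of @cb with the
 * validated resource id. With vmw_res_rel_cond_nop, the same word is
 * overwritten with SVGA_3D_CMD_NOP only if the resource id is still -1
 * after validation; vmw_res_rel_nop replaces it unconditionally.
 */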

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_bo *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on
 * the resource validation list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource validation node. Populated on
 * exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret = 0;
	bool needs_unref = false;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		ret = vmw_user_resource_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
		if (ret != 0) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return ret;
		}
		needs_unref = true;

		ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
		if (unlikely(ret != 0))
			goto res_check_done;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

res_check_done:
	if (needs_unref)
		vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_bo *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->tbo.resource->start;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command
 * submission context's resource cache and hence the last resource of that
 * type to be processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If
 * so, the function prepares the state of @sw_context for switching pinned
 * buffers after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_bo *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo);
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo);
		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the old
 * query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We also pin the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy
			 * queries in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}
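
/*
 * Sketch of the query-buffer switch sequence implied by the two functions
 * above (simplified; error handling omitted):
 *
 *	vmw_query_bo_switch_prepare(dev_priv, new_query_bo, sw_context);
 *	... submit the command batch ...
 *	vmw_query_bo_switch_commit(dev_priv, sw_context);
 *	... emit a fence covering both the old and the new query buffer ...
 *
 * The old pinned buffer is thus only unpinned behind the dummy-query
 * barrier, whose fence guarantees that all preceding queries have finished.
 */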

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_bo **vmw_bo_p)
{
	struct vmw_bo *vmw_bo, *tmp_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
		return ret;
	}
	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
	tmp_bo = vmw_bo;
	vmw_user_bo_unref(&tmp_bo);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
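
/*
 * Example: a typical caller passes the address of the mobid field inside the
 * command so the MOB id can be patched when relocations are applied, cf.
 * vmw_cmd_dx_bind_query() below:
 *
 *	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
 *				    &vmw_bo);
 */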

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_bo **vmw_bo_p)
{
	struct vmw_bo *vmw_bo, *tmp_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
		return ret;
	}
	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
	tmp_bo = vmw_bo;
	vmw_user_bo_unref(&tmp_bo);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type <  SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	if (IS_ERR_OR_NULL(cotable_res))
		return cotable_res ? PTR_ERR(cotable_res) : -EINVAL;
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_bo *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by cmd->body.mobid, put it on the
	 * relocation list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->tbo.base.size;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);

	return 0;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, typeof(*cmd), header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		VMW_DEBUG_USER("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
		((unsigned long) header + header->size + sizeof(*header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
				       (unsigned int) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}
1678
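/**
 * vmw_cmd_check_define_gmrfb - Validate SVGA_CMD_DEFINE_GMRFB command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 *
 * Translates the GMRFB guest pointer to its backing buffer object.
 */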
1679static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1680				      struct vmw_sw_context *sw_context,
1681				      void *buf)
1682{
1683	struct vmw_bo *vmw_bo;
1684
1685	struct {
1686		uint32_t header;
1687		SVGAFifoCmdDefineGMRFB body;
1688	} *cmd = buf;
1689
1690	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
1691				       &vmw_bo);
1692}
1693
1694/**
1695 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1696 * switching
1697 *
1698 * @dev_priv: Pointer to a device private struct.
1699 * @sw_context: The software context being used for this batch.
1700 * @res: Pointer to the resource.
1701 * @buf_id: Pointer to the user-space backup buffer handle in the command
1702 * stream.
1703 * @backup_offset: Offset of backup into MOB.
1704 *
1705 * This function prepares for registering a switch of backup buffers in the
1706 * resource metadata just prior to unreserving. It's basically a wrapper around
1707 * vmw_validation_res_switch_backup with a different interface.
1708 */
1709static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1710				     struct vmw_sw_context *sw_context,
1711				     struct vmw_resource *res, uint32_t *buf_id,
1712				     unsigned long backup_offset)
1713{
1714	struct vmw_bo *vbo;
1715	void *info;
1716	int ret;
1717
1718	info = vmw_execbuf_info_from_res(sw_context, res);
1719	if (!info)
1720		return -EINVAL;
1721
1722	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1723	if (ret)
1724		return ret;
1725
1726	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1727					 backup_offset);
1728	return 0;
1729}
1730
1731/**
1732 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1733 *
1734 * @dev_priv: Pointer to a device private struct.
1735 * @sw_context: The software context being used for this batch.
1736 * @res_type: The resource type.
1737 * @converter: Information about user-space binding for this resource type.
1738 * @res_id: Pointer to the user-space resource handle in the command stream.
1739 * @buf_id: Pointer to the user-space backup buffer handle in the command
1740 * stream.
1741 * @backup_offset: Offset of backup into MOB.
1742 *
1743 * This function prepares for registering a switch of backup buffers in the
1744 * resource metadata just prior to unreserving. It's basically a wrapper around
1745 * vmw_cmd_res_switch_backup with a different interface.
1746 */
1747static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1748				 struct vmw_sw_context *sw_context,
1749				 enum vmw_res_type res_type,
1750				 const struct vmw_user_resource_conv
1751				 *converter, uint32_t *res_id, uint32_t *buf_id,
1752				 unsigned long backup_offset)
1753{
1754	struct vmw_resource *res;
1755	int ret;
1756
1757	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1758				VMW_RES_DIRTY_NONE, converter, res_id, &res);
1759	if (ret)
1760		return ret;
1761
1762	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1763					 backup_offset);
1764}
1765
1766/**
1767 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1768 *
1769 * @dev_priv: Pointer to a device private struct.
1770 * @sw_context: The software context being used for this batch.
1771 * @header: Pointer to the command header in the command stream.
1772 */
1773static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1774				   struct vmw_sw_context *sw_context,
1775				   SVGA3dCmdHeader *header)
1776{
1777	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1778		container_of(header, typeof(*cmd), header);
1779
1780	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1781				     user_surface_converter, &cmd->body.sid,
1782				     &cmd->body.mobid, 0);
1783}
1784
1785/**
1786 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1787 *
1788 * @dev_priv: Pointer to a device private struct.
1789 * @sw_context: The software context being used for this batch.
1790 * @header: Pointer to the command header in the command stream.
1791 */
1792static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1793				   struct vmw_sw_context *sw_context,
1794				   SVGA3dCmdHeader *header)
1795{
1796	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1797		container_of(header, typeof(*cmd), header);
1798
1799	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1800				 VMW_RES_DIRTY_NONE, user_surface_converter,
1801				 &cmd->body.image.sid, NULL);
1802}
1803
1804/**
1805 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1806 *
1807 * @dev_priv: Pointer to a device private struct.
1808 * @sw_context: The software context being used for this batch.
1809 * @header: Pointer to the command header in the command stream.
1810 */
1811static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1812				     struct vmw_sw_context *sw_context,
1813				     SVGA3dCmdHeader *header)
1814{
1815	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1816		container_of(header, typeof(*cmd), header);
1817
1818	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1819				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1820				 &cmd->body.sid, NULL);
1821}
1822
1823/**
1824 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1825 *
1826 * @dev_priv: Pointer to a device private struct.
1827 * @sw_context: The software context being used for this batch.
1828 * @header: Pointer to the command header in the command stream.
1829 */
1830static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1831				     struct vmw_sw_context *sw_context,
1832				     SVGA3dCmdHeader *header)
1833{
1834	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1835		container_of(header, typeof(*cmd), header);
1836
1837	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1838				 VMW_RES_DIRTY_NONE, user_surface_converter,
1839				 &cmd->body.image.sid, NULL);
1840}
1841
1842/**
1843 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1844 * command
1845 *
1846 * @dev_priv: Pointer to a device private struct.
1847 * @sw_context: The software context being used for this batch.
1848 * @header: Pointer to the command header in the command stream.
1849 */
1850static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1851				       struct vmw_sw_context *sw_context,
1852				       SVGA3dCmdHeader *header)
1853{
1854	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1855		container_of(header, typeof(*cmd), header);
1856
1857	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1858				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1859				 &cmd->body.sid, NULL);
1860}
1861
1862/**
1863 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1864 * command
1865 *
1866 * @dev_priv: Pointer to a device private struct.
1867 * @sw_context: The software context being used for this batch.
1868 * @header: Pointer to the command header in the command stream.
1869 */
1870static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1871				       struct vmw_sw_context *sw_context,
1872				       SVGA3dCmdHeader *header)
1873{
1874	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1875		container_of(header, typeof(*cmd), header);
1876
1877	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1878				 VMW_RES_DIRTY_NONE, user_surface_converter,
1879				 &cmd->body.image.sid, NULL);
1880}
1881
1882/**
1883 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1884 * command
1885 *
1886 * @dev_priv: Pointer to a device private struct.
1887 * @sw_context: The software context being used for this batch.
1888 * @header: Pointer to the command header in the command stream.
1889 */
1890static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1891					 struct vmw_sw_context *sw_context,
1892					 SVGA3dCmdHeader *header)
1893{
1894	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1895		container_of(header, typeof(*cmd), header);
1896
1897	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1898				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1899				 &cmd->body.sid, NULL);
1900}
1901
1902/**
1903 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1904 *
1905 * @dev_priv: Pointer to a device private struct.
1906 * @sw_context: The software context being used for this batch.
1907 * @header: Pointer to the command header in the command stream.
1908 */
1909static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1910				 struct vmw_sw_context *sw_context,
1911				 SVGA3dCmdHeader *header)
1912{
1913	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1914	int ret;
1915	size_t size;
1916	struct vmw_resource *ctx;
1917
1918	cmd = container_of(header, typeof(*cmd), header);
1919
1920	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1921				VMW_RES_DIRTY_SET, user_context_converter,
1922				&cmd->body.cid, &ctx);
1923	if (unlikely(ret != 0))
1924		return ret;
1925
1926	if (unlikely(!dev_priv->has_mob))
1927		return 0;
1928
1929	size = cmd->header.size - sizeof(cmd->body);
1930	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1931				    cmd->body.shid, cmd + 1, cmd->body.type,
1932				    size, &sw_context->staged_cmd_res);
1933	if (unlikely(ret != 0))
1934		return ret;
1935
1936	return vmw_resource_relocation_add(sw_context, NULL,
1937					   vmw_ptr_diff(sw_context->buf_start,
1938							&cmd->header.id),
1939					   vmw_res_rel_nop);
1940}
1941
1942/**
1943 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1944 *
1945 * @dev_priv: Pointer to a device private struct.
1946 * @sw_context: The software context being used for this batch.
1947 * @header: Pointer to the command header in the command stream.
1948 */
1949static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1950				  struct vmw_sw_context *sw_context,
1951				  SVGA3dCmdHeader *header)
1952{
1953	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1954	int ret;
1955	struct vmw_resource *ctx;
1956
1957	cmd = container_of(header, typeof(*cmd), header);
1958
1959	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1960				VMW_RES_DIRTY_SET, user_context_converter,
1961				&cmd->body.cid, &ctx);
1962	if (unlikely(ret != 0))
1963		return ret;
1964
1965	if (unlikely(!dev_priv->has_mob))
1966		return 0;
1967
1968	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1969				cmd->body.type, &sw_context->staged_cmd_res);
1970	if (unlikely(ret != 0))
1971		return ret;
1972
1973	return vmw_resource_relocation_add(sw_context, NULL,
1974					   vmw_ptr_diff(sw_context->buf_start,
1975							&cmd->header.id),
1976					   vmw_res_rel_nop);
1977}
1978
1979/**
1980 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1981 *
1982 * @dev_priv: Pointer to a device private struct.
1983 * @sw_context: The software context being used for this batch.
1984 * @header: Pointer to the command header in the command stream.
1985 */
1986static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1987			      struct vmw_sw_context *sw_context,
1988			      SVGA3dCmdHeader *header)
1989{
1990	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1991	struct vmw_ctx_bindinfo_shader binding;
1992	struct vmw_resource *ctx, *res = NULL;
1993	struct vmw_ctx_validation_info *ctx_info;
1994	int ret;
1995
1996	cmd = container_of(header, typeof(*cmd), header);
1997
1998	if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
1999		VMW_DEBUG_USER("Illegal shader type %u.\n",
2000			       (unsigned int) cmd->body.type);
2001		return -EINVAL;
2002	}
2003
2004	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2005				VMW_RES_DIRTY_SET, user_context_converter,
2006				&cmd->body.cid, &ctx);
2007	if (unlikely(ret != 0))
2008		return ret;
2009
2010	if (!dev_priv->has_mob)
2011		return 0;
2012
2013	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2014		/*
2015		 * This is the compat shader path - per-device guest-backed
2016		 * shaders, while user-space believes it is using per-context
2017		 * host-backed shaders.
2018		 */
2019		res = vmw_shader_lookup(vmw_context_res_man(ctx),
2020					cmd->body.shid, cmd->body.type);
2021		if (!IS_ERR(res)) {
2022			ret = vmw_execbuf_res_val_add(sw_context, res,
2023						      VMW_RES_DIRTY_NONE,
2024						      vmw_val_add_flag_noctx);
2025			if (unlikely(ret != 0))
2026				return ret;
2027
2028			ret = vmw_resource_relocation_add
2029				(sw_context, res,
2030				 vmw_ptr_diff(sw_context->buf_start,
2031					      &cmd->body.shid),
2032				 vmw_res_rel_normal);
2033			if (unlikely(ret != 0))
2034				return ret;
2035		}
2036	}
2037
2038	if (IS_ERR_OR_NULL(res)) {
2039		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2040					VMW_RES_DIRTY_NONE,
2041					user_shader_converter, &cmd->body.shid,
2042					&res);
2043		if (unlikely(ret != 0))
2044			return ret;
2045	}
2046
2047	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2048	if (!ctx_info)
2049		return -EINVAL;
2050
2051	binding.bi.ctx = ctx;
2052	binding.bi.res = res;
2053	binding.bi.bt = vmw_ctx_binding_shader;
2054	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2055	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2056
2057	return 0;
2058}
2059
2060/**
2061 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2062 *
2063 * @dev_priv: Pointer to a device private struct.
2064 * @sw_context: The software context being used for this batch.
2065 * @header: Pointer to the command header in the command stream.
2066 */
2067static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2068				    struct vmw_sw_context *sw_context,
2069				    SVGA3dCmdHeader *header)
2070{
2071	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2072	int ret;
2073
2074	cmd = container_of(header, typeof(*cmd), header);
2075
2076	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2077				VMW_RES_DIRTY_SET, user_context_converter,
2078				&cmd->body.cid, NULL);
2079	if (unlikely(ret != 0))
2080		return ret;
2081
2082	if (dev_priv->has_mob)
2083		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2084
2085	return 0;
2086}
2087
2088/**
2089 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2090 *
2091 * @dev_priv: Pointer to a device private struct.
2092 * @sw_context: The software context being used for this batch.
2093 * @header: Pointer to the command header in the command stream.
2094 */
2095static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2096				  struct vmw_sw_context *sw_context,
2097				  SVGA3dCmdHeader *header)
2098{
2099	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2100		container_of(header, typeof(*cmd), header);
2101
2102	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2103				     user_shader_converter, &cmd->body.shid,
2104				     &cmd->body.mobid, cmd->body.offsetInBytes);
2105}
2106
2107/**
2108 * vmw_cmd_dx_set_single_constant_buffer - Validate
2109 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2110 *
2111 * @dev_priv: Pointer to a device private struct.
2112 * @sw_context: The software context being used for this batch.
2113 * @header: Pointer to the command header in the command stream.
2114 */
2115static int
2116vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2117				      struct vmw_sw_context *sw_context,
2118				      SVGA3dCmdHeader *header)
2119{
2120	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2121
2122	struct vmw_resource *res = NULL;
2123	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2124	struct vmw_ctx_bindinfo_cb binding;
2125	int ret;
2126
2127	if (!ctx_node)
2128		return -EINVAL;
2129
2130	cmd = container_of(header, typeof(*cmd), header);
2131	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2132				VMW_RES_DIRTY_NONE, user_surface_converter,
2133				&cmd->body.sid, &res);
2134	if (unlikely(ret != 0))
2135		return ret;
2136
2137	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
2138	    cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2139		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2140			       (unsigned int) cmd->body.type,
2141			       (unsigned int) cmd->body.slot);
2142		return -EINVAL;
2143	}
2144
2145	binding.bi.ctx = ctx_node->ctx;
2146	binding.bi.res = res;
2147	binding.bi.bt = vmw_ctx_binding_cb;
2148	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2149	binding.offset = cmd->body.offsetInBytes;
2150	binding.size = cmd->body.sizeInBytes;
2151	binding.slot = cmd->body.slot;
2152
2153	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2154			binding.slot);
2155
2156	return 0;
2157}
2158
2159/**
2160 * vmw_cmd_dx_set_constant_buffer_offset - Validate
2161 * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
2162 *
2163 * @dev_priv: Pointer to a device private struct.
2164 * @sw_context: The software context being used for this batch.
2165 * @header: Pointer to the command header in the command stream.
2166 */
2167static int
2168vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
2169				      struct vmw_sw_context *sw_context,
2170				      SVGA3dCmdHeader *header)
2171{
2172	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);
2173
2174	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2175	u32 shader_slot;
2176
2177	if (!has_sm5_context(dev_priv))
2178		return -EINVAL;
2179
2180	if (!ctx_node)
2181		return -EINVAL;
2182
2183	cmd = container_of(header, typeof(*cmd), header);
2184	if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2185		VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
2186			       (unsigned int) cmd->body.slot);
2187		return -EINVAL;
2188	}
2189
2190	shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
2191	vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
2192				     cmd->body.slot, cmd->body.offsetInBytes);
2193
2194	return 0;
2195}
2196
2197/**
2198 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2199 * command
2200 *
2201 * @dev_priv: Pointer to a device private struct.
2202 * @sw_context: The software context being used for this batch.
2203 * @header: Pointer to the command header in the command stream.
2204 */
2205static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2206				     struct vmw_sw_context *sw_context,
2207				     SVGA3dCmdHeader *header)
2208{
2209	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2210		container_of(header, typeof(*cmd), header);
2211
2212	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2213		sizeof(SVGA3dShaderResourceViewId);
2214
2215	if ((u64) cmd->body.startView + (u64) num_sr_view >
2216	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2217	    !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
2218		VMW_DEBUG_USER("Invalid shader binding.\n");
2219		return -EINVAL;
2220	}
2221
2222	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2223				     vmw_ctx_binding_sr,
2224				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2225				     (void *) &cmd[1], num_sr_view,
2226				     cmd->body.startView);
2227}
2228
2229/**
2230 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2231 *
2232 * @dev_priv: Pointer to a device private struct.
2233 * @sw_context: The software context being used for this batch.
2234 * @header: Pointer to the command header in the command stream.
2235 */
2236static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2237				 struct vmw_sw_context *sw_context,
2238				 SVGA3dCmdHeader *header)
2239{
2240	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2241	struct vmw_resource *res = NULL;
2242	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2243	struct vmw_ctx_bindinfo_shader binding;
2244	int ret = 0;
2245
2246	if (!ctx_node)
2247		return -EINVAL;
2248
2249	cmd = container_of(header, typeof(*cmd), header);
2250
2251	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
2252		VMW_DEBUG_USER("Illegal shader type %u.\n",
2253			       (unsigned int) cmd->body.type);
2254		return -EINVAL;
2255	}
2256
2257	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2258		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2259		if (IS_ERR(res)) {
2260			VMW_DEBUG_USER("Could not find shader for binding.\n");
2261			return PTR_ERR(res);
2262		}
2263
2264		ret = vmw_execbuf_res_val_add(sw_context, res,
2265					      VMW_RES_DIRTY_NONE,
2266					      vmw_val_add_flag_noctx);
2267		if (ret)
2268			return ret;
2269	}
2270
2271	binding.bi.ctx = ctx_node->ctx;
2272	binding.bi.res = res;
2273	binding.bi.bt = vmw_ctx_binding_dx_shader;
2274	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2275
2276	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2277
2278	return 0;
2279}
2280
2281/**
2282 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2283 * command
2284 *
2285 * @dev_priv: Pointer to a device private struct.
2286 * @sw_context: The software context being used for this batch.
2287 * @header: Pointer to the command header in the command stream.
2288 */
2289static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2290					 struct vmw_sw_context *sw_context,
2291					 SVGA3dCmdHeader *header)
2292{
2293	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2294	struct vmw_ctx_bindinfo_vb binding;
2295	struct vmw_resource *res;
2296	struct {
2297		SVGA3dCmdHeader header;
2298		SVGA3dCmdDXSetVertexBuffers body;
2299		SVGA3dVertexBuffer buf[];
2300	} *cmd;
2301	int i, ret, num;
2302
2303	if (!ctx_node)
2304		return -EINVAL;
2305
2306	cmd = container_of(header, typeof(*cmd), header);
2307	num = (cmd->header.size - sizeof(cmd->body)) /
2308		sizeof(SVGA3dVertexBuffer);
2309	if ((u64)num + (u64)cmd->body.startBuffer >
2310	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2311		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2312		return -EINVAL;
2313	}
2314
2315	for (i = 0; i < num; i++) {
2316		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2317					VMW_RES_DIRTY_NONE,
2318					user_surface_converter,
2319					&cmd->buf[i].sid, &res);
2320		if (unlikely(ret != 0))
2321			return ret;
2322
2323		binding.bi.ctx = ctx_node->ctx;
2324		binding.bi.bt = vmw_ctx_binding_vb;
2325		binding.bi.res = res;
2326		binding.offset = cmd->buf[i].offset;
2327		binding.stride = cmd->buf[i].stride;
2328		binding.slot = i + cmd->body.startBuffer;
2329
2330		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2331	}
2332
2333	return 0;
2334}
2335
2336/**
2337 * vmw_cmd_dx_set_index_buffer - Validate
2338 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
2339 *
2340 * @dev_priv: Pointer to a device private struct.
2341 * @sw_context: The software context being used for this batch.
2342 * @header: Pointer to the command header in the command stream.
2343 */
2344static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2345				       struct vmw_sw_context *sw_context,
2346				       SVGA3dCmdHeader *header)
2347{
2348	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2349	struct vmw_ctx_bindinfo_ib binding;
2350	struct vmw_resource *res;
2351	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2352	int ret;
2353
2354	if (!ctx_node)
2355		return -EINVAL;
2356
2357	cmd = container_of(header, typeof(*cmd), header);
2358	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2359				VMW_RES_DIRTY_NONE, user_surface_converter,
2360				&cmd->body.sid, &res);
2361	if (unlikely(ret != 0))
2362		return ret;
2363
2364	binding.bi.ctx = ctx_node->ctx;
2365	binding.bi.res = res;
2366	binding.bi.bt = vmw_ctx_binding_ib;
2367	binding.offset = cmd->body.offset;
2368	binding.format = cmd->body.format;
2369
2370	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2371
2372	return 0;
2373}
2374
2375/**
2376 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2377 * command
2378 *
2379 * @dev_priv: Pointer to a device private struct.
2380 * @sw_context: The software context being used for this batch.
2381 * @header: Pointer to the command header in the command stream.
2382 */
2383static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2384					struct vmw_sw_context *sw_context,
2385					SVGA3dCmdHeader *header)
2386{
2387	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2388		container_of(header, typeof(*cmd), header);
2389	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2390		sizeof(SVGA3dRenderTargetViewId);
2391	int ret;
2392
2393	if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) {
2394		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2395		return -EINVAL;
2396	}
2397
2398	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2399				    0, &cmd->body.depthStencilViewId, 1, 0);
2400	if (ret)
2401		return ret;
2402
2403	return vmw_view_bindings_add(sw_context, vmw_view_rt,
2404				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2405				     num_rt_view, 0);
2406}
2407
2408/**
2409 * vmw_cmd_dx_clear_rendertarget_view - Validate
2410 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2411 *
2412 * @dev_priv: Pointer to a device private struct.
2413 * @sw_context: The software context being used for this batch.
2414 * @header: Pointer to the command header in the command stream.
2415 */
2416static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2417					      struct vmw_sw_context *sw_context,
2418					      SVGA3dCmdHeader *header)
2419{
2420	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2421		container_of(header, typeof(*cmd), header);
2422	struct vmw_resource *ret;
2423
2424	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
2425				  cmd->body.renderTargetViewId);
2426
2427	return PTR_ERR_OR_ZERO(ret);
2428}
2429
2430/**
2431 * vmw_cmd_dx_clear_depthstencil_view - Validate
2432 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2433 *
2434 * @dev_priv: Pointer to a device private struct.
2435 * @sw_context: The software context being used for this batch.
2436 * @header: Pointer to the command header in the command stream.
2437 */
2438static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2439					      struct vmw_sw_context *sw_context,
2440					      SVGA3dCmdHeader *header)
2441{
2442	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2443		container_of(header, typeof(*cmd), header);
2444	struct vmw_resource *ret;
2445
2446	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
2447				  cmd->body.depthStencilViewId);
2448
2449	return PTR_ERR_OR_ZERO(ret);
2450}
2451
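/**
 * vmw_cmd_dx_view_define - Validate the DX view define commands
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Shared validator for the SVGA_3D_CMD_DX_DEFINE_*VIEW commands, which all
 * share the same initial command body layout: the backing surface is
 * validated, the view cotable is notified, and the view is added to the
 * staged command resources.
 */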
2452static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2453				  struct vmw_sw_context *sw_context,
2454				  SVGA3dCmdHeader *header)
2455{
2456	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2457	struct vmw_resource *srf;
2458	struct vmw_resource *res;
2459	enum vmw_view_type view_type;
2460	int ret;
2461	/*
2462	 * This is based on the fact that all affected define commands have the
2463	 * same initial command body layout.
2464	 */
2465	struct {
2466		SVGA3dCmdHeader header;
2467		uint32 defined_id;
2468		uint32 sid;
2469	} *cmd;
2470
2471	if (!ctx_node)
2472		return -EINVAL;
2473
2474	view_type = vmw_view_cmd_to_type(header->id);
2475	if (view_type == vmw_view_max)
2476		return -EINVAL;
2477
2478	cmd = container_of(header, typeof(*cmd), header);
2479	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2480		VMW_DEBUG_USER("Invalid surface id.\n");
2481		return -EINVAL;
2482	}
2483	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2484				VMW_RES_DIRTY_NONE, user_surface_converter,
2485				&cmd->sid, &srf);
2486	if (unlikely(ret != 0))
2487		return ret;
2488
2489	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2490	if (IS_ERR_OR_NULL(res))
2491		return res ? PTR_ERR(res) : -EINVAL;
2492	ret = vmw_cotable_notify(res, cmd->defined_id);
2493	if (unlikely(ret != 0))
2494		return ret;
2495
2496	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2497			    cmd->defined_id, header,
2498			    header->size + sizeof(*header),
2499			    &sw_context->staged_cmd_res);
2500}
2501
2502/**
2503 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2504 *
2505 * @dev_priv: Pointer to a device private struct.
2506 * @sw_context: The software context being used for this batch.
2507 * @header: Pointer to the command header in the command stream.
2508 */
2509static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2510				     struct vmw_sw_context *sw_context,
2511				     SVGA3dCmdHeader *header)
2512{
2513	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2514	struct vmw_ctx_bindinfo_so_target binding;
2515	struct vmw_resource *res;
2516	struct {
2517		SVGA3dCmdHeader header;
2518		SVGA3dCmdDXSetSOTargets body;
2519		SVGA3dSoTarget targets[];
2520	} *cmd;
2521	int i, ret, num;
2522
2523	if (!ctx_node)
2524		return -EINVAL;
2525
2526	cmd = container_of(header, typeof(*cmd), header);
2527	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2528
2529	if (num > SVGA3D_DX_MAX_SOTARGETS) {
2530		VMW_DEBUG_USER("Invalid DX SO binding.\n");
2531		return -EINVAL;
2532	}
2533
2534	for (i = 0; i < num; i++) {
2535		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2536					VMW_RES_DIRTY_SET,
2537					user_surface_converter,
2538					&cmd->targets[i].sid, &res);
2539		if (unlikely(ret != 0))
2540			return ret;
2541
2542		binding.bi.ctx = ctx_node->ctx;
2543		binding.bi.res = res;
2544		binding.bi.bt = vmw_ctx_binding_so_target;
2545		binding.offset = cmd->targets[i].offset;
2546		binding.size = cmd->targets[i].sizeInBytes;
2547		binding.slot = i;
2548
2549		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2550	}
2551
2552	return 0;
2553}
2554
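/**
 * vmw_cmd_dx_so_define - Validate the DX state object define commands
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Shared validator for the define commands mapped by vmw_so_cmd_to_type();
 * notifies the matching cotable of the newly defined id.
 */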
2555static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2556				struct vmw_sw_context *sw_context,
2557				SVGA3dCmdHeader *header)
2558{
2559	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2560	struct vmw_resource *res;
2561	/*
2562	 * This is based on the fact that all affected define commands have
2563	 * the same initial command body layout.
2564	 */
2565	struct {
2566		SVGA3dCmdHeader header;
2567		uint32 defined_id;
2568	} *cmd;
2569	enum vmw_so_type so_type;
2570	int ret;
2571
2572	if (!ctx_node)
2573		return -EINVAL;
2574
2575	so_type = vmw_so_cmd_to_type(header->id);
2576	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2577	if (IS_ERR_OR_NULL(res))
2578		return res ? PTR_ERR(res) : -EINVAL;
2579	cmd = container_of(header, typeof(*cmd), header);
2580	ret = vmw_cotable_notify(res, cmd->defined_id);
2581
2582	return ret;
2583}
2584
2585/**
2586 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2587 * command
2588 *
2589 * @dev_priv: Pointer to a device private struct.
2590 * @sw_context: The software context being used for this batch.
2591 * @header: Pointer to the command header in the command stream.
2592 */
2593static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2594					struct vmw_sw_context *sw_context,
2595					SVGA3dCmdHeader *header)
2596{
2597	struct {
2598		SVGA3dCmdHeader header;
2599		union {
2600			SVGA3dCmdDXReadbackSubResource r_body;
2601			SVGA3dCmdDXInvalidateSubResource i_body;
2602			SVGA3dCmdDXUpdateSubResource u_body;
2603			SVGA3dSurfaceId sid;
2604		};
2605	} *cmd;
2606
2607	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2608		     offsetof(typeof(*cmd), sid));
2609	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2610		     offsetof(typeof(*cmd), sid));
2611	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2612		     offsetof(typeof(*cmd), sid));
2613
2614	cmd = container_of(header, typeof(*cmd), header);
2615	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2616				 VMW_RES_DIRTY_NONE, user_surface_converter,
2617				 &cmd->sid, NULL);
2618}
2619
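/**
 * vmw_cmd_dx_cid_check - Validate commands that only require a DX context
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */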
2620static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2621				struct vmw_sw_context *sw_context,
2622				SVGA3dCmdHeader *header)
2623{
2624	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2625
2626	if (!ctx_node)
2627		return -EINVAL;
2628
2629	return 0;
2630}
2631
2632/**
2633 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view
2634 * resource for removal.
2635 *
2636 * @dev_priv: Pointer to a device private struct.
2637 * @sw_context: The software context being used for this batch.
2638 * @header: Pointer to the command header in the command stream.
2639 *
2640 * Check that the view exists, and if it was not created using this command
2641 * batch, conditionally make this command a NOP.
2642 */
2643static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2644				  struct vmw_sw_context *sw_context,
2645				  SVGA3dCmdHeader *header)
2646{
2647	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2648	struct {
2649		SVGA3dCmdHeader header;
2650		union vmw_view_destroy body;
2651	} *cmd = container_of(header, typeof(*cmd), header);
2652	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2653	struct vmw_resource *view;
2654	int ret;
2655
2656	if (!ctx_node)
2657		return -EINVAL;
2658
2659	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2660			      &sw_context->staged_cmd_res, &view);
2661	if (ret || !view)
2662		return ret;
2663
2664	/*
2665	 * If the view wasn't created during this command batch, it might
2666	 * have been removed due to a context swapout, so add a
2667	 * relocation to conditionally make this command a NOP to avoid
2668	 * device errors.
2669	 */
2670	return vmw_resource_relocation_add(sw_context, view,
2671					   vmw_ptr_diff(sw_context->buf_start,
2672							&cmd->header.id),
2673					   vmw_res_rel_cond_nop);
2674}
2675
2676/**
2677 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2678 *
2679 * @dev_priv: Pointer to a device private struct.
2680 * @sw_context: The software context being used for this batch.
2681 * @header: Pointer to the command header in the command stream.
2682 */
2683static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2684				    struct vmw_sw_context *sw_context,
2685				    SVGA3dCmdHeader *header)
2686{
2687	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2688	struct vmw_resource *res;
2689	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2690		container_of(header, typeof(*cmd), header);
2691	int ret;
2692
2693	if (!ctx_node)
2694		return -EINVAL;
2695
2696	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2697	if (IS_ERR_OR_NULL(res))
2698		return res ? PTR_ERR(res) : -EINVAL;
2699	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2700	if (ret)
2701		return ret;
2702
2703	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2704				 cmd->body.shaderId, cmd->body.type,
2705				 &sw_context->staged_cmd_res);
2706}
2707
2708/**
2709 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2710 *
2711 * @dev_priv: Pointer to a device private struct.
2712 * @sw_context: The software context being used for this batch.
2713 * @header: Pointer to the command header in the command stream.
2714 */
2715static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2716				     struct vmw_sw_context *sw_context,
2717				     SVGA3dCmdHeader *header)
2718{
2719	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2720	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2721		container_of(header, typeof(*cmd), header);
2722	int ret;
2723
2724	if (!ctx_node)
2725		return -EINVAL;
2726
2727	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2728				&sw_context->staged_cmd_res);
2729
2730	return ret;
2731}
2732
2733/**
2734 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2735 *
2736 * @dev_priv: Pointer to a device private struct.
2737 * @sw_context: The software context being used for this batch.
2738 * @header: Pointer to the command header in the command stream.
2739 */
2740static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2741				  struct vmw_sw_context *sw_context,
2742				  SVGA3dCmdHeader *header)
2743{
2744	struct vmw_resource *ctx;
2745	struct vmw_resource *res;
2746	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2747		container_of(header, typeof(*cmd), header);
2748	int ret;
2749
2750	if (cmd->body.cid != SVGA3D_INVALID_ID) {
2751		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2752					VMW_RES_DIRTY_SET,
2753					user_context_converter, &cmd->body.cid,
2754					&ctx);
2755		if (ret)
2756			return ret;
2757	} else {
2758		struct vmw_ctx_validation_info *ctx_node =
2759			VMW_GET_CTX_NODE(sw_context);
2760
2761		if (!ctx_node)
2762			return -EINVAL;
2763
2764		ctx = ctx_node->ctx;
2765	}
2766
2767	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2768	if (IS_ERR(res)) {
2769		VMW_DEBUG_USER("Could not find shader to bind.\n");
2770		return PTR_ERR(res);
2771	}
2772
2773	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
2774				      vmw_val_add_flag_noctx);
2775	if (ret) {
2776		VMW_DEBUG_USER("Error creating resource validation node.\n");
2777		return ret;
2778	}
2779
2780	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2781					 &cmd->body.mobid,
2782					 cmd->body.offsetInBytes);
2783}
2784
2785/**
2786 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2787 *
2788 * @dev_priv: Pointer to a device private struct.
2789 * @sw_context: The software context being used for this batch.
2790 * @header: Pointer to the command header in the command stream.
2791 */
2792static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2793			      struct vmw_sw_context *sw_context,
2794			      SVGA3dCmdHeader *header)
2795{
2796	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2797		container_of(header, typeof(*cmd), header);
2798	struct vmw_resource *view;
2799	struct vmw_res_cache_entry *rcache;
2800
2801	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
2802				   cmd->body.shaderResourceViewId);
2803	if (IS_ERR(view))
2804		return PTR_ERR(view);
2805
2806	/*
2807	 * Normally the shader-resource view is not gpu-dirtying, but for
2808	 * this particular command it is...
2809	 * So mark the last looked-up surface, which is the surface
2810	 * the view points to, gpu-dirty.
2811	 */
2812	rcache = &sw_context->res_cache[vmw_res_surface];
2813	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
2814				     VMW_RES_DIRTY_SET);
2815	return 0;
2816}
2817
2818/**
2819 * vmw_cmd_dx_transfer_from_buffer - Validate
2820 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2821 *
2822 * @dev_priv: Pointer to a device private struct.
2823 * @sw_context: The software context being used for this batch.
2824 * @header: Pointer to the command header in the command stream.
2825 */
2826static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2827					   struct vmw_sw_context *sw_context,
2828					   SVGA3dCmdHeader *header)
2829{
2830	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2831		container_of(header, typeof(*cmd), header);
2832	int ret;
2833
2834	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2835				VMW_RES_DIRTY_NONE, user_surface_converter,
2836				&cmd->body.srcSid, NULL);
2837	if (ret != 0)
2838		return ret;
2839
2840	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2841				 VMW_RES_DIRTY_SET, user_surface_converter,
2842				 &cmd->body.destSid, NULL);
2843}
2844
2845/**
2846 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2847 *
2848 * @dev_priv: Pointer to a device private struct.
2849 * @sw_context: The software context being used for this batch.
2850 * @header: Pointer to the command header in the command stream.
2851 */
2852static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2853					   struct vmw_sw_context *sw_context,
2854					   SVGA3dCmdHeader *header)
2855{
2856	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2857		container_of(header, typeof(*cmd), header);
2858
2859	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2860		return -EINVAL;
2861
2862	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2863				 VMW_RES_DIRTY_SET, user_surface_converter,
2864				 &cmd->body.surface.sid, NULL);
2865}
2866
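/**
 * vmw_cmd_sm5 - Validate commands that merely require an SM5 context
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */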
2867static int vmw_cmd_sm5(struct vmw_private *dev_priv,
2868		       struct vmw_sw_context *sw_context,
2869		       SVGA3dCmdHeader *header)
2870{
2871	if (!has_sm5_context(dev_priv))
2872		return -EINVAL;
2873
2874	return 0;
2875}
2876
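/**
 * vmw_cmd_sm5_view_define - Validate the SM5-only view define commands
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Requires an SM5 context, then defers to vmw_cmd_dx_view_define().
 */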
2877static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
2878				   struct vmw_sw_context *sw_context,
2879				   SVGA3dCmdHeader *header)
2880{
2881	if (!has_sm5_context(dev_priv))
2882		return -EINVAL;
2883
2884	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
2885}
2886
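/**
 * vmw_cmd_sm5_view_remove - Validate the SM5-only view remove commands
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Requires an SM5 context, then defers to vmw_cmd_dx_view_remove().
 */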
2887static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
2888				   struct vmw_sw_context *sw_context,
2889				   SVGA3dCmdHeader *header)
2890{
2891	if (!has_sm5_context(dev_priv))
2892		return -EINVAL;
2893
2894	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
2895}
2896
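/**
 * vmw_cmd_clear_uav_uint - Validate the SVGA3dCmdDXClearUAViewUint command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Adds the UA view referenced by the command to the validation list.
 */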
2897static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
2898				  struct vmw_sw_context *sw_context,
2899				  SVGA3dCmdHeader *header)
2900{
2901	struct {
2902		SVGA3dCmdHeader header;
2903		SVGA3dCmdDXClearUAViewUint body;
2904	} *cmd = container_of(header, typeof(*cmd), header);
2905	struct vmw_resource *ret;
2906
2907	if (!has_sm5_context(dev_priv))
2908		return -EINVAL;
2909
2910	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2911				  cmd->body.uaViewId);
2912
2913	return PTR_ERR_OR_ZERO(ret);
2914}
2915
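/**
 * vmw_cmd_clear_uav_float - Validate the SVGA3dCmdDXClearUAViewFloat command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Adds the UA view referenced by the command to the validation list.
 */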
2916static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
2917				   struct vmw_sw_context *sw_context,
2918				   SVGA3dCmdHeader *header)
2919{
2920	struct {
2921		SVGA3dCmdHeader header;
2922		SVGA3dCmdDXClearUAViewFloat body;
2923	} *cmd = container_of(header, typeof(*cmd), header);
2924	struct vmw_resource *ret;
2925
2926	if (!has_sm5_context(dev_priv))
2927		return -EINVAL;
2928
2929	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2930				  cmd->body.uaViewId);
2931
2932	return PTR_ERR_OR_ZERO(ret);
2933}
2934
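/**
 * vmw_cmd_set_uav - Validate the SVGA3dCmdDXSetUAViews command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the number of UA views against the device limit, stages the view
 * bindings and records the UAV splice index on the context.
 */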
2935static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
2936			   struct vmw_sw_context *sw_context,
2937			   SVGA3dCmdHeader *header)
2938{
2939	struct {
2940		SVGA3dCmdHeader header;
2941		SVGA3dCmdDXSetUAViews body;
2942	} *cmd = container_of(header, typeof(*cmd), header);
2943	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2944		sizeof(SVGA3dUAViewId);
2945	int ret;
2946
2947	if (!has_sm5_context(dev_priv))
2948		return -EINVAL;
2949
2950	if (num_uav > vmw_max_num_uavs(dev_priv)) {
2951		VMW_DEBUG_USER("Invalid UAV binding.\n");
2952		return -EINVAL;
2953	}
2954
2955	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2956				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
2957				    num_uav, 0);
2958	if (ret)
2959		return ret;
2960
2961	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
2962				  cmd->body.uavSpliceIndex);
2963
2964	return ret;
2965}
2966
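/**
 * vmw_cmd_set_cs_uav - Validate the SVGA3dCmdDXSetCSUAViews command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Compute-shader counterpart of vmw_cmd_set_uav().
 */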
2967static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
2968			      struct vmw_sw_context *sw_context,
2969			      SVGA3dCmdHeader *header)
2970{
2971	struct {
2972		SVGA3dCmdHeader header;
2973		SVGA3dCmdDXSetCSUAViews body;
2974	} *cmd = container_of(header, typeof(*cmd), header);
2975	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2976		sizeof(SVGA3dUAViewId);
2977	int ret;
2978
2979	if (!has_sm5_context(dev_priv))
2980		return -EINVAL;
2981
2982	if (num_uav > vmw_max_num_uavs(dev_priv)) {
2983		VMW_DEBUG_USER("Invalid UAV binding.\n");
2984		return -EINVAL;
2985	}
2986
2987	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2988				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
2989				    num_uav, 0);
2990	if (ret)
2991		return ret;
2992
2993	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
2994				  cmd->body.startIndex);
2995
2996	return ret;
2997}
2998
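/**
 * vmw_cmd_dx_define_streamoutput - Validate the
 * SVGA3dCmdDXDefineStreamOutputWithMob command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Notifies the streamoutput cotable and adds the new streamoutput to the
 * staged command resources.
 */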
2999static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
3000					  struct vmw_sw_context *sw_context,
3001					  SVGA3dCmdHeader *header)
3002{
3003	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3004	struct vmw_resource *res;
3005	struct {
3006		SVGA3dCmdHeader header;
3007		SVGA3dCmdDXDefineStreamOutputWithMob body;
3008	} *cmd = container_of(header, typeof(*cmd), header);
3009	int ret;
3010
3011	if (!has_sm5_context(dev_priv))
3012		return -EINVAL;
3013
3014	if (!ctx_node) {
3015		DRM_ERROR("DX Context not set.\n");
3016		return -EINVAL;
3017	}
3018
3019	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
3020	if (IS_ERR_OR_NULL(res))
3021		return res ? PTR_ERR(res) : -EINVAL;
3022	ret = vmw_cotable_notify(res, cmd->body.soid);
3023	if (ret)
3024		return ret;
3025
3026	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
3027				       cmd->body.soid,
3028				       &sw_context->staged_cmd_res);
3029}
3030
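/**
 * vmw_cmd_dx_destroy_streamoutput - Validate the
 * SVGA3dCmdDXDestroyStreamOutput command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Schedules the streamoutput for removal; tolerates pre-SM5 devices and
 * ids defined with the old streamoutput define command.
 */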
3031static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
3032					   struct vmw_sw_context *sw_context,
3033					   SVGA3dCmdHeader *header)
3034{
3035	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3036	struct vmw_resource *res;
3037	struct {
3038		SVGA3dCmdHeader header;
3039		SVGA3dCmdDXDestroyStreamOutput body;
3040	} *cmd = container_of(header, typeof(*cmd), header);
3041
3042	if (!ctx_node) {
3043		DRM_ERROR("DX Context not set.\n");
3044		return -EINVAL;
3045	}
3046
3047	/*
3048	 * When the device does not support SM5, the streamoutput-with-mob
3049	 * commands are not available to user-space. Simply return in this case.
3050	 */
3051	if (!has_sm5_context(dev_priv))
3052		return 0;
3053
3054	/*
3055	 * On an SM5-capable device, a failed lookup means user-space probably
3056	 * used the old streamoutput define command. Return without an error.
3057	 */
3058	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3059					 cmd->body.soid);
3060	if (IS_ERR(res))
3061		return 0;
3062
3063	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
3064					  &sw_context->staged_cmd_res);
3065}
3066
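/**
 * vmw_cmd_dx_bind_streamoutput - Validate the SVGA3dCmdDXBindStreamOutput
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Looks up the streamoutput object, updates its size and switches its
 * backing MOB.
 */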
3067static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
3068					struct vmw_sw_context *sw_context,
3069					SVGA3dCmdHeader *header)
3070{
3071	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3072	struct vmw_resource *res;
3073	struct {
3074		SVGA3dCmdHeader header;
3075		SVGA3dCmdDXBindStreamOutput body;
3076	} *cmd = container_of(header, typeof(*cmd), header);
3077	int ret;
3078
3079	if (!has_sm5_context(dev_priv))
3080		return -EINVAL;
3081
3082	if (!ctx_node) {
3083		DRM_ERROR("DX Context not set.\n");
3084		return -EINVAL;
3085	}
3086
3087	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3088					 cmd->body.soid);
3089	if (IS_ERR(res)) {
3090		DRM_ERROR("Could not find streamoutput to bind.\n");
3091		return PTR_ERR(res);
3092	}
3093
3094	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
3095
3096	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3097				      vmw_val_add_flag_noctx);
3098	if (ret) {
3099		DRM_ERROR("Error creating resource validation node.\n");
3100		return ret;
3101	}
3102
3103	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
3104					 &cmd->body.mobid,
3105					 cmd->body.offsetInBytes);
3106}
3107
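/**
 * vmw_cmd_dx_set_streamoutput - Validate the SVGA3dCmdDXSetStreamOutput
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Stages the streamoutput binding on the DX context.
 */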
3108static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
3109				       struct vmw_sw_context *sw_context,
3110				       SVGA3dCmdHeader *header)
3111{
3112	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3113	struct vmw_resource *res;
3114	struct vmw_ctx_bindinfo_so binding;
3115	struct {
3116		SVGA3dCmdHeader header;
3117		SVGA3dCmdDXSetStreamOutput body;
3118	} *cmd = container_of(header, typeof(*cmd), header);
3119	int ret;
3120
3121	if (!ctx_node) {
3122		DRM_ERROR("DX Context not set.\n");
3123		return -EINVAL;
3124	}
3125
3126	if (cmd->body.soid == SVGA3D_INVALID_ID)
3127		return 0;
3128
3129	/*
3130	 * When the device does not support SM5, the streamoutput-with-mob
3131	 * commands are not available to user-space. Simply return in this case.
3132	 */
3133	if (!has_sm5_context(dev_priv))
3134		return 0;
3135
3136	/*
3137	 * On an SM5-capable device, a failed lookup means user-space probably
3138	 * used the old streamoutput define command. Return without an error.
3139	 */
3140	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3141					 cmd->body.soid);
3142	if (IS_ERR(res))
3143		return 0;
3145
3146	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3147				      vmw_val_add_flag_noctx);
3148	if (ret) {
3149		DRM_ERROR("Error creating resource validation node.\n");
3150		return ret;
3151	}
3152
3153	binding.bi.ctx = ctx_node->ctx;
3154	binding.bi.res = res;
3155	binding.bi.bt = vmw_ctx_binding_so;
3156	binding.slot = 0; /* Only one SO set to context at a time. */
3157
3158	vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
3159			binding.slot);
3160
3161	return ret;
3162}
3163
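/**
 * vmw_cmd_indexed_instanced_indirect - Validate the
 * SVGA3dCmdDXDrawIndexedInstancedIndirect command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Validates the surface backing the indirect argument buffer.
 */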
3164static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
3165					      struct vmw_sw_context *sw_context,
3166					      SVGA3dCmdHeader *header)
3167{
3168	struct vmw_draw_indexed_instanced_indirect_cmd {
3169		SVGA3dCmdHeader header;
3170		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
3171	} *cmd = container_of(header, typeof(*cmd), header);
3172
3173	if (!has_sm5_context(dev_priv))
3174		return -EINVAL;
3175
3176	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3177				 VMW_RES_DIRTY_NONE, user_surface_converter,
3178				 &cmd->body.argsBufferSid, NULL);
3179}
3180
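/**
 * vmw_cmd_instanced_indirect - Validate the
 * SVGA3dCmdDXDrawInstancedIndirect command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Validates the surface backing the indirect argument buffer.
 */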
3181static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
3182				      struct vmw_sw_context *sw_context,
3183				      SVGA3dCmdHeader *header)
3184{
3185	struct vmw_draw_instanced_indirect_cmd {
3186		SVGA3dCmdHeader header;
3187		SVGA3dCmdDXDrawInstancedIndirect body;
3188	} *cmd = container_of(header, typeof(*cmd), header);
3189
3190	if (!has_sm5_context(dev_priv))
3191		return -EINVAL;
3192
3193	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3194				 VMW_RES_DIRTY_NONE, user_surface_converter,
3195				 &cmd->body.argsBufferSid, NULL);
3196}
3197
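/**
 * vmw_cmd_dispatch_indirect - Validate the SVGA3dCmdDXDispatchIndirect
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Validates the surface backing the indirect argument buffer.
 */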
3198static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
3199				     struct vmw_sw_context *sw_context,
3200				     SVGA3dCmdHeader *header)
3201{
3202	struct vmw_dispatch_indirect_cmd {
3203		SVGA3dCmdHeader header;
3204		SVGA3dCmdDXDispatchIndirect body;
3205	} *cmd = container_of(header, typeof(*cmd), header);
3206
3207	if (!has_sm5_context(dev_priv))
3208		return -EINVAL;
3209
3210	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3211				 VMW_RES_DIRTY_NONE, user_surface_converter,
3212				 &cmd->body.argsBufferSid, NULL);
3213}
3214
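/**
 * vmw_cmd_check_not_3d - Validate a 2D SVGA FIFO command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: bytes remaining in the command stream. Out: size of this command.
 *
 * Determines the size of the command, checks it against the remaining
 * stream, and, since these commands are kernel-only, rejects user-space
 * submissions.
 */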
3215static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3216				struct vmw_sw_context *sw_context,
3217				void *buf, uint32_t *size)
3218{
3219	uint32_t size_remaining = *size;
3220	uint32_t cmd_id;
3221
3222	cmd_id = ((uint32_t *)buf)[0];
3223	switch (cmd_id) {
3224	case SVGA_CMD_UPDATE:
3225		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3226		break;
3227	case SVGA_CMD_DEFINE_GMRFB:
3228		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3229		break;
3230	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3231		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3232		break;
3233	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
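		/*
		 * SVGAFifoCmdBlitScreenToGMRFB has the same size as
		 * SVGAFifoCmdBlitGMRFBToScreen, so the size computed
		 * here is correct for both blit directions.
		 */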
3234		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3235		break;
3236	default:
3237		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
3238		return -EINVAL;
3239	}
3240
3241	if (*size > size_remaining) {
3242		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
3243			       cmd_id);
3244		return -EINVAL;
3245	}
3246
3247	if (unlikely(!sw_context->kernel)) {
3248		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
3249		return -EPERM;
3250	}
3251
3252	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3253		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3254
3255	return 0;
3256}
3257
3258static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3259	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3260		    false, false, false),
3261	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3262		    false, false, false),
3263	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3264		    true, false, false),
3265	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3266		    true, false, false),
3267	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3268		    true, false, false),
3269	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3270		    false, false, false),
3271	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3272		    false, false, false),
3273	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3274		    true, false, false),
3275	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3276		    true, false, false),
3277	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3278		    true, false, false),
3279	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3280		    &vmw_cmd_set_render_target_check, true, false, false),
3281	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3282		    true, false, false),
3283	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3284		    true, false, false),
3285	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3286		    true, false, false),
3287	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3288		    true, false, false),
3289	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3290		    true, false, false),
3291	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3292		    true, false, false),
3293	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3294		    true, false, false),
3295	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3296		    false, false, false),
3297	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3298		    true, false, false),
3299	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3300		    true, false, false),
3301	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3302		    true, false, false),
3303	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3304		    true, false, false),
3305	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3306		    true, false, false),
3307	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3308		    true, false, false),
3309	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3310		    true, false, false),
3311	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3312		    true, false, false),
3313	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3314		    true, false, false),
3315	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3316		    true, false, false),
3317	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3318		    &vmw_cmd_blt_surf_screen_check, false, false, false),
3319	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3320		    false, false, false),
3321	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3322		    false, false, false),
3323	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3324		    false, false, false),
3325	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3326		    false, false, false),
3327	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3328		    false, false, false),
3329	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
3330		    false, false, false),
3331	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
3332		    false, false, false),
3333	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
3334	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
3335	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
3336	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
3337	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
3338	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
3339	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3340		    false, false, true),
3341	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3342		    false, false, true),
3343	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3344		    false, false, true),
3345	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3346		    false, false, true),
3347	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3348		    false, false, true),
3349	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3350		    false, false, true),
3351	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3352		    false, false, true),
3353	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3354		    false, false, true),
3355	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3356		    true, false, true),
3357	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3358		    false, false, true),
3359	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3360		    true, false, true),
3361	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3362		    &vmw_cmd_update_gb_surface, true, false, true),
3363	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3364		    &vmw_cmd_readback_gb_image, true, false, true),
3365	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3366		    &vmw_cmd_readback_gb_surface, true, false, true),
3367	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3368		    &vmw_cmd_invalidate_gb_image, true, false, true),
3369	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3370		    &vmw_cmd_invalidate_gb_surface, true, false, true),
3371	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3372		    false, false, true),
3373	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3374		    false, false, true),
3375	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3376		    false, false, true),
3377	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3378		    false, false, true),
3379	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3380		    false, false, true),
3381	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3382		    false, false, true),
3383	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3384		    true, false, true),
3385	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3386		    false, false, true),
3387	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3388		    false, false, false),
3389	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3390		    true, false, true),
3391	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3392		    true, false, true),
3393	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3394		    true, false, true),
3395	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3396		    true, false, true),
3397	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3398		    true, false, true),
3399	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3400		    false, false, true),
3401	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3402		    false, false, true),
3403	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3404		    false, false, true),
3405	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3406		    false, false, true),
3407	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3408		    false, false, true),
3409	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3410		    false, false, true),
3411	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3412		    false, false, true),
3413	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3414		    false, false, true),
3415	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3416		    false, false, true),
3417	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3418		    false, false, true),
3419	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3420		    true, false, true),
3421	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3422		    false, false, true),
3423	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3424		    false, false, true),
3425	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3426		    false, false, true),
3427	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3428		    false, false, true),
3429
3430	/* SM commands */
3431	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3432		    false, false, true),
3433	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3434		    false, false, true),
3435	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3436		    false, false, true),
3437	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3438		    false, false, true),
3439	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3440		    false, false, true),
3441	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3442		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3443	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3444		    &vmw_cmd_dx_set_shader_res, true, false, true),
3445	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3446		    true, false, true),
3447	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3448		    true, false, true),
3449	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3450		    true, false, true),
3451	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3452		    true, false, true),
3453	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3454		    true, false, true),
3455	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3456		    &vmw_cmd_dx_cid_check, true, false, true),
3457	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3458		    true, false, true),
3459	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3460		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3461	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3462		    &vmw_cmd_dx_set_index_buffer, true, false, true),
3463	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3464		    &vmw_cmd_dx_set_rendertargets, true, false, true),
3465	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3466		    true, false, true),
3467	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3468		    &vmw_cmd_dx_cid_check, true, false, true),
3469	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3470		    &vmw_cmd_dx_cid_check, true, false, true),
3471	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3472		    true, false, true),
3473	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3474		    true, false, true),
3475	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3476		    true, false, true),
3477	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3478		    &vmw_cmd_dx_cid_check, true, false, true),
3479	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3480		    true, false, true),
3481	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3482		    true, false, true),
3483	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3484		    true, false, true),
3485	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3486		    true, false, true),
3487	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3488		    true, false, true),
3489	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3490		    true, false, true),
3491	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3492		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3493	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3494		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3495	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3496		    true, false, true),
3497	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3498		    true, false, true),
3499	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3500		    &vmw_cmd_dx_check_subresource, true, false, true),
3501	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3502		    &vmw_cmd_dx_check_subresource, true, false, true),
3503	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3504		    &vmw_cmd_dx_check_subresource, true, false, true),
3505	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3506		    &vmw_cmd_dx_view_define, true, false, true),
3507	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3508		    &vmw_cmd_dx_view_remove, true, false, true),
3509	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3510		    &vmw_cmd_dx_view_define, true, false, true),
3511	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3512		    &vmw_cmd_dx_view_remove, true, false, true),
3513	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3514		    &vmw_cmd_dx_view_define, true, false, true),
3515	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3516		    &vmw_cmd_dx_view_remove, true, false, true),
3517	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3518		    &vmw_cmd_dx_so_define, true, false, true),
3519	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3520		    &vmw_cmd_dx_cid_check, true, false, true),
3521	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3522		    &vmw_cmd_dx_so_define, true, false, true),
3523	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3524		    &vmw_cmd_dx_cid_check, true, false, true),
3525	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3526		    &vmw_cmd_dx_so_define, true, false, true),
3527	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3528		    &vmw_cmd_dx_cid_check, true, false, true),
3529	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3530		    &vmw_cmd_dx_so_define, true, false, true),
3531	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3532		    &vmw_cmd_dx_cid_check, true, false, true),
3533	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3534		    &vmw_cmd_dx_so_define, true, false, true),
3535	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3536		    &vmw_cmd_dx_cid_check, true, false, true),
3537	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3538		    &vmw_cmd_dx_define_shader, true, false, true),
3539	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3540		    &vmw_cmd_dx_destroy_shader, true, false, true),
3541	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3542		    &vmw_cmd_dx_bind_shader, true, false, true),
3543	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3544		    &vmw_cmd_dx_so_define, true, false, true),
3545	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3546		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
3547	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
3548		    &vmw_cmd_dx_set_streamoutput, true, false, true),
3549	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3550		    &vmw_cmd_dx_set_so_targets, true, false, true),
3551	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3552		    &vmw_cmd_dx_cid_check, true, false, true),
3553	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3554		    &vmw_cmd_dx_cid_check, true, false, true),
3555	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3556		    &vmw_cmd_buffer_copy_check, true, false, true),
3557	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3558		    &vmw_cmd_pred_copy_check, true, false, true),
3559	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3560		    &vmw_cmd_dx_transfer_from_buffer,
3561		    true, false, true),
3562	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
3563		    &vmw_cmd_dx_set_constant_buffer_offset,
3564		    true, false, true),
3565	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
3566		    &vmw_cmd_dx_set_constant_buffer_offset,
3567		    true, false, true),
3568	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
3569		    &vmw_cmd_dx_set_constant_buffer_offset,
3570		    true, false, true),
3571	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
3572		    &vmw_cmd_dx_set_constant_buffer_offset,
3573		    true, false, true),
3574	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
3575		    &vmw_cmd_dx_set_constant_buffer_offset,
3576		    true, false, true),
3577	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
3578		    &vmw_cmd_dx_set_constant_buffer_offset,
3579		    true, false, true),
3580	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3581		    true, false, true),
3582
3583	/*
3584	 * SM5 commands
3585	 */
3586	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
3587		    true, false, true),
3588	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
3589		    true, false, true),
3590	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
3591		    true, false, true),
3592	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
3593		    &vmw_cmd_clear_uav_float, true, false, true),
3594	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
3595		    false, true),
3596	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
3597		    true),
3598	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
3599		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
3600	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
3601		    &vmw_cmd_instanced_indirect, true, false, true),
3602	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
3603	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
3604		    &vmw_cmd_dispatch_indirect, true, false, true),
3605	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
3606		    false, true),
3607	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
3608		    &vmw_cmd_sm5_view_define, true, false, true),
3609	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
3610		    &vmw_cmd_dx_define_streamoutput, true, false, true),
3611	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
3612		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
3613	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
3614		    &vmw_cmd_dx_so_define, true, false, true),
3615	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V4,
3616		    &vmw_cmd_invalid, false, false, true),
3617};
3618
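/*
 * Illustrative sketch (not part of the driver): VMW_CMD_DEF indexes the
 * table above by (command id - SVGA_3D_CMD_BASE), so resolving a verifier
 * entry reduces to a range check plus an array access. The helper name
 * below is hypothetical.
 */
#if 0
static const struct vmw_cmd_entry *vmw_cmd_entry_lookup(u32 cmd_id)
{
	if (cmd_id < SVGA_3D_CMD_BASE || cmd_id >= SVGA_3D_CMD_MAX)
		return NULL;	/* Not a 3D command id. */

	return &vmw_cmd_entries[cmd_id - SVGA_3D_CMD_BASE];
}
#endif
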
3619bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3620{
3621	u32 cmd_id = ((u32 *) buf)[0];
3622
3623	if (cmd_id >= SVGA_CMD_MAX) {
3624		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3625		const struct vmw_cmd_entry *entry;
3626
3627		*size = header->size + sizeof(SVGA3dCmdHeader);
3628		cmd_id = header->id;
3629		if (cmd_id >= SVGA_3D_CMD_MAX)
3630			return false;
3631
3632		cmd_id -= SVGA_3D_CMD_BASE;
3633		entry = &vmw_cmd_entries[cmd_id];
3634		*cmd = entry->cmd_name;
3635		return true;
3636	}
3637
3638	switch (cmd_id) {
3639	case SVGA_CMD_UPDATE:
3640		*cmd = "SVGA_CMD_UPDATE";
3641		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3642		break;
3643	case SVGA_CMD_DEFINE_GMRFB:
3644		*cmd = "SVGA_CMD_DEFINE_GMRFB";
3645		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3646		break;
3647	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3648		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3649		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3650		break;
3651	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3652		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3654		break;
3655	default:
3656		*cmd = "UNKNOWN";
3657		*size = 0;
3658		return false;
3659	}
3660
3661	return true;
3662}
3663
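/*
 * Illustrative sketch (not part of the driver): walking a command stream
 * with vmw_cmd_describe() to log command names. The stream layout matches
 * what vmw_cmd_check() below parses; the pr_info() consumer and the helper
 * name are assumptions for illustration only.
 */
#if 0
static void vmw_cmd_dump(const void *buf, u32 bytes)
{
	while (bytes >= sizeof(u32)) {
		u32 size;
		const char *name;

		if (!vmw_cmd_describe(buf, &size, &name) || size == 0 ||
		    size > bytes)
			break;	/* Unknown or truncated command. */

		pr_info("vmwgfx: %s (%u bytes)\n", name, size);
		buf += size;
		bytes -= size;
	}
}
#endif
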
3664static int vmw_cmd_check(struct vmw_private *dev_priv,
3665			 struct vmw_sw_context *sw_context, void *buf,
3666			 uint32_t *size)
3667{
3668	uint32_t cmd_id;
3669	uint32_t size_remaining = *size;
3670	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3671	int ret;
3672	const struct vmw_cmd_entry *entry;
3673	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3674
3675	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
3677	if (unlikely(cmd_id < SVGA_CMD_MAX))
3678		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3679
3680
3681	cmd_id = header->id;
3682	*size = header->size + sizeof(SVGA3dCmdHeader);
3683
3684	cmd_id -= SVGA_3D_CMD_BASE;
3685	if (unlikely(*size > size_remaining))
3686		goto out_invalid;
3687
3688	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3689		goto out_invalid;
3690
3691	entry = &vmw_cmd_entries[cmd_id];
3692	if (unlikely(!entry->func))
3693		goto out_invalid;
3694
3695	if (unlikely(!entry->user_allow && !sw_context->kernel))
3696		goto out_privileged;
3697
3698	if (unlikely(entry->gb_disable && gb))
3699		goto out_old;
3700
3701	if (unlikely(entry->gb_enable && !gb))
3702		goto out_new;
3703
3704	ret = entry->func(dev_priv, sw_context, header);
3705	if (unlikely(ret != 0)) {
3706		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3707			       cmd_id + SVGA_3D_CMD_BASE, ret);
3708		return ret;
3709	}
3710
3711	return 0;
3712out_invalid:
3713	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3714		       cmd_id + SVGA_3D_CMD_BASE);
3715	return -EINVAL;
3716out_privileged:
3717	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3718		       cmd_id + SVGA_3D_CMD_BASE);
3719	return -EPERM;
3720out_old:
3721	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3722		       cmd_id + SVGA_3D_CMD_BASE);
3723	return -EINVAL;
3724out_new:
3725	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3726		       cmd_id + SVGA_3D_CMD_BASE);
3727	return -EINVAL;
3728}
3729
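/*
 * Summary of the per-entry gating in vmw_cmd_check() above (gb == device
 * has guest-backed objects): a missing ->func rejects the command outright,
 * !user_allow rejects user-space submissions with -EPERM, gb_disable
 * rejects deprecated commands on gb devices, and gb_enable rejects gb-only
 * commands on non-gb devices, both with -EINVAL.
 */
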
3730static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3731			     struct vmw_sw_context *sw_context, void *buf,
3732			     uint32_t size)
3733{
3734	int32_t cur_size = size;
3735	int ret;
3736
3737	sw_context->buf_start = buf;
3738
3739	while (cur_size > 0) {
3740		size = cur_size;
3741		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3742		if (unlikely(ret != 0))
3743			return ret;
3744		buf = (void *)((unsigned long) buf + size);
3745		cur_size -= size;
3746	}
3747
3748	if (unlikely(cur_size != 0)) {
3749		VMW_DEBUG_USER("Command verifier out of sync.\n");
3750		return -EINVAL;
3751	}
3752
3753	return 0;
3754}
3755
3756static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3757{
3758	/* Memory is validation context memory, so no need to free it */
3759	INIT_LIST_HEAD(&sw_context->bo_relocations);
3760}
3761
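/*
 * Note on the VRAM case below: bo->resource->start is expressed in pages,
 * so shifting it by PAGE_SHIFT yields the byte offset of the buffer in the
 * SVGA_GMR_FRAMEBUFFER aperture, which is added to the guest pointer
 * recorded when the relocation was queued.
 */
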
3762static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3763{
3764	struct vmw_relocation *reloc;
3765	struct ttm_buffer_object *bo;
3766
3767	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3768		bo = &reloc->vbo->tbo;
3769		switch (bo->resource->mem_type) {
3770		case TTM_PL_VRAM:
3771			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
3772			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3773			break;
3774		case VMW_PL_GMR:
3775			reloc->location->gmrId = bo->resource->start;
3776			break;
3777		case VMW_PL_MOB:
3778			*reloc->mob_loc = bo->resource->start;
3779			break;
3780		default:
3781			BUG();
3782		}
3783	}
3784	vmw_free_relocations(sw_context);
3785}
3786
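/*
 * Worked example for the growth loop below, assuming 4 KiB pages and a
 * hypothetical 32 KiB initial bounce size: a 100 KiB request grows the
 * buffer through 48 KiB and 72 KiB to 108 KiB, i.e. roughly 1.5x per
 * step, page aligned.
 */
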
3787static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3788				 uint32_t size)
3789{
3790	if (likely(sw_context->cmd_bounce_size >= size))
3791		return 0;
3792
3793	if (sw_context->cmd_bounce_size == 0)
3794		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3795
3796	while (sw_context->cmd_bounce_size < size) {
3797		sw_context->cmd_bounce_size =
3798			PAGE_ALIGN(sw_context->cmd_bounce_size +
3799				   (sw_context->cmd_bounce_size >> 1));
3800	}
3801
3802	vfree(sw_context->cmd_bounce);
3803	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3804
3805	if (sw_context->cmd_bounce == NULL) {
3806		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3807		sw_context->cmd_bounce_size = 0;
3808		return -ENOMEM;
3809	}
3810
3811	return 0;
3812}
3813
3814/*
3815 * vmw_execbuf_fence_commands - create and submit a command stream fence
3816 *
3817 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
3819 * It is then safe to fence buffers with a NULL pointer.
3820 *
 * If @p_handle is non-NULL, @file_priv must also be non-NULL, and a
 * user-space handle is created for the fence; otherwise no handle is created.
3823 */
3824
3825int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3826			       struct vmw_private *dev_priv,
3827			       struct vmw_fence_obj **p_fence,
3828			       uint32_t *p_handle)
3829{
3830	uint32_t sequence;
3831	int ret;
3832	bool synced = false;
3833
3834	/* p_handle implies file_priv. */
3835	BUG_ON(p_handle != NULL && file_priv == NULL);
3836
3837	ret = vmw_cmd_send_fence(dev_priv, &sequence);
3838	if (unlikely(ret != 0)) {
3839		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3840		synced = true;
3841	}
3842
3843	if (p_handle != NULL)
3844		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3845					    sequence, p_fence, p_handle);
3846	else
3847		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3848
3849	if (unlikely(ret != 0 && !synced)) {
3850		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
3851					 false, VMW_FENCE_WAIT_TIMEOUT);
3852		*p_fence = NULL;
3853	}
3854
3855	return ret;
3856}
3857
3858/**
3859 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3860 *
3861 * @dev_priv: Pointer to a vmw_private struct.
3862 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3863 * @ret: Return value from fence object creation.
3864 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3865 * the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: Exported file descriptor for the fence; -1 if not used.
3869 *
 * This function copies fence information to user-space. If copying fails, the
 * user-space struct drm_vmw_fence_rep::error member is left untouched; if
 * user-space has preloaded it with -EFAULT, the failed copy can then be
 * detected.
3874 *
 * Also if copying fails, user-space will be unable to signal the fence object,
 * so we wait for it immediately and then unreference the user-space reference.
3877 */
3878int
3879vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3880			    struct vmw_fpriv *vmw_fp, int ret,
3881			    struct drm_vmw_fence_rep __user *user_fence_rep,
3882			    struct vmw_fence_obj *fence, uint32_t fence_handle,
3883			    int32_t out_fence_fd)
3884{
3885	struct drm_vmw_fence_rep fence_rep;
3886
3887	if (user_fence_rep == NULL)
3888		return 0;
3889
3890	memset(&fence_rep, 0, sizeof(fence_rep));
3891
3892	fence_rep.error = ret;
3893	fence_rep.fd = out_fence_fd;
3894	if (ret == 0) {
3895		BUG_ON(fence == NULL);
3896
3897		fence_rep.handle = fence_handle;
3898		fence_rep.seqno = fence->base.seqno;
3899		vmw_update_seqno(dev_priv);
3900		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3901	}
3902
3903	/*
3904	 * copy_to_user errors will be detected by user space not seeing
3905	 * fence_rep::error filled in. Typically user-space would have pre-set
3906	 * that member to -EFAULT.
3907	 */
3908	ret = copy_to_user(user_fence_rep, &fence_rep,
3909			   sizeof(fence_rep));
3910
3911	/*
3912	 * User-space lost the fence object. We need to sync and unreference the
3913	 * handle.
3914	 */
3915	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3916		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
3917		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3918		(void) vmw_fence_obj_wait(fence, false, false,
3919					  VMW_FENCE_WAIT_TIMEOUT);
3920	}
3921
3922	return ret ? -EFAULT : 0;
3923}
3924
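/*
 * Illustrative user-space sketch (not part of the driver): pre-seeding
 * drm_vmw_fence_rep::error with -EFAULT before submitting, so that a failed
 * copy_to_user() in vmw_execbuf_copy_fence_user() above remains detectable.
 * The surrounding ioctl call is elided and the checking pattern is an
 * assumption for illustration only.
 */
#if 0
	struct drm_vmw_fence_rep fence_rep = {
		.error = -EFAULT,	/* Overwritten on a successful copy. */
	};

	/* ... submit, passing &fence_rep as the execbuf fence_rep pointer ... */

	if (fence_rep.error == -EFAULT) {
		/* Kernel could not copy back; the fence handle was dropped. */
	}
#endif
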
3925/**
3926 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3927 *
3928 * @dev_priv: Pointer to a device private structure.
3929 * @kernel_commands: Pointer to the unpatched command batch.
3930 * @command_size: Size of the unpatched command batch.
3931 * @sw_context: Structure holding the relocation lists.
3932 *
3933 * Side effects: If this function returns 0, then the command batch pointed to
3934 * by @kernel_commands will have been modified.
3935 */
3936static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3937				   void *kernel_commands, u32 command_size,
3938				   struct vmw_sw_context *sw_context)
3939{
3940	void *cmd;
3941
3942	if (sw_context->dx_ctx_node)
3943		cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
3944					  sw_context->dx_ctx_node->ctx->id);
3945	else
3946		cmd = VMW_CMD_RESERVE(dev_priv, command_size);
3947
3948	if (!cmd)
3949		return -ENOMEM;
3950
3951	vmw_apply_relocations(sw_context);
3952	memcpy(cmd, kernel_commands, command_size);
3953	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3954	vmw_resource_relocations_free(&sw_context->res_relocations);
3955	vmw_cmd_commit(dev_priv, command_size);
3956
3957	return 0;
3958}
3959
3960/**
3961 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3962 * command buffer manager.
3963 *
3964 * @dev_priv: Pointer to a device private structure.
3965 * @header: Opaque handle to the command buffer allocation.
3966 * @command_size: Size of the unpatched command batch.
3967 * @sw_context: Structure holding the relocation lists.
3968 *
3969 * Side effects: If this function returns 0, then the command buffer represented
3970 * by @header will have been modified.
3971 */
3972static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3973				     struct vmw_cmdbuf_header *header,
3974				     u32 command_size,
3975				     struct vmw_sw_context *sw_context)
3976{
3977	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3978		  SVGA3D_INVALID_ID);
3979	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3980				       header);
3981
3982	vmw_apply_relocations(sw_context);
3983	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3984	vmw_resource_relocations_free(&sw_context->res_relocations);
3985	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3986
3987	return 0;
3988}
3989
/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel-space copy of the commands, or NULL if none exists.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and copies
 * the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 *
 * If command buffers could not be used, the function will return the value of
 * @kernel_commands on function call. That value may be NULL. In that case, the
 * value of *@header will be set to NULL.
 *
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
4014static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
4015				void __user *user_commands,
4016				void *kernel_commands, u32 command_size,
4017				struct vmw_cmdbuf_header **header)
4018{
4019	size_t cmdbuf_size;
4020	int ret;
4021
4022	*header = NULL;
4023	if (command_size > SVGA_CB_MAX_SIZE) {
4024		VMW_DEBUG_USER("Command buffer is too large.\n");
4025		return ERR_PTR(-EINVAL);
4026	}
4027
4028	if (!dev_priv->cman || kernel_commands)
4029		return kernel_commands;
4030
4031	/* If possible, add a little space for fencing. */
4032	cmdbuf_size = command_size + 512;
4033	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
4034	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
4035					   header);
4036	if (IS_ERR(kernel_commands))
4037		return kernel_commands;
4038
4039	ret = copy_from_user(kernel_commands, user_commands, command_size);
4040	if (ret) {
4041		VMW_DEBUG_USER("Failed copying commands.\n");
4042		vmw_cmdbuf_header_free(*header);
4043		*header = NULL;
4044		return ERR_PTR(-EFAULT);
4045	}
4046
4047	return kernel_commands;
4048}
4049
4050static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4051				   struct vmw_sw_context *sw_context,
4052				   uint32_t handle)
4053{
4054	struct vmw_resource *res;
4055	int ret;
4056	unsigned int size;
4057
4058	if (handle == SVGA3D_INVALID_ID)
4059		return 0;
4060
4061	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
4062	ret = vmw_validation_preload_res(sw_context->ctx, size);
4063	if (ret)
4064		return ret;
4065
4066	ret = vmw_user_resource_lookup_handle
4067		(dev_priv, sw_context->fp->tfile, handle,
4068		 user_context_converter, &res);
4069	if (ret != 0) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
4071			       (unsigned int) handle);
4072		return ret;
4073	}
4074
4075	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
4076				      vmw_val_add_flag_none);
4077	if (unlikely(ret != 0)) {
4078		vmw_resource_unreference(&res);
4079		return ret;
4080	}
4081
4082	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
4083	sw_context->man = vmw_context_res_man(res);
4084
4085	vmw_resource_unreference(&res);
4086	return 0;
4087}
4088
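/*
 * Overview of vmw_execbuf_process() below: obtain a kernel copy of the
 * command batch (command buffer manager or bounce buffer), verify it with
 * vmw_cmd_check_all(), reserve and validate buffers and resources, patch
 * relocations, submit via the FIFO or the command buffer manager, fence the
 * submission, commit binding state, and copy fence information back to
 * user-space.
 */
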
4089int vmw_execbuf_process(struct drm_file *file_priv,
4090			struct vmw_private *dev_priv,
4091			void __user *user_commands, void *kernel_commands,
4092			uint32_t command_size, uint64_t throttle_us,
4093			uint32_t dx_context_handle,
4094			struct drm_vmw_fence_rep __user *user_fence_rep,
4095			struct vmw_fence_obj **out_fence, uint32_t flags)
4096{
4097	struct vmw_sw_context *sw_context = &dev_priv->ctx;
4098	struct vmw_fence_obj *fence = NULL;
4099	struct vmw_cmdbuf_header *header;
4100	uint32_t handle = 0;
4101	int ret;
4102	int32_t out_fence_fd = -1;
4103	struct sync_file *sync_file = NULL;
4104	DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);
4105
4106	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4107		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
4108		if (out_fence_fd < 0) {
4109			VMW_DEBUG_USER("Failed to get a fence fd.\n");
4110			return out_fence_fd;
4111		}
4112	}
4113
	if (throttle_us)
		VMW_DEBUG_USER("Throttling is no longer supported.\n");
4117
4118	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4119					     kernel_commands, command_size,
4120					     &header);
4121	if (IS_ERR(kernel_commands)) {
4122		ret = PTR_ERR(kernel_commands);
4123		goto out_free_fence_fd;
4124	}
4125
4126	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4127	if (ret) {
4128		ret = -ERESTARTSYS;
4129		goto out_free_header;
4130	}
4131
4132	sw_context->kernel = false;
4133	if (kernel_commands == NULL) {
4134		ret = vmw_resize_cmd_bounce(sw_context, command_size);
4135		if (unlikely(ret != 0))
4136			goto out_unlock;
4137
4138		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
4139				     command_size);
4140		if (unlikely(ret != 0)) {
4141			ret = -EFAULT;
4142			VMW_DEBUG_USER("Failed copying commands.\n");
4143			goto out_unlock;
4144		}
4145
4146		kernel_commands = sw_context->cmd_bounce;
4147	} else if (!header) {
4148		sw_context->kernel = true;
4149	}
4150
4151	sw_context->filp = file_priv;
4152	sw_context->fp = vmw_fpriv(file_priv);
4153	INIT_LIST_HEAD(&sw_context->ctx_list);
4154	sw_context->cur_query_bo = dev_priv->pinned_bo;
4155	sw_context->last_query_ctx = NULL;
4156	sw_context->needs_post_query_barrier = false;
4157	sw_context->dx_ctx_node = NULL;
4158	sw_context->dx_query_mob = NULL;
4159	sw_context->dx_query_ctx = NULL;
4160	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4161	INIT_LIST_HEAD(&sw_context->res_relocations);
4162	INIT_LIST_HEAD(&sw_context->bo_relocations);
4163
4164	if (sw_context->staged_bindings)
4165		vmw_binding_state_reset(sw_context->staged_bindings);
4166
4167	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4168	sw_context->ctx = &val_ctx;
4169	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4170	if (unlikely(ret != 0))
4171		goto out_err_nores;
4172
4173	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4174				command_size);
4175	if (unlikely(ret != 0))
4176		goto out_err_nores;
4177
4178	ret = vmw_resources_reserve(sw_context);
4179	if (unlikely(ret != 0))
4180		goto out_err_nores;
4181
4182	ret = vmw_validation_bo_reserve(&val_ctx, true);
4183	if (unlikely(ret != 0))
4184		goto out_err_nores;
4185
4186	ret = vmw_validation_bo_validate(&val_ctx, true);
4187	if (unlikely(ret != 0))
4188		goto out_err;
4189
4190	ret = vmw_validation_res_validate(&val_ctx, true);
4191	if (unlikely(ret != 0))
4192		goto out_err;
4193
4194	vmw_validation_drop_ht(&val_ctx);
4195
4196	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4197	if (unlikely(ret != 0)) {
4198		ret = -ERESTARTSYS;
4199		goto out_err;
4200	}
4201
4202	if (dev_priv->has_mob) {
4203		ret = vmw_rebind_contexts(sw_context);
4204		if (unlikely(ret != 0))
4205			goto out_unlock_binding;
4206	}
4207
4208	if (!header) {
4209		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4210					      command_size, sw_context);
4211	} else {
4212		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4213						sw_context);
4214		header = NULL;
4215	}
4216	mutex_unlock(&dev_priv->binding_mutex);
4217	if (ret)
4218		goto out_err;
4219
4220	vmw_query_bo_switch_commit(dev_priv, sw_context);
4221	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
4222					 (user_fence_rep) ? &handle : NULL);
4223	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_cmd_send_fence() will sync. The error will be propagated to
	 * user-space in @user_fence_rep.
4227	 */
4228	if (ret != 0)
4229		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
4230
4231	vmw_execbuf_bindings_commit(sw_context, false);
4232	vmw_bind_dx_query_mob(sw_context);
4233	vmw_validation_res_unreserve(&val_ctx, false);
4234
4235	vmw_validation_bo_fence(sw_context->ctx, fence);
4236
4237	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4238		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
4239
4240	/*
4241	 * If anything fails here, give up trying to export the fence and do a
4242	 * sync since the user mode will not be able to sync the fence itself.
4243	 * This ensures we are still functionally correct.
4244	 */
4245	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4246
4247		sync_file = sync_file_create(&fence->base);
4248		if (!sync_file) {
4249			VMW_DEBUG_USER("Sync file create failed for fence\n");
4250			put_unused_fd(out_fence_fd);
4251			out_fence_fd = -1;
4252
4253			(void) vmw_fence_obj_wait(fence, false, false,
4254						  VMW_FENCE_WAIT_TIMEOUT);
4255		}
4256	}
4257
4258	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4259				    user_fence_rep, fence, handle, out_fence_fd);
4260
4261	if (sync_file) {
4262		if (ret) {
4263			/* usercopy of fence failed, put the file object */
4264			fput(sync_file->file);
4265			put_unused_fd(out_fence_fd);
4266		} else {
4267			/* Link the fence with the FD created earlier */
4268			fd_install(out_fence_fd, sync_file->file);
4269		}
4270	}
4271
4272	/* Don't unreference when handing fence out */
4273	if (unlikely(out_fence != NULL)) {
4274		*out_fence = fence;
4275		fence = NULL;
4276	} else if (likely(fence != NULL)) {
4277		vmw_fence_obj_unreference(&fence);
4278	}
4279
4280	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4281	mutex_unlock(&dev_priv->cmdbuf_mutex);
4282
4283	/*
4284	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4285	 * in resource destruction paths.
4286	 */
4287	vmw_validation_unref_lists(&val_ctx);
4288
4289	return ret;
4290
4291out_unlock_binding:
4292	mutex_unlock(&dev_priv->binding_mutex);
4293out_err:
4294	vmw_validation_bo_backoff(&val_ctx);
4295out_err_nores:
4296	vmw_execbuf_bindings_commit(sw_context, true);
4297	vmw_validation_res_unreserve(&val_ctx, true);
4298	vmw_resource_relocations_free(&sw_context->res_relocations);
4299	vmw_free_relocations(sw_context);
4300	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4301		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4302out_unlock:
4303	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4304	vmw_validation_drop_ht(&val_ctx);
4305	WARN_ON(!list_empty(&sw_context->ctx_list));
4306	mutex_unlock(&dev_priv->cmdbuf_mutex);
4307
4308	/*
4309	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4310	 * in resource destruction paths.
4311	 */
4312	vmw_validation_unref_lists(&val_ctx);
4313out_free_header:
4314	if (header)
4315		vmw_cmdbuf_header_free(header);
4316out_free_fence_fd:
4317	if (out_fence_fd >= 0)
4318		put_unused_fd(out_fence_fd);
4319
4320	return ret;
4321}
4322
4323/**
4324 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4325 *
4326 * @dev_priv: The device private structure.
4327 *
4328 * This function is called to idle the fifo and unpin the query buffer if the
4329 * normal way to do this hits an error, which should typically be extremely
4330 * rare.
4331 */
4332static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4333{
4334	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
4335
4336	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4337	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4338	if (dev_priv->dummy_query_bo_pinned) {
4339		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4340		dev_priv->dummy_query_bo_pinned = false;
4341	}
4342}
4343
4344
4345/**
4346 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
4347 * bo.
4348 *
4349 * @dev_priv: The device private structure.
4350 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
4351 * query barrier that flushes all queries touching the current buffer pointed to
4352 * by @dev_priv->pinned_bo
4353 *
4354 * This function should be used to unpin the pinned query bo, or as a query
4355 * barrier when we need to make sure that all queries have finished before the
4356 * next fifo command. (For example on hardware context destructions where the
4357 * hardware may otherwise leak unfinished queries).
4358 *
 * This function does not return any failure codes, but makes an attempt to
 * unpin safely in case of errors.
4361 *
4362 * The function will synchronize on the previous query barrier, and will thus
4363 * not finish until that barrier has executed.
4364 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
 * calling this function.
4367 */
4368void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4369				     struct vmw_fence_obj *fence)
4370{
4371	int ret = 0;
4372	struct vmw_fence_obj *lfence = NULL;
4373	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
4374
4375	if (dev_priv->pinned_bo == NULL)
4376		goto out_unlock;
4377
4378	vmw_bo_placement_set(dev_priv->pinned_bo,
4379			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
4380			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
4381	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo);
4382	if (ret)
4383		goto out_no_reserve;
4384
4385	vmw_bo_placement_set(dev_priv->dummy_query_bo,
4386			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
4387			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
4388	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo);
4389	if (ret)
4390		goto out_no_reserve;
4391
4392	ret = vmw_validation_bo_reserve(&val_ctx, false);
4393	if (ret)
4394		goto out_no_reserve;
4395
4396	if (dev_priv->query_cid_valid) {
4397		BUG_ON(fence != NULL);
4398		ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
4399		if (ret)
4400			goto out_no_emit;
4401		dev_priv->query_cid_valid = false;
4402	}
4403
4404	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4405	if (dev_priv->dummy_query_bo_pinned) {
4406		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4407		dev_priv->dummy_query_bo_pinned = false;
4408	}
4409	if (fence == NULL) {
4410		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4411						  NULL);
4412		fence = lfence;
4413	}
4414	vmw_validation_bo_fence(&val_ctx, fence);
4415	if (lfence != NULL)
4416		vmw_fence_obj_unreference(&lfence);
4417
4418	vmw_validation_unref_lists(&val_ctx);
4419	vmw_bo_unreference(&dev_priv->pinned_bo);
4420
4421out_unlock:
4422	return;
4423out_no_emit:
4424	vmw_validation_bo_backoff(&val_ctx);
4425out_no_reserve:
4426	vmw_validation_unref_lists(&val_ctx);
4427	vmw_execbuf_unpin_panic(dev_priv);
4428	vmw_bo_unreference(&dev_priv->pinned_bo);
4429}
4430
4431/**
4432 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
4433 *
4434 * @dev_priv: The device private structure.
4435 *
4436 * This function should be used to unpin the pinned query bo, or as a query
4437 * barrier when we need to make sure that all queries have finished before the
4438 * next fifo command. (For example on hardware context destructions where the
4439 * hardware may otherwise leak unfinished queries).
4440 *
 * This function does not return any failure codes, but makes an attempt to
 * unpin safely in case of errors.
4443 *
4444 * The function will synchronize on the previous query barrier, and will thus
4445 * not finish until that barrier has executed.
4446 */
4447void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4448{
4449	mutex_lock(&dev_priv->cmdbuf_mutex);
4450	if (dev_priv->query_cid_valid)
4451		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4452	mutex_unlock(&dev_priv->cmdbuf_mutex);
4453}
4454
4455int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
4456		      struct drm_file *file_priv)
4457{
4458	struct vmw_private *dev_priv = vmw_priv(dev);
4459	struct drm_vmw_execbuf_arg *arg = data;
4460	int ret;
4461	struct dma_fence *in_fence = NULL;
4462
4463	MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
4464	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);
4465
4466	/*
4467	 * Extend the ioctl argument while maintaining backwards compatibility:
4468	 * We take different code paths depending on the value of arg->version.
4469	 *
4470	 * Note: The ioctl argument is extended and zeropadded by core DRM.
4471	 */
4472	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
4473		     arg->version == 0)) {
4474		VMW_DEBUG_USER("Incorrect execbuf version.\n");
4475		ret = -EINVAL;
4476		goto mksstats_out;
4477	}
4478
4479	switch (arg->version) {
4480	case 1:
		/* For v1 core DRM has extended + zeropadded the data */
4482		arg->context_handle = (uint32_t) -1;
4483		break;
4484	case 2:
4485	default:
		/* For v2 and later, core DRM will have copied it correctly */
4487		break;
4488	}
4489
	/* If a fence FD was imported from elsewhere, wait on it */
4491	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4492		in_fence = sync_file_get_fence(arg->imported_fence_fd);
4493
4494		if (!in_fence) {
4495			VMW_DEBUG_USER("Cannot get imported fence\n");
4496			ret = -EINVAL;
4497			goto mksstats_out;
4498		}
4499
4500		ret = dma_fence_wait(in_fence, true);
4501		if (ret)
4502			goto out;
4503	}
4504
4505	ret = vmw_execbuf_process(file_priv, dev_priv,
4506				  (void __user *)(unsigned long)arg->commands,
4507				  NULL, arg->command_size, arg->throttle_us,
4508				  arg->context_handle,
4509				  (void __user *)(unsigned long)arg->fence_rep,
4510				  NULL, arg->flags);
4511
4512	if (unlikely(ret != 0))
4513		goto out;
4514
4515	vmw_kms_cursor_post_execbuf(dev_priv);
4516
4517out:
4518	if (in_fence)
4519		dma_fence_put(in_fence);
4520
4521mksstats_out:
4522	MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
4523	return ret;
4524}
v6.8
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27#include "vmwgfx_binding.h"
  28#include "vmwgfx_bo.h"
  29#include "vmwgfx_drv.h"
  30#include "vmwgfx_mksstat.h"
  31#include "vmwgfx_so.h"
  32
  33#include <drm/ttm/ttm_bo.h>
  34#include <drm/ttm/ttm_placement.h>
  35
  36#include <linux/sync_file.h>
  37#include <linux/hashtable.h>
 
  38
  39/*
  40 * Helper macro to get dx_ctx_node if available otherwise print an error
  41 * message. This is for use in command verifier function where if dx_ctx_node
  42 * is not set then command is invalid.
  43 */
  44#define VMW_GET_CTX_NODE(__sw_context)                                        \
  45({                                                                            \
  46	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({            \
  47		VMW_DEBUG_USER("SM context is not set at %s\n", __func__);    \
  48		__sw_context->dx_ctx_node;                                    \
  49	});                                                                   \
  50})
  51
  52#define VMW_DECLARE_CMD_VAR(__var, __type)                                    \
  53	struct {                                                              \
  54		SVGA3dCmdHeader header;                                       \
  55		__type body;                                                  \
  56	} __var
  57
  58/**
  59 * struct vmw_relocation - Buffer object relocation
  60 *
  61 * @head: List head for the command submission context's relocation list
  62 * @vbo: Non ref-counted pointer to buffer object
  63 * @mob_loc: Pointer to location for mob id to be modified
  64 * @location: Pointer to location for guest pointer to be modified
  65 */
  66struct vmw_relocation {
  67	struct list_head head;
  68	struct vmw_bo *vbo;
  69	union {
  70		SVGAMobId *mob_loc;
  71		SVGAGuestPtr *location;
  72	};
  73};
  74
  75/**
  76 * enum vmw_resource_relocation_type - Relocation type for resources
  77 *
  78 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
  79 * command stream is replaced with the actual id after validation.
  80 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
  81 * with a NOP.
  82 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
  83 * validation is -1, the command is replaced with a NOP. Otherwise no action.
  84 * @vmw_res_rel_max: Last value in the enum - used for error checking
  85*/
  86enum vmw_resource_relocation_type {
  87	vmw_res_rel_normal,
  88	vmw_res_rel_nop,
  89	vmw_res_rel_cond_nop,
  90	vmw_res_rel_max
  91};
  92
  93/**
  94 * struct vmw_resource_relocation - Relocation info for resources
  95 *
  96 * @head: List head for the software context's relocation list.
  97 * @res: Non-ref-counted pointer to the resource.
  98 * @offset: Offset of single byte entries into the command buffer where the id
  99 * that needs fixup is located.
 100 * @rel_type: Type of relocation.
 101 */
 102struct vmw_resource_relocation {
 103	struct list_head head;
 104	const struct vmw_resource *res;
 105	u32 offset:29;
 106	enum vmw_resource_relocation_type rel_type:3;
 107};
 108
 109/**
 110 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 111 *
 112 * @head: List head of context list
 113 * @ctx: The context resource
 114 * @cur: The context's persistent binding state
 115 * @staged: The binding state changes of this command buffer
 116 */
 117struct vmw_ctx_validation_info {
 118	struct list_head head;
 119	struct vmw_resource *ctx;
 120	struct vmw_ctx_binding_state *cur;
 121	struct vmw_ctx_binding_state *staged;
 122};
 123
 124/**
 125 * struct vmw_cmd_entry - Describe a command for the verifier
 126 *
 127 * @func: Call-back to handle the command.
 128 * @user_allow: Whether allowed from the execbuf ioctl.
 129 * @gb_disable: Whether disabled if guest-backed objects are available.
 130 * @gb_enable: Whether enabled iff guest-backed objects are available.
 131 * @cmd_name: Name of the command.
 132 */
 133struct vmw_cmd_entry {
 134	int (*func) (struct vmw_private *, struct vmw_sw_context *,
 135		     SVGA3dCmdHeader *);
 136	bool user_allow;
 137	bool gb_disable;
 138	bool gb_enable;
 139	const char *cmd_name;
 140};
 141
 142#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
 143	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
 144				       (_gb_disable), (_gb_enable), #_cmd}
 145
 146static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 147					struct vmw_sw_context *sw_context,
 148					struct vmw_resource *ctx);
 149static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 150				 struct vmw_sw_context *sw_context,
 151				 SVGAMobId *id,
 152				 struct vmw_bo **vmw_bo_p);
 153/**
 154 * vmw_ptr_diff - Compute the offset from a to b in bytes
 155 *
 156 * @a: A starting pointer.
 157 * @b: A pointer offset in the same address space.
 158 *
 159 * Returns: The offset in bytes between the two pointers.
 160 */
 161static size_t vmw_ptr_diff(void *a, void *b)
 162{
 163	return (unsigned long) b - (unsigned long) a;
 164}
 165
 166/**
 167 * vmw_execbuf_bindings_commit - Commit modified binding state
 168 *
 169 * @sw_context: The command submission context
 170 * @backoff: Whether this is part of the error path and binding state changes
 171 * should be ignored
 172 */
 173static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
 174					bool backoff)
 175{
 176	struct vmw_ctx_validation_info *entry;
 177
 178	list_for_each_entry(entry, &sw_context->ctx_list, head) {
 179		if (!backoff)
 180			vmw_binding_state_commit(entry->cur, entry->staged);
 181
 182		if (entry->staged != sw_context->staged_bindings)
 183			vmw_binding_state_free(entry->staged);
 184		else
 185			sw_context->staged_bindings_inuse = false;
 186	}
 187
 188	/* List entries are freed with the validation context */
 189	INIT_LIST_HEAD(&sw_context->ctx_list);
 190}
 191
 192/**
 193 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 194 *
 195 * @sw_context: The command submission context
 196 */
 197static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
 198{
 199	if (sw_context->dx_query_mob)
 200		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
 201					  sw_context->dx_query_mob);
 202}
 203
 204/**
 205 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 206 * the validate list.
 207 *
 208 * @dev_priv: Pointer to the device private:
 209 * @sw_context: The command submission context
 210 * @res: Pointer to the resource
 211 * @node: The validation node holding the context resource metadata
 212 */
 213static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
 214				   struct vmw_sw_context *sw_context,
 215				   struct vmw_resource *res,
 216				   struct vmw_ctx_validation_info *node)
 217{
 218	int ret;
 219
 220	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
 221	if (unlikely(ret != 0))
 222		goto out_err;
 223
 224	if (!sw_context->staged_bindings) {
 225		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
 226		if (IS_ERR(sw_context->staged_bindings)) {
 227			ret = PTR_ERR(sw_context->staged_bindings);
 228			sw_context->staged_bindings = NULL;
 229			goto out_err;
 230		}
 231	}
 232
 233	if (sw_context->staged_bindings_inuse) {
 234		node->staged = vmw_binding_state_alloc(dev_priv);
 235		if (IS_ERR(node->staged)) {
 236			ret = PTR_ERR(node->staged);
 237			node->staged = NULL;
 238			goto out_err;
 239		}
 240	} else {
 241		node->staged = sw_context->staged_bindings;
 242		sw_context->staged_bindings_inuse = true;
 243	}
 244
 245	node->ctx = res;
 246	node->cur = vmw_context_binding_state(res);
 247	list_add_tail(&node->head, &sw_context->ctx_list);
 248
 249	return 0;
 250
 251out_err:
 252	return ret;
 253}
 254
 255/**
 256 * vmw_execbuf_res_size - calculate extra size fore the resource validation node
 257 *
 258 * @dev_priv: Pointer to the device private struct.
 259 * @res_type: The resource type.
 260 *
 261 * Guest-backed contexts and DX contexts require extra size to store execbuf
 262 * private information in the validation node. Typically the binding manager
 263 * associated data structures.
 264 *
 265 * Returns: The extra size requirement based on resource type.
 266 */
 267static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
 268					 enum vmw_res_type res_type)
 269{
 270	return (res_type == vmw_res_dx_context ||
 271		(res_type == vmw_res_context && dev_priv->has_mob)) ?
 272		sizeof(struct vmw_ctx_validation_info) : 0;
 273}
 274
 275/**
 276 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 277 *
 278 * @rcache: Pointer to the entry to update.
 279 * @res: Pointer to the resource.
 280 * @private: Pointer to the execbuf-private space in the resource validation
 281 * node.
 282 */
 283static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
 284				      struct vmw_resource *res,
 285				      void *private)
 286{
 287	rcache->res = res;
 288	rcache->private = private;
 289	rcache->valid = 1;
 290	rcache->valid_handle = 0;
 291}
 292
 293enum vmw_val_add_flags {
 294	vmw_val_add_flag_none  =      0,
 295	vmw_val_add_flag_noctx = 1 << 0,
 296};
 297
 298/**
 299 * vmw_execbuf_res_val_add - Add a resource to the validation list.
 300 *
 301 * @sw_context: Pointer to the software context.
 302 * @res: Unreferenced rcu-protected pointer to the resource.
 303 * @dirty: Whether to change dirty status.
 304 * @flags: specifies whether to use the context or not
 305 *
 306 * Returns: 0 on success. Negative error code on failure. Typical error codes
 307 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 308 */
 309static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
 310				   struct vmw_resource *res,
 311				   u32 dirty,
 312				   u32 flags)
 313{
 314	struct vmw_private *dev_priv = res->dev_priv;
 315	int ret;
 316	enum vmw_res_type res_type = vmw_res_type(res);
 317	struct vmw_res_cache_entry *rcache;
 318	struct vmw_ctx_validation_info *ctx_info;
 319	bool first_usage;
 320	unsigned int priv_size;
 321
 322	rcache = &sw_context->res_cache[res_type];
 323	if (likely(rcache->valid && rcache->res == res)) {
 324		if (dirty)
 325			vmw_validation_res_set_dirty(sw_context->ctx,
 326						     rcache->private, dirty);
 327		return 0;
 328	}
 329
 330	if ((flags & vmw_val_add_flag_noctx) != 0) {
 331		ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
 332						  (void **)&ctx_info, NULL);
 333		if (ret)
 334			return ret;
 335
 336	} else {
 337		priv_size = vmw_execbuf_res_size(dev_priv, res_type);
 338		ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
 339						  dirty, (void **)&ctx_info,
 340						  &first_usage);
 341		if (ret)
 342			return ret;
 343
 344		if (priv_size && first_usage) {
 345			ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
 346						      ctx_info);
 347			if (ret) {
 348				VMW_DEBUG_USER("Failed first usage context setup.\n");
 349				return ret;
 350			}
 351		}
 352	}
 353
 354	vmw_execbuf_rcache_update(rcache, res, ctx_info);
 355	return 0;
 356}
 357
 358/**
 359 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
 360 * validation list
 361 *
 362 * @sw_context: The software context holding the validation list.
 363 * @view: Pointer to the view resource.
 364 *
  365 * Returns 0 on success, negative error code otherwise.
 366 */
 367static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
 368				struct vmw_resource *view)
 369{
 370	int ret;
 371
 372	/*
 373	 * First add the resource the view is pointing to, otherwise it may be
 374	 * swapped out when the view is validated.
 375	 */
 376	ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
 377				      vmw_view_dirtying(view), vmw_val_add_flag_noctx);
 378	if (ret)
 379		return ret;
 380
 381	return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
 382				       vmw_val_add_flag_noctx);
 383}
 384
 385/**
  386 * vmw_view_id_val_add - Look up a view and add it, together with the surface
  387 * it points to, to the validation list.
 388 *
 389 * @sw_context: The software context holding the validation list.
 390 * @view_type: The view type to look up.
 391 * @id: view id of the view.
 392 *
 393 * The view is represented by a view id and the DX context it's created on, or
 394 * scheduled for creation on. If there is no DX context set, the function will
 395 * return an -EINVAL error pointer.
 396 *
 397 * Returns: Unreferenced pointer to the resource on success, negative error
 398 * pointer on failure.
 399 */
 400static struct vmw_resource *
 401vmw_view_id_val_add(struct vmw_sw_context *sw_context,
 402		    enum vmw_view_type view_type, u32 id)
 403{
 404	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
 405	struct vmw_resource *view;
 406	int ret;
 407
 408	if (!ctx_node)
 409		return ERR_PTR(-EINVAL);
 410
 411	view = vmw_view_lookup(sw_context->man, view_type, id);
 412	if (IS_ERR(view))
 413		return view;
 414
 415	ret = vmw_view_res_val_add(sw_context, view);
 416	if (ret)
 417		return ERR_PTR(ret);
 418
 419	return view;
 420}
 421
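/*
 * Example (illustrative; vmw_view_sr is assumed to be one of the
 * &enum vmw_view_type values): the return value is an error pointer and must
 * be checked with IS_ERR(), as vmw_view_bindings_add() below does:
 *
 *	view = vmw_view_id_val_add(sw_context, vmw_view_sr, view_id);
 *	if (IS_ERR(view))
 *		return PTR_ERR(view);
 */
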
 422/**
 423 * vmw_resource_context_res_add - Put resources previously bound to a context on
 424 * the validation list
 425 *
 426 * @dev_priv: Pointer to a device private structure
 427 * @sw_context: Pointer to a software context used for this command submission
 428 * @ctx: Pointer to the context resource
 429 *
 430 * This function puts all resources that were previously bound to @ctx on the
  431 * resource validation list. This is part of the context state re-emission.
 432 */
 433static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 434					struct vmw_sw_context *sw_context,
 435					struct vmw_resource *ctx)
 436{
 437	struct list_head *binding_list;
 438	struct vmw_ctx_bindinfo *entry;
 439	int ret = 0;
 440	struct vmw_resource *res;
 441	u32 i;
 442	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
 443		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
 444
 445	/* Add all cotables to the validation list. */
 446	if (has_sm4_context(dev_priv) &&
 447	    vmw_res_type(ctx) == vmw_res_dx_context) {
 448		for (i = 0; i < cotable_max; ++i) {
 449			res = vmw_context_cotable(ctx, i);
 450			if (IS_ERR(res))
 451				continue;
 452
 453			ret = vmw_execbuf_res_val_add(sw_context, res,
 454						      VMW_RES_DIRTY_SET,
 455						      vmw_val_add_flag_noctx);
 456			if (unlikely(ret != 0))
 457				return ret;
 458		}
 459	}
 460
 461	/* Add all resources bound to the context to the validation list */
 462	mutex_lock(&dev_priv->binding_mutex);
 463	binding_list = vmw_context_binding_list(ctx);
 464
 465	list_for_each_entry(entry, binding_list, ctx_list) {
 466		if (vmw_res_type(entry->res) == vmw_res_view)
 467			ret = vmw_view_res_val_add(sw_context, entry->res);
 468		else
 469			ret = vmw_execbuf_res_val_add(sw_context, entry->res,
 470						      vmw_binding_dirtying(entry->bt),
 471						      vmw_val_add_flag_noctx);
 472		if (unlikely(ret != 0))
 473			break;
 474	}
 475
 476	if (has_sm4_context(dev_priv) &&
 477	    vmw_res_type(ctx) == vmw_res_dx_context) {
 478		struct vmw_bo *dx_query_mob;
 479
 480		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
 481		if (dx_query_mob) {
 482			vmw_bo_placement_set(dx_query_mob,
 483					     VMW_BO_DOMAIN_MOB,
 484					     VMW_BO_DOMAIN_MOB);
 485			ret = vmw_validation_add_bo(sw_context->ctx,
 486						    dx_query_mob);
 487		}
 488	}
 489
 490	mutex_unlock(&dev_priv->binding_mutex);
 491	return ret;
 492}
 493
 494/**
 495 * vmw_resource_relocation_add - Add a relocation to the relocation list
 496 *
 497 * @sw_context: Pointer to the software context.
 498 * @res: The resource.
 499 * @offset: Offset into the command buffer currently being parsed where the id
 500 * that needs fixup is located. Granularity is one byte.
 501 * @rel_type: Relocation type.
 502 */
 503static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
 504				       const struct vmw_resource *res,
 505				       unsigned long offset,
 506				       enum vmw_resource_relocation_type
 507				       rel_type)
 508{
 509	struct vmw_resource_relocation *rel;
 510
 511	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
 512	if (unlikely(!rel)) {
 513		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
 514		return -ENOMEM;
 515	}
 516
 517	rel->res = res;
 518	rel->offset = offset;
 519	rel->rel_type = rel_type;
 520	list_add_tail(&rel->head, &sw_context->res_relocations);
 521
 522	return 0;
 523}
 524
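/*
 * Example (illustrative, following the vmw_cmd_res_check() pattern): @offset
 * is the byte distance from the start of the command stream to the id that
 * needs patching:
 *
 *	ret = vmw_resource_relocation_add(sw_context, res,
 *					  vmw_ptr_diff(sw_context->buf_start,
 *						       id_loc),
 *					  vmw_res_rel_normal);
 */
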
 525/**
 526 * vmw_resource_relocations_free - Free all relocations on a list
 527 *
 528 * @list: Pointer to the head of the relocation list
 529 */
 530static void vmw_resource_relocations_free(struct list_head *list)
 531{
 532	/* Memory is validation context memory, so no need to free it */
 533	INIT_LIST_HEAD(list);
 534}
 535
 536/**
 537 * vmw_resource_relocations_apply - Apply all relocations on a list
 538 *
  539 * @cb: Pointer to the start of the command buffer being patched. This need not be
 540 * the same buffer as the one being parsed when the relocation list was built,
 541 * but the contents must be the same modulo the resource ids.
 542 * @list: Pointer to the head of the relocation list.
 543 */
 544static void vmw_resource_relocations_apply(uint32_t *cb,
 545					   struct list_head *list)
 546{
 547	struct vmw_resource_relocation *rel;
 548
 549	/* Validate the struct vmw_resource_relocation member size */
 550	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
 551	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
 552
 553	list_for_each_entry(rel, list, head) {
 554		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
 555		switch (rel->rel_type) {
 556		case vmw_res_rel_normal:
 557			*addr = rel->res->id;
 558			break;
 559		case vmw_res_rel_nop:
 560			*addr = SVGA_3D_CMD_NOP;
 561			break;
 562		default:
 563			if (rel->res->id == -1)
 564				*addr = SVGA_3D_CMD_NOP;
 565			break;
 566		}
 567	}
 568}
 569
 570static int vmw_cmd_invalid(struct vmw_private *dev_priv,
 571			   struct vmw_sw_context *sw_context,
 572			   SVGA3dCmdHeader *header)
 573{
 574	return -EINVAL;
 575}
 576
 577static int vmw_cmd_ok(struct vmw_private *dev_priv,
 578		      struct vmw_sw_context *sw_context,
 579		      SVGA3dCmdHeader *header)
 580{
 581	return 0;
 582}
 583
 584/**
 585 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 586 * list.
 587 *
 588 * @sw_context: Pointer to the software context.
 589 *
  590 * Note that since VMware's command submission is currently protected by the
  591 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
  592 * only a single thread at a time will attempt this.
 593 */
 594static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 595{
 596	int ret;
 597
 598	ret = vmw_validation_res_reserve(sw_context->ctx, true);
 599	if (ret)
 600		return ret;
 601
 602	if (sw_context->dx_query_mob) {
 603		struct vmw_bo *expected_dx_query_mob;
 604
 605		expected_dx_query_mob =
 606			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
 607		if (expected_dx_query_mob &&
 608		    expected_dx_query_mob != sw_context->dx_query_mob) {
 609			ret = -EINVAL;
 610		}
 611	}
 612
 613	return ret;
 614}
 615
 616/**
 617 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
  618 * resource validation list unless it's already there.
 619 *
 620 * @dev_priv: Pointer to a device private structure.
 621 * @sw_context: Pointer to the software context.
 622 * @res_type: Resource type.
 623 * @dirty: Whether to change dirty status.
  624 * @converter: User-space visible type-specific information.
 625 * @id_loc: Pointer to the location in the command buffer currently being parsed
 626 * from where the user-space resource id handle is located.
  627 * @p_res: Pointer to pointer to resource validation node. Populated on
 628 * exit.
 629 */
 630static int
 631vmw_cmd_res_check(struct vmw_private *dev_priv,
 632		  struct vmw_sw_context *sw_context,
 633		  enum vmw_res_type res_type,
 634		  u32 dirty,
 635		  const struct vmw_user_resource_conv *converter,
 636		  uint32_t *id_loc,
 637		  struct vmw_resource **p_res)
 638{
 639	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
 640	struct vmw_resource *res;
 641	int ret = 0;
 642	bool needs_unref = false;
 643
 644	if (p_res)
 645		*p_res = NULL;
 646
 647	if (*id_loc == SVGA3D_INVALID_ID) {
 648		if (res_type == vmw_res_context) {
 649			VMW_DEBUG_USER("Illegal context invalid id.\n");
 650			return -EINVAL;
 651		}
 652		return 0;
 653	}
 654
 655	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
 656		res = rcache->res;
 657		if (dirty)
 658			vmw_validation_res_set_dirty(sw_context->ctx,
 659						     rcache->private, dirty);
 660	} else {
 661		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
 662
 663		ret = vmw_validation_preload_res(sw_context->ctx, size);
 664		if (ret)
 665			return ret;
 666
 667		ret = vmw_user_resource_lookup_handle
 668			(dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
 669		if (ret != 0) {
 670			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
 671				       (unsigned int) *id_loc);
 672			return ret;
 673		}
 674		needs_unref = true;
 675
 676		ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
 677		if (unlikely(ret != 0))
 678			goto res_check_done;
 679
 680		if (rcache->valid && rcache->res == res) {
 681			rcache->valid_handle = true;
 682			rcache->handle = *id_loc;
 683		}
 684	}
 685
 686	ret = vmw_resource_relocation_add(sw_context, res,
 687					  vmw_ptr_diff(sw_context->buf_start,
 688						       id_loc),
 689					  vmw_res_rel_normal);
 690	if (p_res)
 691		*p_res = res;
 692
 693res_check_done:
 694	if (needs_unref)
 695		vmw_resource_unreference(&res);
 696
 697	return ret;
 698}
 699
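/*
 * Example (illustrative): most command verifiers below reduce to a single
 * vmw_cmd_res_check() call on the id embedded in the command body, e.g.:
 *
 *	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 *				 VMW_RES_DIRTY_NONE, user_surface_converter,
 *				 &cmd->body.sid, NULL);
 */
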
 700/**
 701 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 702 *
 703 * @ctx_res: context the query belongs to
 704 *
 705 * This function assumes binding_mutex is held.
 706 */
 707static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
 708{
 709	struct vmw_private *dev_priv = ctx_res->dev_priv;
 710	struct vmw_bo *dx_query_mob;
 711	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
 712
 713	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
 714
 715	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
 716		return 0;
 717
 718	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
 719	if (cmd == NULL)
 720		return -ENOMEM;
 721
 722	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
 723	cmd->header.size = sizeof(cmd->body);
 724	cmd->body.cid = ctx_res->id;
 725	cmd->body.mobid = dx_query_mob->tbo.resource->start;
 726	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 727
 728	vmw_context_bind_dx_query(ctx_res, dx_query_mob);
 729
 730	return 0;
 731}
 732
 733/**
 734 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 735 * contexts.
 736 *
 737 * @sw_context: Pointer to the software context.
 738 *
 739 * Rebind context binding points that have been scrubbed because of eviction.
 740 */
 741static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
 742{
 743	struct vmw_ctx_validation_info *val;
 744	int ret;
 745
 746	list_for_each_entry(val, &sw_context->ctx_list, head) {
 747		ret = vmw_binding_rebind_all(val->cur);
 748		if (unlikely(ret != 0)) {
 749			if (ret != -ERESTARTSYS)
 750				VMW_DEBUG_USER("Failed to rebind context.\n");
 751			return ret;
 752		}
 753
 754		ret = vmw_rebind_all_dx_query(val->ctx);
 755		if (ret != 0) {
 756			VMW_DEBUG_USER("Failed to rebind queries.\n");
 757			return ret;
 758		}
 759	}
 760
 761	return 0;
 762}
 763
 764/**
 765 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 766 * state tracker.
 767 *
 768 * @sw_context: The execbuf state used for this command.
 769 * @view_type: View type for the bindings.
 770 * @binding_type: Binding type for the bindings.
  771 * @shader_slot: The shader slot to use for the bindings.
 772 * @view_ids: Array of view ids to be bound.
 773 * @num_views: Number of view ids in @view_ids.
 774 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 775 */
 776static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
 777				 enum vmw_view_type view_type,
 778				 enum vmw_ctx_binding_type binding_type,
 779				 uint32 shader_slot,
 780				 uint32 view_ids[], u32 num_views,
 781				 u32 first_slot)
 782{
 783	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
 784	u32 i;
 785
 786	if (!ctx_node)
 787		return -EINVAL;
 788
 789	for (i = 0; i < num_views; ++i) {
 790		struct vmw_ctx_bindinfo_view binding;
 791		struct vmw_resource *view = NULL;
 792
 793		if (view_ids[i] != SVGA3D_INVALID_ID) {
 794			view = vmw_view_id_val_add(sw_context, view_type,
 795						   view_ids[i]);
 796			if (IS_ERR(view)) {
 797				VMW_DEBUG_USER("View not found.\n");
 798				return PTR_ERR(view);
 799			}
 800		}
 801		binding.bi.ctx = ctx_node->ctx;
 802		binding.bi.res = view;
 803		binding.bi.bt = binding_type;
 804		binding.shader_slot = shader_slot;
 805		binding.slot = first_slot + i;
 806		vmw_binding_add(ctx_node->staged, &binding.bi,
 807				shader_slot, binding.slot);
 808	}
 809
 810	return 0;
 811}
 812
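/*
 * Example (illustrative, with hypothetical variables; vmw_ctx_binding_sr is
 * assumed to be the shader-resource binding type): binding a run of
 * shader-resource views starting at the first slot could look like:
 *
 *	ret = vmw_view_bindings_add(sw_context, vmw_view_sr,
 *				    vmw_ctx_binding_sr, shader_slot,
 *				    view_ids, num_views, 0);
 */
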
 813/**
 814 * vmw_cmd_cid_check - Check a command header for valid context information.
 815 *
 816 * @dev_priv: Pointer to a device private structure.
 817 * @sw_context: Pointer to the software context.
 818 * @header: A command header with an embedded user-space context handle.
 819 *
 820 * Convenience function: Call vmw_cmd_res_check with the user-space context
 821 * handle embedded in @header.
 822 */
 823static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 824			     struct vmw_sw_context *sw_context,
 825			     SVGA3dCmdHeader *header)
 826{
 827	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
 828		container_of(header, typeof(*cmd), header);
 829
 830	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 831				 VMW_RES_DIRTY_SET, user_context_converter,
 832				 &cmd->body, NULL);
 833}
 834
 835/**
 836 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 837 * recently validated resource
 838 *
 839 * @sw_context: Pointer to the command submission context
 840 * @res: The resource
 841 *
 842 * The resource pointed to by @res needs to be present in the command submission
  843 * context's resource cache, and hence must be the last resource of that type
  844 * to have been processed by the validation code.
 845 *
 846 * Return: a pointer to the private metadata of the resource, or NULL if it
 847 * wasn't found
 848 */
 849static struct vmw_ctx_validation_info *
 850vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
 851			  struct vmw_resource *res)
 852{
 853	struct vmw_res_cache_entry *rcache =
 854		&sw_context->res_cache[vmw_res_type(res)];
 855
 856	if (rcache->valid && rcache->res == res)
 857		return rcache->private;
 858
 859	WARN_ON_ONCE(true);
 860	return NULL;
 861}
 862
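/*
 * Example (illustrative): callers fetch the metadata right after the resource
 * has passed through vmw_cmd_res_check(), while it still occupies the
 * per-type cache slot:
 *
 *	node = vmw_execbuf_info_from_res(sw_context, ctx);
 *	if (!node)
 *		return -EINVAL;
 */
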
 863static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 864					   struct vmw_sw_context *sw_context,
 865					   SVGA3dCmdHeader *header)
 866{
 867	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
 868	struct vmw_resource *ctx;
 869	struct vmw_resource *res;
 870	int ret;
 871
 872	cmd = container_of(header, typeof(*cmd), header);
 873
 874	if (cmd->body.type >= SVGA3D_RT_MAX) {
 875		VMW_DEBUG_USER("Illegal render target type %u.\n",
 876			       (unsigned int) cmd->body.type);
 877		return -EINVAL;
 878	}
 879
 880	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 881				VMW_RES_DIRTY_SET, user_context_converter,
 882				&cmd->body.cid, &ctx);
 883	if (unlikely(ret != 0))
 884		return ret;
 885
 886	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 887				VMW_RES_DIRTY_SET, user_surface_converter,
 888				&cmd->body.target.sid, &res);
 889	if (unlikely(ret))
 890		return ret;
 891
 892	if (dev_priv->has_mob) {
 893		struct vmw_ctx_bindinfo_view binding;
 894		struct vmw_ctx_validation_info *node;
 895
 896		node = vmw_execbuf_info_from_res(sw_context, ctx);
 897		if (!node)
 898			return -EINVAL;
 899
 900		binding.bi.ctx = ctx;
 901		binding.bi.res = res;
 902		binding.bi.bt = vmw_ctx_binding_rt;
 903		binding.slot = cmd->body.type;
 904		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
 905	}
 906
 907	return 0;
 908}
 909
 910static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
 911				      struct vmw_sw_context *sw_context,
 912				      SVGA3dCmdHeader *header)
 913{
 914	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
 915	int ret;
 916
 917	cmd = container_of(header, typeof(*cmd), header);
 918
 919	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 920				VMW_RES_DIRTY_NONE, user_surface_converter,
 921				&cmd->body.src.sid, NULL);
 922	if (ret)
 923		return ret;
 924
 925	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 926				 VMW_RES_DIRTY_SET, user_surface_converter,
 927				 &cmd->body.dest.sid, NULL);
 928}
 929
 930static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
 931				     struct vmw_sw_context *sw_context,
 932				     SVGA3dCmdHeader *header)
 933{
 934	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
 935	int ret;
 936
 937	cmd = container_of(header, typeof(*cmd), header);
 938	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 939				VMW_RES_DIRTY_NONE, user_surface_converter,
 940				&cmd->body.src, NULL);
 941	if (ret != 0)
 942		return ret;
 943
 944	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 945				 VMW_RES_DIRTY_SET, user_surface_converter,
 946				 &cmd->body.dest, NULL);
 947}
 948
 949static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
 950				   struct vmw_sw_context *sw_context,
 951				   SVGA3dCmdHeader *header)
 952{
 953	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
 954	int ret;
 955
 956	cmd = container_of(header, typeof(*cmd), header);
 957	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 958				VMW_RES_DIRTY_NONE, user_surface_converter,
 959				&cmd->body.srcSid, NULL);
 960	if (ret != 0)
 961		return ret;
 962
 963	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 964				 VMW_RES_DIRTY_SET, user_surface_converter,
 965				 &cmd->body.dstSid, NULL);
 966}
 967
 968static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
 969				     struct vmw_sw_context *sw_context,
 970				     SVGA3dCmdHeader *header)
 971{
 972	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
 973	int ret;
 974
 975	cmd = container_of(header, typeof(*cmd), header);
 976	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 977				VMW_RES_DIRTY_NONE, user_surface_converter,
 978				&cmd->body.src.sid, NULL);
 979	if (unlikely(ret != 0))
 980		return ret;
 981
 982	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 983				 VMW_RES_DIRTY_SET, user_surface_converter,
 984				 &cmd->body.dest.sid, NULL);
 985}
 986
 987static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
 988					 struct vmw_sw_context *sw_context,
 989					 SVGA3dCmdHeader *header)
 990{
 991	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
 992		container_of(header, typeof(*cmd), header);
 993
 994	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 995				 VMW_RES_DIRTY_NONE, user_surface_converter,
 996				 &cmd->body.srcImage.sid, NULL);
 997}
 998
 999static int vmw_cmd_present_check(struct vmw_private *dev_priv,
1000				 struct vmw_sw_context *sw_context,
1001				 SVGA3dCmdHeader *header)
1002{
1003	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
1004		container_of(header, typeof(*cmd), header);
1005
1006	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1007				 VMW_RES_DIRTY_NONE, user_surface_converter,
1008				 &cmd->body.sid, NULL);
1009}
1010
1011/**
1012 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1013 *
1014 * @dev_priv: The device private structure.
1015 * @new_query_bo: The new buffer holding query results.
1016 * @sw_context: The software context used for this command submission.
1017 *
1018 * This function checks whether @new_query_bo is suitable for holding query
 1019 * results, and if another buffer is currently pinned for query results. If so,
1020 * the function prepares the state of @sw_context for switching pinned buffers
1021 * after successful submission of the current command batch.
1022 */
1023static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1024				       struct vmw_bo *new_query_bo,
1025				       struct vmw_sw_context *sw_context)
1026{
1027	struct vmw_res_cache_entry *ctx_entry =
1028		&sw_context->res_cache[vmw_res_context];
1029	int ret;
1030
1031	BUG_ON(!ctx_entry->valid);
1032	sw_context->last_query_ctx = ctx_entry->res;
1033
1034	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1035
1036		if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) {
1037			VMW_DEBUG_USER("Query buffer too large.\n");
1038			return -EINVAL;
1039		}
1040
1041		if (unlikely(sw_context->cur_query_bo != NULL)) {
1042			sw_context->needs_post_query_barrier = true;
1043			vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo);
1044			ret = vmw_validation_add_bo(sw_context->ctx,
1045						    sw_context->cur_query_bo);
1046			if (unlikely(ret != 0))
1047				return ret;
1048		}
1049		sw_context->cur_query_bo = new_query_bo;
1050
1051		vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo);
1052		ret = vmw_validation_add_bo(sw_context->ctx,
1053					    dev_priv->dummy_query_bo);
1054		if (unlikely(ret != 0))
1055			return ret;
1056	}
1057
1058	return 0;
1059}
1060
1061/**
1062 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1063 *
1064 * @dev_priv: The device private structure.
1065 * @sw_context: The software context used for this command submission batch.
1066 *
 1067 * This function will check if we're switching query buffers, and will then
 1068 * issue a dummy occlusion query wait used as a query barrier. When the fence
 1069 * object following that query wait has signaled, we are sure that all preceding
 1070 * queries have finished, and the old query buffer can be unpinned. However,
 1071 * since both the new query buffer and the old one are fenced with that fence,
 1072 * we can do an asynchronous unpin now, and be sure that the old query buffer
 1073 * won't be moved until the fence has signaled.
 1074 *
 1075 * As mentioned above, both the new and old query buffers need to be fenced
 1076 * using a sequence emitted *after* calling this function.
1077 */
1078static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1079				     struct vmw_sw_context *sw_context)
1080{
1081	/*
1082	 * The validate list should still hold references to all
1083	 * contexts here.
1084	 */
1085	if (sw_context->needs_post_query_barrier) {
1086		struct vmw_res_cache_entry *ctx_entry =
1087			&sw_context->res_cache[vmw_res_context];
1088		struct vmw_resource *ctx;
1089		int ret;
1090
1091		BUG_ON(!ctx_entry->valid);
1092		ctx = ctx_entry->res;
1093
1094		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);
1095
1096		if (unlikely(ret != 0))
1097			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
1098	}
1099
1100	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1101		if (dev_priv->pinned_bo) {
1102			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
1103			vmw_bo_unreference(&dev_priv->pinned_bo);
1104		}
1105
1106		if (!sw_context->needs_post_query_barrier) {
1107			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
1108
1109			/*
1110			 * We pin also the dummy_query_bo buffer so that we
1111			 * don't need to validate it when emitting dummy queries
1112			 * in context destroy paths.
1113			 */
1114			if (!dev_priv->dummy_query_bo_pinned) {
1115				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1116						    true);
1117				dev_priv->dummy_query_bo_pinned = true;
1118			}
1119
1120			BUG_ON(sw_context->last_query_ctx == NULL);
1121			dev_priv->query_cid = sw_context->last_query_ctx->id;
1122			dev_priv->query_cid_valid = true;
1123			dev_priv->pinned_bo =
1124				vmw_bo_reference(sw_context->cur_query_bo);
1125		}
1126	}
1127}
1128
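/*
 * Simplified picture of the intended pairing: while parsing, the query-end
 * verifiers call vmw_query_bo_switch_prepare(), and once the batch has been
 * submitted the execbuf code calls
 *
 *	vmw_query_bo_switch_commit(dev_priv, sw_context);
 *
 * before fencing the command sequence, so that the old and the new query
 * buffer end up protected by the same fence.
 */
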
1129/**
1130 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
1131 * to a MOB id.
1132 *
1133 * @dev_priv: Pointer to a device private structure.
1134 * @sw_context: The software context used for this command batch validation.
1135 * @id: Pointer to the user-space handle to be translated.
 1136 * @vmw_bo_p: Points to a location that, on successful return, will carry a
1137 * non-reference-counted pointer to the buffer object identified by the
1138 * user-space handle in @id.
1139 *
1140 * This function saves information needed to translate a user-space buffer
1141 * handle to a MOB id. The translation does not take place immediately, but
1142 * during a call to vmw_apply_relocations().
1143 *
1144 * This function builds a relocation list and a list of buffers to validate. The
1145 * former needs to be freed using either vmw_apply_relocations() or
1146 * vmw_free_relocations(). The latter needs to be freed using
 1147 * vmw_clear_validations().
1148 */
1149static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1150				 struct vmw_sw_context *sw_context,
1151				 SVGAMobId *id,
1152				 struct vmw_bo **vmw_bo_p)
1153{
1154	struct vmw_bo *vmw_bo, *tmp_bo;
1155	uint32_t handle = *id;
1156	struct vmw_relocation *reloc;
1157	int ret;
1158
1159	vmw_validation_preload_bo(sw_context->ctx);
1160	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
1161	if (ret != 0) {
1162		drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
 1163		return ret;
1164	}
1165	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
1166	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
1167	tmp_bo = vmw_bo;
1168	vmw_user_bo_unref(&tmp_bo);
1169	if (unlikely(ret != 0))
1170		return ret;
1171
1172	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1173	if (!reloc)
1174		return -ENOMEM;
1175
1176	reloc->mob_loc = id;
1177	reloc->vbo = vmw_bo;
1178
1179	*vmw_bo_p = vmw_bo;
1180	list_add_tail(&reloc->head, &sw_context->bo_relocations);
1181
1182	return 0;
1183}
1184
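/*
 * Example (illustrative, as used by vmw_cmd_dx_bind_query() below); the
 * returned buffer object pointer is not reference-counted:
 *
 *	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
 *				    &vmw_bo);
 *	if (ret != 0)
 *		return ret;
 */
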
1185/**
1186 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
1187 * to a valid SVGAGuestPtr
1188 *
1189 * @dev_priv: Pointer to a device private structure.
1190 * @sw_context: The software context used for this command batch validation.
1191 * @ptr: Pointer to the user-space handle to be translated.
 1192 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 1193 * non-reference-counted pointer to the DMA buffer identified by the user-space
 1194 * handle in @ptr.
1195 *
1196 * This function saves information needed to translate a user-space buffer
1197 * handle to a valid SVGAGuestPtr. The translation does not take place
1198 * immediately, but during a call to vmw_apply_relocations().
1199 *
1200 * This function builds a relocation list and a list of buffers to validate.
1201 * The former needs to be freed using either vmw_apply_relocations() or
1202 * vmw_free_relocations(). The latter needs to be freed using
 1203 * vmw_clear_validations().
1204 */
1205static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1206				   struct vmw_sw_context *sw_context,
1207				   SVGAGuestPtr *ptr,
1208				   struct vmw_bo **vmw_bo_p)
1209{
1210	struct vmw_bo *vmw_bo, *tmp_bo;
1211	uint32_t handle = ptr->gmrId;
1212	struct vmw_relocation *reloc;
1213	int ret;
1214
1215	vmw_validation_preload_bo(sw_context->ctx);
1216	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
1217	if (ret != 0) {
1218		drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
 1219		return ret;
1220	}
1221	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
1222			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
1223	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
1224	tmp_bo = vmw_bo;
1225	vmw_user_bo_unref(&tmp_bo);
1226	if (unlikely(ret != 0))
1227		return ret;
1228
1229	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1230	if (!reloc)
1231		return -ENOMEM;
1232
1233	reloc->location = ptr;
1234	reloc->vbo = vmw_bo;
1235	*vmw_bo_p = vmw_bo;
1236	list_add_tail(&reloc->head, &sw_context->bo_relocations);
1237
1238	return 0;
1239}
1240
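/*
 * Example (illustrative, mirroring vmw_cmd_end_query() below) for GMR- or
 * VRAM-backed guest pointers:
 *
 *	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
 *				      &cmd->body.guestResult, &vmw_bo);
 *	if (unlikely(ret != 0))
 *		return ret;
 */
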
1241/**
1242 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
1243 *
1244 * @dev_priv: Pointer to a device private struct.
1245 * @sw_context: The software context used for this command submission.
1246 * @header: Pointer to the command header in the command stream.
1247 *
 1248 * This function adds the new query into the query COTABLE.
1249 */
1250static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1251				   struct vmw_sw_context *sw_context,
1252				   SVGA3dCmdHeader *header)
1253{
1254	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
1255	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
1256	struct vmw_resource *cotable_res;
1257	int ret;
1258
1259	if (!ctx_node)
1260		return -EINVAL;
1261
1262	cmd = container_of(header, typeof(*cmd), header);
1263
1264	if (cmd->body.type <  SVGA3D_QUERYTYPE_MIN ||
1265	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
1266		return -EINVAL;
1267
 1268	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	if (IS_ERR_OR_NULL(cotable_res))
		return cotable_res ? PTR_ERR(cotable_res) : -EINVAL;
 1269	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
1270
1271	return ret;
1272}
1273
1274/**
1275 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
1276 *
1277 * @dev_priv: Pointer to a device private struct.
1278 * @sw_context: The software context used for this command submission.
1279 * @header: Pointer to the command header in the command stream.
1280 *
1281 * The query bind operation will eventually associate the query ID with its
1282 * backing MOB.  In this function, we take the user mode MOB ID and use
1283 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
1284 */
1285static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1286				 struct vmw_sw_context *sw_context,
1287				 SVGA3dCmdHeader *header)
1288{
1289	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
1290	struct vmw_bo *vmw_bo;
1291	int ret;
1292
1293	cmd = container_of(header, typeof(*cmd), header);
1294
1295	/*
1296	 * Look up the buffer pointed to by q.mobid, put it on the relocation
1297	 * list so its kernel mode MOB ID can be filled in later
1298	 */
1299	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1300				    &vmw_bo);
1301
1302	if (ret != 0)
1303		return ret;
1304
1305	sw_context->dx_query_mob = vmw_bo;
1306	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
1307	return 0;
1308}
1309
1310/**
1311 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
1312 *
1313 * @dev_priv: Pointer to a device private struct.
1314 * @sw_context: The software context used for this command submission.
1315 * @header: Pointer to the command header in the command stream.
1316 */
1317static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1318				  struct vmw_sw_context *sw_context,
1319				  SVGA3dCmdHeader *header)
1320{
1321	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
1322		container_of(header, typeof(*cmd), header);
1323
1324	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1325				 VMW_RES_DIRTY_SET, user_context_converter,
1326				 &cmd->body.cid, NULL);
1327}
1328
1329/**
1330 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
1331 *
1332 * @dev_priv: Pointer to a device private struct.
1333 * @sw_context: The software context used for this command submission.
1334 * @header: Pointer to the command header in the command stream.
1335 */
1336static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1337			       struct vmw_sw_context *sw_context,
1338			       SVGA3dCmdHeader *header)
1339{
1340	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
1341		container_of(header, typeof(*cmd), header);
1342
 1343	if (dev_priv->has_mob) {
1344		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);
1345
1346		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1347
1348		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1349		gb_cmd.header.size = cmd->header.size;
1350		gb_cmd.body.cid = cmd->body.cid;
1351		gb_cmd.body.type = cmd->body.type;
1352
1353		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1354		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1355	}
1356
1357	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1358				 VMW_RES_DIRTY_SET, user_context_converter,
1359				 &cmd->body.cid, NULL);
1360}
1361
1362/**
1363 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
1364 *
1365 * @dev_priv: Pointer to a device private struct.
1366 * @sw_context: The software context used for this command submission.
1367 * @header: Pointer to the command header in the command stream.
1368 */
1369static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1370				struct vmw_sw_context *sw_context,
1371				SVGA3dCmdHeader *header)
1372{
1373	struct vmw_bo *vmw_bo;
1374	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
1375	int ret;
1376
1377	cmd = container_of(header, typeof(*cmd), header);
1378	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1379	if (unlikely(ret != 0))
1380		return ret;
1381
1382	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1383				    &vmw_bo);
1384	if (unlikely(ret != 0))
1385		return ret;
1386
1387	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1388
1389	return ret;
1390}
1391
1392/**
1393 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
1394 *
1395 * @dev_priv: Pointer to a device private struct.
1396 * @sw_context: The software context used for this command submission.
1397 * @header: Pointer to the command header in the command stream.
1398 */
1399static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1400			     struct vmw_sw_context *sw_context,
1401			     SVGA3dCmdHeader *header)
1402{
1403	struct vmw_bo *vmw_bo;
1404	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
1405	int ret;
1406
1407	cmd = container_of(header, typeof(*cmd), header);
1408	if (dev_priv->has_mob) {
1409		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);
1410
1411		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1412
1413		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1414		gb_cmd.header.size = cmd->header.size;
1415		gb_cmd.body.cid = cmd->body.cid;
1416		gb_cmd.body.type = cmd->body.type;
1417		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1418		gb_cmd.body.offset = cmd->body.guestResult.offset;
1419
1420		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1421		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1422	}
1423
1424	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1425	if (unlikely(ret != 0))
1426		return ret;
1427
1428	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1429				      &cmd->body.guestResult, &vmw_bo);
1430	if (unlikely(ret != 0))
1431		return ret;
1432
1433	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1434
1435	return ret;
1436}
1437
1438/**
1439 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
1440 *
1441 * @dev_priv: Pointer to a device private struct.
1442 * @sw_context: The software context used for this command submission.
1443 * @header: Pointer to the command header in the command stream.
1444 */
1445static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1446				 struct vmw_sw_context *sw_context,
1447				 SVGA3dCmdHeader *header)
1448{
1449	struct vmw_bo *vmw_bo;
1450	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
1451	int ret;
1452
1453	cmd = container_of(header, typeof(*cmd), header);
1454	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1455	if (unlikely(ret != 0))
1456		return ret;
1457
1458	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1459				    &vmw_bo);
1460	if (unlikely(ret != 0))
1461		return ret;
1462
1463	return 0;
1464}
1465
1466/**
1467 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
1468 *
1469 * @dev_priv: Pointer to a device private struct.
1470 * @sw_context: The software context used for this command submission.
1471 * @header: Pointer to the command header in the command stream.
1472 */
1473static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1474			      struct vmw_sw_context *sw_context,
1475			      SVGA3dCmdHeader *header)
1476{
1477	struct vmw_bo *vmw_bo;
1478	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
1479	int ret;
1480
1481	cmd = container_of(header, typeof(*cmd), header);
1482	if (dev_priv->has_mob) {
1483		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);
1484
1485		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1486
1487		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1488		gb_cmd.header.size = cmd->header.size;
1489		gb_cmd.body.cid = cmd->body.cid;
1490		gb_cmd.body.type = cmd->body.type;
1491		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1492		gb_cmd.body.offset = cmd->body.guestResult.offset;
1493
1494		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1495		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1496	}
1497
1498	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1499	if (unlikely(ret != 0))
1500		return ret;
1501
1502	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1503				      &cmd->body.guestResult, &vmw_bo);
1504	if (unlikely(ret != 0))
1505		return ret;
1506
1507	return 0;
1508}
1509
1510static int vmw_cmd_dma(struct vmw_private *dev_priv,
1511		       struct vmw_sw_context *sw_context,
1512		       SVGA3dCmdHeader *header)
1513{
1514	struct vmw_bo *vmw_bo = NULL;
1515	struct vmw_surface *srf = NULL;
1516	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
1517	int ret;
1518	SVGA3dCmdSurfaceDMASuffix *suffix;
1519	uint32_t bo_size;
1520	bool dirty;
1521
1522	cmd = container_of(header, typeof(*cmd), header);
1523	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
1524					       header->size - sizeof(*suffix));
1525
 1526	/* Make sure device and verifier stay in sync. */
1527	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1528		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
1529		return -EINVAL;
1530	}
1531
1532	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1533				      &cmd->body.guest.ptr, &vmw_bo);
1534	if (unlikely(ret != 0))
1535		return ret;
1536
1537	/* Make sure DMA doesn't cross BO boundaries. */
1538	bo_size = vmw_bo->tbo.base.size;
1539	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
1540		VMW_DEBUG_USER("Invalid DMA offset.\n");
1541		return -EINVAL;
1542	}
1543
1544	bo_size -= cmd->body.guest.ptr.offset;
1545	if (unlikely(suffix->maximumOffset > bo_size))
1546		suffix->maximumOffset = bo_size;
1547
1548	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
1549		VMW_RES_DIRTY_SET : 0;
1550	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1551				dirty, user_surface_converter,
1552				&cmd->body.host.sid, NULL);
1553	if (unlikely(ret != 0)) {
1554		if (unlikely(ret != -ERESTARTSYS))
1555			VMW_DEBUG_USER("could not find surface for DMA.\n");
1556		return ret;
1557	}
1558
1559	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1560
1561	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);
1562
1563	return 0;
1564}
1565
1566static int vmw_cmd_draw(struct vmw_private *dev_priv,
1567			struct vmw_sw_context *sw_context,
1568			SVGA3dCmdHeader *header)
1569{
1570	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
1571	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1572		(unsigned long)header + sizeof(*cmd));
1573	SVGA3dPrimitiveRange *range;
1574	uint32_t i;
1575	uint32_t maxnum;
1576	int ret;
1577
1578	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1579	if (unlikely(ret != 0))
1580		return ret;
1581
1582	cmd = container_of(header, typeof(*cmd), header);
1583	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1584
1585	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1586		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
1587		return -EINVAL;
1588	}
1589
1590	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1591		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1592					VMW_RES_DIRTY_NONE,
1593					user_surface_converter,
1594					&decl->array.surfaceId, NULL);
1595		if (unlikely(ret != 0))
1596			return ret;
1597	}
1598
1599	maxnum = (header->size - sizeof(cmd->body) -
1600		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1601	if (unlikely(cmd->body.numRanges > maxnum)) {
1602		VMW_DEBUG_USER("Illegal number of index ranges.\n");
1603		return -EINVAL;
1604	}
1605
1606	range = (SVGA3dPrimitiveRange *) decl;
1607	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1608		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1609					VMW_RES_DIRTY_NONE,
1610					user_surface_converter,
1611					&range->indexArray.surfaceId, NULL);
1612		if (unlikely(ret != 0))
1613			return ret;
1614	}
1615	return 0;
1616}
1617
1618static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1619			     struct vmw_sw_context *sw_context,
1620			     SVGA3dCmdHeader *header)
1621{
1622	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
1623	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1624	  ((unsigned long) header + header->size + sizeof(*header));
1625	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1626		((unsigned long) header + sizeof(*cmd));
1627	struct vmw_resource *ctx;
1628	struct vmw_resource *res;
1629	int ret;
1630
1631	cmd = container_of(header, typeof(*cmd), header);
1632
1633	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1634				VMW_RES_DIRTY_SET, user_context_converter,
1635				&cmd->body.cid, &ctx);
1636	if (unlikely(ret != 0))
1637		return ret;
1638
1639	for (; cur_state < last_state; ++cur_state) {
1640		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1641			continue;
1642
1643		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1644			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
1645				       (unsigned int) cur_state->stage);
1646			return -EINVAL;
1647		}
1648
1649		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1650					VMW_RES_DIRTY_NONE,
1651					user_surface_converter,
1652					&cur_state->value, &res);
1653		if (unlikely(ret != 0))
1654			return ret;
1655
1656		if (dev_priv->has_mob) {
1657			struct vmw_ctx_bindinfo_tex binding;
1658			struct vmw_ctx_validation_info *node;
1659
1660			node = vmw_execbuf_info_from_res(sw_context, ctx);
1661			if (!node)
1662				return -EINVAL;
1663
1664			binding.bi.ctx = ctx;
1665			binding.bi.res = res;
1666			binding.bi.bt = vmw_ctx_binding_tex;
1667			binding.texture_stage = cur_state->stage;
1668			vmw_binding_add(node->staged, &binding.bi, 0,
1669					binding.texture_stage);
1670		}
1671	}
1672
1673	return 0;
1674}
1675
1676static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1677				      struct vmw_sw_context *sw_context,
1678				      void *buf)
1679{
1680	struct vmw_bo *vmw_bo;
1681
1682	struct {
1683		uint32_t header;
1684		SVGAFifoCmdDefineGMRFB body;
1685	} *cmd = buf;
1686
1687	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
1688				       &vmw_bo);
1689}
1690
1691/**
1692 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1693 * switching
1694 *
1695 * @dev_priv: Pointer to a device private struct.
1696 * @sw_context: The software context being used for this batch.
1697 * @res: Pointer to the resource.
1698 * @buf_id: Pointer to the user-space backup buffer handle in the command
1699 * stream.
1700 * @backup_offset: Offset of backup into MOB.
1701 *
1702 * This function prepares for registering a switch of backup buffers in the
 1703 * resource metadata just prior to unreserving. It's basically a wrapper around
 1704 * vmw_validation_res_switch_backup() with a different interface.
1705 */
1706static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1707				     struct vmw_sw_context *sw_context,
1708				     struct vmw_resource *res, uint32_t *buf_id,
1709				     unsigned long backup_offset)
1710{
1711	struct vmw_bo *vbo;
1712	void *info;
1713	int ret;
1714
1715	info = vmw_execbuf_info_from_res(sw_context, res);
1716	if (!info)
1717		return -EINVAL;
1718
1719	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1720	if (ret)
1721		return ret;
1722
1723	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1724					 backup_offset);
1725	return 0;
1726}
1727
1728/**
1729 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1730 *
1731 * @dev_priv: Pointer to a device private struct.
1732 * @sw_context: The software context being used for this batch.
1733 * @res_type: The resource type.
1734 * @converter: Information about user-space binding for this resource type.
1735 * @res_id: Pointer to the user-space resource handle in the command stream.
1736 * @buf_id: Pointer to the user-space backup buffer handle in the command
1737 * stream.
1738 * @backup_offset: Offset of backup into MOB.
1739 *
1740 * This function prepares for registering a switch of backup buffers in the
1741 * resource metadata just prior to unreserving. It's basically a wrapper around
1742 * vmw_cmd_res_switch_backup with a different interface.
1743 */
1744static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1745				 struct vmw_sw_context *sw_context,
1746				 enum vmw_res_type res_type,
1747				 const struct vmw_user_resource_conv
1748				 *converter, uint32_t *res_id, uint32_t *buf_id,
1749				 unsigned long backup_offset)
1750{
1751	struct vmw_resource *res;
1752	int ret;
1753
1754	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1755				VMW_RES_DIRTY_NONE, converter, res_id, &res);
1756	if (ret)
1757		return ret;
1758
1759	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1760					 backup_offset);
1761}
1762
1763/**
1764 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1765 *
1766 * @dev_priv: Pointer to a device private struct.
1767 * @sw_context: The software context being used for this batch.
1768 * @header: Pointer to the command header in the command stream.
1769 */
1770static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1771				   struct vmw_sw_context *sw_context,
1772				   SVGA3dCmdHeader *header)
1773{
1774	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1775		container_of(header, typeof(*cmd), header);
1776
1777	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1778				     user_surface_converter, &cmd->body.sid,
1779				     &cmd->body.mobid, 0);
1780}
1781
1782/**
1783 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1784 *
1785 * @dev_priv: Pointer to a device private struct.
1786 * @sw_context: The software context being used for this batch.
1787 * @header: Pointer to the command header in the command stream.
1788 */
1789static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1790				   struct vmw_sw_context *sw_context,
1791				   SVGA3dCmdHeader *header)
1792{
1793	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1794		container_of(header, typeof(*cmd), header);
1795
1796	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1797				 VMW_RES_DIRTY_NONE, user_surface_converter,
1798				 &cmd->body.image.sid, NULL);
1799}
1800
1801/**
1802 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1803 *
1804 * @dev_priv: Pointer to a device private struct.
1805 * @sw_context: The software context being used for this batch.
1806 * @header: Pointer to the command header in the command stream.
1807 */
1808static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1809				     struct vmw_sw_context *sw_context,
1810				     SVGA3dCmdHeader *header)
1811{
1812	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1813		container_of(header, typeof(*cmd), header);
1814
1815	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1816				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1817				 &cmd->body.sid, NULL);
1818}
1819
1820/**
1821 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1822 *
1823 * @dev_priv: Pointer to a device private struct.
1824 * @sw_context: The software context being used for this batch.
1825 * @header: Pointer to the command header in the command stream.
1826 */
1827static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1828				     struct vmw_sw_context *sw_context,
1829				     SVGA3dCmdHeader *header)
1830{
1831	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1832		container_of(header, typeof(*cmd), header);
1833
1834	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1835				 VMW_RES_DIRTY_NONE, user_surface_converter,
1836				 &cmd->body.image.sid, NULL);
1837}
1838
1839/**
1840 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1841 * command
1842 *
1843 * @dev_priv: Pointer to a device private struct.
1844 * @sw_context: The software context being used for this batch.
1845 * @header: Pointer to the command header in the command stream.
1846 */
1847static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1848				       struct vmw_sw_context *sw_context,
1849				       SVGA3dCmdHeader *header)
1850{
1851	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1852		container_of(header, typeof(*cmd), header);
1853
1854	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1855				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1856				 &cmd->body.sid, NULL);
1857}
1858
1859/**
1860 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1861 * command
1862 *
1863 * @dev_priv: Pointer to a device private struct.
1864 * @sw_context: The software context being used for this batch.
1865 * @header: Pointer to the command header in the command stream.
1866 */
1867static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1868				       struct vmw_sw_context *sw_context,
1869				       SVGA3dCmdHeader *header)
1870{
1871	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1872		container_of(header, typeof(*cmd), header);
1873
1874	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1875				 VMW_RES_DIRTY_NONE, user_surface_converter,
1876				 &cmd->body.image.sid, NULL);
1877}
1878
1879/**
1880 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1881 * command
1882 *
1883 * @dev_priv: Pointer to a device private struct.
1884 * @sw_context: The software context being used for this batch.
1885 * @header: Pointer to the command header in the command stream.
1886 */
1887static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1888					 struct vmw_sw_context *sw_context,
1889					 SVGA3dCmdHeader *header)
1890{
1891	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1892		container_of(header, typeof(*cmd), header);
1893
1894	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1895				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1896				 &cmd->body.sid, NULL);
1897}
1898
1899/**
1900 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1901 *
1902 * @dev_priv: Pointer to a device private struct.
1903 * @sw_context: The software context being used for this batch.
1904 * @header: Pointer to the command header in the command stream.
1905 */
1906static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1907				 struct vmw_sw_context *sw_context,
1908				 SVGA3dCmdHeader *header)
1909{
1910	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1911	int ret;
1912	size_t size;
1913	struct vmw_resource *ctx;
1914
1915	cmd = container_of(header, typeof(*cmd), header);
1916
1917	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1918				VMW_RES_DIRTY_SET, user_context_converter,
1919				&cmd->body.cid, &ctx);
1920	if (unlikely(ret != 0))
1921		return ret;
1922
1923	if (unlikely(!dev_priv->has_mob))
1924		return 0;
1925
1926	size = cmd->header.size - sizeof(cmd->body);
1927	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1928				    cmd->body.shid, cmd + 1, cmd->body.type,
1929				    size, &sw_context->staged_cmd_res);
1930	if (unlikely(ret != 0))
1931		return ret;
1932
1933	return vmw_resource_relocation_add(sw_context, NULL,
1934					   vmw_ptr_diff(sw_context->buf_start,
1935							&cmd->header.id),
1936					   vmw_res_rel_nop);
1937}
1938
1939/**
1940 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1941 *
1942 * @dev_priv: Pointer to a device private struct.
1943 * @sw_context: The software context being used for this batch.
1944 * @header: Pointer to the command header in the command stream.
1945 */
1946static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1947				  struct vmw_sw_context *sw_context,
1948				  SVGA3dCmdHeader *header)
1949{
1950	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1951	int ret;
1952	struct vmw_resource *ctx;
1953
1954	cmd = container_of(header, typeof(*cmd), header);
1955
1956	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1957				VMW_RES_DIRTY_SET, user_context_converter,
1958				&cmd->body.cid, &ctx);
1959	if (unlikely(ret != 0))
1960		return ret;
1961
1962	if (unlikely(!dev_priv->has_mob))
1963		return 0;
1964
1965	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1966				cmd->body.type, &sw_context->staged_cmd_res);
1967	if (unlikely(ret != 0))
1968		return ret;
1969
1970	return vmw_resource_relocation_add(sw_context, NULL,
1971					   vmw_ptr_diff(sw_context->buf_start,
1972							&cmd->header.id),
1973					   vmw_res_rel_nop);
1974}
1975
1976/**
1977 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1978 *
1979 * @dev_priv: Pointer to a device private struct.
1980 * @sw_context: The software context being used for this batch.
1981 * @header: Pointer to the command header in the command stream.
1982 */
1983static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1984			      struct vmw_sw_context *sw_context,
1985			      SVGA3dCmdHeader *header)
1986{
1987	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1988	struct vmw_ctx_bindinfo_shader binding;
1989	struct vmw_resource *ctx, *res = NULL;
1990	struct vmw_ctx_validation_info *ctx_info;
1991	int ret;
1992
1993	cmd = container_of(header, typeof(*cmd), header);
1994
1995	if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
1996		VMW_DEBUG_USER("Illegal shader type %u.\n",
1997			       (unsigned int) cmd->body.type);
1998		return -EINVAL;
1999	}
2000
2001	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2002				VMW_RES_DIRTY_SET, user_context_converter,
2003				&cmd->body.cid, &ctx);
2004	if (unlikely(ret != 0))
2005		return ret;
2006
2007	if (!dev_priv->has_mob)
2008		return 0;
2009
2010	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2011		/*
2012		 * This is the compat shader path - Per device guest-backed
2013		 * shaders, but user-space thinks it's per context host-
2014		 * backed shaders.
2015		 */
2016		res = vmw_shader_lookup(vmw_context_res_man(ctx),
2017					cmd->body.shid, cmd->body.type);
2018		if (!IS_ERR(res)) {
2019			ret = vmw_execbuf_res_val_add(sw_context, res,
2020						      VMW_RES_DIRTY_NONE,
2021						      vmw_val_add_flag_noctx);
2022			if (unlikely(ret != 0))
2023				return ret;
2024
2025			ret = vmw_resource_relocation_add
2026				(sw_context, res,
2027				 vmw_ptr_diff(sw_context->buf_start,
2028					      &cmd->body.shid),
2029				 vmw_res_rel_normal);
2030			if (unlikely(ret != 0))
2031				return ret;
2032		}
2033	}
2034
2035	if (IS_ERR_OR_NULL(res)) {
2036		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2037					VMW_RES_DIRTY_NONE,
2038					user_shader_converter, &cmd->body.shid,
2039					&res);
2040		if (unlikely(ret != 0))
2041			return ret;
2042	}
2043
2044	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2045	if (!ctx_info)
2046		return -EINVAL;
2047
2048	binding.bi.ctx = ctx;
2049	binding.bi.res = res;
2050	binding.bi.bt = vmw_ctx_binding_shader;
2051	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2052	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2053
2054	return 0;
2055}
2056
2057/**
2058 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2059 *
2060 * @dev_priv: Pointer to a device private struct.
2061 * @sw_context: The software context being used for this batch.
2062 * @header: Pointer to the command header in the command stream.
2063 */
2064static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2065				    struct vmw_sw_context *sw_context,
2066				    SVGA3dCmdHeader *header)
2067{
2068	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2069	int ret;
2070
2071	cmd = container_of(header, typeof(*cmd), header);
2072
2073	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2074				VMW_RES_DIRTY_SET, user_context_converter,
2075				&cmd->body.cid, NULL);
2076	if (unlikely(ret != 0))
2077		return ret;
2078
2079	if (dev_priv->has_mob)
2080		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2081
2082	return 0;
2083}
2084
2085/**
2086 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2087 *
2088 * @dev_priv: Pointer to a device private struct.
2089 * @sw_context: The software context being used for this batch.
2090 * @header: Pointer to the command header in the command stream.
2091 */
2092static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2093				  struct vmw_sw_context *sw_context,
2094				  SVGA3dCmdHeader *header)
2095{
2096	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2097		container_of(header, typeof(*cmd), header);
2098
2099	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2100				     user_shader_converter, &cmd->body.shid,
2101				     &cmd->body.mobid, cmd->body.offsetInBytes);
2102}
2103
2104/**
2105 * vmw_cmd_dx_set_single_constant_buffer - Validate
2106 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2107 *
2108 * @dev_priv: Pointer to a device private struct.
2109 * @sw_context: The software context being used for this batch.
2110 * @header: Pointer to the command header in the command stream.
2111 */
2112static int
2113vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2114				      struct vmw_sw_context *sw_context,
2115				      SVGA3dCmdHeader *header)
2116{
2117	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2118
2119	struct vmw_resource *res = NULL;
2120	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2121	struct vmw_ctx_bindinfo_cb binding;
2122	int ret;
2123
2124	if (!ctx_node)
2125		return -EINVAL;
2126
2127	cmd = container_of(header, typeof(*cmd), header);
2128	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2129				VMW_RES_DIRTY_NONE, user_surface_converter,
2130				&cmd->body.sid, &res);
2131	if (unlikely(ret != 0))
2132		return ret;
2133
2134	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
2135	    cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2136		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2137			       (unsigned int) cmd->body.type,
2138			       (unsigned int) cmd->body.slot);
2139		return -EINVAL;
2140	}
2141
2142	binding.bi.ctx = ctx_node->ctx;
2143	binding.bi.res = res;
2144	binding.bi.bt = vmw_ctx_binding_cb;
2145	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2146	binding.offset = cmd->body.offsetInBytes;
2147	binding.size = cmd->body.sizeInBytes;
2148	binding.slot = cmd->body.slot;
2149
2150	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2151			binding.slot);
2152
2153	return 0;
2154}
2155
2156/**
2157 * vmw_cmd_dx_set_constant_buffer_offset - Validate
2158 * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
2159 *
2160 * @dev_priv: Pointer to a device private struct.
2161 * @sw_context: The software context being used for this batch.
2162 * @header: Pointer to the command header in the command stream.
2163 */
2164static int
2165vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
2166				      struct vmw_sw_context *sw_context,
2167				      SVGA3dCmdHeader *header)
2168{
2169	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);
2170
2171	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2172	u32 shader_slot;
2173
2174	if (!has_sm5_context(dev_priv))
2175		return -EINVAL;
2176
2177	if (!ctx_node)
2178		return -EINVAL;
2179
2180	cmd = container_of(header, typeof(*cmd), header);
2181	if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2182		VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
2183			       (unsigned int) cmd->body.slot);
2184		return -EINVAL;
2185	}
2186
2187	shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
2188	vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
2189				     cmd->body.slot, cmd->body.offsetInBytes);
2190
2191	return 0;
2192}
2193
2194/**
2195 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2196 * command
2197 *
2198 * @dev_priv: Pointer to a device private struct.
2199 * @sw_context: The software context being used for this batch.
2200 * @header: Pointer to the command header in the command stream.
2201 */
2202static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2203				     struct vmw_sw_context *sw_context,
2204				     SVGA3dCmdHeader *header)
2205{
2206	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2207		container_of(header, typeof(*cmd), header);
2208
2209	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2210		sizeof(SVGA3dShaderResourceViewId);
2211
2212	if ((u64) cmd->body.startView + (u64) num_sr_view >
2213	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2214	    !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
2215		VMW_DEBUG_USER("Invalid shader binding.\n");
2216		return -EINVAL;
2217	}
2218
2219	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2220				     vmw_ctx_binding_sr,
2221				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2222				     (void *) &cmd[1], num_sr_view,
2223				     cmd->body.startView);
2224}
2225
2226/**
2227 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2228 *
2229 * @dev_priv: Pointer to a device private struct.
2230 * @sw_context: The software context being used for this batch.
2231 * @header: Pointer to the command header in the command stream.
2232 */
2233static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2234				 struct vmw_sw_context *sw_context,
2235				 SVGA3dCmdHeader *header)
2236{
2237	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2238	struct vmw_resource *res = NULL;
2239	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2240	struct vmw_ctx_bindinfo_shader binding;
2241	int ret = 0;
2242
2243	if (!ctx_node)
2244		return -EINVAL;
2245
2246	cmd = container_of(header, typeof(*cmd), header);
2247
2248	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
2249		VMW_DEBUG_USER("Illegal shader type %u.\n",
2250			       (unsigned int) cmd->body.type);
2251		return -EINVAL;
2252	}
2253
2254	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2255		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2256		if (IS_ERR(res)) {
2257			VMW_DEBUG_USER("Could not find shader for binding.\n");
2258			return PTR_ERR(res);
2259		}
2260
2261		ret = vmw_execbuf_res_val_add(sw_context, res,
2262					      VMW_RES_DIRTY_NONE,
2263					      vmw_val_add_flag_noctx);
2264		if (ret)
2265			return ret;
2266	}
2267
2268	binding.bi.ctx = ctx_node->ctx;
2269	binding.bi.res = res;
2270	binding.bi.bt = vmw_ctx_binding_dx_shader;
2271	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2272
2273	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2274
2275	return 0;
2276}
2277
2278/**
2279 * vmw_cmd_dx_set_vertex_buffers - Validate SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2280 * command
2281 *
2282 * @dev_priv: Pointer to a device private struct.
2283 * @sw_context: The software context being used for this batch.
2284 * @header: Pointer to the command header in the command stream.
2285 */
2286static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2287					 struct vmw_sw_context *sw_context,
2288					 SVGA3dCmdHeader *header)
2289{
2290	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2291	struct vmw_ctx_bindinfo_vb binding;
2292	struct vmw_resource *res;
2293	struct {
2294		SVGA3dCmdHeader header;
2295		SVGA3dCmdDXSetVertexBuffers body;
2296		SVGA3dVertexBuffer buf[];
2297	} *cmd;
2298	int i, ret, num;
2299
2300	if (!ctx_node)
2301		return -EINVAL;
2302
2303	cmd = container_of(header, typeof(*cmd), header);
2304	num = (cmd->header.size - sizeof(cmd->body)) /
2305		sizeof(SVGA3dVertexBuffer);
2306	if ((u64)num + (u64)cmd->body.startBuffer >
2307	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2308		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2309		return -EINVAL;
2310	}
2311
2312	for (i = 0; i < num; i++) {
2313		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2314					VMW_RES_DIRTY_NONE,
2315					user_surface_converter,
2316					&cmd->buf[i].sid, &res);
2317		if (unlikely(ret != 0))
2318			return ret;
2319
2320		binding.bi.ctx = ctx_node->ctx;
2321		binding.bi.bt = vmw_ctx_binding_vb;
2322		binding.bi.res = res;
2323		binding.offset = cmd->buf[i].offset;
2324		binding.stride = cmd->buf[i].stride;
2325		binding.slot = i + cmd->body.startBuffer;
2326
2327		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2328	}
2329
2330	return 0;
2331}
2332
2333/**
2334 * vmw_cmd_dx_set_index_buffer - Validate
2335 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2336 *
2337 * @dev_priv: Pointer to a device private struct.
2338 * @sw_context: The software context being used for this batch.
2339 * @header: Pointer to the command header in the command stream.
2340 */
2341static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2342				       struct vmw_sw_context *sw_context,
2343				       SVGA3dCmdHeader *header)
2344{
2345	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2346	struct vmw_ctx_bindinfo_ib binding;
2347	struct vmw_resource *res;
2348	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2349	int ret;
2350
2351	if (!ctx_node)
2352		return -EINVAL;
2353
2354	cmd = container_of(header, typeof(*cmd), header);
2355	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2356				VMW_RES_DIRTY_NONE, user_surface_converter,
2357				&cmd->body.sid, &res);
2358	if (unlikely(ret != 0))
2359		return ret;
2360
2361	binding.bi.ctx = ctx_node->ctx;
2362	binding.bi.res = res;
2363	binding.bi.bt = vmw_ctx_binding_ib;
2364	binding.offset = cmd->body.offset;
2365	binding.format = cmd->body.format;
2366
2367	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2368
2369	return 0;
2370}
2371
2372/**
2373 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2374 * command
2375 *
2376 * @dev_priv: Pointer to a device private struct.
2377 * @sw_context: The software context being used for this batch.
2378 * @header: Pointer to the command header in the command stream.
2379 */
2380static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2381					struct vmw_sw_context *sw_context,
2382					SVGA3dCmdHeader *header)
2383{
2384	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2385		container_of(header, typeof(*cmd), header);
2386	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2387		sizeof(SVGA3dRenderTargetViewId);
2388	int ret;
2389
2390	if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) {
2391		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2392		return -EINVAL;
2393	}
2394
2395	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2396				    0, &cmd->body.depthStencilViewId, 1, 0);
2397	if (ret)
2398		return ret;
2399
2400	return vmw_view_bindings_add(sw_context, vmw_view_rt,
2401				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2402				     num_rt_view, 0);
2403}
2404
2405/**
2406 * vmw_cmd_dx_clear_rendertarget_view - Validate
2407 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2408 *
2409 * @dev_priv: Pointer to a device private struct.
2410 * @sw_context: The software context being used for this batch.
2411 * @header: Pointer to the command header in the command stream.
2412 */
2413static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2414					      struct vmw_sw_context *sw_context,
2415					      SVGA3dCmdHeader *header)
2416{
2417	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2418		container_of(header, typeof(*cmd), header);
2419	struct vmw_resource *ret;
2420
2421	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
2422				  cmd->body.renderTargetViewId);
2423
2424	return PTR_ERR_OR_ZERO(ret);
2425}
2426
2427/**
2428 * vmw_cmd_dx_clear_depthstencil_view - Validate
2429 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2430 *
2431 * @dev_priv: Pointer to a device private struct.
2432 * @sw_context: The software context being used for this batch.
2433 * @header: Pointer to the command header in the command stream.
2434 */
2435static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2436					      struct vmw_sw_context *sw_context,
2437					      SVGA3dCmdHeader *header)
2438{
2439	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2440		container_of(header, typeof(*cmd), header);
2441	struct vmw_resource *ret;
2442
2443	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
2444				  cmd->body.depthStencilViewId);
2445
2446	return PTR_ERR_OR_ZERO(ret);
2447}
2448
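/**
 * vmw_cmd_dx_view_define - Validate SVGA_3D_CMD_DX_DEFINE_*_VIEW commands
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */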
2449static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2450				  struct vmw_sw_context *sw_context,
2451				  SVGA3dCmdHeader *header)
2452{
2453	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2454	struct vmw_resource *srf;
2455	struct vmw_resource *res;
2456	enum vmw_view_type view_type;
2457	int ret;
2458	/*
2459	 * This is based on the fact that all affected define commands have the
2460	 * same initial command body layout.
2461	 */
2462	struct {
2463		SVGA3dCmdHeader header;
2464		uint32 defined_id;
2465		uint32 sid;
2466	} *cmd;
2467
2468	if (!ctx_node)
2469		return -EINVAL;
2470
2471	view_type = vmw_view_cmd_to_type(header->id);
2472	if (view_type == vmw_view_max)
2473		return -EINVAL;
2474
2475	cmd = container_of(header, typeof(*cmd), header);
2476	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2477		VMW_DEBUG_USER("Invalid surface id.\n");
2478		return -EINVAL;
2479	}
2480	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2481				VMW_RES_DIRTY_NONE, user_surface_converter,
2482				&cmd->sid, &srf);
2483	if (unlikely(ret != 0))
2484		return ret;
2485
2486	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
	if (IS_ERR_OR_NULL(res))
		return res ? PTR_ERR(res) : -EINVAL;
2487	ret = vmw_cotable_notify(res, cmd->defined_id);
2488	if (unlikely(ret != 0))
2489		return ret;
2490
2491	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2492			    cmd->defined_id, header,
2493			    header->size + sizeof(*header),
2494			    &sw_context->staged_cmd_res);
2495}
2496
2497/**
2498 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2499 *
2500 * @dev_priv: Pointer to a device private struct.
2501 * @sw_context: The software context being used for this batch.
2502 * @header: Pointer to the command header in the command stream.
2503 */
2504static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2505				     struct vmw_sw_context *sw_context,
2506				     SVGA3dCmdHeader *header)
2507{
2508	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2509	struct vmw_ctx_bindinfo_so_target binding;
2510	struct vmw_resource *res;
2511	struct {
2512		SVGA3dCmdHeader header;
2513		SVGA3dCmdDXSetSOTargets body;
2514		SVGA3dSoTarget targets[];
2515	} *cmd;
2516	int i, ret, num;
2517
2518	if (!ctx_node)
2519		return -EINVAL;
2520
2521	cmd = container_of(header, typeof(*cmd), header);
2522	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2523
2524	if (num > SVGA3D_DX_MAX_SOTARGETS) {
2525		VMW_DEBUG_USER("Invalid DX SO binding.\n");
2526		return -EINVAL;
2527	}
2528
2529	for (i = 0; i < num; i++) {
2530		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2531					VMW_RES_DIRTY_SET,
2532					user_surface_converter,
2533					&cmd->targets[i].sid, &res);
2534		if (unlikely(ret != 0))
2535			return ret;
2536
2537		binding.bi.ctx = ctx_node->ctx;
2538		binding.bi.res = res;
2539		binding.bi.bt = vmw_ctx_binding_so_target;
2540		binding.offset = cmd->targets[i].offset;
2541		binding.size = cmd->targets[i].sizeInBytes;
2542		binding.slot = i;
2543
2544		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2545	}
2546
2547	return 0;
2548}
2549
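/**
 * vmw_cmd_dx_so_define - Validate SVGA_3D_CMD_DX_DEFINE_* commands for state
 * objects backed by a context cotable
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */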
2550static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2551				struct vmw_sw_context *sw_context,
2552				SVGA3dCmdHeader *header)
2553{
2554	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2555	struct vmw_resource *res;
2556	/*
2557	 * This is based on the fact that all affected define commands have
2558	 * the same initial command body layout.
2559	 */
2560	struct {
2561		SVGA3dCmdHeader header;
2562		uint32 defined_id;
2563	} *cmd;
2564	enum vmw_so_type so_type;
2565	int ret;
2566
2567	if (!ctx_node)
2568		return -EINVAL;
2569
2570	so_type = vmw_so_cmd_to_type(header->id);
2571	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2572	if (IS_ERR(res))
2573		return PTR_ERR(res);
2574	cmd = container_of(header, typeof(*cmd), header);
2575	ret = vmw_cotable_notify(res, cmd->defined_id);
2576
2577	return ret;
2578}
2579
2580/**
2581 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2582 * command
2583 *
2584 * @dev_priv: Pointer to a device private struct.
2585 * @sw_context: The software context being used for this batch.
2586 * @header: Pointer to the command header in the command stream.
2587 */
2588static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2589					struct vmw_sw_context *sw_context,
2590					SVGA3dCmdHeader *header)
2591{
2592	struct {
2593		SVGA3dCmdHeader header;
2594		union {
2595			SVGA3dCmdDXReadbackSubResource r_body;
2596			SVGA3dCmdDXInvalidateSubResource i_body;
2597			SVGA3dCmdDXUpdateSubResource u_body;
2598			SVGA3dSurfaceId sid;
2599		};
2600	} *cmd;
2601
2602	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2603		     offsetof(typeof(*cmd), sid));
2604	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2605		     offsetof(typeof(*cmd), sid));
2606	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2607		     offsetof(typeof(*cmd), sid));
2608
2609	cmd = container_of(header, typeof(*cmd), header);
2610	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2611				 VMW_RES_DIRTY_NONE, user_surface_converter,
2612				 &cmd->sid, NULL);
2613}
2614
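/**
 * vmw_cmd_dx_cid_check - Validate commands that require only an active DX
 * context
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */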
2615static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2616				struct vmw_sw_context *sw_context,
2617				SVGA3dCmdHeader *header)
2618{
2619	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2620
2621	if (!ctx_node)
2622		return -EINVAL;
2623
2624	return 0;
2625}
2626
2627/**
2628 * vmw_cmd_dx_view_remove - Validate a view remove command and schedule the
2629 * view resource for removal.
2630 *
2631 * @dev_priv: Pointer to a device private struct.
2632 * @sw_context: The software context being used for this batch.
2633 * @header: Pointer to the command header in the command stream.
2634 *
2635 * Check that the view exists, and if it was not created using this command
2636 * batch, conditionally make this command a NOP.
2637 */
2638static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2639				  struct vmw_sw_context *sw_context,
2640				  SVGA3dCmdHeader *header)
2641{
2642	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2643	struct {
2644		SVGA3dCmdHeader header;
2645		union vmw_view_destroy body;
2646	} *cmd = container_of(header, typeof(*cmd), header);
2647	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2648	struct vmw_resource *view;
2649	int ret;
2650
2651	if (!ctx_node)
2652		return -EINVAL;
2653
2654	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2655			      &sw_context->staged_cmd_res, &view);
2656	if (ret || !view)
2657		return ret;
2658
2659	/*
2660	 * If the view wasn't created during this command batch, it might
2661	 * have been removed due to a context swapout, so add a
2662	 * relocation to conditionally make this command a NOP to avoid
2663	 * device errors.
2664	 */
2665	return vmw_resource_relocation_add(sw_context, view,
2666					   vmw_ptr_diff(sw_context->buf_start,
2667							&cmd->header.id),
2668					   vmw_res_rel_cond_nop);
2669}
2670
2671/**
2672 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2673 *
2674 * @dev_priv: Pointer to a device private struct.
2675 * @sw_context: The software context being used for this batch.
2676 * @header: Pointer to the command header in the command stream.
2677 */
2678static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2679				    struct vmw_sw_context *sw_context,
2680				    SVGA3dCmdHeader *header)
2681{
2682	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2683	struct vmw_resource *res;
2684	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2685		container_of(header, typeof(*cmd), header);
2686	int ret;
2687
2688	if (!ctx_node)
2689		return -EINVAL;
2690
2691	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
	if (IS_ERR_OR_NULL(res))
		return res ? PTR_ERR(res) : -EINVAL;
2692	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2693	if (ret)
2694		return ret;
2695
2696	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2697				 cmd->body.shaderId, cmd->body.type,
2698				 &sw_context->staged_cmd_res);
2699}
2700
2701/**
2702 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2703 *
2704 * @dev_priv: Pointer to a device private struct.
2705 * @sw_context: The software context being used for this batch.
2706 * @header: Pointer to the command header in the command stream.
2707 */
2708static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2709				     struct vmw_sw_context *sw_context,
2710				     SVGA3dCmdHeader *header)
2711{
2712	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2713	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2714		container_of(header, typeof(*cmd), header);
2715	int ret;
2716
2717	if (!ctx_node)
2718		return -EINVAL;
2719
2720	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2721				&sw_context->staged_cmd_res);
2722
2723	return ret;
2724}
2725
2726/**
2727 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2728 *
2729 * @dev_priv: Pointer to a device private struct.
2730 * @sw_context: The software context being used for this batch.
2731 * @header: Pointer to the command header in the command stream.
2732 */
2733static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2734				  struct vmw_sw_context *sw_context,
2735				  SVGA3dCmdHeader *header)
2736{
2737	struct vmw_resource *ctx;
2738	struct vmw_resource *res;
2739	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2740		container_of(header, typeof(*cmd), header);
2741	int ret;
2742
2743	if (cmd->body.cid != SVGA3D_INVALID_ID) {
2744		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2745					VMW_RES_DIRTY_SET,
2746					user_context_converter, &cmd->body.cid,
2747					&ctx);
2748		if (ret)
2749			return ret;
2750	} else {
2751		struct vmw_ctx_validation_info *ctx_node =
2752			VMW_GET_CTX_NODE(sw_context);
2753
2754		if (!ctx_node)
2755			return -EINVAL;
2756
2757		ctx = ctx_node->ctx;
2758	}
2759
2760	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2761	if (IS_ERR(res)) {
2762		VMW_DEBUG_USER("Could not find shader to bind.\n");
2763		return PTR_ERR(res);
2764	}
2765
2766	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
2767				      vmw_val_add_flag_noctx);
2768	if (ret) {
2769		VMW_DEBUG_USER("Error creating resource validation node.\n");
2770		return ret;
2771	}
2772
2773	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2774					 &cmd->body.mobid,
2775					 cmd->body.offsetInBytes);
2776}
2777
2778/**
2779 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2780 *
2781 * @dev_priv: Pointer to a device private struct.
2782 * @sw_context: The software context being used for this batch.
2783 * @header: Pointer to the command header in the command stream.
2784 */
2785static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2786			      struct vmw_sw_context *sw_context,
2787			      SVGA3dCmdHeader *header)
2788{
2789	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2790		container_of(header, typeof(*cmd), header);
2791	struct vmw_resource *view;
2792	struct vmw_res_cache_entry *rcache;
2793
2794	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
2795				   cmd->body.shaderResourceViewId);
2796	if (IS_ERR(view))
2797		return PTR_ERR(view);
2798
2799	/*
2800	 * Normally the shader-resource view is not gpu-dirtying, but for
2801	 * this particular command it is...
2802	 * So mark the last looked-up surface, which is the surface
2803	 * the view points to, gpu-dirty.
2804	 */
2805	rcache = &sw_context->res_cache[vmw_res_surface];
2806	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
2807				     VMW_RES_DIRTY_SET);
2808	return 0;
2809}
2810
2811/**
2812 * vmw_cmd_dx_transfer_from_buffer - Validate
2813 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2814 *
2815 * @dev_priv: Pointer to a device private struct.
2816 * @sw_context: The software context being used for this batch.
2817 * @header: Pointer to the command header in the command stream.
2818 */
2819static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2820					   struct vmw_sw_context *sw_context,
2821					   SVGA3dCmdHeader *header)
2822{
2823	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2824		container_of(header, typeof(*cmd), header);
2825	int ret;
2826
2827	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2828				VMW_RES_DIRTY_NONE, user_surface_converter,
2829				&cmd->body.srcSid, NULL);
2830	if (ret != 0)
2831		return ret;
2832
2833	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2834				 VMW_RES_DIRTY_SET, user_surface_converter,
2835				 &cmd->body.destSid, NULL);
2836}
2837
2838/**
2839 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2840 *
2841 * @dev_priv: Pointer to a device private struct.
2842 * @sw_context: The software context being used for this batch.
2843 * @header: Pointer to the command header in the command stream.
2844 */
2845static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2846					   struct vmw_sw_context *sw_context,
2847					   SVGA3dCmdHeader *header)
2848{
2849	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2850		container_of(header, typeof(*cmd), header);
2851
2852	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2853		return -EINVAL;
2854
2855	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2856				 VMW_RES_DIRTY_SET, user_surface_converter,
2857				 &cmd->body.surface.sid, NULL);
2858}
2859
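/**
 * vmw_cmd_sm5 - Validate commands that require SM5 support but need no
 * further checking
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */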
2860static int vmw_cmd_sm5(struct vmw_private *dev_priv,
2861		       struct vmw_sw_context *sw_context,
2862		       SVGA3dCmdHeader *header)
2863{
2864	if (!has_sm5_context(dev_priv))
2865		return -EINVAL;
2866
2867	return 0;
2868}
2869
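/**
 * vmw_cmd_sm5_view_define - Validate view define commands that require SM5
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */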
2870static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
2871				   struct vmw_sw_context *sw_context,
2872				   SVGA3dCmdHeader *header)
2873{
2874	if (!has_sm5_context(dev_priv))
2875		return -EINVAL;
2876
2877	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
2878}
2879
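/**
 * vmw_cmd_sm5_view_remove - Validate view remove commands that require SM5
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */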
2880static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
2881				   struct vmw_sw_context *sw_context,
2882				   SVGA3dCmdHeader *header)
2883{
2884	if (!has_sm5_context(dev_priv))
2885		return -EINVAL;
2886
2887	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
2888}
2889
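/**
 * vmw_cmd_clear_uav_uint - Validate SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */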
2890static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
2891				  struct vmw_sw_context *sw_context,
2892				  SVGA3dCmdHeader *header)
2893{
2894	struct {
2895		SVGA3dCmdHeader header;
2896		SVGA3dCmdDXClearUAViewUint body;
2897	} *cmd = container_of(header, typeof(*cmd), header);
2898	struct vmw_resource *ret;
2899
2900	if (!has_sm5_context(dev_priv))
2901		return -EINVAL;
2902
2903	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2904				  cmd->body.uaViewId);
2905
2906	return PTR_ERR_OR_ZERO(ret);
2907}
2908
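/**
 * vmw_cmd_clear_uav_float - Validate SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */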
2909static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
2910				   struct vmw_sw_context *sw_context,
2911				   SVGA3dCmdHeader *header)
2912{
2913	struct {
2914		SVGA3dCmdHeader header;
2915		SVGA3dCmdDXClearUAViewFloat body;
2916	} *cmd = container_of(header, typeof(*cmd), header);
2917	struct vmw_resource *ret;
2918
2919	if (!has_sm5_context(dev_priv))
2920		return -EINVAL;
2921
2922	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2923				  cmd->body.uaViewId);
2924
2925	return PTR_ERR_OR_ZERO(ret);
2926}
2927
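/**
 * vmw_cmd_set_uav - Validate SVGA_3D_CMD_DX_SET_UA_VIEWS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */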
2928static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
2929			   struct vmw_sw_context *sw_context,
2930			   SVGA3dCmdHeader *header)
2931{
2932	struct {
2933		SVGA3dCmdHeader header;
2934		SVGA3dCmdDXSetUAViews body;
2935	} *cmd = container_of(header, typeof(*cmd), header);
2936	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2937		sizeof(SVGA3dUAViewId);
2938	int ret;
2939
2940	if (!has_sm5_context(dev_priv))
2941		return -EINVAL;
2942
2943	if (num_uav > vmw_max_num_uavs(dev_priv)) {
2944		VMW_DEBUG_USER("Invalid UAV binding.\n");
2945		return -EINVAL;
2946	}
2947
2948	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2949				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
2950				    num_uav, 0);
2951	if (ret)
2952		return ret;
2953
2954	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
2955					 cmd->body.uavSpliceIndex);
2956
2957	return ret;
2958}
2959
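/**
 * vmw_cmd_set_cs_uav - Validate SVGA_3D_CMD_DX_SET_CS_UA_VIEWS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */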
2960static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
2961			      struct vmw_sw_context *sw_context,
2962			      SVGA3dCmdHeader *header)
2963{
2964	struct {
2965		SVGA3dCmdHeader header;
2966		SVGA3dCmdDXSetCSUAViews body;
2967	} *cmd = container_of(header, typeof(*cmd), header);
2968	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2969		sizeof(SVGA3dUAViewId);
2970	int ret;
2971
2972	if (!has_sm5_context(dev_priv))
2973		return -EINVAL;
2974
2975	if (num_uav > vmw_max_num_uavs(dev_priv)) {
2976		VMW_DEBUG_USER("Invalid UAV binding.\n");
2977		return -EINVAL;
2978	}
2979
2980	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2981				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
2982				    num_uav, 0);
2983	if (ret)
2984		return ret;
2985
2986	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
2987				  cmd->body.startIndex);
2988
2989	return ret;
2990}
2991
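/**
 * vmw_cmd_dx_define_streamoutput - Validate
 * SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */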
2992static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
2993					  struct vmw_sw_context *sw_context,
2994					  SVGA3dCmdHeader *header)
2995{
2996	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2997	struct vmw_resource *res;
2998	struct {
2999		SVGA3dCmdHeader header;
3000		SVGA3dCmdDXDefineStreamOutputWithMob body;
3001	} *cmd = container_of(header, typeof(*cmd), header);
3002	int ret;
3003
3004	if (!has_sm5_context(dev_priv))
3005		return -EINVAL;
3006
3007	if (!ctx_node) {
3008		DRM_ERROR("DX Context not set.\n");
3009		return -EINVAL;
3010	}
3011
3012	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
	if (IS_ERR_OR_NULL(res))
		return res ? PTR_ERR(res) : -EINVAL;
3013	ret = vmw_cotable_notify(res, cmd->body.soid);
3014	if (ret)
3015		return ret;
3016
3017	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
3018				       cmd->body.soid,
3019				       &sw_context->staged_cmd_res);
3020}
3021
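/**
 * vmw_cmd_dx_destroy_streamoutput - Validate
 * SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */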
3022static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
3023					   struct vmw_sw_context *sw_context,
3024					   SVGA3dCmdHeader *header)
3025{
3026	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3027	struct vmw_resource *res;
3028	struct {
3029		SVGA3dCmdHeader header;
3030		SVGA3dCmdDXDestroyStreamOutput body;
3031	} *cmd = container_of(header, typeof(*cmd), header);
3032
3033	if (!ctx_node) {
3034		DRM_ERROR("DX Context not set.\n");
3035		return -EINVAL;
3036	}
3037
3038	/*
3039	 * When the device does not support SM5, the streamoutput-with-mob
3040	 * commands are not available to user-space. Simply return in this case.
3041	 */
3042	if (!has_sm5_context(dev_priv))
3043		return 0;
3044
3045	/*
3046	 * On an SM5-capable device, a failed lookup means user-space probably
3047	 * used the old streamoutput define command. Return without an error.
3048	 */
3049	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3050					 cmd->body.soid);
3051	if (IS_ERR(res))
3052		return 0;
3053
3054	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
3055					  &sw_context->staged_cmd_res);
3056}
3057
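/**
 * vmw_cmd_dx_bind_streamoutput - Validate SVGA_3D_CMD_DX_BIND_STREAMOUTPUT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */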
3058static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
3059					struct vmw_sw_context *sw_context,
3060					SVGA3dCmdHeader *header)
3061{
3062	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3063	struct vmw_resource *res;
3064	struct {
3065		SVGA3dCmdHeader header;
3066		SVGA3dCmdDXBindStreamOutput body;
3067	} *cmd = container_of(header, typeof(*cmd), header);
3068	int ret;
3069
3070	if (!has_sm5_context(dev_priv))
3071		return -EINVAL;
3072
3073	if (!ctx_node) {
3074		DRM_ERROR("DX Context not set.\n");
3075		return -EINVAL;
3076	}
3077
3078	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3079					 cmd->body.soid);
3080	if (IS_ERR(res)) {
3081		DRM_ERROR("Could not find streamoutput to bind.\n");
3082		return PTR_ERR(res);
3083	}
3084
3085	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
3086
3087	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3088				      vmw_val_add_flag_noctx);
3089	if (ret) {
3090		DRM_ERROR("Error creating resource validation node.\n");
3091		return ret;
3092	}
3093
3094	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
3095					 &cmd->body.mobid,
3096					 cmd->body.offsetInBytes);
3097}
3098
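/**
 * vmw_cmd_dx_set_streamoutput - Validate SVGA_3D_CMD_DX_SET_STREAMOUTPUT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */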
3099static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
3100				       struct vmw_sw_context *sw_context,
3101				       SVGA3dCmdHeader *header)
3102{
3103	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3104	struct vmw_resource *res;
3105	struct vmw_ctx_bindinfo_so binding;
3106	struct {
3107		SVGA3dCmdHeader header;
3108		SVGA3dCmdDXSetStreamOutput body;
3109	} *cmd = container_of(header, typeof(*cmd), header);
3110	int ret;
3111
3112	if (!ctx_node) {
3113		DRM_ERROR("DX Context not set.\n");
3114		return -EINVAL;
3115	}
3116
3117	if (cmd->body.soid == SVGA3D_INVALID_ID)
3118		return 0;
3119
3120	/*
3121	 * When the device does not support SM5, the streamoutput-with-mob
3122	 * commands are not available to user-space. Simply return in this case.
3123	 */
3124	if (!has_sm5_context(dev_priv))
3125		return 0;
3126
3127	/*
3128	 * On an SM5-capable device, a failed lookup means user-space probably
3129	 * used the old streamoutput define command. Return without an error.
3130	 */
3131	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3132					 cmd->body.soid);
3133	if (IS_ERR(res))
3134		return 0;
3136
3137	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3138				      vmw_val_add_flag_noctx);
3139	if (ret) {
3140		DRM_ERROR("Error creating resource validation node.\n");
3141		return ret;
3142	}
3143
3144	binding.bi.ctx = ctx_node->ctx;
3145	binding.bi.res = res;
3146	binding.bi.bt = vmw_ctx_binding_so;
3147	binding.slot = 0; /* Only one SO set to context at a time. */
3148
3149	vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
3150			binding.slot);
3151
3152	return ret;
3153}
3154
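/**
 * vmw_cmd_indexed_instanced_indirect - Validate
 * SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */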
3155static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
3156					      struct vmw_sw_context *sw_context,
3157					      SVGA3dCmdHeader *header)
3158{
3159	struct vmw_draw_indexed_instanced_indirect_cmd {
3160		SVGA3dCmdHeader header;
3161		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
3162	} *cmd = container_of(header, typeof(*cmd), header);
3163
3164	if (!has_sm5_context(dev_priv))
3165		return -EINVAL;
3166
3167	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3168				 VMW_RES_DIRTY_NONE, user_surface_converter,
3169				 &cmd->body.argsBufferSid, NULL);
3170}
3171
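/**
 * vmw_cmd_instanced_indirect - Validate
 * SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */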
3172static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
3173				      struct vmw_sw_context *sw_context,
3174				      SVGA3dCmdHeader *header)
3175{
3176	struct vmw_draw_instanced_indirect_cmd {
3177		SVGA3dCmdHeader header;
3178		SVGA3dCmdDXDrawInstancedIndirect body;
3179	} *cmd = container_of(header, typeof(*cmd), header);
3180
3181	if (!has_sm5_context(dev_priv))
3182		return -EINVAL;
3183
3184	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3185				 VMW_RES_DIRTY_NONE, user_surface_converter,
3186				 &cmd->body.argsBufferSid, NULL);
3187}
3188
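/**
 * vmw_cmd_dispatch_indirect - Validate SVGA_3D_CMD_DX_DISPATCH_INDIRECT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */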
3189static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
3190				     struct vmw_sw_context *sw_context,
3191				     SVGA3dCmdHeader *header)
3192{
3193	struct vmw_dispatch_indirect_cmd {
3194		SVGA3dCmdHeader header;
3195		SVGA3dCmdDXDispatchIndirect body;
3196	} *cmd = container_of(header, typeof(*cmd), header);
3197
3198	if (!has_sm5_context(dev_priv))
3199		return -EINVAL;
3200
3201	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3202				 VMW_RES_DIRTY_NONE, user_surface_converter,
3203				 &cmd->body.argsBufferSid, NULL);
3204}
3205
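/**
 * vmw_cmd_check_not_3d - Validate a command from the non-3D command range
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: bytes remaining in the command stream. Out: the size of this
 * command.
 */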
3206static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3207				struct vmw_sw_context *sw_context,
3208				void *buf, uint32_t *size)
3209{
3210	uint32_t size_remaining = *size;
3211	uint32_t cmd_id;
3212
3213	cmd_id = ((uint32_t *)buf)[0];
3214	switch (cmd_id) {
3215	case SVGA_CMD_UPDATE:
3216		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3217		break;
3218	case SVGA_CMD_DEFINE_GMRFB:
3219		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3220		break;
3221	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3222		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3223		break;
3224	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3225		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3226		break;
3227	default:
3228		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
3229		return -EINVAL;
3230	}
3231
3232	if (*size > size_remaining) {
3233		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
3234			       cmd_id);
3235		return -EINVAL;
3236	}
3237
3238	if (unlikely(!sw_context->kernel)) {
3239		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
3240		return -EPERM;
3241	}
3242
3243	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3244		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3245
3246	return 0;
3247}
3248
3249static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3250	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3251		    false, false, false),
3252	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3253		    false, false, false),
3254	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3255		    true, false, false),
3256	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3257		    true, false, false),
3258	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3259		    true, false, false),
3260	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3261		    false, false, false),
3262	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3263		    false, false, false),
3264	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3265		    true, false, false),
3266	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3267		    true, false, false),
3268	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3269		    true, false, false),
3270	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3271		    &vmw_cmd_set_render_target_check, true, false, false),
3272	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3273		    true, false, false),
3274	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3275		    true, false, false),
3276	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3277		    true, false, false),
3278	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3279		    true, false, false),
3280	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3281		    true, false, false),
3282	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3283		    true, false, false),
3284	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3285		    true, false, false),
3286	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3287		    false, false, false),
3288	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3289		    true, false, false),
3290	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3291		    true, false, false),
3292	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3293		    true, false, false),
3294	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3295		    true, false, false),
3296	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3297		    true, false, false),
3298	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3299		    true, false, false),
3300	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3301		    true, false, false),
3302	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3303		    true, false, false),
3304	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3305		    true, false, false),
3306	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3307		    true, false, false),
3308	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3309		    &vmw_cmd_blt_surf_screen_check, false, false, false),
3310	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3311		    false, false, false),
3312	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3313		    false, false, false),
3314	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3315		    false, false, false),
3316	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3317		    false, false, false),
3318	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3319		    false, false, false),
3320	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
3321		    false, false, false),
3322	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
3323		    false, false, false),
3324	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
3325	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
3326	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
3327	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
3328	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
3329	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
3330	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3331		    false, false, true),
3332	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3333		    false, false, true),
3334	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3335		    false, false, true),
3336	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3337		    false, false, true),
3338	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3339		    false, false, true),
3340	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3341		    false, false, true),
3342	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3343		    false, false, true),
3344	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3345		    false, false, true),
3346	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3347		    true, false, true),
3348	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3349		    false, false, true),
3350	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3351		    true, false, true),
3352	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3353		    &vmw_cmd_update_gb_surface, true, false, true),
3354	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3355		    &vmw_cmd_readback_gb_image, true, false, true),
3356	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3357		    &vmw_cmd_readback_gb_surface, true, false, true),
3358	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3359		    &vmw_cmd_invalidate_gb_image, true, false, true),
3360	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3361		    &vmw_cmd_invalidate_gb_surface, true, false, true),
3362	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3363		    false, false, true),
3364	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3365		    false, false, true),
3366	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3367		    false, false, true),
3368	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3369		    false, false, true),
3370	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3371		    false, false, true),
3372	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3373		    false, false, true),
3374	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3375		    true, false, true),
3376	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3377		    false, false, true),
3378	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3379		    false, false, false),
3380	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3381		    true, false, true),
3382	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3383		    true, false, true),
3384	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3385		    true, false, true),
3386	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3387		    true, false, true),
3388	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3389		    true, false, true),
3390	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3391		    false, false, true),
3392	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3393		    false, false, true),
3394	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3395		    false, false, true),
3396	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3397		    false, false, true),
3398	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3399		    false, false, true),
3400	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3401		    false, false, true),
3402	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3403		    false, false, true),
3404	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3405		    false, false, true),
3406	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3407		    false, false, true),
3408	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3409		    false, false, true),
3410	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3411		    true, false, true),
3412	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3413		    false, false, true),
3414	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3415		    false, false, true),
3416	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3417		    false, false, true),
3418	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3419		    false, false, true),
3420
3421	/* SM commands */
3422	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3423		    false, false, true),
3424	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3425		    false, false, true),
3426	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3427		    false, false, true),
3428	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3429		    false, false, true),
3430	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3431		    false, false, true),
3432	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3433		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3434	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3435		    &vmw_cmd_dx_set_shader_res, true, false, true),
3436	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3437		    true, false, true),
3438	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3439		    true, false, true),
3440	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3441		    true, false, true),
3442	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3443		    true, false, true),
3444	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3445		    true, false, true),
3446	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3447		    &vmw_cmd_dx_cid_check, true, false, true),
3448	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3449		    true, false, true),
3450	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3451		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3452	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3453		    &vmw_cmd_dx_set_index_buffer, true, false, true),
3454	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3455		    &vmw_cmd_dx_set_rendertargets, true, false, true),
3456	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3457		    true, false, true),
3458	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3459		    &vmw_cmd_dx_cid_check, true, false, true),
3460	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3461		    &vmw_cmd_dx_cid_check, true, false, true),
3462	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3463		    true, false, true),
3464	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3465		    true, false, true),
3466	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3467		    true, false, true),
3468	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3469		    &vmw_cmd_dx_cid_check, true, false, true),
3470	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3471		    true, false, true),
3472	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3473		    true, false, true),
3474	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3475		    true, false, true),
3476	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3477		    true, false, true),
3478	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3479		    true, false, true),
3480	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3481		    true, false, true),
3482	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3483		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3484	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3485		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3486	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3487		    true, false, true),
3488	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3489		    true, false, true),
3490	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3491		    &vmw_cmd_dx_check_subresource, true, false, true),
3492	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3493		    &vmw_cmd_dx_check_subresource, true, false, true),
3494	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3495		    &vmw_cmd_dx_check_subresource, true, false, true),
3496	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3497		    &vmw_cmd_dx_view_define, true, false, true),
3498	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3499		    &vmw_cmd_dx_view_remove, true, false, true),
3500	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3501		    &vmw_cmd_dx_view_define, true, false, true),
3502	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3503		    &vmw_cmd_dx_view_remove, true, false, true),
3504	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3505		    &vmw_cmd_dx_view_define, true, false, true),
3506	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3507		    &vmw_cmd_dx_view_remove, true, false, true),
3508	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3509		    &vmw_cmd_dx_so_define, true, false, true),
3510	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3511		    &vmw_cmd_dx_cid_check, true, false, true),
3512	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3513		    &vmw_cmd_dx_so_define, true, false, true),
3514	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3515		    &vmw_cmd_dx_cid_check, true, false, true),
3516	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3517		    &vmw_cmd_dx_so_define, true, false, true),
3518	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3519		    &vmw_cmd_dx_cid_check, true, false, true),
3520	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3521		    &vmw_cmd_dx_so_define, true, false, true),
3522	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3523		    &vmw_cmd_dx_cid_check, true, false, true),
3524	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3525		    &vmw_cmd_dx_so_define, true, false, true),
3526	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3527		    &vmw_cmd_dx_cid_check, true, false, true),
3528	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3529		    &vmw_cmd_dx_define_shader, true, false, true),
3530	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3531		    &vmw_cmd_dx_destroy_shader, true, false, true),
3532	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3533		    &vmw_cmd_dx_bind_shader, true, false, true),
3534	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3535		    &vmw_cmd_dx_so_define, true, false, true),
3536	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3537		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
3538	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
3539		    &vmw_cmd_dx_set_streamoutput, true, false, true),
3540	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3541		    &vmw_cmd_dx_set_so_targets, true, false, true),
3542	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3543		    &vmw_cmd_dx_cid_check, true, false, true),
3544	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3545		    &vmw_cmd_dx_cid_check, true, false, true),
3546	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3547		    &vmw_cmd_buffer_copy_check, true, false, true),
3548	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3549		    &vmw_cmd_pred_copy_check, true, false, true),
3550	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3551		    &vmw_cmd_dx_transfer_from_buffer,
3552		    true, false, true),
3553	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
3554		    &vmw_cmd_dx_set_constant_buffer_offset,
3555		    true, false, true),
3556	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
3557		    &vmw_cmd_dx_set_constant_buffer_offset,
3558		    true, false, true),
3559	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
3560		    &vmw_cmd_dx_set_constant_buffer_offset,
3561		    true, false, true),
3562	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
3563		    &vmw_cmd_dx_set_constant_buffer_offset,
3564		    true, false, true),
3565	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
3566		    &vmw_cmd_dx_set_constant_buffer_offset,
3567		    true, false, true),
3568	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
3569		    &vmw_cmd_dx_set_constant_buffer_offset,
3570		    true, false, true),
3571	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3572		    true, false, true),
3573
3574	/*
3575	 * SM5 commands
3576	 */
3577	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
3578		    true, false, true),
3579	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
3580		    true, false, true),
3581	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
3582		    true, false, true),
3583	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
3584		    &vmw_cmd_clear_uav_float, true, false, true),
3585	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
3586		    false, true),
3587	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
3588		    true),
3589	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
3590		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
3591	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
3592		    &vmw_cmd_instanced_indirect, true, false, true),
3593	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
3594	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
3595		    &vmw_cmd_dispatch_indirect, true, false, true),
3596	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
3597		    false, true),
3598	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
3599		    &vmw_cmd_sm5_view_define, true, false, true),
3600	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
3601		    &vmw_cmd_dx_define_streamoutput, true, false, true),
3602	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
3603		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
3604	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
3605		    &vmw_cmd_dx_so_define, true, false, true),
3606};
3607
3608bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3609{
3610	u32 cmd_id = ((u32 *) buf)[0];
3611
3612	if (cmd_id >= SVGA_CMD_MAX) {
3613		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3614		const struct vmw_cmd_entry *entry;
3615
3616		*size = header->size + sizeof(SVGA3dCmdHeader);
3617		cmd_id = header->id;
3618		if (cmd_id >= SVGA_3D_CMD_MAX)
3619			return false;
3620
3621		cmd_id -= SVGA_3D_CMD_BASE;
3622		entry = &vmw_cmd_entries[cmd_id];
3623		*cmd = entry->cmd_name;
3624		return true;
3625	}
3626
3627	switch (cmd_id) {
3628	case SVGA_CMD_UPDATE:
3629		*cmd = "SVGA_CMD_UPDATE";
3630		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3631		break;
3632	case SVGA_CMD_DEFINE_GMRFB:
3633		*cmd = "SVGA_CMD_DEFINE_GMRFB";
3634		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3635		break;
3636	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3637		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3638		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3639		break;
3640	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3641		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3642		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3643		break;
3644	default:
3645		*cmd = "UNKNOWN";
3646		*size = 0;
3647		return false;
3648	}
3649
3650	return true;
3651}
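/*
 * Illustrative sketch (not part of the driver): a debugging aid could use
 * vmw_cmd_describe() above to walk a raw command stream and log command
 * names. vmw_dump_commands() is a hypothetical helper name, and the sketch
 * assumes @buf points at a well-formed stream of @bytes bytes:
 *
 *	static void vmw_dump_commands(const void *buf, u32 bytes)
 *	{
 *		const char *name;
 *		u32 size;
 *
 *		while (bytes >= sizeof(u32)) {
 *			if (!vmw_cmd_describe(buf, &size, &name) ||
 *			    !size || size > bytes)
 *				break;
 *			pr_debug("cmd: %s (%u bytes)\n", name, size);
 *			buf += size;
 *			bytes -= size;
 *		}
 *	}
 */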
3652
3653static int vmw_cmd_check(struct vmw_private *dev_priv,
3654			 struct vmw_sw_context *sw_context, void *buf,
3655			 uint32_t *size)
3656{
3657	uint32_t cmd_id;
3658	uint32_t size_remaining = *size;
3659	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3660	int ret;
3661	const struct vmw_cmd_entry *entry;
3662	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3663
3664	cmd_id = ((uint32_t *)buf)[0];
3665	/* Handle any non-3D commands */
3666	if (unlikely(cmd_id < SVGA_CMD_MAX))
3667		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3668
3670	cmd_id = header->id;
3671	*size = header->size + sizeof(SVGA3dCmdHeader);
3672
3673	cmd_id -= SVGA_3D_CMD_BASE;
3674	if (unlikely(*size > size_remaining))
3675		goto out_invalid;
3676
3677	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3678		goto out_invalid;
3679
3680	entry = &vmw_cmd_entries[cmd_id];
3681	if (unlikely(!entry->func))
3682		goto out_invalid;
3683
3684	if (unlikely(!entry->user_allow && !sw_context->kernel))
3685		goto out_privileged;
3686
3687	if (unlikely(entry->gb_disable && gb))
3688		goto out_old;
3689
3690	if (unlikely(entry->gb_enable && !gb))
3691		goto out_new;
3692
3693	ret = entry->func(dev_priv, sw_context, header);
3694	if (unlikely(ret != 0)) {
3695		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3696			       cmd_id + SVGA_3D_CMD_BASE, ret);
3697		return ret;
3698	}
3699
3700	return 0;
3701out_invalid:
3702	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3703		       cmd_id + SVGA_3D_CMD_BASE);
3704	return -EINVAL;
3705out_privileged:
3706	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3707		       cmd_id + SVGA_3D_CMD_BASE);
3708	return -EPERM;
3709out_old:
3710	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3711		       cmd_id + SVGA_3D_CMD_BASE);
3712	return -EINVAL;
3713out_new:
3714	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3715		       cmd_id + SVGA_3D_CMD_BASE);
3716	return -EINVAL;
3717}
3718
3719static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3720			     struct vmw_sw_context *sw_context, void *buf,
3721			     uint32_t size)
3722{
3723	int32_t cur_size = size;
3724	int ret;
3725
3726	sw_context->buf_start = buf;
3727
3728	while (cur_size > 0) {
3729		size = cur_size;
3730		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3731		if (unlikely(ret != 0))
3732			return ret;
3733		buf = (void *)((unsigned long) buf + size);
3734		cur_size -= size;
3735	}
3736
3737	if (unlikely(cur_size != 0)) {
3738		VMW_DEBUG_USER("Command verifier out of sync.\n");
3739		return -EINVAL;
3740	}
3741
3742	return 0;
3743}
3744
3745static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3746{
3747	/* Memory is validation context memory, so no need to free it */
3748	INIT_LIST_HEAD(&sw_context->bo_relocations);
3749}
3750
3751static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3752{
3753	struct vmw_relocation *reloc;
3754	struct ttm_buffer_object *bo;
3755
3756	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3757		bo = &reloc->vbo->tbo;
3758		switch (bo->resource->mem_type) {
3759		case TTM_PL_VRAM:
3760			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
3761			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3762			break;
3763		case VMW_PL_GMR:
3764			reloc->location->gmrId = bo->resource->start;
3765			break;
3766		case VMW_PL_MOB:
3767			*reloc->mob_loc = bo->resource->start;
3768			break;
3769		default:
3770			BUG();
3771		}
3772	}
3773	vmw_free_relocations(sw_context);
3774}
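/*
 * Worked example of the patching above (assuming PAGE_SHIFT == 12): a
 * buffer validated into VRAM at resource->start == 0x100 has its guest
 * pointer patched to offset += 0x100000 with gmrId = SVGA_GMR_FRAMEBUFFER;
 * a GMR-backed buffer just gets the GMR id, and a MOB-backed buffer gets
 * its MOB id written through @mob_loc.
 */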
3775
3776static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3777				 uint32_t size)
3778{
3779	if (likely(sw_context->cmd_bounce_size >= size))
3780		return 0;
3781
3782	if (sw_context->cmd_bounce_size == 0)
3783		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3784
3785	while (sw_context->cmd_bounce_size < size) {
3786		sw_context->cmd_bounce_size =
3787			PAGE_ALIGN(sw_context->cmd_bounce_size +
3788				   (sw_context->cmd_bounce_size >> 1));
3789	}
3790
3791	vfree(sw_context->cmd_bounce);
3792	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3793
3794	if (sw_context->cmd_bounce == NULL) {
3795		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3796		sw_context->cmd_bounce_size = 0;
3797		return -ENOMEM;
3798	}
3799
3800	return 0;
3801}
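/*
 * Growth example (illustrative; assumes VMWGFX_CMD_BOUNCE_INIT_SIZE is
 * 32 KiB and PAGE_SIZE is 4 KiB): each iteration above computes
 * PAGE_ALIGN(size + size / 2), so the bounce buffer grows roughly
 * geometrically, e.g. 32 KiB -> 48 KiB -> 72 KiB -> 108 KiB -> ...,
 * keeping the number of reallocations logarithmic in the largest
 * submission size seen so far.
 */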
3802
3803/*
3804 * vmw_execbuf_fence_commands - create and submit a command stream fence
3805 *
3806 * Creates a fence object and submits a command stream marker.
3807 * If this fails for some reason, we sync the fifo and set @p_fence to NULL.
3808 * It is then safe to fence buffers with a NULL pointer.
3809 *
3810 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3811 * user-space handle is created; otherwise no handle is created.
3812 */
3813
3814int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3815			       struct vmw_private *dev_priv,
3816			       struct vmw_fence_obj **p_fence,
3817			       uint32_t *p_handle)
3818{
3819	uint32_t sequence;
3820	int ret;
3821	bool synced = false;
3822
3823	/* p_handle implies file_priv. */
3824	BUG_ON(p_handle != NULL && file_priv == NULL);
3825
3826	ret = vmw_cmd_send_fence(dev_priv, &sequence);
3827	if (unlikely(ret != 0)) {
3828		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3829		synced = true;
3830	}
3831
3832	if (p_handle != NULL)
3833		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3834					    sequence, p_fence, p_handle);
3835	else
3836		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3837
3838	if (unlikely(ret != 0 && !synced)) {
3839		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
3840					 false, VMW_FENCE_WAIT_TIMEOUT);
3841		*p_fence = NULL;
3842	}
3843
3844	return ret;
3845}
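/*
 * Typical call pattern (sketch mirroring the internal callers; @ctx stands
 * in for a validation context): the return value may be ignored because a
 * failed submission has already synced the fifo, and the possibly-NULL
 * fence is safe to hand to the fencing helpers:
 *
 *	struct vmw_fence_obj *fence = NULL;
 *
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *	vmw_validation_bo_fence(ctx, fence);	// NULL fence is tolerated
 *	if (fence != NULL)
 *		vmw_fence_obj_unreference(&fence);
 */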
3846
3847/**
3848 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3849 *
3850 * @dev_priv: Pointer to a vmw_private struct.
3851 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3852 * @ret: Return value from fence object creation.
3853 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3854 * the information should be copied.
3855 * @fence: Pointer to the fence object.
3856 * @fence_handle: User-space fence handle.
3857 * @out_fence_fd: exported file descriptor for the fence.  -1 if not used
3858 *
3859 * This function copies fence information to user-space. If copying fails,
3860 * the user-space struct drm_vmw_fence_rep::error member is left untouched;
3861 * if user-space has preloaded it with -EFAULT, the failed copy can then be
3862 * detected there.
3863 *
3864 * Also, if copying fails, user-space will be unable to signal the fence object,
3865 * so we wait for it immediately and then unreference the user-space reference.
3866 */
3867int
3868vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3869			    struct vmw_fpriv *vmw_fp, int ret,
3870			    struct drm_vmw_fence_rep __user *user_fence_rep,
3871			    struct vmw_fence_obj *fence, uint32_t fence_handle,
3872			    int32_t out_fence_fd)
3873{
3874	struct drm_vmw_fence_rep fence_rep;
3875
3876	if (user_fence_rep == NULL)
3877		return 0;
3878
3879	memset(&fence_rep, 0, sizeof(fence_rep));
3880
3881	fence_rep.error = ret;
3882	fence_rep.fd = out_fence_fd;
3883	if (ret == 0) {
3884		BUG_ON(fence == NULL);
3885
3886		fence_rep.handle = fence_handle;
3887		fence_rep.seqno = fence->base.seqno;
3888		vmw_update_seqno(dev_priv);
3889		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3890	}
3891
3892	/*
3893	 * copy_to_user errors will be detected by user space not seeing
3894	 * fence_rep::error filled in. Typically user-space would have pre-set
3895	 * that member to -EFAULT.
3896	 */
3897	ret = copy_to_user(user_fence_rep, &fence_rep,
3898			   sizeof(fence_rep));
3899
3900	/*
3901	 * User-space lost the fence object. We need to sync and unreference the
3902	 * handle.
3903	 */
3904	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3905		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
3906		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3907		(void) vmw_fence_obj_wait(fence, false, false,
3908					  VMW_FENCE_WAIT_TIMEOUT);
3909	}
3910
3911	return ret ? -EFAULT : 0;
3912}
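/*
 * Illustrative user-space counterpart (not part of this driver): to make a
 * failed copy_to_user() above detectable, user-space would preload the
 * error member before submitting. A libdrm-based sketch, where
 * handle_lost_fence() is a hypothetical recovery path:
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *	struct drm_vmw_execbuf_arg arg = { 0 };
 *
 *	// ... fill in arg.commands, arg.command_size, arg.version ...
 *	arg.fence_rep = (unsigned long)&rep;
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (rep.error == -EFAULT)
 *		handle_lost_fence();	// kernel never wrote the reply back
 */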
3913
3914/**
3915 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3916 *
3917 * @dev_priv: Pointer to a device private structure.
3918 * @kernel_commands: Pointer to the unpatched command batch.
3919 * @command_size: Size of the unpatched command batch.
3920 * @sw_context: Structure holding the relocation lists.
3921 *
3922 * Side effects: If this function returns 0, then the command batch pointed to
3923 * by @kernel_commands will have been modified.
3924 */
3925static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3926				   void *kernel_commands, u32 command_size,
3927				   struct vmw_sw_context *sw_context)
3928{
3929	void *cmd;
3930
3931	if (sw_context->dx_ctx_node)
3932		cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
3933					  sw_context->dx_ctx_node->ctx->id);
3934	else
3935		cmd = VMW_CMD_RESERVE(dev_priv, command_size);
3936
3937	if (!cmd)
3938		return -ENOMEM;
3939
3940	vmw_apply_relocations(sw_context);
3941	memcpy(cmd, kernel_commands, command_size);
3942	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3943	vmw_resource_relocations_free(&sw_context->res_relocations);
3944	vmw_cmd_commit(dev_priv, command_size);
3945
3946	return 0;
3947}
3948
3949/**
3950 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3951 * command buffer manager.
3952 *
3953 * @dev_priv: Pointer to a device private structure.
3954 * @header: Opaque handle to the command buffer allocation.
3955 * @command_size: Size of the unpatched command batch.
3956 * @sw_context: Structure holding the relocation lists.
3957 *
3958 * Side effects: If this function returns 0, then the command buffer represented
3959 * by @header will have been modified.
3960 */
3961static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3962				     struct vmw_cmdbuf_header *header,
3963				     u32 command_size,
3964				     struct vmw_sw_context *sw_context)
3965{
3966	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3967		  SVGA3D_INVALID_ID);
3968	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3969				       header);
3970
3971	vmw_apply_relocations(sw_context);
3972	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3973	vmw_resource_relocations_free(&sw_context->res_relocations);
3974	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3975
3976	return 0;
3977}
3978
3979/**
3980 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3981 * submission using a command buffer.
3982 *
3983 * @dev_priv: Pointer to a device private structure.
3984 * @user_commands: User-space pointer to the commands to be submitted.
3985 * @command_size: Size of the unpatched command batch.
3986 * @header: Out parameter returning the opaque pointer to the command buffer.
3987 *
3988 * This function checks whether we can use the command buffer manager for
3989 * submission and if so, creates a command buffer of suitable size and copies
3990 * the user data into that buffer.
3991 *
3992 * On successful return, the function returns a pointer to the data in the
3993 * command buffer and *@header is set to non-NULL.
3994 *
3995 * @kernel_commands: If the command buffer manager could not be used, the
3996 * function returns the value of @kernel_commands as passed in. That value
3997 * may be NULL. In that case, *@header is set to NULL.
3998 *
3999 * If an error is encountered, the function returns an error pointer value.
4000 * If the function is interrupted by a signal while sleeping, it returns
4001 * -ERESTARTSYS cast to an error pointer value.
4002 */
4003static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
4004				void __user *user_commands,
4005				void *kernel_commands, u32 command_size,
4006				struct vmw_cmdbuf_header **header)
4007{
4008	size_t cmdbuf_size;
4009	int ret;
4010
4011	*header = NULL;
4012	if (command_size > SVGA_CB_MAX_SIZE) {
4013		VMW_DEBUG_USER("Command buffer is too large.\n");
4014		return ERR_PTR(-EINVAL);
4015	}
4016
4017	if (!dev_priv->cman || kernel_commands)
4018		return kernel_commands;
4019
4020	/* If possible, add a little space for fencing. */
4021	cmdbuf_size = command_size + 512;
4022	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
4023	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
4024					   header);
4025	if (IS_ERR(kernel_commands))
4026		return kernel_commands;
4027
4028	ret = copy_from_user(kernel_commands, user_commands, command_size);
4029	if (ret) {
4030		VMW_DEBUG_USER("Failed copying commands.\n");
4031		vmw_cmdbuf_header_free(*header);
4032		*header = NULL;
4033		return ERR_PTR(-EFAULT);
4034	}
4035
4036	return kernel_commands;
4037}
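/*
 * Caller-side sketch of the return convention documented above (this
 * mirrors what vmw_execbuf_process() below actually does):
 *
 *	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
 *					     kernel_commands, command_size,
 *					     &header);
 *	if (IS_ERR(kernel_commands))
 *		return PTR_ERR(kernel_commands);
 *	if (header)
 *		ret = vmw_execbuf_submit_cmdbuf(...);	// cmdbuf manager path
 *	else
 *		ret = vmw_execbuf_submit_fifo(...);	// legacy fifo path
 */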
4038
4039static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4040				   struct vmw_sw_context *sw_context,
4041				   uint32_t handle)
4042{
4043	struct vmw_resource *res;
4044	int ret;
4045	unsigned int size;
4046
4047	if (handle == SVGA3D_INVALID_ID)
4048		return 0;
4049
4050	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
4051	ret = vmw_validation_preload_res(sw_context->ctx, size);
4052	if (ret)
4053		return ret;
4054
4055	ret = vmw_user_resource_lookup_handle
4056		(dev_priv, sw_context->fp->tfile, handle,
4057		 user_context_converter, &res);
4058	if (ret != 0) {
4059		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
4060			       (unsigned int) handle);
4061		return ret;
4062	}
4063
4064	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
4065				      vmw_val_add_flag_none);
4066	if (unlikely(ret != 0)) {
4067		vmw_resource_unreference(&res);
4068		return ret;
4069	}
4070
4071	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
4072	sw_context->man = vmw_context_res_man(res);
4073
4074	vmw_resource_unreference(&res);
4075	return 0;
4076}
4077
4078int vmw_execbuf_process(struct drm_file *file_priv,
4079			struct vmw_private *dev_priv,
4080			void __user *user_commands, void *kernel_commands,
4081			uint32_t command_size, uint64_t throttle_us,
4082			uint32_t dx_context_handle,
4083			struct drm_vmw_fence_rep __user *user_fence_rep,
4084			struct vmw_fence_obj **out_fence, uint32_t flags)
4085{
4086	struct vmw_sw_context *sw_context = &dev_priv->ctx;
4087	struct vmw_fence_obj *fence = NULL;
4088	struct vmw_cmdbuf_header *header;
4089	uint32_t handle = 0;
4090	int ret;
4091	int32_t out_fence_fd = -1;
4092	struct sync_file *sync_file = NULL;
4093	DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);
4094
4095	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4096		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
4097		if (out_fence_fd < 0) {
4098			VMW_DEBUG_USER("Failed to get a fence fd.\n");
4099			return out_fence_fd;
4100		}
4101	}
4102
4103	if (throttle_us)
4104		VMW_DEBUG_USER("Throttling is no longer supported.\n");
4106
4107	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4108					     kernel_commands, command_size,
4109					     &header);
4110	if (IS_ERR(kernel_commands)) {
4111		ret = PTR_ERR(kernel_commands);
4112		goto out_free_fence_fd;
4113	}
4114
4115	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4116	if (ret) {
4117		ret = -ERESTARTSYS;
4118		goto out_free_header;
4119	}
4120
4121	sw_context->kernel = false;
4122	if (kernel_commands == NULL) {
4123		ret = vmw_resize_cmd_bounce(sw_context, command_size);
4124		if (unlikely(ret != 0))
4125			goto out_unlock;
4126
4127		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
4128				     command_size);
4129		if (unlikely(ret != 0)) {
4130			ret = -EFAULT;
4131			VMW_DEBUG_USER("Failed copying commands.\n");
4132			goto out_unlock;
4133		}
4134
4135		kernel_commands = sw_context->cmd_bounce;
4136	} else if (!header) {
4137		sw_context->kernel = true;
4138	}
4139
4140	sw_context->filp = file_priv;
4141	sw_context->fp = vmw_fpriv(file_priv);
4142	INIT_LIST_HEAD(&sw_context->ctx_list);
4143	sw_context->cur_query_bo = dev_priv->pinned_bo;
4144	sw_context->last_query_ctx = NULL;
4145	sw_context->needs_post_query_barrier = false;
4146	sw_context->dx_ctx_node = NULL;
4147	sw_context->dx_query_mob = NULL;
4148	sw_context->dx_query_ctx = NULL;
4149	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4150	INIT_LIST_HEAD(&sw_context->res_relocations);
4151	INIT_LIST_HEAD(&sw_context->bo_relocations);
4152
4153	if (sw_context->staged_bindings)
4154		vmw_binding_state_reset(sw_context->staged_bindings);
4155
4156	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4157	sw_context->ctx = &val_ctx;
4158	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4159	if (unlikely(ret != 0))
4160		goto out_err_nores;
4161
4162	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4163				command_size);
4164	if (unlikely(ret != 0))
4165		goto out_err_nores;
4166
4167	ret = vmw_resources_reserve(sw_context);
4168	if (unlikely(ret != 0))
4169		goto out_err_nores;
4170
4171	ret = vmw_validation_bo_reserve(&val_ctx, true);
4172	if (unlikely(ret != 0))
4173		goto out_err_nores;
4174
4175	ret = vmw_validation_bo_validate(&val_ctx, true);
4176	if (unlikely(ret != 0))
4177		goto out_err;
4178
4179	ret = vmw_validation_res_validate(&val_ctx, true);
4180	if (unlikely(ret != 0))
4181		goto out_err;
4182
4183	vmw_validation_drop_ht(&val_ctx);
4184
4185	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4186	if (unlikely(ret != 0)) {
4187		ret = -ERESTARTSYS;
4188		goto out_err;
4189	}
4190
4191	if (dev_priv->has_mob) {
4192		ret = vmw_rebind_contexts(sw_context);
4193		if (unlikely(ret != 0))
4194			goto out_unlock_binding;
4195	}
4196
4197	if (!header) {
4198		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4199					      command_size, sw_context);
4200	} else {
4201		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4202						sw_context);
4203		header = NULL;
4204	}
4205	mutex_unlock(&dev_priv->binding_mutex);
4206	if (ret)
4207		goto out_err;
4208
4209	vmw_query_bo_switch_commit(dev_priv, sw_context);
4210	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
4211					 (user_fence_rep) ? &handle : NULL);
4212	/*
4213	 * This error is harmless, because if fence submission fails,
4214	 * vmw_execbuf_fence_commands() will have synced. The error will be
4215	 * propagated to user-space in @user_fence_rep.
4216	 */
4217	if (ret != 0)
4218		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
4219
4220	vmw_execbuf_bindings_commit(sw_context, false);
4221	vmw_bind_dx_query_mob(sw_context);
4222	vmw_validation_res_unreserve(&val_ctx, false);
4223
4224	vmw_validation_bo_fence(sw_context->ctx, fence);
4225
4226	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4227		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
4228
4229	/*
4230	 * If anything fails here, give up trying to export the fence and do a
4231	 * sync, since user mode will not be able to sync the fence itself.
4232	 * This ensures we are still functionally correct.
4233	 */
4234	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4236		sync_file = sync_file_create(&fence->base);
4237		if (!sync_file) {
4238			VMW_DEBUG_USER("Sync file create failed for fence\n");
4239			put_unused_fd(out_fence_fd);
4240			out_fence_fd = -1;
4241
4242			(void) vmw_fence_obj_wait(fence, false, false,
4243						  VMW_FENCE_WAIT_TIMEOUT);
4244		}
4245	}
4246
4247	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4248					  user_fence_rep, fence, handle, out_fence_fd);
4249
4250	if (sync_file) {
4251		if (ret) {
4252			/* usercopy of fence failed, put the file object */
4253			fput(sync_file->file);
4254			put_unused_fd(out_fence_fd);
4255		} else {
4256			/* Link the fence with the FD created earlier */
4257			fd_install(out_fence_fd, sync_file->file);
4258		}
4259	}
4260
4261	/* Don't unreference when handing fence out */
4262	if (unlikely(out_fence != NULL)) {
4263		*out_fence = fence;
4264		fence = NULL;
4265	} else if (likely(fence != NULL)) {
4266		vmw_fence_obj_unreference(&fence);
4267	}
4268
4269	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4270	mutex_unlock(&dev_priv->cmdbuf_mutex);
4271
4272	/*
4273	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4274	 * in resource destruction paths.
4275	 */
4276	vmw_validation_unref_lists(&val_ctx);
4277
4278	return ret;
4279
4280out_unlock_binding:
4281	mutex_unlock(&dev_priv->binding_mutex);
4282out_err:
4283	vmw_validation_bo_backoff(&val_ctx);
4284out_err_nores:
4285	vmw_execbuf_bindings_commit(sw_context, true);
4286	vmw_validation_res_unreserve(&val_ctx, true);
4287	vmw_resource_relocations_free(&sw_context->res_relocations);
4288	vmw_free_relocations(sw_context);
4289	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4290		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4291out_unlock:
4292	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4293	vmw_validation_drop_ht(&val_ctx);
4294	WARN_ON(!list_empty(&sw_context->ctx_list));
4295	mutex_unlock(&dev_priv->cmdbuf_mutex);
4296
4297	/*
4298	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4299	 * in resource destruction paths.
4300	 */
4301	vmw_validation_unref_lists(&val_ctx);
4302out_free_header:
4303	if (header)
4304		vmw_cmdbuf_header_free(header);
4305out_free_fence_fd:
4306	if (out_fence_fd >= 0)
4307		put_unused_fd(out_fence_fd);
4308
4309	return ret;
4310}
4311
4312/**
4313 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4314 *
4315 * @dev_priv: The device private structure.
4316 *
4317 * This function is called to idle the fifo and unpin the query buffer if the
4318 * normal way to do this hits an error, which should typically be extremely
4319 * rare.
4320 */
4321static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4322{
4323	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
4324
4325	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4326	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4327	if (dev_priv->dummy_query_bo_pinned) {
4328		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4329		dev_priv->dummy_query_bo_pinned = false;
4330	}
4331}
4332
4334/**
4335 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
4336 * bo.
4337 *
4338 * @dev_priv: The device private structure.
4339 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
4340 * query barrier that flushes all queries touching the current buffer pointed to
4341 * by @dev_priv->pinned_bo
4342 *
4343 * This function should be used to unpin the pinned query bo, or as a query
4344 * barrier when we need to make sure that all queries have finished before the
4345 * next fifo command. (For example on hardware context destructions where the
4346 * hardware may otherwise leak unfinished queries).
4347 *
4348 * This function does not return any failure codes, but makes attempts to do
4349 * safe unpinning in case of errors.
4350 *
4351 * The function will synchronize on the previous query barrier, and will thus
4352 * not finish until that barrier has executed.
4353 *
4354 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
4355 * calling this function.
4356 */
4357void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4358				     struct vmw_fence_obj *fence)
4359{
4360	int ret = 0;
4361	struct vmw_fence_obj *lfence = NULL;
4362	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
4363
4364	if (dev_priv->pinned_bo == NULL)
4365		goto out_unlock;
4366
4367	vmw_bo_placement_set(dev_priv->pinned_bo,
4368			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
4369			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
4370	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo);
4371	if (ret)
4372		goto out_no_reserve;
4373
4374	vmw_bo_placement_set(dev_priv->dummy_query_bo,
4375			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
4376			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
4377	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo);
4378	if (ret)
4379		goto out_no_reserve;
4380
4381	ret = vmw_validation_bo_reserve(&val_ctx, false);
4382	if (ret)
4383		goto out_no_reserve;
4384
4385	if (dev_priv->query_cid_valid) {
4386		BUG_ON(fence != NULL);
4387		ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
4388		if (ret)
4389			goto out_no_emit;
4390		dev_priv->query_cid_valid = false;
4391	}
4392
4393	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4394	if (dev_priv->dummy_query_bo_pinned) {
4395		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4396		dev_priv->dummy_query_bo_pinned = false;
4397	}
4398	if (fence == NULL) {
4399		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4400						  NULL);
4401		fence = lfence;
4402	}
4403	vmw_validation_bo_fence(&val_ctx, fence);
4404	if (lfence != NULL)
4405		vmw_fence_obj_unreference(&lfence);
4406
4407	vmw_validation_unref_lists(&val_ctx);
4408	vmw_bo_unreference(&dev_priv->pinned_bo);
4409
4410out_unlock:
4411	return;
4412out_no_emit:
4413	vmw_validation_bo_backoff(&val_ctx);
4414out_no_reserve:
4415	vmw_validation_unref_lists(&val_ctx);
4416	vmw_execbuf_unpin_panic(dev_priv);
4417	vmw_bo_unreference(&dev_priv->pinned_bo);
4418}
4419
4420/**
4421 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
4422 *
4423 * @dev_priv: The device private structure.
4424 *
4425 * This function should be used to unpin the pinned query bo, or as a query
4426 * barrier when we need to make sure that all queries have finished before the
4427 * next fifo command. (For example on hardware context destructions where the
4428 * hardware may otherwise leak unfinished queries).
4429 *
4430 * This function does not return any failure codes, but makes attempts to do
4431 * safe unpinning in case of errors.
4432 *
4433 * The function will synchronize on the previous query barrier, and will thus
4434 * not finish until that barrier has executed.
4435 */
4436void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4437{
4438	mutex_lock(&dev_priv->cmdbuf_mutex);
4439	if (dev_priv->query_cid_valid)
4440		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4441	mutex_unlock(&dev_priv->cmdbuf_mutex);
4442}
4443
4444int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
4445		      struct drm_file *file_priv)
4446{
4447	struct vmw_private *dev_priv = vmw_priv(dev);
4448	struct drm_vmw_execbuf_arg *arg = data;
4449	int ret;
4450	struct dma_fence *in_fence = NULL;
4451
4452	MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
4453	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);
4454
4455	/*
4456	 * Extend the ioctl argument while maintaining backwards compatibility:
4457	 * We take different code paths depending on the value of arg->version.
4458	 *
4459	 * Note: The ioctl argument is extended and zero-padded by core DRM.
4460	 */
4461	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
4462		     arg->version == 0)) {
4463		VMW_DEBUG_USER("Incorrect execbuf version.\n");
4464		ret = -EINVAL;
4465		goto mksstats_out;
4466	}
4467
4468	switch (arg->version) {
4469	case 1:
4470		/* For v1, core DRM has extended and zero-padded the data */
4471		arg->context_handle = (uint32_t) -1;
4472		break;
4473	case 2:
4474	default:
4475		/* For v2 and later, core DRM will have copied it correctly */
4476		break;
4477	}
4478
4479	/* If a fence FD was imported from elsewhere, wait on it */
4480	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4481		in_fence = sync_file_get_fence(arg->imported_fence_fd);
4482
4483		if (!in_fence) {
4484			VMW_DEBUG_USER("Cannot get imported fence\n");
4485			ret = -EINVAL;
4486			goto mksstats_out;
4487		}
4488
4489		ret = dma_fence_wait(in_fence, true);
4490		if (ret)
4491			goto out;
4492	}
4493
4494	ret = vmw_execbuf_process(file_priv, dev_priv,
4495				  (void __user *)(unsigned long)arg->commands,
4496				  NULL, arg->command_size, arg->throttle_us,
4497				  arg->context_handle,
4498				  (void __user *)(unsigned long)arg->fence_rep,
4499				  NULL, arg->flags);
4500
4501	if (unlikely(ret != 0))
4502		goto out;
4503
4504	vmw_kms_cursor_post_execbuf(dev_priv);
4505
4506out:
4507	if (in_fence)
4508		dma_fence_put(in_fence);
4509
4510mksstats_out:
4511	MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
4512	return ret;
4513}