   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27#include "vmwgfx_binding.h"
  28#include "vmwgfx_bo.h"
  29#include "vmwgfx_drv.h"
  30#include "vmwgfx_mksstat.h"
  31#include "vmwgfx_so.h"
  32
  33#include <drm/ttm/ttm_bo.h>
  34#include <drm/ttm/ttm_placement.h>
  35
  36#include <linux/sync_file.h>
  37#include <linux/hashtable.h>
  38#include <linux/vmalloc.h>
  39
  40/*
   41 * Helper macro to get dx_ctx_node if available; otherwise print an error
   42 * message. This is for use in command verifier functions where, if dx_ctx_node
   43 * is not set, the command is invalid.
  44 */
  45#define VMW_GET_CTX_NODE(__sw_context)                                        \
  46({                                                                            \
  47	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({            \
  48		VMW_DEBUG_USER("SM context is not set at %s\n", __func__);    \
  49		__sw_context->dx_ctx_node;                                    \
  50	});                                                                   \
  51})
  52
  53#define VMW_DECLARE_CMD_VAR(__var, __type)                                    \
  54	struct {                                                              \
  55		SVGA3dCmdHeader header;                                       \
  56		__type body;                                                  \
  57	} __var
  58
  59/**
  60 * struct vmw_relocation - Buffer object relocation
  61 *
  62 * @head: List head for the command submission context's relocation list
  63 * @vbo: Non ref-counted pointer to buffer object
  64 * @mob_loc: Pointer to location for mob id to be modified
  65 * @location: Pointer to location for guest pointer to be modified
  66 */
  67struct vmw_relocation {
  68	struct list_head head;
  69	struct vmw_bo *vbo;
  70	union {
  71		SVGAMobId *mob_loc;
  72		SVGAGuestPtr *location;
  73	};
  74};
  75
  76/**
  77 * enum vmw_resource_relocation_type - Relocation type for resources
  78 *
  79 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
  80 * command stream is replaced with the actual id after validation.
  81 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
  82 * with a NOP.
  83 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
  84 * validation is -1, the command is replaced with a NOP. Otherwise no action.
  85 * @vmw_res_rel_max: Last value in the enum - used for error checking
   86 */
  87enum vmw_resource_relocation_type {
  88	vmw_res_rel_normal,
  89	vmw_res_rel_nop,
  90	vmw_res_rel_cond_nop,
  91	vmw_res_rel_max
  92};
  93
  94/**
  95 * struct vmw_resource_relocation - Relocation info for resources
  96 *
  97 * @head: List head for the software context's relocation list.
  98 * @res: Non-ref-counted pointer to the resource.
  99 * @offset: Offset of single byte entries into the command buffer where the id
 100 * that needs fixup is located.
 101 * @rel_type: Type of relocation.
 102 */
 103struct vmw_resource_relocation {
 104	struct list_head head;
 105	const struct vmw_resource *res;
 106	u32 offset:29;
 107	enum vmw_resource_relocation_type rel_type:3;
 108};
 109
 110/**
 111 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 112 *
 113 * @head: List head of context list
 114 * @ctx: The context resource
 115 * @cur: The context's persistent binding state
 116 * @staged: The binding state changes of this command buffer
 117 */
 118struct vmw_ctx_validation_info {
 119	struct list_head head;
 120	struct vmw_resource *ctx;
 121	struct vmw_ctx_binding_state *cur;
 122	struct vmw_ctx_binding_state *staged;
 123};
 124
 125/**
 126 * struct vmw_cmd_entry - Describe a command for the verifier
 127 *
 128 * @func: Call-back to handle the command.
 129 * @user_allow: Whether allowed from the execbuf ioctl.
 130 * @gb_disable: Whether disabled if guest-backed objects are available.
 131 * @gb_enable: Whether enabled iff guest-backed objects are available.
 132 * @cmd_name: Name of the command.
 133 */
 134struct vmw_cmd_entry {
 135	int (*func) (struct vmw_private *, struct vmw_sw_context *,
 136		     SVGA3dCmdHeader *);
 137	bool user_allow;
 138	bool gb_disable;
 139	bool gb_enable;
 140	const char *cmd_name;
 141};
 142
 143#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
 144	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
 145				       (_gb_disable), (_gb_enable), #_cmd}
 146
 147static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 148					struct vmw_sw_context *sw_context,
 149					struct vmw_resource *ctx);
 150static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 151				 struct vmw_sw_context *sw_context,
 152				 SVGAMobId *id,
 153				 struct vmw_bo **vmw_bo_p);
 154/**
 155 * vmw_ptr_diff - Compute the offset from a to b in bytes
 156 *
 157 * @a: A starting pointer.
 158 * @b: A pointer offset in the same address space.
 159 *
 160 * Returns: The offset in bytes between the two pointers.
 161 */
 162static size_t vmw_ptr_diff(void *a, void *b)
 163{
 164	return (unsigned long) b - (unsigned long) a;
 165}
 166
 167/**
 168 * vmw_execbuf_bindings_commit - Commit modified binding state
 169 *
 170 * @sw_context: The command submission context
 171 * @backoff: Whether this is part of the error path and binding state changes
 172 * should be ignored
 173 */
 174static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
 175					bool backoff)
 176{
 177	struct vmw_ctx_validation_info *entry;
 178
 179	list_for_each_entry(entry, &sw_context->ctx_list, head) {
 180		if (!backoff)
 181			vmw_binding_state_commit(entry->cur, entry->staged);
 182
 183		if (entry->staged != sw_context->staged_bindings)
 184			vmw_binding_state_free(entry->staged);
 185		else
 186			sw_context->staged_bindings_inuse = false;
 187	}
 188
 189	/* List entries are freed with the validation context */
 190	INIT_LIST_HEAD(&sw_context->ctx_list);
 191}
 192
 193/**
 194 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 195 *
 196 * @sw_context: The command submission context
 197 */
 198static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
 199{
 200	if (sw_context->dx_query_mob)
 201		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
 202					  sw_context->dx_query_mob);
 203}
 204
 205/**
 206 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 207 * the validate list.
 208 *
  209 * @dev_priv: Pointer to the device private.
 210 * @sw_context: The command submission context
 211 * @res: Pointer to the resource
 212 * @node: The validation node holding the context resource metadata
 213 */
 214static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
 215				   struct vmw_sw_context *sw_context,
 216				   struct vmw_resource *res,
 217				   struct vmw_ctx_validation_info *node)
 218{
 219	int ret;
 220
 221	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
 222	if (unlikely(ret != 0))
 223		goto out_err;
 224
 225	if (!sw_context->staged_bindings) {
 226		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
 227		if (IS_ERR(sw_context->staged_bindings)) {
 228			ret = PTR_ERR(sw_context->staged_bindings);
 229			sw_context->staged_bindings = NULL;
 230			goto out_err;
 231		}
 232	}
 233
 234	if (sw_context->staged_bindings_inuse) {
 235		node->staged = vmw_binding_state_alloc(dev_priv);
 236		if (IS_ERR(node->staged)) {
 237			ret = PTR_ERR(node->staged);
 238			node->staged = NULL;
 239			goto out_err;
 240		}
 241	} else {
 242		node->staged = sw_context->staged_bindings;
 243		sw_context->staged_bindings_inuse = true;
 244	}
 245
 246	node->ctx = res;
 247	node->cur = vmw_context_binding_state(res);
 248	list_add_tail(&node->head, &sw_context->ctx_list);
 249
 250	return 0;
 251
 252out_err:
 253	return ret;
 254}
 255
 256/**
  257 * vmw_execbuf_res_size - Calculate extra size for the resource validation node
 258 *
 259 * @dev_priv: Pointer to the device private struct.
 260 * @res_type: The resource type.
 261 *
 262 * Guest-backed contexts and DX contexts require extra size to store execbuf
 263 * private information in the validation node. Typically the binding manager
 264 * associated data structures.
 265 *
 266 * Returns: The extra size requirement based on resource type.
 267 */
 268static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
 269					 enum vmw_res_type res_type)
 270{
 271	return (res_type == vmw_res_dx_context ||
 272		(res_type == vmw_res_context && dev_priv->has_mob)) ?
 273		sizeof(struct vmw_ctx_validation_info) : 0;
 274}
 275
 276/**
 277 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 278 *
 279 * @rcache: Pointer to the entry to update.
 280 * @res: Pointer to the resource.
 281 * @private: Pointer to the execbuf-private space in the resource validation
 282 * node.
 283 */
 284static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
 285				      struct vmw_resource *res,
 286				      void *private)
 287{
 288	rcache->res = res;
 289	rcache->private = private;
 290	rcache->valid = 1;
 291	rcache->valid_handle = 0;
 292}
 293
 294enum vmw_val_add_flags {
 295	vmw_val_add_flag_none  =      0,
 296	vmw_val_add_flag_noctx = 1 << 0,
 297};
 298
 299/**
 300 * vmw_execbuf_res_val_add - Add a resource to the validation list.
 301 *
 302 * @sw_context: Pointer to the software context.
 303 * @res: Unreferenced rcu-protected pointer to the resource.
 304 * @dirty: Whether to change dirty status.
 305 * @flags: specifies whether to use the context or not
 306 *
 307 * Returns: 0 on success. Negative error code on failure. Typical error codes
 308 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 309 */
 310static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
 311				   struct vmw_resource *res,
 312				   u32 dirty,
 313				   u32 flags)
 314{
 315	struct vmw_private *dev_priv = res->dev_priv;
 316	int ret;
 317	enum vmw_res_type res_type = vmw_res_type(res);
 318	struct vmw_res_cache_entry *rcache;
 319	struct vmw_ctx_validation_info *ctx_info;
 320	bool first_usage;
 321	unsigned int priv_size;
 322
 323	rcache = &sw_context->res_cache[res_type];
 324	if (likely(rcache->valid && rcache->res == res)) {
 325		if (dirty)
 326			vmw_validation_res_set_dirty(sw_context->ctx,
 327						     rcache->private, dirty);
 328		return 0;
 329	}
 330
 331	if ((flags & vmw_val_add_flag_noctx) != 0) {
 332		ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
 333						  (void **)&ctx_info, NULL);
 334		if (ret)
 335			return ret;
 336
 337	} else {
 338		priv_size = vmw_execbuf_res_size(dev_priv, res_type);
 339		ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
 340						  dirty, (void **)&ctx_info,
 341						  &first_usage);
 342		if (ret)
 343			return ret;
 344
 345		if (priv_size && first_usage) {
 346			ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
 347						      ctx_info);
 348			if (ret) {
 349				VMW_DEBUG_USER("Failed first usage context setup.\n");
 350				return ret;
 351			}
 352		}
 353	}
 354
 355	vmw_execbuf_rcache_update(rcache, res, ctx_info);
 356	return 0;
 357}
 358
 359/**
 360 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
 361 * validation list
 362 *
 363 * @sw_context: The software context holding the validation list.
 364 * @view: Pointer to the view resource.
 365 *
 366 * Returns 0 if success, negative error code otherwise.
 367 */
 368static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
 369				struct vmw_resource *view)
 370{
 371	int ret;
 372
 373	/*
 374	 * First add the resource the view is pointing to, otherwise it may be
 375	 * swapped out when the view is validated.
 376	 */
 377	ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
 378				      vmw_view_dirtying(view), vmw_val_add_flag_noctx);
 379	if (ret)
 380		return ret;
 381
 382	return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
 383				       vmw_val_add_flag_noctx);
 384}
 385
 386/**
 387 * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
 388 * to to the validation list.
 389 *
 390 * @sw_context: The software context holding the validation list.
 391 * @view_type: The view type to look up.
 392 * @id: view id of the view.
 393 *
 394 * The view is represented by a view id and the DX context it's created on, or
 395 * scheduled for creation on. If there is no DX context set, the function will
 396 * return an -EINVAL error pointer.
 397 *
 398 * Returns: Unreferenced pointer to the resource on success, negative error
 399 * pointer on failure.
 400 */
 401static struct vmw_resource *
 402vmw_view_id_val_add(struct vmw_sw_context *sw_context,
 403		    enum vmw_view_type view_type, u32 id)
 404{
 405	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
 406	struct vmw_resource *view;
 407	int ret;
 408
 409	if (!ctx_node)
 410		return ERR_PTR(-EINVAL);
 411
 412	view = vmw_view_lookup(sw_context->man, view_type, id);
 413	if (IS_ERR(view))
 414		return view;
 415
 416	ret = vmw_view_res_val_add(sw_context, view);
 417	if (ret)
 418		return ERR_PTR(ret);
 419
 420	return view;
 421}
 422
 423/**
 424 * vmw_resource_context_res_add - Put resources previously bound to a context on
 425 * the validation list
 426 *
 427 * @dev_priv: Pointer to a device private structure
 428 * @sw_context: Pointer to a software context used for this command submission
 429 * @ctx: Pointer to the context resource
 430 *
 431 * This function puts all resources that were previously bound to @ctx on the
  432 * resource validation list. This is part of the context state reemission.
 433 */
 434static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 435					struct vmw_sw_context *sw_context,
 436					struct vmw_resource *ctx)
 437{
 438	struct list_head *binding_list;
 439	struct vmw_ctx_bindinfo *entry;
 440	int ret = 0;
 441	struct vmw_resource *res;
 442	u32 i;
 443	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
 444		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
 445
 446	/* Add all cotables to the validation list. */
 447	if (has_sm4_context(dev_priv) &&
 448	    vmw_res_type(ctx) == vmw_res_dx_context) {
 449		for (i = 0; i < cotable_max; ++i) {
 450			res = vmw_context_cotable(ctx, i);
 451			if (IS_ERR_OR_NULL(res))
 452				continue;
 453
 454			ret = vmw_execbuf_res_val_add(sw_context, res,
 455						      VMW_RES_DIRTY_SET,
 456						      vmw_val_add_flag_noctx);
 457			if (unlikely(ret != 0))
 458				return ret;
 459		}
 460	}
 461
 462	/* Add all resources bound to the context to the validation list */
 463	mutex_lock(&dev_priv->binding_mutex);
 464	binding_list = vmw_context_binding_list(ctx);
 465
 466	list_for_each_entry(entry, binding_list, ctx_list) {
 467		if (vmw_res_type(entry->res) == vmw_res_view)
 468			ret = vmw_view_res_val_add(sw_context, entry->res);
 469		else
 470			ret = vmw_execbuf_res_val_add(sw_context, entry->res,
 471						      vmw_binding_dirtying(entry->bt),
 472						      vmw_val_add_flag_noctx);
 473		if (unlikely(ret != 0))
 474			break;
 475	}
 476
 477	if (has_sm4_context(dev_priv) &&
 478	    vmw_res_type(ctx) == vmw_res_dx_context) {
 479		struct vmw_bo *dx_query_mob;
 480
 481		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
 482		if (dx_query_mob) {
 483			vmw_bo_placement_set(dx_query_mob,
 484					     VMW_BO_DOMAIN_MOB,
 485					     VMW_BO_DOMAIN_MOB);
 486			ret = vmw_validation_add_bo(sw_context->ctx,
 487						    dx_query_mob);
 488		}
 489	}
 490
 491	mutex_unlock(&dev_priv->binding_mutex);
 492	return ret;
 493}
 494
 495/**
 496 * vmw_resource_relocation_add - Add a relocation to the relocation list
 497 *
 498 * @sw_context: Pointer to the software context.
 499 * @res: The resource.
 500 * @offset: Offset into the command buffer currently being parsed where the id
 501 * that needs fixup is located. Granularity is one byte.
 502 * @rel_type: Relocation type.
 503 */
 504static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
 505				       const struct vmw_resource *res,
 506				       unsigned long offset,
 507				       enum vmw_resource_relocation_type
 508				       rel_type)
 509{
 510	struct vmw_resource_relocation *rel;
 511
 512	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
 513	if (unlikely(!rel)) {
 514		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
 515		return -ENOMEM;
 516	}
 517
 518	rel->res = res;
 519	rel->offset = offset;
 520	rel->rel_type = rel_type;
 521	list_add_tail(&rel->head, &sw_context->res_relocations);
 522
 523	return 0;
 524}
 525
 526/**
 527 * vmw_resource_relocations_free - Free all relocations on a list
 528 *
 529 * @list: Pointer to the head of the relocation list
 530 */
 531static void vmw_resource_relocations_free(struct list_head *list)
 532{
 533	/* Memory is validation context memory, so no need to free it */
 534	INIT_LIST_HEAD(list);
 535}
 536
 537/**
 538 * vmw_resource_relocations_apply - Apply all relocations on a list
 539 *
  540 * @cb: Pointer to the start of the command buffer being patched. This need not be
 541 * the same buffer as the one being parsed when the relocation list was built,
 542 * but the contents must be the same modulo the resource ids.
 543 * @list: Pointer to the head of the relocation list.
 544 */
 545static void vmw_resource_relocations_apply(uint32_t *cb,
 546					   struct list_head *list)
 547{
 548	struct vmw_resource_relocation *rel;
 549
 550	/* Validate the struct vmw_resource_relocation member size */
 551	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
 552	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
 553
 554	list_for_each_entry(rel, list, head) {
 555		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
 556		switch (rel->rel_type) {
 557		case vmw_res_rel_normal:
 558			*addr = rel->res->id;
 559			break;
 560		case vmw_res_rel_nop:
 561			*addr = SVGA_3D_CMD_NOP;
 562			break;
 563		default:
 564			if (rel->res->id == -1)
 565				*addr = SVGA_3D_CMD_NOP;
 566			break;
 567		}
 568	}
 569}
 570
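/*
 * Generic verifier callbacks: vmw_cmd_invalid rejects commands that are
 * never legal in a user-space command stream, and vmw_cmd_ok accepts
 * commands that need no further checking.
 */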
 571static int vmw_cmd_invalid(struct vmw_private *dev_priv,
 572			   struct vmw_sw_context *sw_context,
 573			   SVGA3dCmdHeader *header)
 574{
 575	return -EINVAL;
 576}
 577
 578static int vmw_cmd_ok(struct vmw_private *dev_priv,
 579		      struct vmw_sw_context *sw_context,
 580		      SVGA3dCmdHeader *header)
 581{
 582	return 0;
 583}
 584
 585/**
 586 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 587 * list.
 588 *
 589 * @sw_context: Pointer to the software context.
 590 *
  591 * Note that since VMware's command submission is currently protected by the
 592 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 593 * only a single thread at once will attempt this.
 594 */
 595static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 596{
 597	int ret;
 598
 599	ret = vmw_validation_res_reserve(sw_context->ctx, true);
 600	if (ret)
 601		return ret;
 602
 603	if (sw_context->dx_query_mob) {
 604		struct vmw_bo *expected_dx_query_mob;
 605
 606		expected_dx_query_mob =
 607			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
 608		if (expected_dx_query_mob &&
 609		    expected_dx_query_mob != sw_context->dx_query_mob) {
 610			ret = -EINVAL;
 611		}
 612	}
 613
 614	return ret;
 615}
 616
 617/**
 618 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 619 * resource validate list unless it's already there.
 620 *
 621 * @dev_priv: Pointer to a device private structure.
 622 * @sw_context: Pointer to the software context.
 623 * @res_type: Resource type.
 624 * @dirty: Whether to change dirty status.
 625 * @converter: User-space visible type specific information.
 626 * @id_loc: Pointer to the location in the command buffer currently being parsed
 627 * from where the user-space resource id handle is located.
 628 * @p_res: Pointer to pointer to resource validation node. Populated on
 629 * exit.
 630 */
 631static int
 632vmw_cmd_res_check(struct vmw_private *dev_priv,
 633		  struct vmw_sw_context *sw_context,
 634		  enum vmw_res_type res_type,
 635		  u32 dirty,
 636		  const struct vmw_user_resource_conv *converter,
 637		  uint32_t *id_loc,
 638		  struct vmw_resource **p_res)
 639{
 640	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
 641	struct vmw_resource *res;
 642	int ret = 0;
 643	bool needs_unref = false;
 644
 645	if (p_res)
 646		*p_res = NULL;
 647
 648	if (*id_loc == SVGA3D_INVALID_ID) {
 649		if (res_type == vmw_res_context) {
 650			VMW_DEBUG_USER("Illegal context invalid id.\n");
 651			return -EINVAL;
 652		}
 653		return 0;
 654	}
 655
 656	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
 657		res = rcache->res;
 658		if (dirty)
 659			vmw_validation_res_set_dirty(sw_context->ctx,
 660						     rcache->private, dirty);
 661	} else {
 662		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
 663
 664		ret = vmw_validation_preload_res(sw_context->ctx, size);
 665		if (ret)
 666			return ret;
 667
 668		ret = vmw_user_resource_lookup_handle
 669			(dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
 670		if (ret != 0) {
 671			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
 672				       (unsigned int) *id_loc);
 673			return ret;
 674		}
 675		needs_unref = true;
 676
 677		ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
 678		if (unlikely(ret != 0))
 679			goto res_check_done;
 680
 681		if (rcache->valid && rcache->res == res) {
 682			rcache->valid_handle = true;
 683			rcache->handle = *id_loc;
 684		}
 685	}
 686
 687	ret = vmw_resource_relocation_add(sw_context, res,
 688					  vmw_ptr_diff(sw_context->buf_start,
 689						       id_loc),
 690					  vmw_res_rel_normal);
 691	if (p_res)
 692		*p_res = res;
 693
 694res_check_done:
 695	if (needs_unref)
 696		vmw_resource_unreference(&res);
 697
 698	return ret;
 699}
 700
 701/**
 702 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 703 *
 704 * @ctx_res: context the query belongs to
 705 *
 706 * This function assumes binding_mutex is held.
 707 */
 708static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
 709{
 710	struct vmw_private *dev_priv = ctx_res->dev_priv;
 711	struct vmw_bo *dx_query_mob;
 712	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
 713
 714	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
 715
 716	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
 717		return 0;
 718
 719	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
 720	if (cmd == NULL)
 721		return -ENOMEM;
 722
 723	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
 724	cmd->header.size = sizeof(cmd->body);
 725	cmd->body.cid = ctx_res->id;
 726	cmd->body.mobid = dx_query_mob->tbo.resource->start;
 727	vmw_cmd_commit(dev_priv, sizeof(*cmd));
 728
 729	vmw_context_bind_dx_query(ctx_res, dx_query_mob);
 730
 731	return 0;
 732}
 733
 734/**
 735 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 736 * contexts.
 737 *
 738 * @sw_context: Pointer to the software context.
 739 *
 740 * Rebind context binding points that have been scrubbed because of eviction.
 741 */
 742static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
 743{
 744	struct vmw_ctx_validation_info *val;
 745	int ret;
 746
 747	list_for_each_entry(val, &sw_context->ctx_list, head) {
 748		ret = vmw_binding_rebind_all(val->cur);
 749		if (unlikely(ret != 0)) {
 750			if (ret != -ERESTARTSYS)
 751				VMW_DEBUG_USER("Failed to rebind context.\n");
 752			return ret;
 753		}
 754
 755		ret = vmw_rebind_all_dx_query(val->ctx);
 756		if (ret != 0) {
 757			VMW_DEBUG_USER("Failed to rebind queries.\n");
 758			return ret;
 759		}
 760	}
 761
 762	return 0;
 763}
 764
 765/**
 766 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 767 * state tracker.
 768 *
 769 * @sw_context: The execbuf state used for this command.
 770 * @view_type: View type for the bindings.
 771 * @binding_type: Binding type for the bindings.
 772 * @shader_slot: The shader slot to user for the bindings.
 773 * @view_ids: Array of view ids to be bound.
 774 * @num_views: Number of view ids in @view_ids.
 775 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 776 */
 777static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
 778				 enum vmw_view_type view_type,
 779				 enum vmw_ctx_binding_type binding_type,
 780				 uint32 shader_slot,
 781				 uint32 view_ids[], u32 num_views,
 782				 u32 first_slot)
 783{
 784	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
 785	u32 i;
 786
 787	if (!ctx_node)
 788		return -EINVAL;
 789
 790	for (i = 0; i < num_views; ++i) {
 791		struct vmw_ctx_bindinfo_view binding;
 792		struct vmw_resource *view = NULL;
 793
 794		if (view_ids[i] != SVGA3D_INVALID_ID) {
 795			view = vmw_view_id_val_add(sw_context, view_type,
 796						   view_ids[i]);
 797			if (IS_ERR(view)) {
 798				VMW_DEBUG_USER("View not found.\n");
 799				return PTR_ERR(view);
 800			}
 801		}
 802		binding.bi.ctx = ctx_node->ctx;
 803		binding.bi.res = view;
 804		binding.bi.bt = binding_type;
 805		binding.shader_slot = shader_slot;
 806		binding.slot = first_slot + i;
 807		vmw_binding_add(ctx_node->staged, &binding.bi,
 808				shader_slot, binding.slot);
 809	}
 810
 811	return 0;
 812}
 813
 814/**
 815 * vmw_cmd_cid_check - Check a command header for valid context information.
 816 *
 817 * @dev_priv: Pointer to a device private structure.
 818 * @sw_context: Pointer to the software context.
 819 * @header: A command header with an embedded user-space context handle.
 820 *
 821 * Convenience function: Call vmw_cmd_res_check with the user-space context
 822 * handle embedded in @header.
 823 */
 824static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 825			     struct vmw_sw_context *sw_context,
 826			     SVGA3dCmdHeader *header)
 827{
 828	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
 829		container_of(header, typeof(*cmd), header);
 830
 831	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 832				 VMW_RES_DIRTY_SET, user_context_converter,
 833				 &cmd->body, NULL);
 834}
 835
 836/**
 837 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 838 * recently validated resource
 839 *
 840 * @sw_context: Pointer to the command submission context
 841 * @res: The resource
 842 *
 843 * The resource pointed to by @res needs to be present in the command submission
  844 * context's resource cache and hence be the last resource of that type to be
 845 * processed by the validation code.
 846 *
 847 * Return: a pointer to the private metadata of the resource, or NULL if it
 848 * wasn't found
 849 */
 850static struct vmw_ctx_validation_info *
 851vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
 852			  struct vmw_resource *res)
 853{
 854	struct vmw_res_cache_entry *rcache =
 855		&sw_context->res_cache[vmw_res_type(res)];
 856
 857	if (rcache->valid && rcache->res == res)
 858		return rcache->private;
 859
 860	WARN_ON_ONCE(true);
 861	return NULL;
 862}
 863
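/*
 * Verify a set-render-target command: validate the context and target
 * surface and, when guest-backed objects are available, record the
 * render-target binding in the context's staged binding state.
 */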
 864static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 865					   struct vmw_sw_context *sw_context,
 866					   SVGA3dCmdHeader *header)
 867{
 868	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
 869	struct vmw_resource *ctx;
 870	struct vmw_resource *res;
 871	int ret;
 872
 873	cmd = container_of(header, typeof(*cmd), header);
 874
 875	if (cmd->body.type >= SVGA3D_RT_MAX) {
 876		VMW_DEBUG_USER("Illegal render target type %u.\n",
 877			       (unsigned int) cmd->body.type);
 878		return -EINVAL;
 879	}
 880
 881	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 882				VMW_RES_DIRTY_SET, user_context_converter,
 883				&cmd->body.cid, &ctx);
 884	if (unlikely(ret != 0))
 885		return ret;
 886
 887	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 888				VMW_RES_DIRTY_SET, user_surface_converter,
 889				&cmd->body.target.sid, &res);
 890	if (unlikely(ret))
 891		return ret;
 892
 893	if (dev_priv->has_mob) {
 894		struct vmw_ctx_bindinfo_view binding;
 895		struct vmw_ctx_validation_info *node;
 896
 897		node = vmw_execbuf_info_from_res(sw_context, ctx);
 898		if (!node)
 899			return -EINVAL;
 900
 901		binding.bi.ctx = ctx;
 902		binding.bi.res = res;
 903		binding.bi.bt = vmw_ctx_binding_rt;
 904		binding.slot = cmd->body.type;
 905		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
 906	}
 907
 908	return 0;
 909}
 910
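/*
 * Verify a surface copy command: the source surface is validated read-only
 * and the destination surface is marked dirty.
 */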
 911static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
 912				      struct vmw_sw_context *sw_context,
 913				      SVGA3dCmdHeader *header)
 914{
 915	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
 916	int ret;
 917
 918	cmd = container_of(header, typeof(*cmd), header);
 919
 920	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 921				VMW_RES_DIRTY_NONE, user_surface_converter,
 922				&cmd->body.src.sid, NULL);
 923	if (ret)
 924		return ret;
 925
 926	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 927				 VMW_RES_DIRTY_SET, user_surface_converter,
 928				 &cmd->body.dest.sid, NULL);
 929}
 930
 931static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
 932				     struct vmw_sw_context *sw_context,
 933				     SVGA3dCmdHeader *header)
 934{
 935	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
 936	int ret;
 937
 938	cmd = container_of(header, typeof(*cmd), header);
 939	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 940				VMW_RES_DIRTY_NONE, user_surface_converter,
 941				&cmd->body.src, NULL);
 942	if (ret != 0)
 943		return ret;
 944
 945	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 946				 VMW_RES_DIRTY_SET, user_surface_converter,
 947				 &cmd->body.dest, NULL);
 948}
 949
 950static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
 951				   struct vmw_sw_context *sw_context,
 952				   SVGA3dCmdHeader *header)
 953{
 954	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
 955	int ret;
 956
 957	cmd = container_of(header, typeof(*cmd), header);
 958	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 959				VMW_RES_DIRTY_NONE, user_surface_converter,
 960				&cmd->body.srcSid, NULL);
 961	if (ret != 0)
 962		return ret;
 963
 964	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 965				 VMW_RES_DIRTY_SET, user_surface_converter,
 966				 &cmd->body.dstSid, NULL);
 967}
 968
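/*
 * Verify a surface stretch-blit command: validate the source surface and
 * mark the destination surface dirty.
 */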
 969static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
 970				     struct vmw_sw_context *sw_context,
 971				     SVGA3dCmdHeader *header)
 972{
 973	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
 974	int ret;
 975
 976	cmd = container_of(header, typeof(*cmd), header);
 977	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 978				VMW_RES_DIRTY_NONE, user_surface_converter,
 979				&cmd->body.src.sid, NULL);
 980	if (unlikely(ret != 0))
 981		return ret;
 982
 983	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 984				 VMW_RES_DIRTY_SET, user_surface_converter,
 985				 &cmd->body.dest.sid, NULL);
 986}
 987
 988static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
 989					 struct vmw_sw_context *sw_context,
 990					 SVGA3dCmdHeader *header)
 991{
 992	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
 993		container_of(header, typeof(*cmd), header);
 994
 995	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 996				 VMW_RES_DIRTY_NONE, user_surface_converter,
 997				 &cmd->body.srcImage.sid, NULL);
 998}
 999
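/* Verify a present command by validating the surface to be presented. */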
1000static int vmw_cmd_present_check(struct vmw_private *dev_priv,
1001				 struct vmw_sw_context *sw_context,
1002				 SVGA3dCmdHeader *header)
1003{
1004	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
1005		container_of(header, typeof(*cmd), header);
1006
1007	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1008				 VMW_RES_DIRTY_NONE, user_surface_converter,
1009				 &cmd->body.sid, NULL);
1010}
1011
1012/**
1013 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1014 *
1015 * @dev_priv: The device private structure.
1016 * @new_query_bo: The new buffer holding query results.
1017 * @sw_context: The software context used for this command submission.
1018 *
1019 * This function checks whether @new_query_bo is suitable for holding query
 1020 * results, and whether another buffer is currently pinned for query results. If so,
1021 * the function prepares the state of @sw_context for switching pinned buffers
1022 * after successful submission of the current command batch.
1023 */
1024static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1025				       struct vmw_bo *new_query_bo,
1026				       struct vmw_sw_context *sw_context)
1027{
1028	struct vmw_res_cache_entry *ctx_entry =
1029		&sw_context->res_cache[vmw_res_context];
1030	int ret;
1031
1032	BUG_ON(!ctx_entry->valid);
1033	sw_context->last_query_ctx = ctx_entry->res;
1034
1035	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1036
1037		if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) {
1038			VMW_DEBUG_USER("Query buffer too large.\n");
1039			return -EINVAL;
1040		}
1041
1042		if (unlikely(sw_context->cur_query_bo != NULL)) {
1043			sw_context->needs_post_query_barrier = true;
1044			vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo);
1045			ret = vmw_validation_add_bo(sw_context->ctx,
1046						    sw_context->cur_query_bo);
1047			if (unlikely(ret != 0))
1048				return ret;
1049		}
1050		sw_context->cur_query_bo = new_query_bo;
1051
1052		vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo);
1053		ret = vmw_validation_add_bo(sw_context->ctx,
1054					    dev_priv->dummy_query_bo);
1055		if (unlikely(ret != 0))
1056			return ret;
1057	}
1058
1059	return 0;
1060}
1061
1062/**
1063 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1064 *
1065 * @dev_priv: The device private structure.
1066 * @sw_context: The software context used for this command submission batch.
1067 *
 1068 * This function will check if we're switching query buffers, and will then
1069 * issue a dummy occlusion query wait used as a query barrier. When the fence
1070 * object following that query wait has signaled, we are sure that all preceding
1071 * queries have finished, and the old query buffer can be unpinned. However,
1072 * since both the new query buffer and the old one are fenced with that fence,
 1073 * we can do an asynchronous unpin now, and be sure that the old query buffer
1074 * won't be moved until the fence has signaled.
1075 *
1076 * As mentioned above, both the new - and old query buffers need to be fenced
1077 * using a sequence emitted *after* calling this function.
1078 */
1079static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1080				     struct vmw_sw_context *sw_context)
1081{
1082	/*
1083	 * The validate list should still hold references to all
1084	 * contexts here.
1085	 */
1086	if (sw_context->needs_post_query_barrier) {
1087		struct vmw_res_cache_entry *ctx_entry =
1088			&sw_context->res_cache[vmw_res_context];
1089		struct vmw_resource *ctx;
1090		int ret;
1091
1092		BUG_ON(!ctx_entry->valid);
1093		ctx = ctx_entry->res;
1094
1095		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);
1096
1097		if (unlikely(ret != 0))
1098			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
1099	}
1100
1101	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1102		if (dev_priv->pinned_bo) {
1103			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
1104			vmw_bo_unreference(&dev_priv->pinned_bo);
1105		}
1106
1107		if (!sw_context->needs_post_query_barrier) {
1108			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
1109
1110			/*
1111			 * We pin also the dummy_query_bo buffer so that we
1112			 * don't need to validate it when emitting dummy queries
1113			 * in context destroy paths.
1114			 */
1115			if (!dev_priv->dummy_query_bo_pinned) {
1116				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1117						    true);
1118				dev_priv->dummy_query_bo_pinned = true;
1119			}
1120
1121			BUG_ON(sw_context->last_query_ctx == NULL);
1122			dev_priv->query_cid = sw_context->last_query_ctx->id;
1123			dev_priv->query_cid_valid = true;
1124			dev_priv->pinned_bo =
1125				vmw_bo_reference(sw_context->cur_query_bo);
1126		}
1127	}
1128}
1129
1130/**
1131 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
1132 * to a MOB id.
1133 *
1134 * @dev_priv: Pointer to a device private structure.
1135 * @sw_context: The software context used for this command batch validation.
1136 * @id: Pointer to the user-space handle to be translated.
1137 * @vmw_bo_p: Points to a location that, on successful return will carry a
1138 * non-reference-counted pointer to the buffer object identified by the
1139 * user-space handle in @id.
1140 *
1141 * This function saves information needed to translate a user-space buffer
1142 * handle to a MOB id. The translation does not take place immediately, but
1143 * during a call to vmw_apply_relocations().
1144 *
1145 * This function builds a relocation list and a list of buffers to validate. The
1146 * former needs to be freed using either vmw_apply_relocations() or
1147 * vmw_free_relocations(). The latter needs to be freed using
1148 * vmw_clear_validations.
1149 */
1150static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1151				 struct vmw_sw_context *sw_context,
1152				 SVGAMobId *id,
1153				 struct vmw_bo **vmw_bo_p)
1154{
1155	struct vmw_bo *vmw_bo, *tmp_bo;
1156	uint32_t handle = *id;
1157	struct vmw_relocation *reloc;
1158	int ret;
1159
1160	vmw_validation_preload_bo(sw_context->ctx);
1161	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
1162	if (ret != 0) {
1163		drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
1164		return PTR_ERR(vmw_bo);
1165	}
1166	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
1167	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
1168	tmp_bo = vmw_bo;
1169	vmw_user_bo_unref(&tmp_bo);
1170	if (unlikely(ret != 0))
1171		return ret;
1172
1173	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1174	if (!reloc)
1175		return -ENOMEM;
1176
1177	reloc->mob_loc = id;
1178	reloc->vbo = vmw_bo;
1179
1180	*vmw_bo_p = vmw_bo;
1181	list_add_tail(&reloc->head, &sw_context->bo_relocations);
1182
1183	return 0;
1184}
1185
1186/**
1187 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
1188 * to a valid SVGAGuestPtr
1189 *
1190 * @dev_priv: Pointer to a device private structure.
1191 * @sw_context: The software context used for this command batch validation.
1192 * @ptr: Pointer to the user-space handle to be translated.
1193 * @vmw_bo_p: Points to a location that, on successful return will carry a
1194 * non-reference-counted pointer to the DMA buffer identified by the user-space
 1195 * handle in @ptr.
1196 *
1197 * This function saves information needed to translate a user-space buffer
1198 * handle to a valid SVGAGuestPtr. The translation does not take place
1199 * immediately, but during a call to vmw_apply_relocations().
1200 *
1201 * This function builds a relocation list and a list of buffers to validate.
1202 * The former needs to be freed using either vmw_apply_relocations() or
1203 * vmw_free_relocations(). The latter needs to be freed using
1204 * vmw_clear_validations.
1205 */
1206static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1207				   struct vmw_sw_context *sw_context,
1208				   SVGAGuestPtr *ptr,
1209				   struct vmw_bo **vmw_bo_p)
1210{
1211	struct vmw_bo *vmw_bo, *tmp_bo;
1212	uint32_t handle = ptr->gmrId;
1213	struct vmw_relocation *reloc;
1214	int ret;
1215
1216	vmw_validation_preload_bo(sw_context->ctx);
1217	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
1218	if (ret != 0) {
1219		drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
1220		return PTR_ERR(vmw_bo);
1221	}
1222	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
1223			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
1224	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
1225	tmp_bo = vmw_bo;
1226	vmw_user_bo_unref(&tmp_bo);
1227	if (unlikely(ret != 0))
1228		return ret;
1229
1230	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1231	if (!reloc)
1232		return -ENOMEM;
1233
1234	reloc->location = ptr;
1235	reloc->vbo = vmw_bo;
1236	*vmw_bo_p = vmw_bo;
1237	list_add_tail(&reloc->head, &sw_context->bo_relocations);
1238
1239	return 0;
1240}
1241
1242/**
1243 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
1244 *
1245 * @dev_priv: Pointer to a device private struct.
1246 * @sw_context: The software context used for this command submission.
1247 * @header: Pointer to the command header in the command stream.
1248 *
1249 * This function adds the new query into the query COTABLE
1250 */
1251static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1252				   struct vmw_sw_context *sw_context,
1253				   SVGA3dCmdHeader *header)
1254{
1255	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
1256	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
1257	struct vmw_resource *cotable_res;
1258	int ret;
1259
1260	if (!ctx_node)
1261		return -EINVAL;
1262
1263	cmd = container_of(header, typeof(*cmd), header);
1264
1265	if (cmd->body.type <  SVGA3D_QUERYTYPE_MIN ||
1266	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
1267		return -EINVAL;
1268
1269	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
1270	if (IS_ERR_OR_NULL(cotable_res))
1271		return cotable_res ? PTR_ERR(cotable_res) : -EINVAL;
1272	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
1273
1274	return ret;
1275}
1276
1277/**
1278 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
1279 *
1280 * @dev_priv: Pointer to a device private struct.
1281 * @sw_context: The software context used for this command submission.
1282 * @header: Pointer to the command header in the command stream.
1283 *
1284 * The query bind operation will eventually associate the query ID with its
1285 * backing MOB.  In this function, we take the user mode MOB ID and use
1286 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
1287 */
1288static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1289				 struct vmw_sw_context *sw_context,
1290				 SVGA3dCmdHeader *header)
1291{
1292	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
1293	struct vmw_bo *vmw_bo;
1294	int ret;
1295
1296	cmd = container_of(header, typeof(*cmd), header);
1297
1298	/*
1299	 * Look up the buffer pointed to by q.mobid, put it on the relocation
1300	 * list so its kernel mode MOB ID can be filled in later
1301	 */
1302	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1303				    &vmw_bo);
1304
1305	if (ret != 0)
1306		return ret;
1307
1308	sw_context->dx_query_mob = vmw_bo;
1309	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
1310	return 0;
1311}
1312
1313/**
1314 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
1315 *
1316 * @dev_priv: Pointer to a device private struct.
1317 * @sw_context: The software context used for this command submission.
1318 * @header: Pointer to the command header in the command stream.
1319 */
1320static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1321				  struct vmw_sw_context *sw_context,
1322				  SVGA3dCmdHeader *header)
1323{
1324	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
1325		container_of(header, typeof(*cmd), header);
1326
1327	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1328				 VMW_RES_DIRTY_SET, user_context_converter,
1329				 &cmd->body.cid, NULL);
1330}
1331
1332/**
1333 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
1334 *
1335 * @dev_priv: Pointer to a device private struct.
1336 * @sw_context: The software context used for this command submission.
1337 * @header: Pointer to the command header in the command stream.
1338 */
1339static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1340			       struct vmw_sw_context *sw_context,
1341			       SVGA3dCmdHeader *header)
1342{
1343	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
1344		container_of(header, typeof(*cmd), header);
1345
1346	if (unlikely(dev_priv->has_mob)) {
1347		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);
1348
1349		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1350
1351		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1352		gb_cmd.header.size = cmd->header.size;
1353		gb_cmd.body.cid = cmd->body.cid;
1354		gb_cmd.body.type = cmd->body.type;
1355
1356		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1357		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1358	}
1359
1360	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1361				 VMW_RES_DIRTY_SET, user_context_converter,
1362				 &cmd->body.cid, NULL);
1363}
1364
1365/**
1366 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
1367 *
1368 * @dev_priv: Pointer to a device private struct.
1369 * @sw_context: The software context used for this command submission.
1370 * @header: Pointer to the command header in the command stream.
1371 */
1372static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1373				struct vmw_sw_context *sw_context,
1374				SVGA3dCmdHeader *header)
1375{
1376	struct vmw_bo *vmw_bo;
1377	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
1378	int ret;
1379
1380	cmd = container_of(header, typeof(*cmd), header);
1381	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1382	if (unlikely(ret != 0))
1383		return ret;
1384
1385	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1386				    &vmw_bo);
1387	if (unlikely(ret != 0))
1388		return ret;
1389
1390	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1391
1392	return ret;
1393}
1394
1395/**
1396 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
1397 *
1398 * @dev_priv: Pointer to a device private struct.
1399 * @sw_context: The software context used for this command submission.
1400 * @header: Pointer to the command header in the command stream.
1401 */
1402static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1403			     struct vmw_sw_context *sw_context,
1404			     SVGA3dCmdHeader *header)
1405{
1406	struct vmw_bo *vmw_bo;
1407	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
1408	int ret;
1409
1410	cmd = container_of(header, typeof(*cmd), header);
1411	if (dev_priv->has_mob) {
1412		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);
1413
1414		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1415
1416		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1417		gb_cmd.header.size = cmd->header.size;
1418		gb_cmd.body.cid = cmd->body.cid;
1419		gb_cmd.body.type = cmd->body.type;
1420		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1421		gb_cmd.body.offset = cmd->body.guestResult.offset;
1422
1423		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1424		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1425	}
1426
1427	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1428	if (unlikely(ret != 0))
1429		return ret;
1430
1431	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1432				      &cmd->body.guestResult, &vmw_bo);
1433	if (unlikely(ret != 0))
1434		return ret;
1435
1436	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1437
1438	return ret;
1439}
1440
1441/**
1442 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
1443 *
1444 * @dev_priv: Pointer to a device private struct.
1445 * @sw_context: The software context used for this command submission.
1446 * @header: Pointer to the command header in the command stream.
1447 */
1448static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1449				 struct vmw_sw_context *sw_context,
1450				 SVGA3dCmdHeader *header)
1451{
1452	struct vmw_bo *vmw_bo;
1453	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
1454	int ret;
1455
1456	cmd = container_of(header, typeof(*cmd), header);
1457	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1458	if (unlikely(ret != 0))
1459		return ret;
1460
1461	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1462				    &vmw_bo);
1463	if (unlikely(ret != 0))
1464		return ret;
1465
1466	return 0;
1467}
1468
1469/**
1470 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
1471 *
1472 * @dev_priv: Pointer to a device private struct.
1473 * @sw_context: The software context used for this command submission.
1474 * @header: Pointer to the command header in the command stream.
1475 */
1476static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1477			      struct vmw_sw_context *sw_context,
1478			      SVGA3dCmdHeader *header)
1479{
1480	struct vmw_bo *vmw_bo;
1481	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
1482	int ret;
1483
1484	cmd = container_of(header, typeof(*cmd), header);
1485	if (dev_priv->has_mob) {
1486		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);
1487
1488		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1489
1490		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1491		gb_cmd.header.size = cmd->header.size;
1492		gb_cmd.body.cid = cmd->body.cid;
1493		gb_cmd.body.type = cmd->body.type;
1494		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1495		gb_cmd.body.offset = cmd->body.guestResult.offset;
1496
1497		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1498		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1499	}
1500
1501	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1502	if (unlikely(ret != 0))
1503		return ret;
1504
1505	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1506				      &cmd->body.guestResult, &vmw_bo);
1507	if (unlikely(ret != 0))
1508		return ret;
1509
1510	return 0;
1511}
1512
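/*
 * Verify a surface DMA command: translate the guest pointer, check the DMA
 * suffix and buffer-object bounds, validate the referenced surface (dirtied
 * only for transfers to host VRAM) and snoop cursor contents if applicable.
 */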
1513static int vmw_cmd_dma(struct vmw_private *dev_priv,
1514		       struct vmw_sw_context *sw_context,
1515		       SVGA3dCmdHeader *header)
1516{
1517	struct vmw_bo *vmw_bo = NULL;
1518	struct vmw_surface *srf = NULL;
1519	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
1520	int ret;
1521	SVGA3dCmdSurfaceDMASuffix *suffix;
1522	uint32_t bo_size;
1523	bool dirty;
1524
1525	cmd = container_of(header, typeof(*cmd), header);
1526	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
1527					       header->size - sizeof(*suffix));
1528
 1529	/* Make sure the device and verifier stay in sync. */
1530	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1531		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
1532		return -EINVAL;
1533	}
1534
1535	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1536				      &cmd->body.guest.ptr, &vmw_bo);
1537	if (unlikely(ret != 0))
1538		return ret;
1539
1540	/* Make sure DMA doesn't cross BO boundaries. */
1541	bo_size = vmw_bo->tbo.base.size;
1542	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
1543		VMW_DEBUG_USER("Invalid DMA offset.\n");
1544		return -EINVAL;
1545	}
1546
1547	bo_size -= cmd->body.guest.ptr.offset;
1548	if (unlikely(suffix->maximumOffset > bo_size))
1549		suffix->maximumOffset = bo_size;
1550
1551	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
1552		VMW_RES_DIRTY_SET : 0;
1553	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1554				dirty, user_surface_converter,
1555				&cmd->body.host.sid, NULL);
1556	if (unlikely(ret != 0)) {
1557		if (unlikely(ret != -ERESTARTSYS))
1558			VMW_DEBUG_USER("could not find surface for DMA.\n");
1559		return ret;
1560	}
1561
1562	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1563
1564	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);
1565
1566	return 0;
1567}
1568
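/*
 * Verify a draw-primitives command: bounds-check the vertex declaration and
 * primitive range arrays against the command size and validate every
 * referenced surface.
 */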
1569static int vmw_cmd_draw(struct vmw_private *dev_priv,
1570			struct vmw_sw_context *sw_context,
1571			SVGA3dCmdHeader *header)
1572{
1573	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
1574	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1575		(unsigned long)header + sizeof(*cmd));
1576	SVGA3dPrimitiveRange *range;
1577	uint32_t i;
1578	uint32_t maxnum;
1579	int ret;
1580
1581	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1582	if (unlikely(ret != 0))
1583		return ret;
1584
1585	cmd = container_of(header, typeof(*cmd), header);
1586	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1587
1588	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1589		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
1590		return -EINVAL;
1591	}
1592
1593	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1594		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1595					VMW_RES_DIRTY_NONE,
1596					user_surface_converter,
1597					&decl->array.surfaceId, NULL);
1598		if (unlikely(ret != 0))
1599			return ret;
1600	}
1601
1602	maxnum = (header->size - sizeof(cmd->body) -
1603		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1604	if (unlikely(cmd->body.numRanges > maxnum)) {
1605		VMW_DEBUG_USER("Illegal number of index ranges.\n");
1606		return -EINVAL;
1607	}
1608
1609	range = (SVGA3dPrimitiveRange *) decl;
1610	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1611		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1612					VMW_RES_DIRTY_NONE,
1613					user_surface_converter,
1614					&range->indexArray.surfaceId, NULL);
1615		if (unlikely(ret != 0))
1616			return ret;
1617	}
1618	return 0;
1619}
1620
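/*
 * Verify a set-texture-state command: validate the context and, for each
 * BIND_TEXTURE state, validate the bound surface and record the texture
 * binding when guest-backed objects are available.
 */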
1621static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1622			     struct vmw_sw_context *sw_context,
1623			     SVGA3dCmdHeader *header)
1624{
1625	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
1626	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1627	  ((unsigned long) header + header->size + sizeof(*header));
1628	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1629		((unsigned long) header + sizeof(*cmd));
1630	struct vmw_resource *ctx;
1631	struct vmw_resource *res;
1632	int ret;
1633
1634	cmd = container_of(header, typeof(*cmd), header);
1635
1636	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1637				VMW_RES_DIRTY_SET, user_context_converter,
1638				&cmd->body.cid, &ctx);
1639	if (unlikely(ret != 0))
1640		return ret;
1641
1642	for (; cur_state < last_state; ++cur_state) {
1643		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1644			continue;
1645
1646		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1647			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
1648				       (unsigned int) cur_state->stage);
1649			return -EINVAL;
1650		}
1651
1652		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1653					VMW_RES_DIRTY_NONE,
1654					user_surface_converter,
1655					&cur_state->value, &res);
1656		if (unlikely(ret != 0))
1657			return ret;
1658
1659		if (dev_priv->has_mob) {
1660			struct vmw_ctx_bindinfo_tex binding;
1661			struct vmw_ctx_validation_info *node;
1662
1663			node = vmw_execbuf_info_from_res(sw_context, ctx);
1664			if (!node)
1665				return -EINVAL;
1666
1667			binding.bi.ctx = ctx;
1668			binding.bi.res = res;
1669			binding.bi.bt = vmw_ctx_binding_tex;
1670			binding.texture_stage = cur_state->stage;
1671			vmw_binding_add(node->staged, &binding.bi, 0,
1672					binding.texture_stage);
1673		}
1674	}
1675
1676	return 0;
1677}
1678
1679static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1680				      struct vmw_sw_context *sw_context,
1681				      void *buf)
1682{
1683	struct vmw_bo *vmw_bo;
1684
1685	struct {
1686		uint32_t header;
1687		SVGAFifoCmdDefineGMRFB body;
1688	} *cmd = buf;
1689
1690	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
1691				       &vmw_bo);
1692}
1693
1694/**
1695 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1696 * switching
1697 *
1698 * @dev_priv: Pointer to a device private struct.
1699 * @sw_context: The software context being used for this batch.
1700 * @res: Pointer to the resource.
1701 * @buf_id: Pointer to the user-space backup buffer handle in the command
1702 * stream.
1703 * @backup_offset: Offset of backup into MOB.
1704 *
1705 * This function prepares for registering a switch of backup buffers in the
1706 * resource metadata just prior to unreserving. vmw_cmd_switch_backup() is a
1707 * thin wrapper around this function with a different interface.
1708 */
1709static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1710				     struct vmw_sw_context *sw_context,
1711				     struct vmw_resource *res, uint32_t *buf_id,
1712				     unsigned long backup_offset)
1713{
1714	struct vmw_bo *vbo;
1715	void *info;
1716	int ret;
1717
1718	info = vmw_execbuf_info_from_res(sw_context, res);
1719	if (!info)
1720		return -EINVAL;
1721
1722	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1723	if (ret)
1724		return ret;
1725
1726	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1727					 backup_offset);
1728	return 0;
1729}
1730
1731/**
1732 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1733 *
1734 * @dev_priv: Pointer to a device private struct.
1735 * @sw_context: The software context being used for this batch.
1736 * @res_type: The resource type.
1737 * @converter: Information about user-space binding for this resource type.
1738 * @res_id: Pointer to the user-space resource handle in the command stream.
1739 * @buf_id: Pointer to the user-space backup buffer handle in the command
1740 * stream.
1741 * @backup_offset: Offset of backup into MOB.
1742 *
1743 * This function prepares for registering a switch of backup buffers in the
1744 * resource metadata just prior to unreserving. It's basically a wrapper around
1745 * vmw_cmd_res_switch_backup with a different interface.
1746 */
1747static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1748				 struct vmw_sw_context *sw_context,
1749				 enum vmw_res_type res_type,
1750				 const struct vmw_user_resource_conv
1751				 *converter, uint32_t *res_id, uint32_t *buf_id,
1752				 unsigned long backup_offset)
1753{
1754	struct vmw_resource *res;
1755	int ret;
1756
1757	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1758				VMW_RES_DIRTY_NONE, converter, res_id, &res);
1759	if (ret)
1760		return ret;
1761
1762	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1763					 backup_offset);
1764}
1765
1766/**
1767 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1768 *
1769 * @dev_priv: Pointer to a device private struct.
1770 * @sw_context: The software context being used for this batch.
1771 * @header: Pointer to the command header in the command stream.
1772 */
1773static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1774				   struct vmw_sw_context *sw_context,
1775				   SVGA3dCmdHeader *header)
1776{
1777	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1778		container_of(header, typeof(*cmd), header);
1779
1780	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1781				     user_surface_converter, &cmd->body.sid,
1782				     &cmd->body.mobid, 0);
1783}
1784
1785/**
1786 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1787 *
1788 * @dev_priv: Pointer to a device private struct.
1789 * @sw_context: The software context being used for this batch.
1790 * @header: Pointer to the command header in the command stream.
1791 */
1792static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1793				   struct vmw_sw_context *sw_context,
1794				   SVGA3dCmdHeader *header)
1795{
1796	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1797		container_of(header, typeof(*cmd), header);
1798
1799	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1800				 VMW_RES_DIRTY_NONE, user_surface_converter,
1801				 &cmd->body.image.sid, NULL);
1802}
1803
1804/**
1805 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1806 *
1807 * @dev_priv: Pointer to a device private struct.
1808 * @sw_context: The software context being used for this batch.
1809 * @header: Pointer to the command header in the command stream.
1810 */
1811static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1812				     struct vmw_sw_context *sw_context,
1813				     SVGA3dCmdHeader *header)
1814{
1815	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1816		container_of(header, typeof(*cmd), header);
1817
1818	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1819				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1820				 &cmd->body.sid, NULL);
1821}
1822
1823/**
1824 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1825 *
1826 * @dev_priv: Pointer to a device private struct.
1827 * @sw_context: The software context being used for this batch.
1828 * @header: Pointer to the command header in the command stream.
1829 */
1830static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1831				     struct vmw_sw_context *sw_context,
1832				     SVGA3dCmdHeader *header)
1833{
1834	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1835		container_of(header, typeof(*cmd), header);
1836
1837	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1838				 VMW_RES_DIRTY_NONE, user_surface_converter,
1839				 &cmd->body.image.sid, NULL);
1840}
1841
1842/**
1843 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1844 * command
1845 *
1846 * @dev_priv: Pointer to a device private struct.
1847 * @sw_context: The software context being used for this batch.
1848 * @header: Pointer to the command header in the command stream.
1849 */
1850static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1851				       struct vmw_sw_context *sw_context,
1852				       SVGA3dCmdHeader *header)
1853{
1854	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1855		container_of(header, typeof(*cmd), header);
1856
1857	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1858				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1859				 &cmd->body.sid, NULL);
1860}
1861
1862/**
1863 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1864 * command
1865 *
1866 * @dev_priv: Pointer to a device private struct.
1867 * @sw_context: The software context being used for this batch.
1868 * @header: Pointer to the command header in the command stream.
1869 */
1870static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1871				       struct vmw_sw_context *sw_context,
1872				       SVGA3dCmdHeader *header)
1873{
1874	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1875		container_of(header, typeof(*cmd), header);
1876
1877	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1878				 VMW_RES_DIRTY_NONE, user_surface_converter,
1879				 &cmd->body.image.sid, NULL);
1880}
1881
1882/**
1883 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1884 * command
1885 *
1886 * @dev_priv: Pointer to a device private struct.
1887 * @sw_context: The software context being used for this batch.
1888 * @header: Pointer to the command header in the command stream.
1889 */
1890static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1891					 struct vmw_sw_context *sw_context,
1892					 SVGA3dCmdHeader *header)
1893{
1894	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1895		container_of(header, typeof(*cmd), header);
1896
1897	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1898				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1899				 &cmd->body.sid, NULL);
1900}
1901
1902/**
1903 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1904 *
1905 * @dev_priv: Pointer to a device private struct.
1906 * @sw_context: The software context being used for this batch.
1907 * @header: Pointer to the command header in the command stream.
1908 */
1909static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1910				 struct vmw_sw_context *sw_context,
1911				 SVGA3dCmdHeader *header)
1912{
1913	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1914	int ret;
1915	size_t size;
1916	struct vmw_resource *ctx;
1917
1918	cmd = container_of(header, typeof(*cmd), header);
1919
1920	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1921				VMW_RES_DIRTY_SET, user_context_converter,
1922				&cmd->body.cid, &ctx);
1923	if (unlikely(ret != 0))
1924		return ret;
1925
1926	if (unlikely(!dev_priv->has_mob))
1927		return 0;
1928
1929	size = cmd->header.size - sizeof(cmd->body);
1930	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1931				    cmd->body.shid, cmd + 1, cmd->body.type,
1932				    size, &sw_context->staged_cmd_res);
1933	if (unlikely(ret != 0))
1934		return ret;
1935
1936	return vmw_resource_relocation_add(sw_context, NULL,
1937					   vmw_ptr_diff(sw_context->buf_start,
1938							&cmd->header.id),
1939					   vmw_res_rel_nop);
1940}
1941
1942/**
1943 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1944 *
1945 * @dev_priv: Pointer to a device private struct.
1946 * @sw_context: The software context being used for this batch.
1947 * @header: Pointer to the command header in the command stream.
1948 */
1949static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1950				  struct vmw_sw_context *sw_context,
1951				  SVGA3dCmdHeader *header)
1952{
1953	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1954	int ret;
1955	struct vmw_resource *ctx;
1956
1957	cmd = container_of(header, typeof(*cmd), header);
1958
1959	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1960				VMW_RES_DIRTY_SET, user_context_converter,
1961				&cmd->body.cid, &ctx);
1962	if (unlikely(ret != 0))
1963		return ret;
1964
1965	if (unlikely(!dev_priv->has_mob))
1966		return 0;
1967
1968	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1969				cmd->body.type, &sw_context->staged_cmd_res);
1970	if (unlikely(ret != 0))
1971		return ret;
1972
1973	return vmw_resource_relocation_add(sw_context, NULL,
1974					   vmw_ptr_diff(sw_context->buf_start,
1975							&cmd->header.id),
1976					   vmw_res_rel_nop);
1977}
1978
1979/**
1980 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1981 *
1982 * @dev_priv: Pointer to a device private struct.
1983 * @sw_context: The software context being used for this batch.
1984 * @header: Pointer to the command header in the command stream.
1985 */
1986static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1987			      struct vmw_sw_context *sw_context,
1988			      SVGA3dCmdHeader *header)
1989{
1990	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1991	struct vmw_ctx_bindinfo_shader binding;
1992	struct vmw_resource *ctx, *res = NULL;
1993	struct vmw_ctx_validation_info *ctx_info;
1994	int ret;
1995
1996	cmd = container_of(header, typeof(*cmd), header);
1997
1998	if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
1999		VMW_DEBUG_USER("Illegal shader type %u.\n",
2000			       (unsigned int) cmd->body.type);
2001		return -EINVAL;
2002	}
2003
2004	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2005				VMW_RES_DIRTY_SET, user_context_converter,
2006				&cmd->body.cid, &ctx);
2007	if (unlikely(ret != 0))
2008		return ret;
2009
2010	if (!dev_priv->has_mob)
2011		return 0;
2012
2013	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2014		/*
2015		 * This is the compat shader path: the shaders are per-device
2016		 * guest-backed shaders, but user-space thinks they are
2017		 * per-context host-backed shaders.
2018		 */
2019		res = vmw_shader_lookup(vmw_context_res_man(ctx),
2020					cmd->body.shid, cmd->body.type);
2021		if (!IS_ERR(res)) {
2022			ret = vmw_execbuf_res_val_add(sw_context, res,
2023						      VMW_RES_DIRTY_NONE,
2024						      vmw_val_add_flag_noctx);
2025			if (unlikely(ret != 0))
2026				return ret;
2027
2028			ret = vmw_resource_relocation_add
2029				(sw_context, res,
2030				 vmw_ptr_diff(sw_context->buf_start,
2031					      &cmd->body.shid),
2032				 vmw_res_rel_normal);
2033			if (unlikely(ret != 0))
2034				return ret;
2035		}
2036	}
2037
2038	if (IS_ERR_OR_NULL(res)) {
2039		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2040					VMW_RES_DIRTY_NONE,
2041					user_shader_converter, &cmd->body.shid,
2042					&res);
2043		if (unlikely(ret != 0))
2044			return ret;
2045	}
2046
2047	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2048	if (!ctx_info)
2049		return -EINVAL;
2050
2051	binding.bi.ctx = ctx;
2052	binding.bi.res = res;
2053	binding.bi.bt = vmw_ctx_binding_shader;
2054	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2055	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2056
2057	return 0;
2058}
2059
2060/**
2061 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2062 *
2063 * @dev_priv: Pointer to a device private struct.
2064 * @sw_context: The software context being used for this batch.
2065 * @header: Pointer to the command header in the command stream.
2066 */
2067static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2068				    struct vmw_sw_context *sw_context,
2069				    SVGA3dCmdHeader *header)
2070{
2071	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2072	int ret;
2073
2074	cmd = container_of(header, typeof(*cmd), header);
2075
2076	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2077				VMW_RES_DIRTY_SET, user_context_converter,
2078				&cmd->body.cid, NULL);
2079	if (unlikely(ret != 0))
2080		return ret;
2081
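	/*
	 * On guest-backed (MOB) hardware the legacy command is redirected in
	 * place: only the id dword of the header in the command stream is
	 * rewritten, so the unchanged payload is submitted under
	 * SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE.
	 */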
2082	if (dev_priv->has_mob)
2083		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2084
2085	return 0;
2086}
2087
2088/**
2089 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2090 *
2091 * @dev_priv: Pointer to a device private struct.
2092 * @sw_context: The software context being used for this batch.
2093 * @header: Pointer to the command header in the command stream.
2094 */
2095static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2096				  struct vmw_sw_context *sw_context,
2097				  SVGA3dCmdHeader *header)
2098{
2099	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2100		container_of(header, typeof(*cmd), header);
2101
2102	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2103				     user_shader_converter, &cmd->body.shid,
2104				     &cmd->body.mobid, cmd->body.offsetInBytes);
2105}
2106
2107/**
2108 * vmw_cmd_dx_set_single_constant_buffer - Validate
2109 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2110 *
2111 * @dev_priv: Pointer to a device private struct.
2112 * @sw_context: The software context being used for this batch.
2113 * @header: Pointer to the command header in the command stream.
2114 */
2115static int
2116vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2117				      struct vmw_sw_context *sw_context,
2118				      SVGA3dCmdHeader *header)
2119{
2120	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2121
2122	struct vmw_resource *res = NULL;
2123	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2124	struct vmw_ctx_bindinfo_cb binding;
2125	int ret;
2126
2127	if (!ctx_node)
2128		return -EINVAL;
2129
2130	cmd = container_of(header, typeof(*cmd), header);
2131	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2132				VMW_RES_DIRTY_NONE, user_surface_converter,
2133				&cmd->body.sid, &res);
2134	if (unlikely(ret != 0))
2135		return ret;
2136
2137	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
2138	    cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2139		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2140			       (unsigned int) cmd->body.type,
2141			       (unsigned int) cmd->body.slot);
2142		return -EINVAL;
2143	}
2144
2145	binding.bi.ctx = ctx_node->ctx;
2146	binding.bi.res = res;
2147	binding.bi.bt = vmw_ctx_binding_cb;
2148	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2149	binding.offset = cmd->body.offsetInBytes;
2150	binding.size = cmd->body.sizeInBytes;
2151	binding.slot = cmd->body.slot;
2152
2153	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2154			binding.slot);
2155
2156	return 0;
2157}
2158
2159/**
2160 * vmw_cmd_dx_set_constant_buffer_offset - Validate
2161 * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
2162 *
2163 * @dev_priv: Pointer to a device private struct.
2164 * @sw_context: The software context being used for this batch.
2165 * @header: Pointer to the command header in the command stream.
2166 */
2167static int
2168vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
2169				      struct vmw_sw_context *sw_context,
2170				      SVGA3dCmdHeader *header)
2171{
2172	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);
2173
2174	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2175	u32 shader_slot;
2176
2177	if (!has_sm5_context(dev_priv))
2178		return -EINVAL;
2179
2180	if (!ctx_node)
2181		return -EINVAL;
2182
2183	cmd = container_of(header, typeof(*cmd), header);
2184	if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2185		VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
2186			       (unsigned int) cmd->body.slot);
2187		return -EINVAL;
2188	}
2189
2190	shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
2191	vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
2192				     cmd->body.slot, cmd->body.offsetInBytes);
2193
2194	return 0;
2195}
2196
2197/**
2198 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2199 * command
2200 *
2201 * @dev_priv: Pointer to a device private struct.
2202 * @sw_context: The software context being used for this batch.
2203 * @header: Pointer to the command header in the command stream.
2204 */
2205static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2206				     struct vmw_sw_context *sw_context,
2207				     SVGA3dCmdHeader *header)
2208{
2209	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2210		container_of(header, typeof(*cmd), header);
2211
2212	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2213		sizeof(SVGA3dShaderResourceViewId);
2214
2215	if ((u64) cmd->body.startView + (u64) num_sr_view >
2216	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2217	    !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
2218		VMW_DEBUG_USER("Invalid shader binding.\n");
2219		return -EINVAL;
2220	}
2221
2222	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2223				     vmw_ctx_binding_sr,
2224				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2225				     (void *) &cmd[1], num_sr_view,
2226				     cmd->body.startView);
2227}
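/*
 * A minimal sketch of the stream layout this validator assumes; the view
 * ids follow the fixed-size body directly, which is why the call above
 * passes &cmd[1] and derives num_sr_view from the remaining header size
 * (the trailing array name is illustrative only):
 *
 *	SVGA3dCmdHeader                header;
 *	SVGA3dCmdDXSetShaderResources  body;                     type, startView
 *	SVGA3dShaderResourceViewId     viewIds[num_sr_view];
 */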
2228
2229/**
2230 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2231 *
2232 * @dev_priv: Pointer to a device private struct.
2233 * @sw_context: The software context being used for this batch.
2234 * @header: Pointer to the command header in the command stream.
2235 */
2236static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2237				 struct vmw_sw_context *sw_context,
2238				 SVGA3dCmdHeader *header)
2239{
2240	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2241	struct vmw_resource *res = NULL;
2242	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2243	struct vmw_ctx_bindinfo_shader binding;
2244	int ret = 0;
2245
2246	if (!ctx_node)
2247		return -EINVAL;
2248
2249	cmd = container_of(header, typeof(*cmd), header);
2250
2251	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
2252		VMW_DEBUG_USER("Illegal shader type %u.\n",
2253			       (unsigned int) cmd->body.type);
2254		return -EINVAL;
2255	}
2256
2257	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2258		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2259		if (IS_ERR(res)) {
2260			VMW_DEBUG_USER("Could not find shader for binding.\n");
2261			return PTR_ERR(res);
2262		}
2263
2264		ret = vmw_execbuf_res_val_add(sw_context, res,
2265					      VMW_RES_DIRTY_NONE,
2266					      vmw_val_add_flag_noctx);
2267		if (ret)
2268			return ret;
2269	}
2270
2271	binding.bi.ctx = ctx_node->ctx;
2272	binding.bi.res = res;
2273	binding.bi.bt = vmw_ctx_binding_dx_shader;
2274	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2275
2276	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2277
2278	return 0;
2279}
2280
2281/**
2282 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2283 * command
2284 *
2285 * @dev_priv: Pointer to a device private struct.
2286 * @sw_context: The software context being used for this batch.
2287 * @header: Pointer to the command header in the command stream.
2288 */
2289static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2290					 struct vmw_sw_context *sw_context,
2291					 SVGA3dCmdHeader *header)
2292{
2293	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2294	struct vmw_ctx_bindinfo_vb binding;
2295	struct vmw_resource *res;
2296	struct {
2297		SVGA3dCmdHeader header;
2298		SVGA3dCmdDXSetVertexBuffers body;
2299		SVGA3dVertexBuffer buf[];
2300	} *cmd;
2301	int i, ret, num;
2302
2303	if (!ctx_node)
2304		return -EINVAL;
2305
2306	cmd = container_of(header, typeof(*cmd), header);
2307	num = (cmd->header.size - sizeof(cmd->body)) /
2308		sizeof(SVGA3dVertexBuffer);
2309	if ((u64)num + (u64)cmd->body.startBuffer >
2310	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2311		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2312		return -EINVAL;
2313	}
2314
2315	for (i = 0; i < num; i++) {
2316		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2317					VMW_RES_DIRTY_NONE,
2318					user_surface_converter,
2319					&cmd->buf[i].sid, &res);
2320		if (unlikely(ret != 0))
2321			return ret;
2322
2323		binding.bi.ctx = ctx_node->ctx;
2324		binding.bi.bt = vmw_ctx_binding_vb;
2325		binding.bi.res = res;
2326		binding.offset = cmd->buf[i].offset;
2327		binding.stride = cmd->buf[i].stride;
2328		binding.slot = i + cmd->body.startBuffer;
2329
2330		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2331	}
2332
2333	return 0;
2334}
2335
2336/**
2337 * vmw_cmd_dx_set_index_buffer - Validate
2338 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
2339 *
2340 * @dev_priv: Pointer to a device private struct.
2341 * @sw_context: The software context being used for this batch.
2342 * @header: Pointer to the command header in the command stream.
2343 */
2344static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2345				       struct vmw_sw_context *sw_context,
2346				       SVGA3dCmdHeader *header)
2347{
2348	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2349	struct vmw_ctx_bindinfo_ib binding;
2350	struct vmw_resource *res;
2351	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2352	int ret;
2353
2354	if (!ctx_node)
2355		return -EINVAL;
2356
2357	cmd = container_of(header, typeof(*cmd), header);
2358	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2359				VMW_RES_DIRTY_NONE, user_surface_converter,
2360				&cmd->body.sid, &res);
2361	if (unlikely(ret != 0))
2362		return ret;
2363
2364	binding.bi.ctx = ctx_node->ctx;
2365	binding.bi.res = res;
2366	binding.bi.bt = vmw_ctx_binding_ib;
2367	binding.offset = cmd->body.offset;
2368	binding.format = cmd->body.format;
2369
2370	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2371
2372	return 0;
2373}
2374
2375/**
2376 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2377 * command
2378 *
2379 * @dev_priv: Pointer to a device private struct.
2380 * @sw_context: The software context being used for this batch.
2381 * @header: Pointer to the command header in the command stream.
2382 */
2383static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2384					struct vmw_sw_context *sw_context,
2385					SVGA3dCmdHeader *header)
2386{
2387	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2388		container_of(header, typeof(*cmd), header);
2389	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2390		sizeof(SVGA3dRenderTargetViewId);
2391	int ret;
2392
2393	if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) {
2394		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2395		return -EINVAL;
2396	}
2397
2398	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2399				    0, &cmd->body.depthStencilViewId, 1, 0);
2400	if (ret)
2401		return ret;
2402
2403	return vmw_view_bindings_add(sw_context, vmw_view_rt,
2404				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2405				     num_rt_view, 0);
2406}
2407
2408/**
2409 * vmw_cmd_dx_clear_rendertarget_view - Validate
2410 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2411 *
2412 * @dev_priv: Pointer to a device private struct.
2413 * @sw_context: The software context being used for this batch.
2414 * @header: Pointer to the command header in the command stream.
2415 */
2416static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2417					      struct vmw_sw_context *sw_context,
2418					      SVGA3dCmdHeader *header)
2419{
2420	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2421		container_of(header, typeof(*cmd), header);
2422	struct vmw_resource *ret;
2423
2424	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
2425				  cmd->body.renderTargetViewId);
2426
2427	return PTR_ERR_OR_ZERO(ret);
2428}
2429
2430/**
2431 * vmw_cmd_dx_clear_depthstencil_view - Validate
2432 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2433 *
2434 * @dev_priv: Pointer to a device private struct.
2435 * @sw_context: The software context being used for this batch.
2436 * @header: Pointer to the command header in the command stream.
2437 */
2438static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2439					      struct vmw_sw_context *sw_context,
2440					      SVGA3dCmdHeader *header)
2441{
2442	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2443		container_of(header, typeof(*cmd), header);
2444	struct vmw_resource *ret;
2445
2446	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
2447				  cmd->body.depthStencilViewId);
2448
2449	return PTR_ERR_OR_ZERO(ret);
2450}
2451
2452static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2453				  struct vmw_sw_context *sw_context,
2454				  SVGA3dCmdHeader *header)
2455{
2456	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2457	struct vmw_resource *srf;
2458	struct vmw_resource *res;
2459	enum vmw_view_type view_type;
2460	int ret;
2461	/*
2462	 * This is based on the fact that all affected define commands have the
2463	 * same initial command body layout.
2464	 */
2465	struct {
2466		SVGA3dCmdHeader header;
2467		uint32 defined_id;
2468		uint32 sid;
2469	} *cmd;
2470
2471	if (!ctx_node)
2472		return -EINVAL;
2473
2474	view_type = vmw_view_cmd_to_type(header->id);
2475	if (view_type == vmw_view_max)
2476		return -EINVAL;
2477
2478	cmd = container_of(header, typeof(*cmd), header);
2479	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2480		VMW_DEBUG_USER("Invalid surface id.\n");
2481		return -EINVAL;
2482	}
2483	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2484				VMW_RES_DIRTY_NONE, user_surface_converter,
2485				&cmd->sid, &srf);
2486	if (unlikely(ret != 0))
2487		return ret;
2488
2489	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2490	if (IS_ERR_OR_NULL(res))
2491		return res ? PTR_ERR(res) : -EINVAL;
2492	ret = vmw_cotable_notify(res, cmd->defined_id);
2493	if (unlikely(ret != 0))
2494		return ret;
2495
2496	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2497			    cmd->defined_id, header,
2498			    header->size + sizeof(*header),
2499			    &sw_context->staged_cmd_res);
2500}
2501
2502/**
2503 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2504 *
2505 * @dev_priv: Pointer to a device private struct.
2506 * @sw_context: The software context being used for this batch.
2507 * @header: Pointer to the command header in the command stream.
2508 */
2509static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2510				     struct vmw_sw_context *sw_context,
2511				     SVGA3dCmdHeader *header)
2512{
2513	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2514	struct vmw_ctx_bindinfo_so_target binding;
2515	struct vmw_resource *res;
2516	struct {
2517		SVGA3dCmdHeader header;
2518		SVGA3dCmdDXSetSOTargets body;
2519		SVGA3dSoTarget targets[];
2520	} *cmd;
2521	int i, ret, num;
2522
2523	if (!ctx_node)
2524		return -EINVAL;
2525
2526	cmd = container_of(header, typeof(*cmd), header);
2527	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2528
2529	if (num > SVGA3D_DX_MAX_SOTARGETS) {
2530		VMW_DEBUG_USER("Invalid DX SO binding.\n");
2531		return -EINVAL;
2532	}
2533
2534	for (i = 0; i < num; i++) {
2535		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2536					VMW_RES_DIRTY_SET,
2537					user_surface_converter,
2538					&cmd->targets[i].sid, &res);
2539		if (unlikely(ret != 0))
2540			return ret;
2541
2542		binding.bi.ctx = ctx_node->ctx;
2543		binding.bi.res = res;
2544		binding.bi.bt = vmw_ctx_binding_so_target;
2545		binding.offset = cmd->targets[i].offset;
2546		binding.size = cmd->targets[i].sizeInBytes;
2547		binding.slot = i;
2548
2549		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2550	}
2551
2552	return 0;
2553}
2554
2555static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2556				struct vmw_sw_context *sw_context,
2557				SVGA3dCmdHeader *header)
2558{
2559	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2560	struct vmw_resource *res;
2561	/*
2562	 * This is based on the fact that all affected define commands have
2563	 * the same initial command body layout.
2564	 */
2565	struct {
2566		SVGA3dCmdHeader header;
2567		uint32 defined_id;
2568	} *cmd;
2569	enum vmw_so_type so_type;
2570	int ret;
2571
2572	if (!ctx_node)
2573		return -EINVAL;
2574
2575	so_type = vmw_so_cmd_to_type(header->id);
2576	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2577	if (IS_ERR_OR_NULL(res))
2578		return res ? PTR_ERR(res) : -EINVAL;
2579	cmd = container_of(header, typeof(*cmd), header);
2580	ret = vmw_cotable_notify(res, cmd->defined_id);
2581
2582	return ret;
2583}
2584
2585/**
2586 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2587 * command
2588 *
2589 * @dev_priv: Pointer to a device private struct.
2590 * @sw_context: The software context being used for this batch.
2591 * @header: Pointer to the command header in the command stream.
2592 */
2593static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2594					struct vmw_sw_context *sw_context,
2595					SVGA3dCmdHeader *header)
2596{
2597	struct {
2598		SVGA3dCmdHeader header;
2599		union {
2600			SVGA3dCmdDXReadbackSubResource r_body;
2601			SVGA3dCmdDXInvalidateSubResource i_body;
2602			SVGA3dCmdDXUpdateSubResource u_body;
2603			SVGA3dSurfaceId sid;
2604		};
2605	} *cmd;
2606
2607	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2608		     offsetof(typeof(*cmd), sid));
2609	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2610		     offsetof(typeof(*cmd), sid));
2611	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2612		     offsetof(typeof(*cmd), sid));
2613
2614	cmd = container_of(header, typeof(*cmd), header);
2615	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2616				 VMW_RES_DIRTY_NONE, user_surface_converter,
2617				 &cmd->sid, NULL);
2618}
2619
2620static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2621				struct vmw_sw_context *sw_context,
2622				SVGA3dCmdHeader *header)
2623{
2624	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2625
2626	if (!ctx_node)
2627		return -EINVAL;
2628
2629	return 0;
2630}
2631
2632/**
2633 * vmw_cmd_dx_view_remove - Validate a view remove command and schedule the view
2634 * resource for removal.
2635 *
2636 * @dev_priv: Pointer to a device private struct.
2637 * @sw_context: The software context being used for this batch.
2638 * @header: Pointer to the command header in the command stream.
2639 *
2640 * Check that the view exists, and if it was not created using this command
2641 * batch, conditionally make this command a NOP.
2642 */
2643static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2644				  struct vmw_sw_context *sw_context,
2645				  SVGA3dCmdHeader *header)
2646{
2647	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2648	struct {
2649		SVGA3dCmdHeader header;
2650		union vmw_view_destroy body;
2651	} *cmd = container_of(header, typeof(*cmd), header);
2652	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2653	struct vmw_resource *view;
2654	int ret;
2655
2656	if (!ctx_node)
2657		return -EINVAL;
2658
2659	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2660			      &sw_context->staged_cmd_res, &view);
2661	if (ret || !view)
2662		return ret;
2663
2664	/*
2665	 * If the view wasn't created during this command batch, it might
2666	 * have been removed due to a context swapout, so add a
2667	 * relocation to conditionally make this command a NOP to avoid
2668	 * device errors.
2669	 */
2670	return vmw_resource_relocation_add(sw_context, view,
2671					   vmw_ptr_diff(sw_context->buf_start,
2672							&cmd->header.id),
2673					   vmw_res_rel_cond_nop);
2674}
2675
2676/**
2677 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2678 *
2679 * @dev_priv: Pointer to a device private struct.
2680 * @sw_context: The software context being used for this batch.
2681 * @header: Pointer to the command header in the command stream.
2682 */
2683static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2684				    struct vmw_sw_context *sw_context,
2685				    SVGA3dCmdHeader *header)
2686{
2687	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2688	struct vmw_resource *res;
2689	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2690		container_of(header, typeof(*cmd), header);
2691	int ret;
2692
2693	if (!ctx_node)
2694		return -EINVAL;
2695
2696	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2697	if (IS_ERR_OR_NULL(res))
2698		return res ? PTR_ERR(res) : -EINVAL;
2699	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2700	if (ret)
2701		return ret;
2702
2703	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2704				 cmd->body.shaderId, cmd->body.type,
2705				 &sw_context->staged_cmd_res);
2706}
2707
2708/**
2709 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2710 *
2711 * @dev_priv: Pointer to a device private struct.
2712 * @sw_context: The software context being used for this batch.
2713 * @header: Pointer to the command header in the command stream.
2714 */
2715static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2716				     struct vmw_sw_context *sw_context,
2717				     SVGA3dCmdHeader *header)
2718{
2719	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2720	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2721		container_of(header, typeof(*cmd), header);
2722	int ret;
2723
2724	if (!ctx_node)
2725		return -EINVAL;
2726
2727	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2728				&sw_context->staged_cmd_res);
2729
2730	return ret;
2731}
2732
2733/**
2734 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2735 *
2736 * @dev_priv: Pointer to a device private struct.
2737 * @sw_context: The software context being used for this batch.
2738 * @header: Pointer to the command header in the command stream.
2739 */
2740static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2741				  struct vmw_sw_context *sw_context,
2742				  SVGA3dCmdHeader *header)
2743{
2744	struct vmw_resource *ctx;
2745	struct vmw_resource *res;
2746	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2747		container_of(header, typeof(*cmd), header);
2748	int ret;
2749
2750	if (cmd->body.cid != SVGA3D_INVALID_ID) {
2751		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2752					VMW_RES_DIRTY_SET,
2753					user_context_converter, &cmd->body.cid,
2754					&ctx);
2755		if (ret)
2756			return ret;
2757	} else {
2758		struct vmw_ctx_validation_info *ctx_node =
2759			VMW_GET_CTX_NODE(sw_context);
2760
2761		if (!ctx_node)
2762			return -EINVAL;
2763
2764		ctx = ctx_node->ctx;
2765	}
2766
2767	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2768	if (IS_ERR(res)) {
2769		VMW_DEBUG_USER("Could not find shader to bind.\n");
2770		return PTR_ERR(res);
2771	}
2772
2773	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
2774				      vmw_val_add_flag_noctx);
2775	if (ret) {
2776		VMW_DEBUG_USER("Error creating resource validation node.\n");
2777		return ret;
2778	}
2779
2780	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2781					 &cmd->body.mobid,
2782					 cmd->body.offsetInBytes);
2783}
2784
2785/**
2786 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2787 *
2788 * @dev_priv: Pointer to a device private struct.
2789 * @sw_context: The software context being used for this batch.
2790 * @header: Pointer to the command header in the command stream.
2791 */
2792static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2793			      struct vmw_sw_context *sw_context,
2794			      SVGA3dCmdHeader *header)
2795{
2796	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2797		container_of(header, typeof(*cmd), header);
2798	struct vmw_resource *view;
2799	struct vmw_res_cache_entry *rcache;
2800
2801	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
2802				   cmd->body.shaderResourceViewId);
2803	if (IS_ERR(view))
2804		return PTR_ERR(view);
2805
2806	/*
2807	 * Normally the shader-resource view is not gpu-dirtying, but for
2808	 * this particular command it is...
2809	 * So mark the last looked-up surface, which is the surface
2810	 * the view points to, gpu-dirty.
2811	 */
2812	rcache = &sw_context->res_cache[vmw_res_surface];
2813	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
2814				     VMW_RES_DIRTY_SET);
2815	return 0;
2816}
2817
2818/**
2819 * vmw_cmd_dx_transfer_from_buffer - Validate
2820 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2821 *
2822 * @dev_priv: Pointer to a device private struct.
2823 * @sw_context: The software context being used for this batch.
2824 * @header: Pointer to the command header in the command stream.
2825 */
2826static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2827					   struct vmw_sw_context *sw_context,
2828					   SVGA3dCmdHeader *header)
2829{
2830	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2831		container_of(header, typeof(*cmd), header);
2832	int ret;
2833
2834	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2835				VMW_RES_DIRTY_NONE, user_surface_converter,
2836				&cmd->body.srcSid, NULL);
2837	if (ret != 0)
2838		return ret;
2839
2840	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2841				 VMW_RES_DIRTY_SET, user_surface_converter,
2842				 &cmd->body.destSid, NULL);
2843}
2844
2845/**
2846 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2847 *
2848 * @dev_priv: Pointer to a device private struct.
2849 * @sw_context: The software context being used for this batch.
2850 * @header: Pointer to the command header in the command stream.
2851 */
2852static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2853					   struct vmw_sw_context *sw_context,
2854					   SVGA3dCmdHeader *header)
2855{
2856	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2857		container_of(header, typeof(*cmd), header);
2858
2859	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2860		return -EINVAL;
2861
2862	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2863				 VMW_RES_DIRTY_SET, user_surface_converter,
2864				 &cmd->body.surface.sid, NULL);
2865}
2866
2867static int vmw_cmd_sm5(struct vmw_private *dev_priv,
2868		       struct vmw_sw_context *sw_context,
2869		       SVGA3dCmdHeader *header)
2870{
2871	if (!has_sm5_context(dev_priv))
2872		return -EINVAL;
2873
2874	return 0;
2875}
2876
2877static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
2878				   struct vmw_sw_context *sw_context,
2879				   SVGA3dCmdHeader *header)
2880{
2881	if (!has_sm5_context(dev_priv))
2882		return -EINVAL;
2883
2884	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
2885}
2886
2887static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
2888				   struct vmw_sw_context *sw_context,
2889				   SVGA3dCmdHeader *header)
2890{
2891	if (!has_sm5_context(dev_priv))
2892		return -EINVAL;
2893
2894	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
2895}
2896
2897static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
2898				  struct vmw_sw_context *sw_context,
2899				  SVGA3dCmdHeader *header)
2900{
2901	struct {
2902		SVGA3dCmdHeader header;
2903		SVGA3dCmdDXClearUAViewUint body;
2904	} *cmd = container_of(header, typeof(*cmd), header);
2905	struct vmw_resource *ret;
2906
2907	if (!has_sm5_context(dev_priv))
2908		return -EINVAL;
2909
2910	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2911				  cmd->body.uaViewId);
2912
2913	return PTR_ERR_OR_ZERO(ret);
2914}
2915
2916static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
2917				   struct vmw_sw_context *sw_context,
2918				   SVGA3dCmdHeader *header)
2919{
2920	struct {
2921		SVGA3dCmdHeader header;
2922		SVGA3dCmdDXClearUAViewFloat body;
2923	} *cmd = container_of(header, typeof(*cmd), header);
2924	struct vmw_resource *ret;
2925
2926	if (!has_sm5_context(dev_priv))
2927		return -EINVAL;
2928
2929	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2930				  cmd->body.uaViewId);
2931
2932	return PTR_ERR_OR_ZERO(ret);
2933}
2934
2935static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
2936			   struct vmw_sw_context *sw_context,
2937			   SVGA3dCmdHeader *header)
2938{
2939	struct {
2940		SVGA3dCmdHeader header;
2941		SVGA3dCmdDXSetUAViews body;
2942	} *cmd = container_of(header, typeof(*cmd), header);
2943	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2944		sizeof(SVGA3dUAViewId);
2945	int ret;
2946
2947	if (!has_sm5_context(dev_priv))
2948		return -EINVAL;
2949
2950	if (num_uav > vmw_max_num_uavs(dev_priv)) {
2951		VMW_DEBUG_USER("Invalid UAV binding.\n");
2952		return -EINVAL;
2953	}
2954
2955	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2956				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
2957				    num_uav, 0);
2958	if (ret)
2959		return ret;
2960
2961	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
2962					 cmd->body.uavSpliceIndex);
2963
2964	return ret;
2965}
2966
2967static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
2968			      struct vmw_sw_context *sw_context,
2969			      SVGA3dCmdHeader *header)
2970{
2971	struct {
2972		SVGA3dCmdHeader header;
2973		SVGA3dCmdDXSetCSUAViews body;
2974	} *cmd = container_of(header, typeof(*cmd), header);
2975	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2976		sizeof(SVGA3dUAViewId);
2977	int ret;
2978
2979	if (!has_sm5_context(dev_priv))
2980		return -EINVAL;
2981
2982	if (num_uav > vmw_max_num_uavs(dev_priv)) {
2983		VMW_DEBUG_USER("Invalid UAV binding.\n");
2984		return -EINVAL;
2985	}
2986
2987	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2988				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
2989				    num_uav, 0);
2990	if (ret)
2991		return ret;
2992
2993	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
2994				  cmd->body.startIndex);
2995
2996	return ret;
2997}
2998
2999static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
3000					  struct vmw_sw_context *sw_context,
3001					  SVGA3dCmdHeader *header)
3002{
3003	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3004	struct vmw_resource *res;
3005	struct {
3006		SVGA3dCmdHeader header;
3007		SVGA3dCmdDXDefineStreamOutputWithMob body;
3008	} *cmd = container_of(header, typeof(*cmd), header);
3009	int ret;
3010
3011	if (!has_sm5_context(dev_priv))
3012		return -EINVAL;
3013
3014	if (!ctx_node) {
3015		DRM_ERROR("DX Context not set.\n");
3016		return -EINVAL;
3017	}
3018
3019	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
3020	if (IS_ERR_OR_NULL(res))
3021		return res ? PTR_ERR(res) : -EINVAL;
3022	ret = vmw_cotable_notify(res, cmd->body.soid);
3023	if (ret)
3024		return ret;
3025
3026	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
3027				       cmd->body.soid,
3028				       &sw_context->staged_cmd_res);
3029}
3030
3031static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
3032					   struct vmw_sw_context *sw_context,
3033					   SVGA3dCmdHeader *header)
3034{
3035	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3036	struct vmw_resource *res;
3037	struct {
3038		SVGA3dCmdHeader header;
3039		SVGA3dCmdDXDestroyStreamOutput body;
3040	} *cmd = container_of(header, typeof(*cmd), header);
3041
3042	if (!ctx_node) {
3043		DRM_ERROR("DX Context not set.\n");
3044		return -EINVAL;
3045	}
3046
3047	/*
3048	 * When the device does not support SM5, the streamoutput-with-mob command
3049	 * is not available to user-space. Simply return in this case.
3050	 */
3051	if (!has_sm5_context(dev_priv))
3052		return 0;
3053
3054	/*
3055	 * On an SM5-capable device, if the lookup fails then user-space probably
3056	 * used the old streamoutput define command. Return without an error.
3057	 */
3058	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3059					 cmd->body.soid);
3060	if (IS_ERR(res))
3061		return 0;
3062
3063	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
3064					  &sw_context->staged_cmd_res);
3065}
3066
3067static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
3068					struct vmw_sw_context *sw_context,
3069					SVGA3dCmdHeader *header)
3070{
3071	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3072	struct vmw_resource *res;
3073	struct {
3074		SVGA3dCmdHeader header;
3075		SVGA3dCmdDXBindStreamOutput body;
3076	} *cmd = container_of(header, typeof(*cmd), header);
3077	int ret;
3078
3079	if (!has_sm5_context(dev_priv))
3080		return -EINVAL;
3081
3082	if (!ctx_node) {
3083		DRM_ERROR("DX Context not set.\n");
3084		return -EINVAL;
3085	}
3086
3087	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3088					 cmd->body.soid);
3089	if (IS_ERR(res)) {
3090		DRM_ERROR("Could not find streamoutput to bind.\n");
3091		return PTR_ERR(res);
3092	}
3093
3094	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
3095
3096	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3097				      vmw_val_add_flag_noctx);
3098	if (ret) {
3099		DRM_ERROR("Error creating resource validation node.\n");
3100		return ret;
3101	}
3102
3103	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
3104					 &cmd->body.mobid,
3105					 cmd->body.offsetInBytes);
3106}
3107
3108static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
3109				       struct vmw_sw_context *sw_context,
3110				       SVGA3dCmdHeader *header)
3111{
3112	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3113	struct vmw_resource *res;
3114	struct vmw_ctx_bindinfo_so binding;
3115	struct {
3116		SVGA3dCmdHeader header;
3117		SVGA3dCmdDXSetStreamOutput body;
3118	} *cmd = container_of(header, typeof(*cmd), header);
3119	int ret;
3120
3121	if (!ctx_node) {
3122		DRM_ERROR("DX Context not set.\n");
3123		return -EINVAL;
3124	}
3125
3126	if (cmd->body.soid == SVGA3D_INVALID_ID)
3127		return 0;
3128
3129	/*
3130	 * When the device does not support SM5, the streamoutput-with-mob command
3131	 * is not available to user-space. Simply return in this case.
3132	 */
3133	if (!has_sm5_context(dev_priv))
3134		return 0;
3135
3136	/*
3137	 * On an SM5-capable device, if the lookup fails then user-space probably
3138	 * used the old streamoutput define command. Return without an error.
3139	 */
3140	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3141					 cmd->body.soid);
3142	if (IS_ERR(res)) {
3143		return 0;
3144	}
3145
3146	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3147				      vmw_val_add_flag_noctx);
3148	if (ret) {
3149		DRM_ERROR("Error creating resource validation node.\n");
3150		return ret;
3151	}
3152
3153	binding.bi.ctx = ctx_node->ctx;
3154	binding.bi.res = res;
3155	binding.bi.bt = vmw_ctx_binding_so;
3156	binding.slot = 0; /* Only one SO set to context at a time. */
3157
3158	vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
3159			binding.slot);
3160
3161	return ret;
3162}
3163
3164static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
3165					      struct vmw_sw_context *sw_context,
3166					      SVGA3dCmdHeader *header)
3167{
3168	struct vmw_draw_indexed_instanced_indirect_cmd {
3169		SVGA3dCmdHeader header;
3170		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
3171	} *cmd = container_of(header, typeof(*cmd), header);
3172
3173	if (!has_sm5_context(dev_priv))
3174		return -EINVAL;
3175
3176	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3177				 VMW_RES_DIRTY_NONE, user_surface_converter,
3178				 &cmd->body.argsBufferSid, NULL);
3179}
3180
3181static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
3182				      struct vmw_sw_context *sw_context,
3183				      SVGA3dCmdHeader *header)
3184{
3185	struct vmw_draw_instanced_indirect_cmd {
3186		SVGA3dCmdHeader header;
3187		SVGA3dCmdDXDrawInstancedIndirect body;
3188	} *cmd = container_of(header, typeof(*cmd), header);
3189
3190	if (!has_sm5_context(dev_priv))
3191		return -EINVAL;
3192
3193	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3194				 VMW_RES_DIRTY_NONE, user_surface_converter,
3195				 &cmd->body.argsBufferSid, NULL);
3196}
3197
3198static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
3199				     struct vmw_sw_context *sw_context,
3200				     SVGA3dCmdHeader *header)
3201{
3202	struct vmw_dispatch_indirect_cmd {
3203		SVGA3dCmdHeader header;
3204		SVGA3dCmdDXDispatchIndirect body;
3205	} *cmd = container_of(header, typeof(*cmd), header);
3206
3207	if (!has_sm5_context(dev_priv))
3208		return -EINVAL;
3209
3210	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3211				 VMW_RES_DIRTY_NONE, user_surface_converter,
3212				 &cmd->body.argsBufferSid, NULL);
3213}
3214
3215static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3216				struct vmw_sw_context *sw_context,
3217				void *buf, uint32_t *size)
3218{
3219	uint32_t size_remaining = *size;
3220	uint32_t cmd_id;
3221
3222	cmd_id = ((uint32_t *)buf)[0];
3223	switch (cmd_id) {
3224	case SVGA_CMD_UPDATE:
3225		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3226		break;
3227	case SVGA_CMD_DEFINE_GMRFB:
3228		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3229		break;
3230	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3231		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3232		break;
3233	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3234		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3235		break;
3236	default:
3237		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
3238		return -EINVAL;
3239	}
3240
3241	if (*size > size_remaining) {
3242		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
3243			       cmd_id);
3244		return -EINVAL;
3245	}
3246
3247	if (unlikely(!sw_context->kernel)) {
3248		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
3249		return -EPERM;
3250	}
3251
3252	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3253		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3254
3255	return 0;
3256}
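/*
 * A worked example, using only the sizes referenced above: for
 * SVGA_CMD_UPDATE the verifier reports
 *
 *	*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
 *
 * i.e. the 32-bit command id followed by its fixed-size body, and the
 * comparison with size_remaining rejects a stream whose last command
 * would extend past the end of the submitted buffer.
 */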
3257
3258static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3259	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3260		    false, false, false),
3261	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3262		    false, false, false),
3263	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3264		    true, false, false),
3265	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3266		    true, false, false),
3267	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3268		    true, false, false),
3269	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3270		    false, false, false),
3271	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3272		    false, false, false),
3273	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3274		    true, false, false),
3275	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3276		    true, false, false),
3277	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3278		    true, false, false),
3279	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3280		    &vmw_cmd_set_render_target_check, true, false, false),
3281	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3282		    true, false, false),
3283	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3284		    true, false, false),
3285	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3286		    true, false, false),
3287	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3288		    true, false, false),
3289	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3290		    true, false, false),
3291	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3292		    true, false, false),
3293	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3294		    true, false, false),
3295	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3296		    false, false, false),
3297	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3298		    true, false, false),
3299	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3300		    true, false, false),
3301	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3302		    true, false, false),
3303	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3304		    true, false, false),
3305	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3306		    true, false, false),
3307	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3308		    true, false, false),
3309	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3310		    true, false, false),
3311	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3312		    true, false, false),
3313	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3314		    true, false, false),
3315	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3316		    true, false, false),
3317	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3318		    &vmw_cmd_blt_surf_screen_check, false, false, false),
3319	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3320		    false, false, false),
3321	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3322		    false, false, false),
3323	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3324		    false, false, false),
3325	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3326		    false, false, false),
3327	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3328		    false, false, false),
3329	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
3330		    false, false, false),
3331	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
3332		    false, false, false),
3333	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
3334	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
3335	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
3336	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
3337	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
3338	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
3339	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3340		    false, false, true),
3341	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3342		    false, false, true),
3343	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3344		    false, false, true),
3345	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3346		    false, false, true),
3347	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3348		    false, false, true),
3349	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3350		    false, false, true),
3351	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3352		    false, false, true),
3353	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3354		    false, false, true),
3355	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3356		    true, false, true),
3357	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3358		    false, false, true),
3359	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3360		    true, false, true),
3361	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3362		    &vmw_cmd_update_gb_surface, true, false, true),
3363	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3364		    &vmw_cmd_readback_gb_image, true, false, true),
3365	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3366		    &vmw_cmd_readback_gb_surface, true, false, true),
3367	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3368		    &vmw_cmd_invalidate_gb_image, true, false, true),
3369	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3370		    &vmw_cmd_invalidate_gb_surface, true, false, true),
3371	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3372		    false, false, true),
3373	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3374		    false, false, true),
3375	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3376		    false, false, true),
3377	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3378		    false, false, true),
3379	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3380		    false, false, true),
3381	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3382		    false, false, true),
3383	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3384		    true, false, true),
3385	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3386		    false, false, true),
3387	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3388		    false, false, false),
3389	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3390		    true, false, true),
3391	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3392		    true, false, true),
3393	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3394		    true, false, true),
3395	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3396		    true, false, true),
3397	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3398		    true, false, true),
3399	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3400		    false, false, true),
3401	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3402		    false, false, true),
3403	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3404		    false, false, true),
3405	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3406		    false, false, true),
3407	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3408		    false, false, true),
3409	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3410		    false, false, true),
3411	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3412		    false, false, true),
3413	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3414		    false, false, true),
3415	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3416		    false, false, true),
3417	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3418		    false, false, true),
3419	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3420		    true, false, true),
3421	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3422		    false, false, true),
3423	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3424		    false, false, true),
3425	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3426		    false, false, true),
3427	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3428		    false, false, true),
3429
3430	/* SM commands */
3431	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3432		    false, false, true),
3433	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3434		    false, false, true),
3435	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3436		    false, false, true),
3437	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3438		    false, false, true),
3439	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3440		    false, false, true),
3441	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3442		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3443	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3444		    &vmw_cmd_dx_set_shader_res, true, false, true),
3445	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3446		    true, false, true),
3447	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3448		    true, false, true),
3449	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3450		    true, false, true),
3451	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3452		    true, false, true),
3453	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3454		    true, false, true),
3455	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3456		    &vmw_cmd_dx_cid_check, true, false, true),
3457	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3458		    true, false, true),
3459	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3460		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3461	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3462		    &vmw_cmd_dx_set_index_buffer, true, false, true),
3463	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3464		    &vmw_cmd_dx_set_rendertargets, true, false, true),
3465	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3466		    true, false, true),
3467	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3468		    &vmw_cmd_dx_cid_check, true, false, true),
3469	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3470		    &vmw_cmd_dx_cid_check, true, false, true),
3471	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3472		    true, false, true),
3473	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3474		    true, false, true),
3475	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3476		    true, false, true),
3477	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3478		    &vmw_cmd_dx_cid_check, true, false, true),
3479	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3480		    true, false, true),
3481	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3482		    true, false, true),
3483	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3484		    true, false, true),
3485	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3486		    true, false, true),
3487	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3488		    true, false, true),
3489	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3490		    true, false, true),
3491	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3492		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3493	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3494		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3495	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3496		    true, false, true),
3497	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3498		    true, false, true),
3499	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3500		    &vmw_cmd_dx_check_subresource, true, false, true),
3501	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3502		    &vmw_cmd_dx_check_subresource, true, false, true),
3503	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3504		    &vmw_cmd_dx_check_subresource, true, false, true),
3505	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3506		    &vmw_cmd_dx_view_define, true, false, true),
3507	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3508		    &vmw_cmd_dx_view_remove, true, false, true),
3509	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3510		    &vmw_cmd_dx_view_define, true, false, true),
3511	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3512		    &vmw_cmd_dx_view_remove, true, false, true),
3513	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3514		    &vmw_cmd_dx_view_define, true, false, true),
3515	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3516		    &vmw_cmd_dx_view_remove, true, false, true),
3517	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3518		    &vmw_cmd_dx_so_define, true, false, true),
3519	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3520		    &vmw_cmd_dx_cid_check, true, false, true),
3521	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3522		    &vmw_cmd_dx_so_define, true, false, true),
3523	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3524		    &vmw_cmd_dx_cid_check, true, false, true),
3525	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3526		    &vmw_cmd_dx_so_define, true, false, true),
3527	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3528		    &vmw_cmd_dx_cid_check, true, false, true),
3529	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3530		    &vmw_cmd_dx_so_define, true, false, true),
3531	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3532		    &vmw_cmd_dx_cid_check, true, false, true),
3533	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3534		    &vmw_cmd_dx_so_define, true, false, true),
3535	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3536		    &vmw_cmd_dx_cid_check, true, false, true),
3537	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3538		    &vmw_cmd_dx_define_shader, true, false, true),
3539	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3540		    &vmw_cmd_dx_destroy_shader, true, false, true),
3541	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3542		    &vmw_cmd_dx_bind_shader, true, false, true),
3543	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3544		    &vmw_cmd_dx_so_define, true, false, true),
3545	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3546		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
3547	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
3548		    &vmw_cmd_dx_set_streamoutput, true, false, true),
3549	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3550		    &vmw_cmd_dx_set_so_targets, true, false, true),
3551	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3552		    &vmw_cmd_dx_cid_check, true, false, true),
3553	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3554		    &vmw_cmd_dx_cid_check, true, false, true),
3555	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3556		    &vmw_cmd_buffer_copy_check, true, false, true),
3557	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3558		    &vmw_cmd_pred_copy_check, true, false, true),
3559	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3560		    &vmw_cmd_dx_transfer_from_buffer,
3561		    true, false, true),
3562	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
3563		    &vmw_cmd_dx_set_constant_buffer_offset,
3564		    true, false, true),
3565	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
3566		    &vmw_cmd_dx_set_constant_buffer_offset,
3567		    true, false, true),
3568	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
3569		    &vmw_cmd_dx_set_constant_buffer_offset,
3570		    true, false, true),
3571	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
3572		    &vmw_cmd_dx_set_constant_buffer_offset,
3573		    true, false, true),
3574	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
3575		    &vmw_cmd_dx_set_constant_buffer_offset,
3576		    true, false, true),
3577	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
3578		    &vmw_cmd_dx_set_constant_buffer_offset,
3579		    true, false, true),
3580	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3581		    true, false, true),
3582
3583	/*
3584	 * SM5 commands
3585	 */
3586	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
3587		    true, false, true),
3588	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
3589		    true, false, true),
3590	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
3591		    true, false, true),
3592	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
3593		    &vmw_cmd_clear_uav_float, true, false, true),
3594	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
3595		    false, true),
3596	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
3597		    true),
3598	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
3599		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
3600	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
3601		    &vmw_cmd_instanced_indirect, true, false, true),
3602	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
3603	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
3604		    &vmw_cmd_dispatch_indirect, true, false, true),
3605	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
3606		    false, true),
3607	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
3608		    &vmw_cmd_sm5_view_define, true, false, true),
3609	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
3610		    &vmw_cmd_dx_define_streamoutput, true, false, true),
3611	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
3612		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
3613	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
3614		    &vmw_cmd_dx_so_define, true, false, true),
3615	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V4,
3616		    &vmw_cmd_invalid, false, false, true),
3617};
3618
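/*
 * vmw_cmd_describe - Look up the name and size of the command at @buf.
 *
 * Fills in @cmd with a printable command name and @size with the command
 * size. Returns true if the command id was recognized, false otherwise.
 */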
3619bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3620{
3621	u32 cmd_id = ((u32 *) buf)[0];
3622
3623	if (cmd_id >= SVGA_CMD_MAX) {
3624		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3625		const struct vmw_cmd_entry *entry;
3626
3627		*size = header->size + sizeof(SVGA3dCmdHeader);
3628		cmd_id = header->id;
3629		if (cmd_id >= SVGA_3D_CMD_MAX)
3630			return false;
3631
3632		cmd_id -= SVGA_3D_CMD_BASE;
3633		entry = &vmw_cmd_entries[cmd_id];
3634		*cmd = entry->cmd_name;
3635		return true;
3636	}
3637
3638	switch (cmd_id) {
3639	case SVGA_CMD_UPDATE:
3640		*cmd = "SVGA_CMD_UPDATE";
3641		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3642		break;
3643	case SVGA_CMD_DEFINE_GMRFB:
3644		*cmd = "SVGA_CMD_DEFINE_GMRFB";
3645		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3646		break;
3647	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3648		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3649		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3650		break;
3651	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3652		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3653		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3654		break;
3655	default:
3656		*cmd = "UNKNOWN";
3657		*size = 0;
3658		return false;
3659	}
3660
3661	return true;
3662}
3663
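/*
 * vmw_cmd_check - Verify a single command in the command stream.
 *
 * Non-3D (SVGA) commands are handed off to vmw_cmd_check_not_3d(). For 3D
 * commands the header is checked against the remaining buffer size, the
 * matching vmw_cmd_entries[] entry is looked up, the user_allow and
 * guest-backed flags are enforced, and the per-command verifier is called.
 * On return, *size holds the size of the checked command.
 */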
3664static int vmw_cmd_check(struct vmw_private *dev_priv,
3665			 struct vmw_sw_context *sw_context, void *buf,
3666			 uint32_t *size)
3667{
3668	uint32_t cmd_id;
3669	uint32_t size_remaining = *size;
3670	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3671	int ret;
3672	const struct vmw_cmd_entry *entry;
3673	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3674
3675	cmd_id = ((uint32_t *)buf)[0];
3676	/* Handle any non-3D commands */
3677	if (unlikely(cmd_id < SVGA_CMD_MAX))
3678		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3679
3680
3681	cmd_id = header->id;
3682	*size = header->size + sizeof(SVGA3dCmdHeader);
3683
3684	cmd_id -= SVGA_3D_CMD_BASE;
3685	if (unlikely(*size > size_remaining))
3686		goto out_invalid;
3687
3688	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3689		goto out_invalid;
3690
3691	entry = &vmw_cmd_entries[cmd_id];
3692	if (unlikely(!entry->func))
3693		goto out_invalid;
3694
3695	if (unlikely(!entry->user_allow && !sw_context->kernel))
3696		goto out_privileged;
3697
3698	if (unlikely(entry->gb_disable && gb))
3699		goto out_old;
3700
3701	if (unlikely(entry->gb_enable && !gb))
3702		goto out_new;
3703
3704	ret = entry->func(dev_priv, sw_context, header);
3705	if (unlikely(ret != 0)) {
3706		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3707			       cmd_id + SVGA_3D_CMD_BASE, ret);
3708		return ret;
3709	}
3710
3711	return 0;
3712out_invalid:
3713	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3714		       cmd_id + SVGA_3D_CMD_BASE);
3715	return -EINVAL;
3716out_privileged:
3717	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3718		       cmd_id + SVGA_3D_CMD_BASE);
3719	return -EPERM;
3720out_old:
3721	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3722		       cmd_id + SVGA_3D_CMD_BASE);
3723	return -EINVAL;
3724out_new:
3725	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3726		       cmd_id + SVGA_3D_CMD_BASE);
3727	return -EINVAL;
3728}
3729
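/*
 * vmw_cmd_check_all - Run vmw_cmd_check() over an entire command batch,
 * advancing through the buffer until all of @size bytes have been verified.
 */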
3730static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3731			     struct vmw_sw_context *sw_context, void *buf,
3732			     uint32_t size)
3733{
3734	int32_t cur_size = size;
3735	int ret;
3736
3737	sw_context->buf_start = buf;
3738
3739	while (cur_size > 0) {
3740		size = cur_size;
3741		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3742		if (unlikely(ret != 0))
3743			return ret;
3744		buf = (void *)((unsigned long) buf + size);
3745		cur_size -= size;
3746	}
3747
3748	if (unlikely(cur_size != 0)) {
3749		VMW_DEBUG_USER("Command verifier out of sync.\n");
3750		return -EINVAL;
3751	}
3752
3753	return 0;
3754}
3755
3756static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3757{
3758	/* Memory is validation context memory, so no need to free it */
3759	INIT_LIST_HEAD(&sw_context->bo_relocations);
3760}
3761
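/*
 * vmw_apply_relocations - Patch buffer object addresses in the command batch.
 *
 * For each queued relocation, write the buffer object's final placement:
 * a framebuffer offset for VRAM, a GMR id for GMR memory, or a MOB id for
 * MOB memory.
 */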
3762static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3763{
3764	struct vmw_relocation *reloc;
3765	struct ttm_buffer_object *bo;
3766
3767	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3768		bo = &reloc->vbo->tbo;
3769		switch (bo->resource->mem_type) {
3770		case TTM_PL_VRAM:
3771			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
3772			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3773			break;
3774		case VMW_PL_GMR:
3775			reloc->location->gmrId = bo->resource->start;
3776			break;
3777		case VMW_PL_MOB:
3778			*reloc->mob_loc = bo->resource->start;
3779			break;
3780		default:
3781			BUG();
3782		}
3783	}
3784	vmw_free_relocations(sw_context);
3785}
3786
3787static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3788				 uint32_t size)
3789{
3790	if (likely(sw_context->cmd_bounce_size >= size))
3791		return 0;
3792
3793	if (sw_context->cmd_bounce_size == 0)
3794		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3795
3796	while (sw_context->cmd_bounce_size < size) {
3797		sw_context->cmd_bounce_size =
3798			PAGE_ALIGN(sw_context->cmd_bounce_size +
3799				   (sw_context->cmd_bounce_size >> 1));
3800	}
3801
3802	vfree(sw_context->cmd_bounce);
3803	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3804
3805	if (sw_context->cmd_bounce == NULL) {
3806		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3807		sw_context->cmd_bounce_size = 0;
3808		return -ENOMEM;
3809	}
3810
3811	return 0;
3812}
3813
3814/*
3815 * vmw_execbuf_fence_commands - create and submit a command stream fence
3816 *
3817 * Creates a fence object and submits a command stream marker.
3818 * If this fails for some reason, we sync the fifo and return NULL.
3819 * It is then safe to fence buffers with a NULL pointer.
3820 *
3821 * If @p_handle is non-NULL, @file_priv must also be non-NULL, and a
3822 * userspace handle is created in addition to the fence object.
3823 */
3824
3825int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3826			       struct vmw_private *dev_priv,
3827			       struct vmw_fence_obj **p_fence,
3828			       uint32_t *p_handle)
3829{
3830	uint32_t sequence;
3831	int ret;
3832	bool synced = false;
3833
3834	/* p_handle implies file_priv. */
3835	BUG_ON(p_handle != NULL && file_priv == NULL);
3836
3837	ret = vmw_cmd_send_fence(dev_priv, &sequence);
3838	if (unlikely(ret != 0)) {
3839		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3840		synced = true;
3841	}
3842
3843	if (p_handle != NULL)
3844		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3845					    sequence, p_fence, p_handle);
3846	else
3847		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3848
3849	if (unlikely(ret != 0 && !synced)) {
3850		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
3851					 false, VMW_FENCE_WAIT_TIMEOUT);
3852		*p_fence = NULL;
3853	}
3854
3855	return ret;
3856}
3857
3858/**
3859 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3860 *
3861 * @dev_priv: Pointer to a vmw_private struct.
3862 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3863 * @ret: Return value from fence object creation.
3864 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3865 * the information should be copied.
3866 * @fence: Pointer to the fence object.
3867 * @fence_handle: User-space fence handle.
3868 * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
3869 *
3870 * This function copies fence information to user-space. If copying fails, the
3871 * user-space struct drm_vmw_fence_rep::error member is hopefully left
3872 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
3873 * will hopefully be detected.
3874 *
3875 * Also if copying fails, user-space will be unable to signal the fence object
3876 * so we wait for it immediately, and then unreference the user-space reference.
3877 */
3878int
3879vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3880			    struct vmw_fpriv *vmw_fp, int ret,
3881			    struct drm_vmw_fence_rep __user *user_fence_rep,
3882			    struct vmw_fence_obj *fence, uint32_t fence_handle,
3883			    int32_t out_fence_fd)
3884{
3885	struct drm_vmw_fence_rep fence_rep;
3886
3887	if (user_fence_rep == NULL)
3888		return 0;
3889
3890	memset(&fence_rep, 0, sizeof(fence_rep));
3891
3892	fence_rep.error = ret;
3893	fence_rep.fd = out_fence_fd;
3894	if (ret == 0) {
3895		BUG_ON(fence == NULL);
3896
3897		fence_rep.handle = fence_handle;
3898		fence_rep.seqno = fence->base.seqno;
3899		vmw_update_seqno(dev_priv);
3900		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3901	}
3902
3903	/*
3904	 * copy_to_user errors will be detected by user space not seeing
3905	 * fence_rep::error filled in. Typically user-space would have pre-set
3906	 * that member to -EFAULT.
3907	 */
3908	ret = copy_to_user(user_fence_rep, &fence_rep,
3909			   sizeof(fence_rep));
3910
3911	/*
3912	 * User-space lost the fence object. We need to sync and unreference the
3913	 * handle.
3914	 */
3915	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3916		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
3917		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3918		(void) vmw_fence_obj_wait(fence, false, false,
3919					  VMW_FENCE_WAIT_TIMEOUT);
3920	}
3921
3922	return ret ? -EFAULT : 0;
3923}
3924
3925/**
3926 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3927 *
3928 * @dev_priv: Pointer to a device private structure.
3929 * @kernel_commands: Pointer to the unpatched command batch.
3930 * @command_size: Size of the unpatched command batch.
3931 * @sw_context: Structure holding the relocation lists.
3932 *
3933 * Side effects: If this function returns 0, then the command batch pointed to
3934 * by @kernel_commands will have been modified.
3935 */
3936static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3937				   void *kernel_commands, u32 command_size,
3938				   struct vmw_sw_context *sw_context)
3939{
3940	void *cmd;
3941
3942	if (sw_context->dx_ctx_node)
3943		cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
3944					  sw_context->dx_ctx_node->ctx->id);
3945	else
3946		cmd = VMW_CMD_RESERVE(dev_priv, command_size);
3947
3948	if (!cmd)
3949		return -ENOMEM;
3950
3951	vmw_apply_relocations(sw_context);
3952	memcpy(cmd, kernel_commands, command_size);
3953	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3954	vmw_resource_relocations_free(&sw_context->res_relocations);
3955	vmw_cmd_commit(dev_priv, command_size);
3956
3957	return 0;
3958}
3959
3960/**
3961 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3962 * command buffer manager.
3963 *
3964 * @dev_priv: Pointer to a device private structure.
3965 * @header: Opaque handle to the command buffer allocation.
3966 * @command_size: Size of the unpatched command batch.
3967 * @sw_context: Structure holding the relocation lists.
3968 *
3969 * Side effects: If this function returns 0, then the command buffer represented
3970 * by @header will have been modified.
3971 */
3972static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3973				     struct vmw_cmdbuf_header *header,
3974				     u32 command_size,
3975				     struct vmw_sw_context *sw_context)
3976{
3977	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3978		  SVGA3D_INVALID_ID);
3979	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3980				       header);
3981
3982	vmw_apply_relocations(sw_context);
3983	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3984	vmw_resource_relocations_free(&sw_context->res_relocations);
3985	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3986
3987	return 0;
3988}
3989
3990/**
3991 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3992 * submission using a command buffer.
3993 *
3994 * @dev_priv: Pointer to a device private structure.
3995 * @user_commands: User-space pointer to the commands to be submitted.
3996 * @command_size: Size of the unpatched command batch.
3997 * @header: Out parameter returning the opaque pointer to the command buffer.
3998 *
3999 * This function checks whether we can use the command buffer manager for
4000 * submission and if so, creates a command buffer of suitable size and copies
4001 * the user data into that buffer.
4002 *
4003 * On successful return, the function returns a pointer to the data in the
4004 * command buffer and *@header is set to non-NULL.
4005 *
4006 * @kernel_commands: If command buffers could not be used, the function will
4007 * return the value of @kernel_commands on function call. That value may be
4008 * NULL. In that case, the value of *@header will be set to NULL.
4009 *
4010 * If an error is encountered, the function will return a pointer error value.
4011 * If the function is interrupted by a signal while sleeping, it will return
4012 * -ERESTARTSYS cast to a pointer error value.
4013 */
4014static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
4015				void __user *user_commands,
4016				void *kernel_commands, u32 command_size,
4017				struct vmw_cmdbuf_header **header)
4018{
4019	size_t cmdbuf_size;
4020	int ret;
4021
4022	*header = NULL;
4023	if (command_size > SVGA_CB_MAX_SIZE) {
4024		VMW_DEBUG_USER("Command buffer is too large.\n");
4025		return ERR_PTR(-EINVAL);
4026	}
4027
4028	if (!dev_priv->cman || kernel_commands)
4029		return kernel_commands;
4030
4031	/* If possible, add a little space for fencing. */
4032	cmdbuf_size = command_size + 512;
4033	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
4034	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
4035					   header);
4036	if (IS_ERR(kernel_commands))
4037		return kernel_commands;
4038
4039	ret = copy_from_user(kernel_commands, user_commands, command_size);
4040	if (ret) {
4041		VMW_DEBUG_USER("Failed copying commands.\n");
4042		vmw_cmdbuf_header_free(*header);
4043		*header = NULL;
4044		return ERR_PTR(-EFAULT);
4045	}
4046
4047	return kernel_commands;
4048}
4049
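/*
 * vmw_execbuf_tie_context - Look up and validate the DX context for this
 * submission.
 *
 * If @handle is not SVGA3D_INVALID_ID, the context resource is looked up,
 * added to the validation list and recorded as sw_context->dx_ctx_node, and
 * the context's command buffer resource manager is made current.
 */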
4050static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4051				   struct vmw_sw_context *sw_context,
4052				   uint32_t handle)
4053{
4054	struct vmw_resource *res;
4055	int ret;
4056	unsigned int size;
4057
4058	if (handle == SVGA3D_INVALID_ID)
4059		return 0;
4060
4061	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
4062	ret = vmw_validation_preload_res(sw_context->ctx, size);
4063	if (ret)
4064		return ret;
4065
4066	ret = vmw_user_resource_lookup_handle
4067		(dev_priv, sw_context->fp->tfile, handle,
4068		 user_context_converter, &res);
4069	if (ret != 0) {
4070		VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n",
4071			       (unsigned int) handle);
4072		return ret;
4073	}
4074
4075	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
4076				      vmw_val_add_flag_none);
4077	if (unlikely(ret != 0)) {
4078		vmw_resource_unreference(&res);
4079		return ret;
4080	}
4081
4082	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
4083	sw_context->man = vmw_context_res_man(res);
4084
4085	vmw_resource_unreference(&res);
4086	return 0;
4087}
4088
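/*
 * vmw_execbuf_process - Validate and submit a user-space command batch.
 *
 * Copies the commands into kernel memory if needed, verifies them with
 * vmw_cmd_check_all(), reserves and validates all referenced resources and
 * buffer objects, submits the patched batch through the FIFO or the command
 * buffer manager, and finally fences the submission and reports the fence
 * back to user-space.
 */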
4089int vmw_execbuf_process(struct drm_file *file_priv,
4090			struct vmw_private *dev_priv,
4091			void __user *user_commands, void *kernel_commands,
4092			uint32_t command_size, uint64_t throttle_us,
4093			uint32_t dx_context_handle,
4094			struct drm_vmw_fence_rep __user *user_fence_rep,
4095			struct vmw_fence_obj **out_fence, uint32_t flags)
4096{
4097	struct vmw_sw_context *sw_context = &dev_priv->ctx;
4098	struct vmw_fence_obj *fence = NULL;
4099	struct vmw_cmdbuf_header *header;
4100	uint32_t handle = 0;
4101	int ret;
4102	int32_t out_fence_fd = -1;
4103	struct sync_file *sync_file = NULL;
4104	DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);
4105
4106	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4107		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
4108		if (out_fence_fd < 0) {
4109			VMW_DEBUG_USER("Failed to get a fence fd.\n");
4110			return out_fence_fd;
4111		}
4112	}
4113
4114	if (throttle_us) {
4115		VMW_DEBUG_USER("Throttling is no longer supported.\n");
4116	}
4117
4118	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4119					     kernel_commands, command_size,
4120					     &header);
4121	if (IS_ERR(kernel_commands)) {
4122		ret = PTR_ERR(kernel_commands);
4123		goto out_free_fence_fd;
4124	}
4125
4126	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4127	if (ret) {
4128		ret = -ERESTARTSYS;
4129		goto out_free_header;
4130	}
4131
4132	sw_context->kernel = false;
4133	if (kernel_commands == NULL) {
4134		ret = vmw_resize_cmd_bounce(sw_context, command_size);
4135		if (unlikely(ret != 0))
4136			goto out_unlock;
4137
4138		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
4139				     command_size);
4140		if (unlikely(ret != 0)) {
4141			ret = -EFAULT;
4142			VMW_DEBUG_USER("Failed copying commands.\n");
4143			goto out_unlock;
4144		}
4145
4146		kernel_commands = sw_context->cmd_bounce;
4147	} else if (!header) {
4148		sw_context->kernel = true;
4149	}
4150
4151	sw_context->filp = file_priv;
4152	sw_context->fp = vmw_fpriv(file_priv);
4153	INIT_LIST_HEAD(&sw_context->ctx_list);
4154	sw_context->cur_query_bo = dev_priv->pinned_bo;
4155	sw_context->last_query_ctx = NULL;
4156	sw_context->needs_post_query_barrier = false;
4157	sw_context->dx_ctx_node = NULL;
4158	sw_context->dx_query_mob = NULL;
4159	sw_context->dx_query_ctx = NULL;
4160	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4161	INIT_LIST_HEAD(&sw_context->res_relocations);
4162	INIT_LIST_HEAD(&sw_context->bo_relocations);
4163
4164	if (sw_context->staged_bindings)
4165		vmw_binding_state_reset(sw_context->staged_bindings);
4166
4167	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4168	sw_context->ctx = &val_ctx;
4169	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4170	if (unlikely(ret != 0))
4171		goto out_err_nores;
4172
4173	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4174				command_size);
4175	if (unlikely(ret != 0))
4176		goto out_err_nores;
4177
4178	ret = vmw_resources_reserve(sw_context);
4179	if (unlikely(ret != 0))
4180		goto out_err_nores;
4181
4182	ret = vmw_validation_bo_reserve(&val_ctx, true);
4183	if (unlikely(ret != 0))
4184		goto out_err_nores;
4185
4186	ret = vmw_validation_bo_validate(&val_ctx, true);
4187	if (unlikely(ret != 0))
4188		goto out_err;
4189
4190	ret = vmw_validation_res_validate(&val_ctx, true);
4191	if (unlikely(ret != 0))
4192		goto out_err;
4193
4194	vmw_validation_drop_ht(&val_ctx);
4195
4196	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4197	if (unlikely(ret != 0)) {
4198		ret = -ERESTARTSYS;
4199		goto out_err;
4200	}
4201
4202	if (dev_priv->has_mob) {
4203		ret = vmw_rebind_contexts(sw_context);
4204		if (unlikely(ret != 0))
4205			goto out_unlock_binding;
4206	}
4207
4208	if (!header) {
4209		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4210					      command_size, sw_context);
4211	} else {
4212		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4213						sw_context);
4214		header = NULL;
4215	}
4216	mutex_unlock(&dev_priv->binding_mutex);
4217	if (ret)
4218		goto out_err;
4219
4220	vmw_query_bo_switch_commit(dev_priv, sw_context);
4221	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
4222					 (user_fence_rep) ? &handle : NULL);
4223	/*
4224	 * This error is harmless, because if fence submission fails,
4225	 * vmw_execbuf_fence_commands() will sync. The error will be propagated
4226	 * to user-space in @user_fence_rep.
4227	 */
4228	if (ret != 0)
4229		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
4230
4231	vmw_execbuf_bindings_commit(sw_context, false);
4232	vmw_bind_dx_query_mob(sw_context);
4233	vmw_validation_res_unreserve(&val_ctx, false);
4234
4235	vmw_validation_bo_fence(sw_context->ctx, fence);
4236
4237	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4238		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
4239
4240	/*
4241	 * If anything fails here, give up trying to export the fence and do a
4242	 * sync since the user mode will not be able to sync the fence itself.
4243	 * This ensures we are still functionally correct.
4244	 */
4245	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4246
4247		sync_file = sync_file_create(&fence->base);
4248		if (!sync_file) {
4249			VMW_DEBUG_USER("Sync file create failed for fence\n");
4250			put_unused_fd(out_fence_fd);
4251			out_fence_fd = -1;
4252
4253			(void) vmw_fence_obj_wait(fence, false, false,
4254						  VMW_FENCE_WAIT_TIMEOUT);
4255		}
4256	}
4257
4258	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4259				    user_fence_rep, fence, handle, out_fence_fd);
4260
4261	if (sync_file) {
4262		if (ret) {
4263			/* usercopy of fence failed, put the file object */
4264			fput(sync_file->file);
4265			put_unused_fd(out_fence_fd);
4266		} else {
4267			/* Link the fence with the FD created earlier */
4268			fd_install(out_fence_fd, sync_file->file);
4269		}
4270	}
4271
4272	/* Don't unreference when handing fence out */
4273	if (unlikely(out_fence != NULL)) {
4274		*out_fence = fence;
4275		fence = NULL;
4276	} else if (likely(fence != NULL)) {
4277		vmw_fence_obj_unreference(&fence);
4278	}
4279
4280	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4281	mutex_unlock(&dev_priv->cmdbuf_mutex);
4282
4283	/*
4284	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4285	 * in resource destruction paths.
4286	 */
4287	vmw_validation_unref_lists(&val_ctx);
4288
4289	return ret;
4290
4291out_unlock_binding:
4292	mutex_unlock(&dev_priv->binding_mutex);
4293out_err:
4294	vmw_validation_bo_backoff(&val_ctx);
4295out_err_nores:
4296	vmw_execbuf_bindings_commit(sw_context, true);
4297	vmw_validation_res_unreserve(&val_ctx, true);
4298	vmw_resource_relocations_free(&sw_context->res_relocations);
4299	vmw_free_relocations(sw_context);
4300	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4301		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4302out_unlock:
4303	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4304	vmw_validation_drop_ht(&val_ctx);
4305	WARN_ON(!list_empty(&sw_context->ctx_list));
4306	mutex_unlock(&dev_priv->cmdbuf_mutex);
4307
4308	/*
4309	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4310	 * in resource destruction paths.
4311	 */
4312	vmw_validation_unref_lists(&val_ctx);
4313out_free_header:
4314	if (header)
4315		vmw_cmdbuf_header_free(header);
4316out_free_fence_fd:
4317	if (out_fence_fd >= 0)
4318		put_unused_fd(out_fence_fd);
4319
4320	return ret;
4321}
4322
4323/**
4324 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4325 *
4326 * @dev_priv: The device private structure.
4327 *
4328 * This function is called to idle the fifo and unpin the query buffer if the
4329 * normal way to do this hits an error, which should typically be extremely
4330 * rare.
4331 */
4332static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4333{
4334	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
4335
4336	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4337	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4338	if (dev_priv->dummy_query_bo_pinned) {
4339		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4340		dev_priv->dummy_query_bo_pinned = false;
4341	}
4342}
4343
4344
4345/**
4346 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
4347 * bo.
4348 *
4349 * @dev_priv: The device private structure.
4350 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
4351 * query barrier that flushes all queries touching the current buffer pointed to
4352 * by @dev_priv->pinned_bo
4353 *
4354 * This function should be used to unpin the pinned query bo, or as a query
4355 * barrier when we need to make sure that all queries have finished before the
4356 * next fifo command. (For example on hardware context destructions where the
4357 * hardware may otherwise leak unfinished queries).
4358 *
4359 * This function does not return any failure codes, but makes an attempt to do
4360 * safe unpinning in case of errors.
4361 *
4362 * The function will synchronize on the previous query barrier, and will thus
4363 * not finish until that barrier has executed.
4364 *
4365 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
4366 * calling this function.
4367 */
4368void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4369				     struct vmw_fence_obj *fence)
4370{
4371	int ret = 0;
4372	struct vmw_fence_obj *lfence = NULL;
4373	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
4374
4375	if (dev_priv->pinned_bo == NULL)
4376		goto out_unlock;
4377
4378	vmw_bo_placement_set(dev_priv->pinned_bo,
4379			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
4380			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
4381	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo);
4382	if (ret)
4383		goto out_no_reserve;
4384
4385	vmw_bo_placement_set(dev_priv->dummy_query_bo,
4386			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
4387			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
4388	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo);
4389	if (ret)
4390		goto out_no_reserve;
4391
4392	ret = vmw_validation_bo_reserve(&val_ctx, false);
4393	if (ret)
4394		goto out_no_reserve;
4395
4396	if (dev_priv->query_cid_valid) {
4397		BUG_ON(fence != NULL);
4398		ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
4399		if (ret)
4400			goto out_no_emit;
4401		dev_priv->query_cid_valid = false;
4402	}
4403
4404	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4405	if (dev_priv->dummy_query_bo_pinned) {
4406		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4407		dev_priv->dummy_query_bo_pinned = false;
4408	}
4409	if (fence == NULL) {
4410		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4411						  NULL);
4412		fence = lfence;
4413	}
4414	vmw_validation_bo_fence(&val_ctx, fence);
4415	if (lfence != NULL)
4416		vmw_fence_obj_unreference(&lfence);
4417
4418	vmw_validation_unref_lists(&val_ctx);
4419	vmw_bo_unreference(&dev_priv->pinned_bo);
4420
4421out_unlock:
4422	return;
4423out_no_emit:
4424	vmw_validation_bo_backoff(&val_ctx);
4425out_no_reserve:
4426	vmw_validation_unref_lists(&val_ctx);
4427	vmw_execbuf_unpin_panic(dev_priv);
4428	vmw_bo_unreference(&dev_priv->pinned_bo);
4429}
4430
4431/**
4432 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
4433 *
4434 * @dev_priv: The device private structure.
4435 *
4436 * This function should be used to unpin the pinned query bo, or as a query
4437 * barrier when we need to make sure that all queries have finished before the
4438 * next fifo command. (For example on hardware context destructions where the
4439 * hardware may otherwise leak unfinished queries).
4440 *
4441 * This function does not return any failure codes, but makes an attempt to do
4442 * safe unpinning in case of errors.
4443 *
4444 * The function will synchronize on the previous query barrier, and will thus
4445 * not finish until that barrier has executed.
4446 */
4447void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4448{
4449	mutex_lock(&dev_priv->cmdbuf_mutex);
4450	if (dev_priv->query_cid_valid)
4451		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4452	mutex_unlock(&dev_priv->cmdbuf_mutex);
4453}
4454
4455int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
4456		      struct drm_file *file_priv)
4457{
4458	struct vmw_private *dev_priv = vmw_priv(dev);
4459	struct drm_vmw_execbuf_arg *arg = data;
4460	int ret;
4461	struct dma_fence *in_fence = NULL;
4462
4463	MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
4464	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);
4465
4466	/*
4467	 * Extend the ioctl argument while maintaining backwards compatibility:
4468	 * We take different code paths depending on the value of arg->version.
4469	 *
4470	 * Note: The ioctl argument is extended and zeropadded by core DRM.
4471	 */
4472	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
4473		     arg->version == 0)) {
4474		VMW_DEBUG_USER("Incorrect execbuf version.\n");
4475		ret = -EINVAL;
4476		goto mksstats_out;
4477	}
4478
4479	switch (arg->version) {
4480	case 1:
4481		/* For v1, core DRM has extended + zero-padded the data */
4482		arg->context_handle = (uint32_t) -1;
4483		break;
4484	case 2:
4485	default:
4486		/* For v2 and later core DRM would have correctly copied it */
4487		break;
4488	}
4489
4490	/* If imported a fence FD from elsewhere, then wait on it */
4491	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4492		in_fence = sync_file_get_fence(arg->imported_fence_fd);
4493
4494		if (!in_fence) {
4495			VMW_DEBUG_USER("Cannot get imported fence\n");
4496			ret = -EINVAL;
4497			goto mksstats_out;
4498		}
4499
4500		ret = dma_fence_wait(in_fence, true);
4501		if (ret)
4502			goto out;
4503	}
4504
4505	ret = vmw_execbuf_process(file_priv, dev_priv,
4506				  (void __user *)(unsigned long)arg->commands,
4507				  NULL, arg->command_size, arg->throttle_us,
4508				  arg->context_handle,
4509				  (void __user *)(unsigned long)arg->fence_rep,
4510				  NULL, arg->flags);
4511
4512	if (unlikely(ret != 0))
4513		goto out;
4514
4515	vmw_kms_cursor_post_execbuf(dev_priv);
4516
4517out:
4518	if (in_fence)
4519		dma_fence_put(in_fence);
4520
4521mksstats_out:
4522	MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
4523	return ret;
4524}
v3.15
 
   1/**************************************************************************
   2 *
   3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
 
 
 
 
 
  27
  28#include "vmwgfx_drv.h"
  29#include "vmwgfx_reg.h"
  30#include <drm/ttm/ttm_bo_api.h>
  31#include <drm/ttm/ttm_placement.h>
  32
  33#define VMW_RES_HT_ORDER 12
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  34
  35/**
  36 * struct vmw_resource_relocation - Relocation info for resources
  37 *
  38 * @head: List head for the software context's relocation list.
  39 * @res: Non-ref-counted pointer to the resource.
  40 * @offset: Offset of 4 byte entries into the command buffer where the
  41 * id that needs fixup is located.
 
  42 */
  43struct vmw_resource_relocation {
  44	struct list_head head;
  45	const struct vmw_resource *res;
  46	unsigned long offset;
 
  47};
  48
  49/**
  50 * struct vmw_resource_val_node - Validation info for resources
  51 *
  52 * @head: List head for the software context's resource list.
  53 * @hash: Hash entry for quick resouce to val_node lookup.
  54 * @res: Ref-counted pointer to the resource.
  55 * @switch_backup: Boolean whether to switch backup buffer on unreserve.
  56 * @new_backup: Refcounted pointer to the new backup buffer.
  57 * @staged_bindings: If @res is a context, tracks bindings set up during
  58 * the command batch. Otherwise NULL.
  59 * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
  60 * @first_usage: Set to true the first time the resource is referenced in
  61 * the command stream.
  62 * @no_buffer_needed: Resources do not need to allocate buffer backup on
  63 * reservation. The command stream will provide one.
  64 */
  65struct vmw_resource_val_node {
  66	struct list_head head;
  67	struct drm_hash_item hash;
  68	struct vmw_resource *res;
  69	struct vmw_dma_buffer *new_backup;
  70	struct vmw_ctx_binding_state *staged_bindings;
  71	unsigned long new_backup_offset;
  72	bool first_usage;
  73	bool no_buffer_needed;
  74};
  75
  76/**
  77 * struct vmw_cmd_entry - Describe a command for the verifier
  78 *
 
  79 * @user_allow: Whether allowed from the execbuf ioctl.
  80 * @gb_disable: Whether disabled if guest-backed objects are available.
  81 * @gb_enable: Whether enabled iff guest-backed objects are available.
 
  82 */
  83struct vmw_cmd_entry {
  84	int (*func) (struct vmw_private *, struct vmw_sw_context *,
  85		     SVGA3dCmdHeader *);
  86	bool user_allow;
  87	bool gb_disable;
  88	bool gb_enable;
 
  89};
  90
  91#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
  92	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
  93				       (_gb_disable), (_gb_enable)}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  94
  95/**
  96 * vmw_resource_unreserve - unreserve resources previously reserved for
  97 * command submission.
  98 *
  99 * @list_head: list of resources to unreserve.
 100 * @backoff: Whether command submission failed.
 
 101 */
 102static void vmw_resource_list_unreserve(struct list_head *list,
 103					bool backoff)
 104{
 105	struct vmw_resource_val_node *val;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 106
 107	list_for_each_entry(val, list, head) {
 108		struct vmw_resource *res = val->res;
 109		struct vmw_dma_buffer *new_backup =
 110			backoff ? NULL : val->new_backup;
 
 
 
 
 111
 112		/*
 113		 * Transfer staged context bindings to the
 114		 * persistent context binding tracker.
 115		 */
 116		if (unlikely(val->staged_bindings)) {
 117			if (!backoff) {
 118				vmw_context_binding_state_transfer
 119					(val->res, val->staged_bindings);
 120			}
 121			kfree(val->staged_bindings);
 122			val->staged_bindings = NULL;
 123		}
 124		vmw_resource_unreserve(res, new_backup,
 125			val->new_backup_offset);
 126		vmw_dmabuf_unreference(&val->new_backup);
 127	}
 
 
 
 
 
 
 
 
 
 128}
 129
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 130
 131/**
 132 * vmw_resource_val_add - Add a resource to the software context's
 133 * resource list if it's not already on it.
 134 *
 135 * @sw_context: Pointer to the software context.
 136 * @res: Pointer to the resource.
 137 * @p_node On successful return points to a valid pointer to a
 138 * struct vmw_resource_val_node, if non-NULL on entry.
 139 */
 140static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
 141				struct vmw_resource *res,
 142				struct vmw_resource_val_node **p_node)
 143{
 144	struct vmw_resource_val_node *node;
 145	struct drm_hash_item *hash;
 146	int ret;
 147
 148	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
 149				    &hash) == 0)) {
 150		node = container_of(hash, struct vmw_resource_val_node, hash);
 151		node->first_usage = false;
 152		if (unlikely(p_node != NULL))
 153			*p_node = node;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 154		return 0;
 155	}
 156
 157	node = kzalloc(sizeof(*node), GFP_KERNEL);
 158	if (unlikely(node == NULL)) {
 159		DRM_ERROR("Failed to allocate a resource validation "
 160			  "entry.\n");
 161		return -ENOMEM;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 162	}
 163
 164	node->hash.key = (unsigned long) res;
 165	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
 166	if (unlikely(ret != 0)) {
 167		DRM_ERROR("Failed to initialize a resource validation "
 168			  "entry.\n");
 169		kfree(node);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 170		return ret;
 171	}
 172	list_add_tail(&node->head, &sw_context->resource_list);
 173	node->res = vmw_resource_reference(res);
 174	node->first_usage = true;
 175
 176	if (unlikely(p_node != NULL))
 177		*p_node = node;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 178
 179	return 0;
 180}
 181
 182/**
 183 * vmw_resource_context_res_add - Put resources previously bound to a context on
 184 * the validation list
 185 *
 186 * @dev_priv: Pointer to a device private structure
 187 * @sw_context: Pointer to a software context used for this command submission
 188 * @ctx: Pointer to the context resource
 189 *
 190 * This function puts all resources that were previously bound to @ctx on
 191 * the resource validation list. This is part of the context state reemission
 192 */
 193static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 194					struct vmw_sw_context *sw_context,
 195					struct vmw_resource *ctx)
 196{
 197	struct list_head *binding_list;
 198	struct vmw_ctx_binding *entry;
 199	int ret = 0;
 200	struct vmw_resource *res;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 201
 
 202	mutex_lock(&dev_priv->binding_mutex);
 203	binding_list = vmw_context_binding_list(ctx);
 204
 205	list_for_each_entry(entry, binding_list, ctx_list) {
 206		res = vmw_resource_reference_unless_doomed(entry->bi.res);
 207		if (unlikely(res == NULL))
 208			continue;
 209
 210		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
 211		vmw_resource_unreference(&res);
 212		if (unlikely(ret != 0))
 213			break;
 214	}
 215
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 216	mutex_unlock(&dev_priv->binding_mutex);
 217	return ret;
 218}
 219
 220/**
 221 * vmw_resource_relocation_add - Add a relocation to the relocation list
 222 *
 223 * @list: Pointer to head of relocation list.
 224 * @res: The resource.
 225 * @offset: Offset into the command buffer currently being parsed where the
 226 * id that needs fixup is located. Granularity is 4 bytes.
 
 227 */
 228static int vmw_resource_relocation_add(struct list_head *list,
 229				       const struct vmw_resource *res,
 230				       unsigned long offset)
 
 
 231{
 232	struct vmw_resource_relocation *rel;
 233
 234	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
 235	if (unlikely(rel == NULL)) {
 236		DRM_ERROR("Failed to allocate a resource relocation.\n");
 237		return -ENOMEM;
 238	}
 239
 240	rel->res = res;
 241	rel->offset = offset;
 242	list_add_tail(&rel->head, list);
 
 243
 244	return 0;
 245}
 246
 247/**
 248 * vmw_resource_relocations_free - Free all relocations on a list
 249 *
 250 * @list: Pointer to the head of the relocation list.
 251 */
 252static void vmw_resource_relocations_free(struct list_head *list)
 253{
 254	struct vmw_resource_relocation *rel, *n;
 255
 256	list_for_each_entry_safe(rel, n, list, head) {
 257		list_del(&rel->head);
 258		kfree(rel);
 259	}
 260}
 261
 262/**
 263 * vmw_resource_relocations_apply - Apply all relocations on a list
 264 *
 265 * @cb: Pointer to the start of the command buffer bein patch. This need
 266 * not be the same buffer as the one being parsed when the relocation
 267 * list was built, but the contents must be the same modulo the
 268 * resource ids.
 269 * @list: Pointer to the head of the relocation list.
 270 */
 271static void vmw_resource_relocations_apply(uint32_t *cb,
 272					   struct list_head *list)
 273{
 274	struct vmw_resource_relocation *rel;
 275
 
 
 
 
 276	list_for_each_entry(rel, list, head) {
 277		if (likely(rel->res != NULL))
 278			cb[rel->offset] = rel->res->id;
 279		else
 280			cb[rel->offset] = SVGA_3D_CMD_NOP;
 
 
 
 
 
 
 
 
 
 281	}
 282}
 283
 284static int vmw_cmd_invalid(struct vmw_private *dev_priv,
 285			   struct vmw_sw_context *sw_context,
 286			   SVGA3dCmdHeader *header)
 287{
 288	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
 289}
 290
 291static int vmw_cmd_ok(struct vmw_private *dev_priv,
 292		      struct vmw_sw_context *sw_context,
 293		      SVGA3dCmdHeader *header)
 294{
 295	return 0;
 296}
 297
 298/**
 299 * vmw_bo_to_validate_list - add a bo to a validate list
 300 *
 301 * @sw_context: The software context used for this command submission batch.
 302 * @bo: The buffer object to add.
 303 * @validate_as_mob: Validate this buffer as a MOB.
 304 * @p_val_node: If non-NULL Will be updated with the validate node number
 305 * on return.
 306 *
 307 * Returns -EINVAL if the limit of number of buffer objects per command
 308 * submission is reached.
 309 */
 310static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 311				   struct ttm_buffer_object *bo,
 312				   bool validate_as_mob,
 313				   uint32_t *p_val_node)
 314{
 315	uint32_t val_node;
 316	struct vmw_validate_buffer *vval_buf;
 317	struct ttm_validate_buffer *val_buf;
 318	struct drm_hash_item *hash;
 319	int ret;
 320
 321	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
 322				    &hash) == 0)) {
 323		vval_buf = container_of(hash, struct vmw_validate_buffer,
 324					hash);
 325		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
 326			DRM_ERROR("Inconsistent buffer usage.\n");
 327			return -EINVAL;
 328		}
 329		val_buf = &vval_buf->base;
 330		val_node = vval_buf - sw_context->val_bufs;
 331	} else {
 332		val_node = sw_context->cur_val_buf;
 333		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
 334			DRM_ERROR("Max number of DMA buffers per submission "
 335				  "exceeded.\n");
 336			return -EINVAL;
 337		}
 338		vval_buf = &sw_context->val_bufs[val_node];
 339		vval_buf->hash.key = (unsigned long) bo;
 340		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
 341		if (unlikely(ret != 0)) {
 342			DRM_ERROR("Failed to initialize a buffer validation "
 343				  "entry.\n");
 344			return ret;
 345		}
 346		++sw_context->cur_val_buf;
 347		val_buf = &vval_buf->base;
 348		val_buf->bo = ttm_bo_reference(bo);
 349		val_buf->reserved = false;
 350		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 351		vval_buf->validate_as_mob = validate_as_mob;
 352	}
 353
 354	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
 355
 356	if (p_val_node)
 357		*p_val_node = val_node;
 358
 359	return 0;
 360}
 361
 362/**
 363 * vmw_resources_reserve - Reserve all resources on the sw_context's
 364 * resource list.
 365 *
 366 * @sw_context: Pointer to the software context.
 367 *
 368 * Note that since VMware's command submission is currently protected by
 369 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 370 * since only a single thread at a time will attempt this.
 371 */
 372static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 373{
 374	struct vmw_resource_val_node *val;
 375	int ret;
 376
 377	list_for_each_entry(val, &sw_context->resource_list, head) {
 378		struct vmw_resource *res = val->res;
 379
 380		ret = vmw_resource_reserve(res, val->no_buffer_needed);
 381		if (unlikely(ret != 0))
 382			return ret;
 383
 384		if (res->backup) {
 385			struct ttm_buffer_object *bo = &res->backup->base;
 386
 387			ret = vmw_bo_to_validate_list
 388				(sw_context, bo,
 389				 vmw_resource_needs_backup(res), NULL);
 390
 391			if (unlikely(ret != 0))
 392				return ret;
 393		}
 394	}
 395	return 0;
 396}
 397
 398/**
 399 * vmw_resources_validate - Validate all resources on the sw_context's
 400 * resource list.
 401 *
 402 * @sw_context: Pointer to the software context.
 403 *
 404 * Before this function is called, all resource backup buffers must have
 405 * been validated.
 406 */
 407static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 408{
 409	struct vmw_resource_val_node *val;
 410	int ret;
 411
 412	list_for_each_entry(val, &sw_context->resource_list, head) {
 413		struct vmw_resource *res = val->res;
 414
 415		ret = vmw_resource_validate(res);
 416		if (unlikely(ret != 0)) {
 417			if (ret != -ERESTARTSYS)
 418				DRM_ERROR("Failed to validate resource.\n");
 419			return ret;
 420		}
 421	}
 422	return 0;
 423}
 424
 425/**
 426 * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
 427 * on the resource validate list unless it's already there.
 428 *
 429 * @dev_priv: Pointer to a device private structure.
 430 * @sw_context: Pointer to the software context.
 431 * @res_type: Resource type.
 432 * @converter: User-space visible type-specific information.
 433 * @id: user-space resource id handle.
 434 * @id_loc: Pointer to the location in the command buffer currently being
 435 * parsed from where the user-space resource id handle is located.
 436 * @p_val: Pointer to pointer to resource validation node. Populated
 437 * on exit.
 438 */
 439static int
 440vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
 441			 struct vmw_sw_context *sw_context,
 442			 enum vmw_res_type res_type,
 443			 const struct vmw_user_resource_conv *converter,
 444			 uint32_t id,
 445			 uint32_t *id_loc,
 446			 struct vmw_resource_val_node **p_val)
 447{
 448	struct vmw_res_cache_entry *rcache =
 449		&sw_context->res_cache[res_type];
 450	struct vmw_resource *res;
 451	struct vmw_resource_val_node *node;
 452	int ret;
 453
 454	if (id == SVGA3D_INVALID_ID) {
 455		if (p_val)
 456			*p_val = NULL;
 457		if (res_type == vmw_res_context) {
 458			DRM_ERROR("Illegal context invalid id.\n");
 459			return -EINVAL;
 460		}
 461		return 0;
 462	}
 463
 464	/*
 465	 * Fastpath in case of repeated commands referencing the same
 466	 * resource
 467	 */
 468
 469	if (likely(rcache->valid && id == rcache->handle)) {
 470		const struct vmw_resource *res = rcache->res;
 471
 472		rcache->node->first_usage = false;
 473		if (p_val)
 474			*p_val = rcache->node;
 475
 476		return vmw_resource_relocation_add
 477			(&sw_context->res_relocations, res,
 478			 id_loc - sw_context->buf_start);
 479	}
 480
 481	ret = vmw_user_resource_lookup_handle(dev_priv,
 482					      sw_context->fp->tfile,
 483					      id,
 484					      converter,
 485					      &res);
 486	if (unlikely(ret != 0)) {
 487		DRM_ERROR("Could not find or use resource 0x%08x.\n",
 488			  (unsigned) id);
 489		dump_stack();
 490		return ret;
 491	}
 492
 493	rcache->valid = true;
 494	rcache->res = res;
 495	rcache->handle = id;
 496
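	/*
	 * Record where the user-space handle sits in the command stream so
	 * that vmw_resource_relocations_apply() can later overwrite it with
	 * the device resource id.
	 */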
 497	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
 498					  res,
 499					  id_loc - sw_context->buf_start);
 500	if (unlikely(ret != 0))
 501		goto out_no_reloc;
 502
 503	ret = vmw_resource_val_add(sw_context, res, &node);
 504	if (unlikely(ret != 0))
 505		goto out_no_reloc;
 506
 507	rcache->node = node;
 508	if (p_val)
 509		*p_val = node;
 510
 511	if (dev_priv->has_mob && node->first_usage &&
 512	    res_type == vmw_res_context) {
 513		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
 514		if (unlikely(ret != 0))
 515			goto out_no_reloc;
 516		node->staged_bindings =
 517			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
 518		if (node->staged_bindings == NULL) {
 519			DRM_ERROR("Failed to allocate context binding "
 520				  "information.\n");
			ret = -ENOMEM;
 521			goto out_no_reloc;
 522		}
 523		INIT_LIST_HEAD(&node->staged_bindings->list);
 524	}
 525
 526	vmw_resource_unreference(&res);
 527	return 0;
 528
 529out_no_reloc:
 530	BUG_ON(sw_context->error_resource != NULL);
 531	sw_context->error_resource = res;
 532
 533	return ret;
 534}
 535
 536/**
 537 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 538 * on the resource validate list unless it's already there.
 539 *
 540 * @dev_priv: Pointer to a device private structure.
 541 * @sw_context: Pointer to the software context.
 542 * @res_type: Resource type.
 543 * @converter: User-space visible type-specific information.
 544 * @id_loc: Pointer to the location in the command buffer currently being
 545 * parsed from where the user-space resource id handle is located.
 546 * @p_val: Pointer to pointer to resource validation node. Populated
 547 * on exit.
 548 */
 549static int
 550vmw_cmd_res_check(struct vmw_private *dev_priv,
 551		  struct vmw_sw_context *sw_context,
 552		  enum vmw_res_type res_type,
 553		  const struct vmw_user_resource_conv *converter,
 554		  uint32_t *id_loc,
 555		  struct vmw_resource_val_node **p_val)
 556{
 557	return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
 558					converter, *id_loc, id_loc, p_val);
 559}
 560
 561/**
 562 * vmw_rebind_contexts - Rebind all resources previously bound to
 563 * referenced contexts.
 564 *
 565 * @sw_context: Pointer to the software context.
 566 *
 567 * Rebind context binding points that have been scrubbed because of eviction.
 568 */
 569static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
 570{
 571	struct vmw_resource_val_node *val;
 572	int ret;
 573
 574	list_for_each_entry(val, &sw_context->resource_list, head) {
 575		if (likely(!val->staged_bindings))
 576			continue;
 577
 578		ret = vmw_context_rebind_all(val->res);
 579		if (unlikely(ret != 0)) {
 580			if (ret != -ERESTARTSYS)
 581				DRM_ERROR("Failed to rebind context.\n");
 582			return ret;
 583		}
 584	}
 585
 586	return 0;
 587}
 588
 589/**
 590 * vmw_cmd_cid_check - Check a command header for valid context information.
 591 *
 592 * @dev_priv: Pointer to a device private structure.
 593 * @sw_context: Pointer to the software context.
 594 * @header: A command header with an embedded user-space context handle.
 595 *
 596 * Convenience function: Call vmw_cmd_res_check with the user-space context
 597 * handle embedded in @header.
 598 */
 599static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 600			     struct vmw_sw_context *sw_context,
 601			     SVGA3dCmdHeader *header)
 602{
 603	struct vmw_cid_cmd {
 604		SVGA3dCmdHeader header;
 605		uint32_t cid;
 606	} *cmd;
 607
 608	cmd = container_of(header, struct vmw_cid_cmd, header);
 609	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 610				 user_context_converter, &cmd->cid, NULL);
 611}
 612
 613static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 614					   struct vmw_sw_context *sw_context,
 615					   SVGA3dCmdHeader *header)
 616{
 617	struct vmw_sid_cmd {
 618		SVGA3dCmdHeader header;
 619		SVGA3dCmdSetRenderTarget body;
 620	} *cmd;
 621	struct vmw_resource_val_node *ctx_node;
 622	struct vmw_resource_val_node *res_node;
 623	int ret;
 624
 625	cmd = container_of(header, struct vmw_sid_cmd, header);
 626
 627	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 628				user_context_converter, &cmd->body.cid,
 629				&ctx_node);
 630	if (unlikely(ret != 0))
 631		return ret;
 632
 633	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 634				user_surface_converter,
 635				&cmd->body.target.sid, &res_node);
 636	if (unlikely(ret != 0))
 637		return ret;
 638
 639	if (dev_priv->has_mob) {
 640		struct vmw_ctx_bindinfo bi;
 641
 642		bi.ctx = ctx_node->res;
 643		bi.res = res_node ? res_node->res : NULL;
 644		bi.bt = vmw_ctx_binding_rt;
 645		bi.i1.rt_type = cmd->body.type;
 646		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
 647	}
 648
 649	return 0;
 650}
 651
 652static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
 653				      struct vmw_sw_context *sw_context,
 654				      SVGA3dCmdHeader *header)
 655{
 656	struct vmw_sid_cmd {
 657		SVGA3dCmdHeader header;
 658		SVGA3dCmdSurfaceCopy body;
 659	} *cmd;
 660	int ret;
 661
 662	cmd = container_of(header, struct vmw_sid_cmd, header);
 663	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 664				user_surface_converter,
 665				&cmd->body.src.sid, NULL);
 666	if (unlikely(ret != 0))
 667		return ret;
 668	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 669				 user_surface_converter,
 670				 &cmd->body.dest.sid, NULL);
 671}
 672
 673static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
 674				     struct vmw_sw_context *sw_context,
 675				     SVGA3dCmdHeader *header)
 676{
 677	struct vmw_sid_cmd {
 678		SVGA3dCmdHeader header;
 679		SVGA3dCmdSurfaceStretchBlt body;
 680	} *cmd;
 681	int ret;
 682
 683	cmd = container_of(header, struct vmw_sid_cmd, header);
 684	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 685				user_surface_converter,
 686				&cmd->body.src.sid, NULL);
 687	if (unlikely(ret != 0))
 688		return ret;
 689	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 690				 user_surface_converter,
 691				 &cmd->body.dest.sid, NULL);
 692}
 693
 694static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
 695					 struct vmw_sw_context *sw_context,
 696					 SVGA3dCmdHeader *header)
 697{
 698	struct vmw_sid_cmd {
 699		SVGA3dCmdHeader header;
 700		SVGA3dCmdBlitSurfaceToScreen body;
 701	} *cmd;
 702
 703	cmd = container_of(header, struct vmw_sid_cmd, header);
 704
 705	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 706				 user_surface_converter,
 707				 &cmd->body.srcImage.sid, NULL);
 708}
 709
 710static int vmw_cmd_present_check(struct vmw_private *dev_priv,
 711				 struct vmw_sw_context *sw_context,
 712				 SVGA3dCmdHeader *header)
 713{
 714	struct vmw_sid_cmd {
 715		SVGA3dCmdHeader header;
 716		SVGA3dCmdPresent body;
 717	} *cmd;
 718
 719
 720	cmd = container_of(header, struct vmw_sid_cmd, header);
 721
 722	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 723				 user_surface_converter, &cmd->body.sid,
 724				 NULL);
 725}
 726
 727/**
 728 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 729 *
 730 * @dev_priv: The device private structure.
 731 * @new_query_bo: The new buffer holding query results.
 732 * @sw_context: The software context used for this command submission.
 733 *
 734 * This function checks whether @new_query_bo is suitable for holding
 735 * query results, and whether another buffer is currently pinned for query
 736 * results. If so, the function prepares the state of @sw_context for
 737 * switching pinned buffers after successful submission of the current
 738 * command batch.
 739 */
 740static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 741				       struct ttm_buffer_object *new_query_bo,
 742				       struct vmw_sw_context *sw_context)
 743{
 744	struct vmw_res_cache_entry *ctx_entry =
 745		&sw_context->res_cache[vmw_res_context];
 746	int ret;
 747
 748	BUG_ON(!ctx_entry->valid);
 749	sw_context->last_query_ctx = ctx_entry->res;
 750
 751	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 752
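		/*
		 * Switching query buffers: add the old buffer (if any) and the
		 * dummy query buffer to the validation list so that both are
		 * fenced with this submission before the switch is committed.
		 */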
 753		if (unlikely(new_query_bo->num_pages > 4)) {
 754			DRM_ERROR("Query buffer too large.\n");
 755			return -EINVAL;
 756		}
 757
 758		if (unlikely(sw_context->cur_query_bo != NULL)) {
 759			sw_context->needs_post_query_barrier = true;
 760			ret = vmw_bo_to_validate_list(sw_context,
 761						      sw_context->cur_query_bo,
 762						      dev_priv->has_mob, NULL);
 763			if (unlikely(ret != 0))
 764				return ret;
 765		}
 766		sw_context->cur_query_bo = new_query_bo;
 767
 768		ret = vmw_bo_to_validate_list(sw_context,
 769					      dev_priv->dummy_query_bo,
 770					      dev_priv->has_mob, NULL);
 771		if (unlikely(ret != 0))
 772			return ret;
 773
 774	}
 775
 776	return 0;
 777}
 778
 779
 780/**
 781 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 782 *
 783 * @dev_priv: The device private structure.
 784 * @sw_context: The software context used for this command submission batch.
 785 *
 786 * This function will check if we're switching query buffers, and will then
 787 * issue a dummy occlusion query wait used as a query barrier. When the fence
 788 * object following that query wait has signaled, we are sure that all
 789 * preceding queries have finished, and the old query buffer can be unpinned.
 790 * However, since both the new query buffer and the old one are fenced with
 791 * that fence, we can do an asynchronous unpin now, and be sure that the
 792 * old query buffer won't be moved until the fence has signaled.
 793 *
 794 * As mentioned above, both the new and old query buffers need to be fenced
 795 * using a sequence emitted *after* calling this function.
 796 */
 797static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 798				     struct vmw_sw_context *sw_context)
 799{
 800	/*
 801	 * The validate list should still hold references to all
 802	 * contexts here.
 803	 */
 804
 805	if (sw_context->needs_post_query_barrier) {
 806		struct vmw_res_cache_entry *ctx_entry =
 807			&sw_context->res_cache[vmw_res_context];
 808		struct vmw_resource *ctx;
 809		int ret;
 810
 811		BUG_ON(!ctx_entry->valid);
 812		ctx = ctx_entry->res;
 813
 814		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
 815
 816		if (unlikely(ret != 0))
 817			DRM_ERROR("Out of fifo space for dummy query.\n");
 818	}
 819
 820	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
 821		if (dev_priv->pinned_bo) {
 822			vmw_bo_pin(dev_priv->pinned_bo, false);
 823			ttm_bo_unref(&dev_priv->pinned_bo);
 824		}
 825
 826		if (!sw_context->needs_post_query_barrier) {
 827			vmw_bo_pin(sw_context->cur_query_bo, true);
 828
 829			/*
 830			 * We pin also the dummy_query_bo buffer so that we
 831			 * don't need to validate it when emitting
 832			 * dummy queries in context destroy paths.
 833			 */
 834
 835			vmw_bo_pin(dev_priv->dummy_query_bo, true);
 836			dev_priv->dummy_query_bo_pinned = true;
 837
 838			BUG_ON(sw_context->last_query_ctx == NULL);
 839			dev_priv->query_cid = sw_context->last_query_ctx->id;
 840			dev_priv->query_cid_valid = true;
 841			dev_priv->pinned_bo =
 842				ttm_bo_reference(sw_context->cur_query_bo);
 843		}
 844	}
 845}
 846
 847/**
 848 * vmw_translate_mob_pointer - Prepare to translate a user-space buffer
 849 * handle to a MOB id.
 850 *
 851 * @dev_priv: Pointer to a device private structure.
 852 * @sw_context: The software context used for this command batch validation.
 853 * @id: Pointer to the user-space handle to be translated.
 854 * @vmw_bo_p: Points to a location that, on successful return, will carry
 855 * a reference-counted pointer to the DMA buffer identified by the
 856 * user-space handle in @id.
 857 *
 858 * This function saves information needed to translate a user-space buffer
 859 * handle to a MOB id. The translation does not take place immediately, but
 860 * during a call to vmw_apply_relocations(). This function builds a relocation
 861 * list and a list of buffers to validate. The former needs to be freed using
 862 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 863 * needs to be freed using vmw_clear_validations().
 864 */
 865static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 866				 struct vmw_sw_context *sw_context,
 867				 SVGAMobId *id,
 868				 struct vmw_dma_buffer **vmw_bo_p)
 869{
 870	struct vmw_dma_buffer *vmw_bo = NULL;
 871	struct ttm_buffer_object *bo;
 872	uint32_t handle = *id;
 873	struct vmw_relocation *reloc;
 874	int ret;
 875
 876	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 877	if (unlikely(ret != 0)) {
 878		DRM_ERROR("Could not find or use MOB buffer.\n");
 879		return -EINVAL;
 880	}
 881	bo = &vmw_bo->base;
 882
 883	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 884		DRM_ERROR("Max number relocations per submission"
 885			  " exceeded\n");
 886		ret = -EINVAL;
 887		goto out_no_reloc;
 888	}
 889
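	/*
	 * Record a MOB relocation: mob_loc points at the SVGAMobId in the
	 * command stream, which vmw_apply_relocations() later rewrites with
	 * the validated buffer's MOB id. location stays NULL to distinguish
	 * this from a guest-pointer relocation.
	 */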
 890	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 891	reloc->mob_loc = id;
 892	reloc->location = NULL;
 893
 894	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
 895	if (unlikely(ret != 0))
 896		goto out_no_reloc;
 897
 898	*vmw_bo_p = vmw_bo;
 899	return 0;
 900
 901out_no_reloc:
 902	vmw_dmabuf_unreference(&vmw_bo);
 903	*vmw_bo_p = NULL;
 904	return ret;
 905}
 906
 907/**
 908 * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
 909 * handle to a valid SVGAGuestPtr
 910 *
 911 * @dev_priv: Pointer to a device private structure.
 912 * @sw_context: The software context used for this command batch validation.
 913 * @ptr: Pointer to the user-space handle to be translated.
 914 * @vmw_bo_p: Points to a location that, on successful return, will carry
 915 * a reference-counted pointer to the DMA buffer identified by the
 916 * user-space handle in @ptr.
 917 *
 918 * This function saves information needed to translate a user-space buffer
 919 * handle to a valid SVGAGuestPtr. The translation does not take place
 920 * immediately, but during a call to vmw_apply_relocations().
 921 * This function builds a relocation list and a list of buffers to validate.
 922 * The former needs to be freed using either vmw_apply_relocations() or
 923 * vmw_free_relocations(). The latter needs to be freed using
 924 * vmw_clear_validations().
 925 */
 926static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 927				   struct vmw_sw_context *sw_context,
 928				   SVGAGuestPtr *ptr,
 929				   struct vmw_dma_buffer **vmw_bo_p)
 930{
 931	struct vmw_dma_buffer *vmw_bo = NULL;
 932	struct ttm_buffer_object *bo;
 933	uint32_t handle = ptr->gmrId;
 934	struct vmw_relocation *reloc;
 935	int ret;
 936
 937	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 938	if (unlikely(ret != 0)) {
 939		DRM_ERROR("Could not find or use GMR region.\n");
 940		return -EINVAL;
 941	}
 942	bo = &vmw_bo->base;
 943
 944	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 945		DRM_ERROR("Max number relocations per submission"
 946			  " exceeded\n");
 947		ret = -EINVAL;
 948		goto out_no_reloc;
 949	}
 950
 951	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 952	reloc->location = ptr;
 953
 954	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
 955	if (unlikely(ret != 0))
 956		goto out_no_reloc;
 957
 958	*vmw_bo_p = vmw_bo;
 959	return 0;
 960
 961out_no_reloc:
 962	vmw_dmabuf_unreference(&vmw_bo);
 963	*vmw_bo_p = NULL;
 964	return ret;
 965}
 966
 967/**
 968 * vmw_cmd_begin_gb_query - Validate an SVGA_3D_CMD_BEGIN_GB_QUERY command.
 969 *
 970 * @dev_priv: Pointer to a device private struct.
 971 * @sw_context: The software context used for this command submission.
 972 * @header: Pointer to the command header in the command stream.
 973 */
 974static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
 975				  struct vmw_sw_context *sw_context,
 976				  SVGA3dCmdHeader *header)
 977{
 978	struct vmw_begin_gb_query_cmd {
 979		SVGA3dCmdHeader header;
 980		SVGA3dCmdBeginGBQuery q;
 981	} *cmd;
 982
 983	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
 984			   header);
 985
 986	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 987				 user_context_converter, &cmd->q.cid,
 988				 NULL);
 989}
 990
 991/**
 992 * vmw_cmd_begin_query - Validate an SVGA_3D_CMD_BEGIN_QUERY command.
 993 *
 994 * @dev_priv: Pointer to a device private struct.
 995 * @sw_context: The software context used for this command submission.
 996 * @header: Pointer to the command header in the command stream.
 997 */
 998static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
 999			       struct vmw_sw_context *sw_context,
1000			       SVGA3dCmdHeader *header)
1001{
1002	struct vmw_begin_query_cmd {
1003		SVGA3dCmdHeader header;
1004		SVGA3dCmdBeginQuery q;
1005	} *cmd;
1006
1007	cmd = container_of(header, struct vmw_begin_query_cmd,
1008			   header);
1009
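	/*
	 * On guest-backed (MOB) capable devices the legacy query command is
	 * rewritten in place into its guest-backed equivalent; the two
	 * structures are the same size, so the header can be reused and the
	 * command re-verified as a GB query.
	 */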
1010	if (unlikely(dev_priv->has_mob)) {
1011		struct {
1012			SVGA3dCmdHeader header;
1013			SVGA3dCmdBeginGBQuery q;
1014		} gb_cmd;
1015
1016		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1017
1018		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1019		gb_cmd.header.size = cmd->header.size;
1020		gb_cmd.q.cid = cmd->q.cid;
1021		gb_cmd.q.type = cmd->q.type;
1022
1023		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1024		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1025	}
1026
1027	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1028				 user_context_converter, &cmd->q.cid,
1029				 NULL);
1030}
1031
1032/**
1033 * vmw_cmd_end_gb_query - Validate an SVGA_3D_CMD_END_GB_QUERY command.
1034 *
1035 * @dev_priv: Pointer to a device private struct.
1036 * @sw_context: The software context used for this command submission.
1037 * @header: Pointer to the command header in the command stream.
1038 */
1039static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1040				struct vmw_sw_context *sw_context,
1041				SVGA3dCmdHeader *header)
1042{
1043	struct vmw_dma_buffer *vmw_bo;
1044	struct vmw_query_cmd {
1045		SVGA3dCmdHeader header;
1046		SVGA3dCmdEndGBQuery q;
1047	} *cmd;
1048	int ret;
1049
1050	cmd = container_of(header, struct vmw_query_cmd, header);
1051	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1052	if (unlikely(ret != 0))
1053		return ret;
1054
1055	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1056				    &cmd->q.mobid,
1057				    &vmw_bo);
1058	if (unlikely(ret != 0))
1059		return ret;
1060
1061	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
1062
1063	vmw_dmabuf_unreference(&vmw_bo);
1064	return ret;
1065}
1066
1067/**
1068 * vmw_cmd_end_query - Validate an SVGA_3D_CMD_END_QUERY command.
1069 *
1070 * @dev_priv: Pointer to a device private struct.
1071 * @sw_context: The software context used for this command submission.
1072 * @header: Pointer to the command header in the command stream.
1073 */
1074static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1075			     struct vmw_sw_context *sw_context,
1076			     SVGA3dCmdHeader *header)
1077{
1078	struct vmw_dma_buffer *vmw_bo;
1079	struct vmw_query_cmd {
1080		SVGA3dCmdHeader header;
1081		SVGA3dCmdEndQuery q;
1082	} *cmd;
1083	int ret;
1084
1085	cmd = container_of(header, struct vmw_query_cmd, header);
1086	if (dev_priv->has_mob) {
1087		struct {
1088			SVGA3dCmdHeader header;
1089			SVGA3dCmdEndGBQuery q;
1090		} gb_cmd;
1091
1092		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1093
1094		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1095		gb_cmd.header.size = cmd->header.size;
1096		gb_cmd.q.cid = cmd->q.cid;
1097		gb_cmd.q.type = cmd->q.type;
1098		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1099		gb_cmd.q.offset = cmd->q.guestResult.offset;
1100
1101		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1102		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1103	}
1104
1105	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1106	if (unlikely(ret != 0))
1107		return ret;
1108
1109	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1110				      &cmd->q.guestResult,
1111				      &vmw_bo);
1112	if (unlikely(ret != 0))
1113		return ret;
1114
1115	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
1116
1117	vmw_dmabuf_unreference(&vmw_bo);
1118	return ret;
1119}
1120
1121/**
1122 * vmw_cmd_wait_gb_query - Validate an SVGA_3D_CMD_WAIT_GB_QUERY command.
1123 *
1124 * @dev_priv: Pointer to a device private struct.
1125 * @sw_context: The software context used for this command submission.
1126 * @header: Pointer to the command header in the command stream.
1127 */
1128static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1129				 struct vmw_sw_context *sw_context,
1130				 SVGA3dCmdHeader *header)
1131{
1132	struct vmw_dma_buffer *vmw_bo;
1133	struct vmw_query_cmd {
1134		SVGA3dCmdHeader header;
1135		SVGA3dCmdWaitForGBQuery q;
1136	} *cmd;
1137	int ret;
1138
1139	cmd = container_of(header, struct vmw_query_cmd, header);
1140	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1141	if (unlikely(ret != 0))
1142		return ret;
1143
1144	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1145				    &cmd->q.mobid,
1146				    &vmw_bo);
1147	if (unlikely(ret != 0))
1148		return ret;
1149
1150	vmw_dmabuf_unreference(&vmw_bo);
1151	return 0;
1152}
1153
1154/**
1155 * vmw_cmd_wait_query - Validate an SVGA_3D_CMD_WAIT_QUERY command.
1156 *
1157 * @dev_priv: Pointer to a device private struct.
1158 * @sw_context: The software context used for this command submission.
1159 * @header: Pointer to the command header in the command stream.
1160 */
1161static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1162			      struct vmw_sw_context *sw_context,
1163			      SVGA3dCmdHeader *header)
1164{
1165	struct vmw_dma_buffer *vmw_bo;
1166	struct vmw_query_cmd {
1167		SVGA3dCmdHeader header;
1168		SVGA3dCmdWaitForQuery q;
1169	} *cmd;
1170	int ret;
1171
1172	cmd = container_of(header, struct vmw_query_cmd, header);
1173	if (dev_priv->has_mob) {
1174		struct {
1175			SVGA3dCmdHeader header;
1176			SVGA3dCmdWaitForGBQuery q;
1177		} gb_cmd;
1178
1179		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1180
1181		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1182		gb_cmd.header.size = cmd->header.size;
1183		gb_cmd.q.cid = cmd->q.cid;
1184		gb_cmd.q.type = cmd->q.type;
1185		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1186		gb_cmd.q.offset = cmd->q.guestResult.offset;
1187
1188		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1189		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1190	}
1191
1192	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1193	if (unlikely(ret != 0))
1194		return ret;
1195
1196	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1197				      &cmd->q.guestResult,
1198				      &vmw_bo);
1199	if (unlikely(ret != 0))
1200		return ret;
1201
1202	vmw_dmabuf_unreference(&vmw_bo);
1203	return 0;
1204}
1205
1206static int vmw_cmd_dma(struct vmw_private *dev_priv,
1207		       struct vmw_sw_context *sw_context,
1208		       SVGA3dCmdHeader *header)
1209{
1210	struct vmw_dma_buffer *vmw_bo = NULL;
1211	struct vmw_surface *srf = NULL;
1212	struct vmw_dma_cmd {
1213		SVGA3dCmdHeader header;
1214		SVGA3dCmdSurfaceDMA dma;
1215	} *cmd;
1216	int ret;
1217	SVGA3dCmdSurfaceDMASuffix *suffix;
1218	uint32_t bo_size;
1219
1220	cmd = container_of(header, struct vmw_dma_cmd, header);
1221	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1222					       header->size - sizeof(*suffix));
1223
1224	/* Make sure device and verifier stay in sync. */
1225	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1226		DRM_ERROR("Invalid DMA suffix size.\n");
1227		return -EINVAL;
1228	}
1229
1230	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1231				      &cmd->dma.guest.ptr,
1232				      &vmw_bo);
1233	if (unlikely(ret != 0))
1234		return ret;
1235
1236	/* Make sure DMA doesn't cross BO boundaries. */
1237	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1238	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1239		DRM_ERROR("Invalid DMA offset.\n");
1240		return -EINVAL;
1241	}
1242
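	/* Clamp maximumOffset so the DMA cannot run past the end of the BO. */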
1243	bo_size -= cmd->dma.guest.ptr.offset;
1244	if (unlikely(suffix->maximumOffset > bo_size))
1245		suffix->maximumOffset = bo_size;
1246
1247	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1248				user_surface_converter, &cmd->dma.host.sid,
1249				NULL);
1250	if (unlikely(ret != 0)) {
1251		if (unlikely(ret != -ERESTARTSYS))
1252			DRM_ERROR("could not find surface for DMA.\n");
1253		goto out_no_surface;
1254	}
1255
1256	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1257
1258	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1259			     header);
1260
1261out_no_surface:
1262	vmw_dmabuf_unreference(&vmw_bo);
1263	return ret;
1264}
1265
1266static int vmw_cmd_draw(struct vmw_private *dev_priv,
1267			struct vmw_sw_context *sw_context,
1268			SVGA3dCmdHeader *header)
1269{
1270	struct vmw_draw_cmd {
1271		SVGA3dCmdHeader header;
1272		SVGA3dCmdDrawPrimitives body;
1273	} *cmd;
1274	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1275		(unsigned long)header + sizeof(*cmd));
1276	SVGA3dPrimitiveRange *range;
1277	uint32_t i;
1278	uint32_t maxnum;
1279	int ret;
1280
1281	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1282	if (unlikely(ret != 0))
1283		return ret;
1284
1285	cmd = container_of(header, struct vmw_draw_cmd, header);
1286	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1287
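	/*
	 * The vertex declarations and primitive ranges trail the fixed part
	 * of the command, so bound their counts by what actually fits in the
	 * size given in the command header.
	 */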
1288	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1289		DRM_ERROR("Illegal number of vertex declarations.\n");
1290		return -EINVAL;
1291	}
1292
1293	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1294		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1295					user_surface_converter,
1296					&decl->array.surfaceId, NULL);
1297		if (unlikely(ret != 0))
1298			return ret;
1299	}
1300
1301	maxnum = (header->size - sizeof(cmd->body) -
1302		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1303	if (unlikely(cmd->body.numRanges > maxnum)) {
1304		DRM_ERROR("Illegal number of index ranges.\n");
1305		return -EINVAL;
1306	}
1307
1308	range = (SVGA3dPrimitiveRange *) decl;
1309	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1310		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1311					user_surface_converter,
1312					&range->indexArray.surfaceId, NULL);
1313		if (unlikely(ret != 0))
1314			return ret;
1315	}
1316	return 0;
1317}
1318
1319
1320static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1321			     struct vmw_sw_context *sw_context,
1322			     SVGA3dCmdHeader *header)
1323{
1324	struct vmw_tex_state_cmd {
1325		SVGA3dCmdHeader header;
1326		SVGA3dCmdSetTextureState state;
1327	} *cmd;
1328
1329	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1330	  ((unsigned long) header + header->size + sizeof(header));
1331	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1332		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1333	struct vmw_resource_val_node *ctx_node;
1334	struct vmw_resource_val_node *res_node;
1335	int ret;
1336
1337	cmd = container_of(header, struct vmw_tex_state_cmd,
1338			   header);
1339
1340	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1341				user_context_converter, &cmd->state.cid,
1342				&ctx_node);
1343	if (unlikely(ret != 0))
1344		return ret;
1345
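	/*
	 * Walk all texture state entries in the command. Only BIND_TEXTURE
	 * entries reference a surface and need checking; on guest-backed
	 * devices the binding is also tracked so it can be re-emitted if the
	 * context is scrubbed on eviction and later rebound.
	 */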
1346	for (; cur_state < last_state; ++cur_state) {
1347		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1348			continue;
1349
1350		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1351					user_surface_converter,
1352					&cur_state->value, &res_node);
1353		if (unlikely(ret != 0))
1354			return ret;
1355
1356		if (dev_priv->has_mob) {
1357			struct vmw_ctx_bindinfo bi;
1358
1359			bi.ctx = ctx_node->res;
1360			bi.res = res_node ? res_node->res : NULL;
1361			bi.bt = vmw_ctx_binding_tex;
1362			bi.i1.texture_stage = cur_state->stage;
1363			vmw_context_binding_add(ctx_node->staged_bindings,
1364						&bi);
1365		}
1366	}
1367
1368	return 0;
1369}
1370
1371static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1372				      struct vmw_sw_context *sw_context,
1373				      void *buf)
1374{
1375	struct vmw_dma_buffer *vmw_bo;
1376	int ret;
1377
1378	struct {
1379		uint32_t header;
1380		SVGAFifoCmdDefineGMRFB body;
1381	} *cmd = buf;
1382
1383	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1384				      &cmd->body.ptr,
1385				      &vmw_bo);
1386	if (unlikely(ret != 0))
1387		return ret;
1388
1389	vmw_dmabuf_unreference(&vmw_bo);
1390
1391	return ret;
1392}
1393
1394/**
1395 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1396 *
1397 * @dev_priv: Pointer to a device private struct.
1398 * @sw_context: The software context being used for this batch.
1399 * @res_type: The resource type.
1400 * @converter: Information about user-space binding for this resource type.
1401 * @res_id: Pointer to the user-space resource handle in the command stream.
1402 * @buf_id: Pointer to the user-space backup buffer handle in the command
1403 * stream.
1404 * @backup_offset: Offset of backup into MOB.
1405 *
1406 * This function prepares for registering a switch of backup buffers
1407 * in the resource metadata just prior to unreserving.
1408 */
1409static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1410				 struct vmw_sw_context *sw_context,
1411				 enum vmw_res_type res_type,
1412				 const struct vmw_user_resource_conv
1413				 *converter,
1414				 uint32_t *res_id,
1415				 uint32_t *buf_id,
1416				 unsigned long backup_offset)
1417{
1418	int ret;
1419	struct vmw_dma_buffer *dma_buf;
1420	struct vmw_resource_val_node *val_node;
1421
1422	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1423				converter, res_id, &val_node);
1424	if (unlikely(ret != 0))
1425		return ret;
1426
1427	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1428	if (unlikely(ret != 0))
1429		return ret;
1430
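	/*
	 * If this is the resource's first use in the batch, the backup buffer
	 * supplied by this command is the only one needed, so reservation can
	 * skip preparing a backup buffer of its own (no_buffer_needed).
	 */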
1431	if (val_node->first_usage)
1432		val_node->no_buffer_needed = true;
1433
1434	vmw_dmabuf_unreference(&val_node->new_backup);
1435	val_node->new_backup = dma_buf;
1436	val_node->new_backup_offset = backup_offset;
1437
1438	return 0;
1439}
1440
1441/**
1442 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1443 * command
1444 *
1445 * @dev_priv: Pointer to a device private struct.
1446 * @sw_context: The software context being used for this batch.
1447 * @header: Pointer to the command header in the command stream.
1448 */
1449static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1450				   struct vmw_sw_context *sw_context,
1451				   SVGA3dCmdHeader *header)
1452{
1453	struct vmw_bind_gb_surface_cmd {
1454		SVGA3dCmdHeader header;
1455		SVGA3dCmdBindGBSurface body;
1456	} *cmd;
1457
1458	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1459
1460	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1461				     user_surface_converter,
1462				     &cmd->body.sid, &cmd->body.mobid,
1463				     0);
1464}
1465
1466/**
1467 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1468 * command
1469 *
1470 * @dev_priv: Pointer to a device private struct.
1471 * @sw_context: The software context being used for this batch.
1472 * @header: Pointer to the command header in the command stream.
1473 */
1474static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1475				   struct vmw_sw_context *sw_context,
1476				   SVGA3dCmdHeader *header)
1477{
1478	struct vmw_gb_surface_cmd {
1479		SVGA3dCmdHeader header;
1480		SVGA3dCmdUpdateGBImage body;
1481	} *cmd;
1482
1483	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1484
1485	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1486				 user_surface_converter,
1487				 &cmd->body.image.sid, NULL);
1488}
1489
1490/**
1491 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1492 * command
1493 *
1494 * @dev_priv: Pointer to a device private struct.
1495 * @sw_context: The software context being used for this batch.
1496 * @header: Pointer to the command header in the command stream.
1497 */
1498static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1499				     struct vmw_sw_context *sw_context,
1500				     SVGA3dCmdHeader *header)
1501{
1502	struct vmw_gb_surface_cmd {
1503		SVGA3dCmdHeader header;
1504		SVGA3dCmdUpdateGBSurface body;
1505	} *cmd;
1506
1507	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1508
1509	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1510				 user_surface_converter,
1511				 &cmd->body.sid, NULL);
1512}
1513
1514/**
1515 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
1516 * command
1517 *
1518 * @dev_priv: Pointer to a device private struct.
1519 * @sw_context: The software context being used for this batch.
1520 * @header: Pointer to the command header in the command stream.
1521 */
1522static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1523				     struct vmw_sw_context *sw_context,
1524				     SVGA3dCmdHeader *header)
1525{
1526	struct vmw_gb_surface_cmd {
1527		SVGA3dCmdHeader header;
1528		SVGA3dCmdReadbackGBImage body;
1529	} *cmd;
1530
1531	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1532
1533	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1534				 user_surface_converter,
1535				 &cmd->body.image.sid, NULL);
1536}
1537
1538/**
1539 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
1540 * command
1541 *
1542 * @dev_priv: Pointer to a device private struct.
1543 * @sw_context: The software context being used for this batch.
1544 * @header: Pointer to the command header in the command stream.
1545 */
1546static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1547				       struct vmw_sw_context *sw_context,
1548				       SVGA3dCmdHeader *header)
1549{
1550	struct vmw_gb_surface_cmd {
1551		SVGA3dCmdHeader header;
1552		SVGA3dCmdReadbackGBSurface body;
1553	} *cmd;
1554
1555	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1556
1557	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1558				 user_surface_converter,
1559				 &cmd->body.sid, NULL);
1560}
1561
1562/**
1563 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1564 * command
1565 *
1566 * @dev_priv: Pointer to a device private struct.
1567 * @sw_context: The software context being used for this batch.
1568 * @header: Pointer to the command header in the command stream.
1569 */
1570static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1571				       struct vmw_sw_context *sw_context,
1572				       SVGA3dCmdHeader *header)
1573{
1574	struct vmw_gb_surface_cmd {
1575		SVGA3dCmdHeader header;
1576		SVGA3dCmdInvalidateGBImage body;
1577	} *cmd;
1578
1579	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1580
1581	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1582				 user_surface_converter,
1583				 &cmd->body.image.sid, NULL);
1584}
1585
1586/**
1587 * vmw_cmd_invalidate_gb_surface - Validate an
1588 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
1589 *
1590 * @dev_priv: Pointer to a device private struct.
1591 * @sw_context: The software context being used for this batch.
1592 * @header: Pointer to the command header in the command stream.
1593 */
1594static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1595					 struct vmw_sw_context *sw_context,
1596					 SVGA3dCmdHeader *header)
1597{
1598	struct vmw_gb_surface_cmd {
1599		SVGA3dCmdHeader header;
1600		SVGA3dCmdInvalidateGBSurface body;
1601	} *cmd;
1602
1603	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1604
1605	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1606				 user_surface_converter,
1607				 &cmd->body.sid, NULL);
1608}
1609
1610
1611/**
1612 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
1613 * command
1614 *
1615 * @dev_priv: Pointer to a device private struct.
1616 * @sw_context: The software context being used for this batch.
1617 * @header: Pointer to the command header in the command stream.
1618 */
1619static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1620				 struct vmw_sw_context *sw_context,
1621				 SVGA3dCmdHeader *header)
1622{
1623	struct vmw_shader_define_cmd {
1624		SVGA3dCmdHeader header;
1625		SVGA3dCmdDefineShader body;
1626	} *cmd;
1627	int ret;
1628	size_t size;
1629
1630	cmd = container_of(header, struct vmw_shader_define_cmd,
1631			   header);
1632
1633	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1634				user_context_converter, &cmd->body.cid,
1635				NULL);
1636	if (unlikely(ret != 0))
1637		return ret;
1638
1639	if (unlikely(!dev_priv->has_mob))
1640		return 0;
1641
1642	size = cmd->header.size - sizeof(cmd->body);
1643	ret = vmw_compat_shader_add(sw_context->fp->shman,
1644				    cmd->body.shid, cmd + 1,
1645				    cmd->body.type, size,
1646				    sw_context->fp->tfile,
1647				    &sw_context->staged_shaders);
1648	if (unlikely(ret != 0))
1649		return ret;
1650
1651	return vmw_resource_relocation_add(&sw_context->res_relocations,
1652					   NULL, &cmd->header.id -
1653					   sw_context->buf_start);
1654
1655	return 0;
1656}
1657
1658/**
1659 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
1660 * command
1661 *
1662 * @dev_priv: Pointer to a device private struct.
1663 * @sw_context: The software context being used for this batch.
1664 * @header: Pointer to the command header in the command stream.
1665 */
1666static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1667				  struct vmw_sw_context *sw_context,
1668				  SVGA3dCmdHeader *header)
1669{
1670	struct vmw_shader_destroy_cmd {
1671		SVGA3dCmdHeader header;
1672		SVGA3dCmdDestroyShader body;
1673	} *cmd;
1674	int ret;
1675
1676	cmd = container_of(header, struct vmw_shader_destroy_cmd,
1677			   header);
1678
1679	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1680				user_context_converter, &cmd->body.cid,
1681				NULL);
1682	if (unlikely(ret != 0))
1683		return ret;
1684
1685	if (unlikely(!dev_priv->has_mob))
1686		return 0;
1687
1688	ret = vmw_compat_shader_remove(sw_context->fp->shman,
1689				       cmd->body.shid,
1690				       cmd->body.type,
1691				       &sw_context->staged_shaders);
1692	if (unlikely(ret != 0))
1693		return ret;
1694
1695	return vmw_resource_relocation_add(&sw_context->res_relocations,
1696					   NULL, &cmd->header.id -
1697					   sw_context->buf_start);
1698
1699	return 0;
1700}
1701
1702/**
1703 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
1704 * command
1705 *
1706 * @dev_priv: Pointer to a device private struct.
1707 * @sw_context: The software context being used for this batch.
1708 * @header: Pointer to the command header in the command stream.
1709 */
1710static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1711			      struct vmw_sw_context *sw_context,
1712			      SVGA3dCmdHeader *header)
1713{
1714	struct vmw_set_shader_cmd {
1715		SVGA3dCmdHeader header;
1716		SVGA3dCmdSetShader body;
1717	} *cmd;
1718	struct vmw_resource_val_node *ctx_node;
1719	int ret;
1720
1721	cmd = container_of(header, struct vmw_set_shader_cmd,
1722			   header);
1723
1724	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1725				user_context_converter, &cmd->body.cid,
1726				&ctx_node);
1727	if (unlikely(ret != 0))
1728		return ret;
1729
1730	if (dev_priv->has_mob) {
1731		struct vmw_ctx_bindinfo bi;
1732		struct vmw_resource_val_node *res_node;
1733		u32 shid = cmd->body.shid;
1734
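		/*
		 * A legacy shader id may have been replaced by a kernel-managed
		 * compat shader (set up in vmw_cmd_shader_define()); look up
		 * the substitute handle before checking the resource.
		 */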
1735		if (shid != SVGA3D_INVALID_ID)
1736			(void) vmw_compat_shader_lookup(sw_context->fp->shman,
1737							cmd->body.type,
1738							&shid);
1739
1740		ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
1741					       vmw_res_shader,
1742					       user_shader_converter,
1743					       shid,
1744					       &cmd->body.shid, &res_node);
1745		if (unlikely(ret != 0))
1746			return ret;
1747
1748		bi.ctx = ctx_node->res;
1749		bi.res = res_node ? res_node->res : NULL;
1750		bi.bt = vmw_ctx_binding_shader;
1751		bi.i1.shader_type = cmd->body.type;
1752		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
1753	}
1754
1755	return 0;
1756}
1757
1758/**
1759 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
1760 * command
1761 *
1762 * @dev_priv: Pointer to a device private struct.
1763 * @sw_context: The software context being used for this batch.
1764 * @header: Pointer to the command header in the command stream.
1765 */
1766static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
1767				    struct vmw_sw_context *sw_context,
1768				    SVGA3dCmdHeader *header)
1769{
1770	struct vmw_set_shader_const_cmd {
1771		SVGA3dCmdHeader header;
1772		SVGA3dCmdSetShaderConst body;
1773	} *cmd;
1774	int ret;
1775
1776	cmd = container_of(header, struct vmw_set_shader_const_cmd,
1777			   header);
1778
1779	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1780				user_context_converter, &cmd->body.cid,
1781				NULL);
1782	if (unlikely(ret != 0))
1783		return ret;
1784
1785	if (dev_priv->has_mob)
1786		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
1787
1788	return 0;
1789}
1790
1791/**
1792 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
1793 * command
1794 *
1795 * @dev_priv: Pointer to a device private struct.
1796 * @sw_context: The software context being used for this batch.
1797 * @header: Pointer to the command header in the command stream.
1798 */
1799static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
1800				  struct vmw_sw_context *sw_context,
1801				  SVGA3dCmdHeader *header)
1802{
1803	struct vmw_bind_gb_shader_cmd {
1804		SVGA3dCmdHeader header;
1805		SVGA3dCmdBindGBShader body;
1806	} *cmd;
1807
1808	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
1809			   header);
1810
1811	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
1812				     user_shader_converter,
1813				     &cmd->body.shid, &cmd->body.mobid,
1814				     cmd->body.offsetInBytes);
1815}
1816
1817static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1818				struct vmw_sw_context *sw_context,
1819				void *buf, uint32_t *size)
1820{
1821	uint32_t size_remaining = *size;
1822	uint32_t cmd_id;
1823
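	/*
	 * Only a small set of 2D FIFO commands is accepted here, and only
	 * from in-kernel submissions; each case fixes up *size so the caller
	 * can advance past the command.
	 */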
1824	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1825	switch (cmd_id) {
1826	case SVGA_CMD_UPDATE:
1827		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
1828		break;
1829	case SVGA_CMD_DEFINE_GMRFB:
1830		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
1831		break;
1832	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
1833		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1834		break;
1835	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
1836		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1837		break;
1838	default:
1839		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
1840		return -EINVAL;
1841	}
1842
1843	if (*size > size_remaining) {
1844		DRM_ERROR("Invalid SVGA command (size mismatch):"
1845			  " %u.\n", cmd_id);
1846		return -EINVAL;
1847	}
1848
1849	if (unlikely(!sw_context->kernel)) {
1850		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
1851		return -EPERM;
1852	}
1853
1854	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
1855		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
1856
1857	return 0;
1858}
1859
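/*
 * Each VMW_CMD_DEF() entry below maps a command id to its verifier function
 * together with per-command flags controlling user-space availability and
 * guest-backed (MOB) requirements.
 */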
1860static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1861	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
1862		    false, false, false),
1863	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
1864		    false, false, false),
1865	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
1866		    true, false, false),
1867	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
1868		    true, false, false),
1869	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
1870		    true, false, false),
1871	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
1872		    false, false, false),
1873	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
1874		    false, false, false),
1875	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
1876		    true, false, false),
1877	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
1878		    true, false, false),
1879	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
1880		    true, false, false),
1881	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
1882		    &vmw_cmd_set_render_target_check, true, false, false),
1883	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
1884		    true, false, false),
1885	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
1886		    true, false, false),
1887	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
1888		    true, false, false),
1889	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
1890		    true, false, false),
1891	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
1892		    true, false, false),
1893	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
1894		    true, false, false),
1895	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
1896		    true, false, false),
1897	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
1898		    false, false, false),
1899	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
1900		    true, false, false),
1901	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
1902		    true, false, false),
1903	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
1904		    true, false, false),
1905	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
1906		    true, false, false),
1907	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
1908		    true, false, false),
1909	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
1910		    true, false, false),
1911	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
1912		    true, false, false),
1913	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
1914		    true, false, false),
1915	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
1916		    true, false, false),
1917	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
1918		    true, false, false),
1919	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
1920		    &vmw_cmd_blt_surf_screen_check, false, false, false),
1921	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
1922		    false, false, false),
1923	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
1924		    false, false, false),
1925	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
1926		    false, false, false),
1927	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
1928		    false, false, false),
1929	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
1930		    false, false, false),
1931	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
1932		    false, false, false),
1933	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
1934		    false, false, false),
1935	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
1936		    false, false, false),
1937	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
1938		    false, false, false),
1939	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
1940		    false, false, false),
1941	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
1942		    false, false, false),
1943	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
1944		    false, false, false),
1945	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
1946		    false, false, false),
1947	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
1948		    false, false, true),
1949	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
1950		    false, false, true),
1951	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
1952		    false, false, true),
1953	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
1954		    false, false, true),
1955	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
1956		    false, false, true),
1957	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
1958		    false, false, true),
1959	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
1960		    false, false, true),
1961	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
1962		    false, false, true),
1963	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
1964		    true, false, true),
1965	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
1966		    false, false, true),
1967	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
1968		    true, false, true),
1969	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
1970		    &vmw_cmd_update_gb_surface, true, false, true),
1971	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
1972		    &vmw_cmd_readback_gb_image, true, false, true),
1973	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
1974		    &vmw_cmd_readback_gb_surface, true, false, true),
1975	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
1976		    &vmw_cmd_invalidate_gb_image, true, false, true),
1977	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
1978		    &vmw_cmd_invalidate_gb_surface, true, false, true),
1979	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
1980		    false, false, true),
1981	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
1982		    false, false, true),
1983	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
1984		    false, false, true),
1985	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
1986		    false, false, true),
1987	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
1988		    false, false, true),
1989	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
1990		    false, false, true),
1991	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
1992		    true, false, true),
1993	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
1994		    false, false, true),
1995	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
1996		    false, false, false),
1997	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
1998		    true, false, true),
1999	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2000		    true, false, true),
2001	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
2002		    true, false, true),
2003	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
2004		    true, false, true),
2005	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
2006		    false, false, true),
2007	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
2008		    false, false, true),
2009	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
2010		    false, false, true),
2011	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
2012		    false, false, true),
2013	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
2014		    false, false, true),
2015	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
2016		    false, false, true),
2017	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
2018		    false, false, true),
2019	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
2020		    false, false, true),
2021	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2022		    false, false, true),
2023	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2024		    false, false, true),
2025	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
2026		    true, false, true)
2027};
2028
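/*
 * vmw_cmd_check - Verify a single command in the command stream.
 *
 * Non-3D commands are handed off to vmw_cmd_check_not_3d(). For 3D
 * commands, the matching entry in vmw_cmd_entries[] is looked up and its
 * verifier called, after checking that the command fits in the remaining
 * buffer, that user-space is allowed to issue it, and that it matches the
 * device's guest-backed object capability. For 3D commands, *size is
 * updated to the full command size including its header.
 */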
2029static int vmw_cmd_check(struct vmw_private *dev_priv,
2030			 struct vmw_sw_context *sw_context,
2031			 void *buf, uint32_t *size)
2032{
2033	uint32_t cmd_id;
2034	uint32_t size_remaining = *size;
2035	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
2036	int ret;
2037	const struct vmw_cmd_entry *entry;
2038	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
2039
2040	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
2041	/* Handle any non-3D commands. */
2042	if (unlikely(cmd_id < SVGA_CMD_MAX))
2043		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
2044
2045
2046	cmd_id = le32_to_cpu(header->id);
2047	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
2048
2049	cmd_id -= SVGA_3D_CMD_BASE;
2050	if (unlikely(*size > size_remaining))
2051		goto out_invalid;
2052
2053	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
2054		goto out_invalid;
2055
2056	entry = &vmw_cmd_entries[cmd_id];
2057	if (unlikely(!entry->func))
2058		goto out_invalid;
2059
2060	if (unlikely(!entry->user_allow && !sw_context->kernel))
2061		goto out_privileged;
2062
2063	if (unlikely(entry->gb_disable && gb))
2064		goto out_old;
2065
2066	if (unlikely(entry->gb_enable && !gb))
2067		goto out_new;
2068
2069	ret = entry->func(dev_priv, sw_context, header);
2070	if (unlikely(ret != 0))
2071		goto out_invalid;
2072
2073	return 0;
2074out_invalid:
2075	DRM_ERROR("Invalid SVGA3D command: %d\n",
2076		  cmd_id + SVGA_3D_CMD_BASE);
2077	return -EINVAL;
2078out_privileged:
2079	DRM_ERROR("Privileged SVGA3D command: %d\n",
2080		  cmd_id + SVGA_3D_CMD_BASE);
2081	return -EPERM;
2082out_old:
2083	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
2084		  cmd_id + SVGA_3D_CMD_BASE);
2085	return -EINVAL;
2086out_new:
2087	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
2088		  cmd_id + SVGA_3D_CMD_BASE);
2089	return -EINVAL;
2090}
2091
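/*
 * vmw_cmd_check_all - Walk the entire command stream, verifying each
 * command with vmw_cmd_check() and advancing by the size it reports,
 * until the stream is exhausted or a command fails verification.
 */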
2092static int vmw_cmd_check_all(struct vmw_private *dev_priv,
2093			     struct vmw_sw_context *sw_context,
2094			     void *buf,
2095			     uint32_t size)
2096{
2097	int32_t cur_size = size;
2098	int ret;
2099
2100	sw_context->buf_start = buf;
2101
2102	while (cur_size > 0) {
2103		size = cur_size;
2104		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
2105		if (unlikely(ret != 0))
2106			return ret;
2107		buf = (void *)((unsigned long) buf + size);
2108		cur_size -= size;
2109	}
2110
2111	if (unlikely(cur_size != 0)) {
2112		DRM_ERROR("Command verifier out of sync.\n");
2113		return -EINVAL;
2114	}
2115
2116	return 0;
2117}
2118
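/*
 * vmw_free_relocations - Reset the relocation count. The relocation
 * array itself is part of the software context and is reused across
 * submissions.
 */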
2119static void vmw_free_relocations(struct vmw_sw_context *sw_context)
2120{
2121	sw_context->cur_reloc = 0;
2122}
2123
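/*
 * vmw_apply_relocations - Patch each recorded relocation with the final
 * placement of its validated buffer object: an offset in the framebuffer
 * GMR for VRAM placements, a GMR id, or a MOB id.
 */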
2124static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
2125{
2126	uint32_t i;
2127	struct vmw_relocation *reloc;
2128	struct ttm_validate_buffer *validate;
2129	struct ttm_buffer_object *bo;
2130
2131	for (i = 0; i < sw_context->cur_reloc; ++i) {
2132		reloc = &sw_context->relocs[i];
2133		validate = &sw_context->val_bufs[reloc->index].base;
2134		bo = validate->bo;
2135		switch (bo->mem.mem_type) {
2136		case TTM_PL_VRAM:
2137			reloc->location->offset += bo->offset;
2138			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
2139			break;
2140		case VMW_PL_GMR:
2141			reloc->location->gmrId = bo->mem.start;
2142			break;
2143		case VMW_PL_MOB:
2144			*reloc->mob_loc = bo->mem.start;
2145			break;
2146		default:
2147			BUG();
2148		}
2149	}
2150	vmw_free_relocations(sw_context);
2151}
2152
2153/**
2154 * vmw_resource_list_unreference - Free up a resource list and unreference
2155 * all resources referenced by it.
2156 *
2157 * @list: The resource list.
2158 */
2159static void vmw_resource_list_unreference(struct list_head *list)
2160{
2161	struct vmw_resource_val_node *val, *val_next;
2162
2163	/*
2164	 * Drop references to resources held during command submission.
2165	 */
2166
2167	list_for_each_entry_safe(val, val_next, list, head) {
2168		list_del_init(&val->head);
2169		vmw_resource_unreference(&val->res);
2170		if (unlikely(val->staged_bindings))
2171			kfree(val->staged_bindings);
2172		kfree(val);
2173	}
2174}
2175
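/*
 * vmw_clear_validations - Drop the buffer object references and hash
 * table entries collected during command verification, and remove the
 * resource nodes from the hash table. The resource list itself is
 * unreferenced separately.
 */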
2176static void vmw_clear_validations(struct vmw_sw_context *sw_context)
2177{
2178	struct vmw_validate_buffer *entry, *next;
2179	struct vmw_resource_val_node *val;
2180
2181	/*
2182	 * Drop references to DMA buffers held during command submission.
2183	 */
2184	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
2185				 base.head) {
2186		list_del(&entry->base.head);
2187		ttm_bo_unref(&entry->base.bo);
2188		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
2189		sw_context->cur_val_buf--;
2190	}
2191	BUG_ON(sw_context->cur_val_buf != 0);
2192
2193	list_for_each_entry(val, &sw_context->resource_list, head)
2194		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
2195}
2196
2197static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
2198				      struct ttm_buffer_object *bo,
2199				      bool validate_as_mob)
2200{
2201	int ret;
2202
2203
2204	/*
2205	 * Don't validate pinned buffers.
2206	 */
2207
2208	if (bo == dev_priv->pinned_bo ||
2209	    (bo == dev_priv->dummy_query_bo &&
2210	     dev_priv->dummy_query_bo_pinned))
2211		return 0;
2212
2213	if (validate_as_mob)
2214		return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
2215
2216	/*
2217	 * Put BO in VRAM if there is space, otherwise as a GMR.
2218	 * If there is no space in VRAM and GMR ids are all used up,
2219	 * start evicting GMRs to make room. If the DMA buffer can't be
2220	 * used as a GMR, this will return -ENOMEM.
2221	 */
2222
2223	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
2224	if (likely(ret == 0 || ret == -ERESTARTSYS))
2225		return ret;
2226
2227	/*
2228	 * If that failed, try VRAM again, this time evicting
2229	 * previous contents.
2230	 */
2231
2232	DRM_INFO("Falling through to VRAM.\n");
2233	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
2234	return ret;
2235}
2236
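/*
 * vmw_validate_buffers - Validate every buffer object on the software
 * context's validation list using vmw_validate_single_buffer(),
 * honouring each entry's validate_as_mob flag.
 */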
2237static int vmw_validate_buffers(struct vmw_private *dev_priv,
2238				struct vmw_sw_context *sw_context)
2239{
2240	struct vmw_validate_buffer *entry;
2241	int ret;
2242
2243	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
2244		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
2245						 entry->validate_as_mob);
2246		if (unlikely(ret != 0))
2247			return ret;
2248	}
2249	return 0;
2250}
2251
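/*
 * vmw_resize_cmd_bounce - Make sure the bounce buffer used for copying
 * user-space command streams is at least @size bytes. The buffer grows
 * geometrically (roughly 1.5x per step, page aligned) and is reallocated
 * with vmalloc(). Old contents are not preserved, which is fine since
 * the buffer is refilled from user-space on every submission.
 */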
2252static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
2253				 uint32_t size)
2254{
2255	if (likely(sw_context->cmd_bounce_size >= size))
2256		return 0;
2257
2258	if (sw_context->cmd_bounce_size == 0)
2259		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
2260
2261	while (sw_context->cmd_bounce_size < size) {
2262		sw_context->cmd_bounce_size =
2263			PAGE_ALIGN(sw_context->cmd_bounce_size +
2264				   (sw_context->cmd_bounce_size >> 1));
2265	}
2266
2267	if (sw_context->cmd_bounce != NULL)
2268		vfree(sw_context->cmd_bounce);
2269
2270	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
2271
2272	if (sw_context->cmd_bounce == NULL) {
2273		DRM_ERROR("Failed to allocate command bounce buffer.\n");
2274		sw_context->cmd_bounce_size = 0;
2275		return -ENOMEM;
2276	}
2277
2278	return 0;
2279}
2280
2281/**
2282 * vmw_execbuf_fence_commands - create and submit a command stream fence
2283 *
2284 * Creates a fence object and submits a command stream marker.
2285 * If this fails for some reason, we sync the fifo and set *p_fence to NULL.
2286 * It is then safe to fence buffers with a NULL pointer.
2287 *
2288 * If @p_handle is non-NULL, @file_priv must also be non-NULL, and a
2289 * user-space fence handle is created; otherwise no handle is created.
2290 */
2291
2292int vmw_execbuf_fence_commands(struct drm_file *file_priv,
2293			       struct vmw_private *dev_priv,
2294			       struct vmw_fence_obj **p_fence,
2295			       uint32_t *p_handle)
2296{
2297	uint32_t sequence;
2298	int ret;
2299	bool synced = false;
2300
2301	/* p_handle implies file_priv. */
2302	BUG_ON(p_handle != NULL && file_priv == NULL);
2303
2304	ret = vmw_fifo_send_fence(dev_priv, &sequence);
2305	if (unlikely(ret != 0)) {
2306		DRM_ERROR("Fence submission error. Syncing.\n");
2307		synced = true;
2308	}
2309
2310	if (p_handle != NULL)
2311		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
2312					    sequence,
2313					    DRM_VMW_FENCE_FLAG_EXEC,
2314					    p_fence, p_handle);
2315	else
2316		ret = vmw_fence_create(dev_priv->fman, sequence,
2317				       DRM_VMW_FENCE_FLAG_EXEC,
2318				       p_fence);
2319
2320	if (unlikely(ret != 0 && !synced)) {
2321		(void) vmw_fallback_wait(dev_priv, false, false,
2322					 sequence, false,
2323					 VMW_FENCE_WAIT_TIMEOUT);
2324		*p_fence = NULL;
2325	}
2326
2327	return 0;
2328}
2329
2330/**
2331 * vmw_execbuf_copy_fence_user - copy fence object information to
2332 * user-space.
2333 *
2334 * @dev_priv: Pointer to a vmw_private struct.
2335 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
2336 * @ret: Return value from fence object creation.
2337 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
2338 * which the information should be copied.
2339 * @fence: Pointer to the fence object.
2340 * @fence_handle: User-space fence handle.
2341 *
2342 * This function copies fence information to user-space. If copying fails,
2343 * the user-space struct drm_vmw_fence_rep::error member is hopefully
2344 * left untouched; if user-space has preloaded it with -EFAULT, the
2345 * error should then still be detected.
2346 * Also, if copying fails, user-space will be unable to signal the fence
2347 * object, so we wait for it immediately and then unreference the
2348 * user-space reference.
2349 */
2350void
2351vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2352			    struct vmw_fpriv *vmw_fp,
2353			    int ret,
2354			    struct drm_vmw_fence_rep __user *user_fence_rep,
2355			    struct vmw_fence_obj *fence,
2356			    uint32_t fence_handle)
2357{
2358	struct drm_vmw_fence_rep fence_rep;
2359
2360	if (user_fence_rep == NULL)
2361		return;
2362
2363	memset(&fence_rep, 0, sizeof(fence_rep));
2364
2365	fence_rep.error = ret;
2366	if (ret == 0) {
2367		BUG_ON(fence == NULL);
2368
2369		fence_rep.handle = fence_handle;
2370		fence_rep.seqno = fence->seqno;
2371		vmw_update_seqno(dev_priv, &dev_priv->fifo);
2372		fence_rep.passed_seqno = dev_priv->last_read_seqno;
2373	}
2374
2375	/*
2376	 * copy_to_user errors will be detected by user-space, which will not
2377	 * see fence_rep::error being filled in. Typically user-space will
2378	 * have pre-set that member to -EFAULT.
2379	 */
2380	ret = copy_to_user(user_fence_rep, &fence_rep,
2381			   sizeof(fence_rep));
2382
2383	/*
2384	 * User-space lost the fence object. We need to sync
2385	 * and unreference the handle.
2386	 */
2387	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
2388		ttm_ref_object_base_unref(vmw_fp->tfile,
2389					  fence_handle, TTM_REF_USAGE);
2390		DRM_ERROR("Fence copy error. Syncing.\n");
2391		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
2392					  false, false,
2393					  VMW_FENCE_WAIT_TIMEOUT);
2394	}
2395}
2396
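/*
 * vmw_execbuf_process - Main command submission path.
 *
 * Copies the command stream from user-space into the bounce buffer when
 * needed, verifies it with vmw_cmd_check_all(), reserves and validates
 * all referenced resources and buffer objects, applies relocations,
 * copies the verified stream into reserved fifo space and commits it,
 * and finally fences the submission, optionally handing the fence back
 * to the caller and/or to user-space.
 */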
2397int vmw_execbuf_process(struct drm_file *file_priv,
2398			struct vmw_private *dev_priv,
2399			void __user *user_commands,
2400			void *kernel_commands,
2401			uint32_t command_size,
2402			uint64_t throttle_us,
2403			struct drm_vmw_fence_rep __user *user_fence_rep,
2404			struct vmw_fence_obj **out_fence)
2405{
2406	struct vmw_sw_context *sw_context = &dev_priv->ctx;
2407	struct vmw_fence_obj *fence = NULL;
2408	struct vmw_resource *error_resource;
2409	struct list_head resource_list;
2410	struct ww_acquire_ctx ticket;
2411	uint32_t handle;
2412	void *cmd;
2413	int ret;
2414
2415	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
2416	if (unlikely(ret != 0))
2417		return -ERESTARTSYS;
2418
2419	if (kernel_commands == NULL) {
2420		sw_context->kernel = false;
2421
2422		ret = vmw_resize_cmd_bounce(sw_context, command_size);
2423		if (unlikely(ret != 0))
2424			goto out_unlock;
2425
2426
2427		ret = copy_from_user(sw_context->cmd_bounce,
2428				     user_commands, command_size);
2429
2430		if (unlikely(ret != 0)) {
2431			ret = -EFAULT;
2432			DRM_ERROR("Failed copying commands.\n");
2433			goto out_unlock;
2434		}
2435		kernel_commands = sw_context->cmd_bounce;
2436	} else
2437		sw_context->kernel = true;
2438
2439	sw_context->fp = vmw_fpriv(file_priv);
2440	sw_context->cur_reloc = 0;
2441	sw_context->cur_val_buf = 0;
2442	sw_context->fence_flags = 0;
2443	INIT_LIST_HEAD(&sw_context->resource_list);
2444	sw_context->cur_query_bo = dev_priv->pinned_bo;
2445	sw_context->last_query_ctx = NULL;
2446	sw_context->needs_post_query_barrier = false;
2447	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
2448	INIT_LIST_HEAD(&sw_context->validate_nodes);
2449	INIT_LIST_HEAD(&sw_context->res_relocations);
2450	if (!sw_context->res_ht_initialized) {
2451		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
2452		if (unlikely(ret != 0))
2453			goto out_unlock;
2454		sw_context->res_ht_initialized = true;
2455	}
2456	INIT_LIST_HEAD(&sw_context->staged_shaders);
2457
2458	INIT_LIST_HEAD(&resource_list);
2459	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
2460				command_size);
2461	if (unlikely(ret != 0))
2462		goto out_err_nores;
2463
2464	ret = vmw_resources_reserve(sw_context);
2465	if (unlikely(ret != 0))
2466		goto out_err_nores;
2467
2468	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
2469	if (unlikely(ret != 0))
2470		goto out_err;
2471
2472	ret = vmw_validate_buffers(dev_priv, sw_context);
2473	if (unlikely(ret != 0))
2474		goto out_err;
2475
2476	ret = vmw_resources_validate(sw_context);
2477	if (unlikely(ret != 0))
2478		goto out_err;
2479
2480	if (throttle_us) {
2481		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
2482				   throttle_us);
2483
2484		if (unlikely(ret != 0))
2485			goto out_err;
2486	}
2487
2488	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
2489	if (unlikely(ret != 0)) {
2490		ret = -ERESTARTSYS;
2491		goto out_err;
2492	}
2493
2494	if (dev_priv->has_mob) {
2495		ret = vmw_rebind_contexts(sw_context);
2496		if (unlikely(ret != 0))
2497			goto out_unlock_binding;
2498	}
2499
2500	cmd = vmw_fifo_reserve(dev_priv, command_size);
2501	if (unlikely(cmd == NULL)) {
2502		DRM_ERROR("Failed reserving fifo space for commands.\n");
2503		ret = -ENOMEM;
2504		goto out_unlock_binding;
2505	}
2506
2507	vmw_apply_relocations(sw_context);
2508	memcpy(cmd, kernel_commands, command_size);
2509
2510	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2511	vmw_resource_relocations_free(&sw_context->res_relocations);
2512
2513	vmw_fifo_commit(dev_priv, command_size);
2514
2515	vmw_query_bo_switch_commit(dev_priv, sw_context);
2516	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
2517					 &fence,
2518					 (user_fence_rep) ? &handle : NULL);
2519	/*
2520	 * This error is harmless, because if fence submission fails,
2521	 * vmw_fifo_send_fence will sync. The error will be propagated to
2522	 * user-space in @user_fence_rep.
2523	 */
2524
2525	if (ret != 0)
2526		DRM_ERROR("Fence submission error. Syncing.\n");
2527
2528	vmw_resource_list_unreserve(&sw_context->resource_list, false);
2529	mutex_unlock(&dev_priv->binding_mutex);
2530
2531	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2532				    (void *) fence);
2533
2534	if (unlikely(dev_priv->pinned_bo != NULL &&
2535		     !dev_priv->query_cid_valid))
2536		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
2537
2538	vmw_clear_validations(sw_context);
2539	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
2540				    user_fence_rep, fence, handle);
2541
2542	/* Don't unreference when handing fence out */
2543	if (unlikely(out_fence != NULL)) {
2544		*out_fence = fence;
2545		fence = NULL;
2546	} else if (likely(fence != NULL)) {
2547		vmw_fence_obj_unreference(&fence);
2548	}
2549
2550	list_splice_init(&sw_context->resource_list, &resource_list);
2551	vmw_compat_shaders_commit(sw_context->fp->shman,
2552				  &sw_context->staged_shaders);
2553	mutex_unlock(&dev_priv->cmdbuf_mutex);
2554
2555	/*
2556	 * Unreference resources outside of the cmdbuf_mutex to
2557	 * avoid deadlocks in resource destruction paths.
2558	 */
2559	vmw_resource_list_unreference(&resource_list);
2560
2561	return 0;
2562
2563out_unlock_binding:
2564	mutex_unlock(&dev_priv->binding_mutex);
2565out_err:
2566	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2567out_err_nores:
2568	vmw_resource_list_unreserve(&sw_context->resource_list, true);
2569	vmw_resource_relocations_free(&sw_context->res_relocations);
2570	vmw_free_relocations(sw_context);
2571	vmw_clear_validations(sw_context);
2572	if (unlikely(dev_priv->pinned_bo != NULL &&
2573		     !dev_priv->query_cid_valid))
2574		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2575out_unlock:
2576	list_splice_init(&sw_context->resource_list, &resource_list);
2577	error_resource = sw_context->error_resource;
2578	sw_context->error_resource = NULL;
2579	vmw_compat_shaders_revert(sw_context->fp->shman,
2580				  &sw_context->staged_shaders);
2581	mutex_unlock(&dev_priv->cmdbuf_mutex);
2582
2583	/*
2584	 * Unreference resources outside of the cmdbuf_mutex to
2585	 * avoid deadlocks in resource destruction paths.
2586	 */
2587	vmw_resource_list_unreference(&resource_list);
2588	if (unlikely(error_resource != NULL))
2589		vmw_resource_unreference(&error_resource);
2590
2591	return ret;
2592}
2593
2594/**
2595 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
2596 *
2597 * @dev_priv: The device private structure.
2598 *
2599 * This function is called to idle the fifo and unpin the query buffer
2600 * if the normal way to do this hits an error, which should typically be
2601 * extremely rare.
2602 */
2603static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2604{
2605	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
2606
2607	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
2608	vmw_bo_pin(dev_priv->pinned_bo, false);
2609	vmw_bo_pin(dev_priv->dummy_query_bo, false);
2610	dev_priv->dummy_query_bo_pinned = false;
2611}
2612
2613
2614/**
2615 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2616 * query bo.
2617 *
2618 * @dev_priv: The device private structure.
2619 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
2620 * _after_ a query barrier that flushes all queries touching the current
2621 * buffer pointed to by @dev_priv->pinned_bo
2622 *
2623 * This function should be used to unpin the pinned query bo, or
2624 * as a query barrier when we need to make sure that all queries have
2625 * finished before the next fifo command. (For example on hardware
2626 * context destructions where the hardware may otherwise leak unfinished
2627 * queries).
2628 *
2629 * This function does not return any failure codes, but makes a
2630 * best-effort attempt to unpin safely if an error occurs.
2631 *
2632 * The function will synchronize on the previous query barrier, and will
2633 * thus not finish until that barrier has executed.
2634 *
2635 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
2636 * before calling this function.
2637 */
2638void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2639				     struct vmw_fence_obj *fence)
2640{
2641	int ret = 0;
2642	struct list_head validate_list;
2643	struct ttm_validate_buffer pinned_val, query_val;
2644	struct vmw_fence_obj *lfence = NULL;
2645	struct ww_acquire_ctx ticket;
2646
2647	if (dev_priv->pinned_bo == NULL)
2648		goto out_unlock;
2649
2650	INIT_LIST_HEAD(&validate_list);
2651
2652	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
2653	list_add_tail(&pinned_val.head, &validate_list);
2654
2655	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
2656	list_add_tail(&query_val.head, &validate_list);
2657
2658	do {
2659		ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
2660	} while (ret == -ERESTARTSYS);
2661
2662	if (unlikely(ret != 0)) {
2663		vmw_execbuf_unpin_panic(dev_priv);
2664		goto out_no_reserve;
2665	}
2666
2667	if (dev_priv->query_cid_valid) {
2668		BUG_ON(fence != NULL);
2669		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
2670		if (unlikely(ret != 0)) {
2671			vmw_execbuf_unpin_panic(dev_priv);
2672			goto out_no_emit;
2673		}
2674		dev_priv->query_cid_valid = false;
2675	}
2676
2677	vmw_bo_pin(dev_priv->pinned_bo, false);
2678	vmw_bo_pin(dev_priv->dummy_query_bo, false);
2679	dev_priv->dummy_query_bo_pinned = false;
2680
2681	if (fence == NULL) {
2682		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2683						  NULL);
2684		fence = lfence;
2685	}
2686	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
2687	if (lfence != NULL)
2688		vmw_fence_obj_unreference(&lfence);
2689
2690	ttm_bo_unref(&query_val.bo);
2691	ttm_bo_unref(&pinned_val.bo);
2692	ttm_bo_unref(&dev_priv->pinned_bo);
2693
2694out_unlock:
2695	return;
2696
2697out_no_emit:
2698	ttm_eu_backoff_reservation(&ticket, &validate_list);
2699out_no_reserve:
2700	ttm_bo_unref(&query_val.bo);
2701	ttm_bo_unref(&pinned_val.bo);
2702	ttm_bo_unref(&dev_priv->pinned_bo);
2703}
2704
2705/**
2706 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2707 * query bo.
2708 *
2709 * @dev_priv: The device private structure.
2710 *
2711 * This function should be used to unpin the pinned query bo, or
2712 * as a query barrier when we need to make sure that all queries have
2713 * finished before the next fifo command. (For example on hardware
2714 * context destructions where the hardware may otherwise leak unfinished
2715 * queries).
2716 *
2717 * This function does not return any failure codes, but makes a
2718 * best-effort attempt to unpin safely if an error occurs.
2719 *
2720 * The function will synchronize on the previous query barrier, and will
2721 * thus not finish until that barrier has executed.
2722 */
2723void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
2724{
2725	mutex_lock(&dev_priv->cmdbuf_mutex);
2726	if (dev_priv->query_cid_valid)
2727		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2728	mutex_unlock(&dev_priv->cmdbuf_mutex);
2729}
2730
2731
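/*
 * vmw_execbuf_ioctl - DRM_VMW_EXECBUF ioctl entry point: checks the
 * argument version, takes the read side of the reservation semaphore and
 * forwards the user-space command buffer to vmw_execbuf_process().
 */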
2732int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
2733		      struct drm_file *file_priv)
2734{
2735	struct vmw_private *dev_priv = vmw_priv(dev);
2736	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
2737	int ret;
2738
2739	/*
2740	 * This will allow us to extend the ioctl argument while
2741	 * maintaining backwards compatibility:
2742	 * We take different code paths depending on the value of
2743	 * arg->version.
2744	 */
2745
2746	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
2747		DRM_ERROR("Incorrect execbuf version.\n");
2748		DRM_ERROR("You're running outdated experimental "
2749			  "vmwgfx user-space drivers.");
2750		return -EINVAL;
2751	}
2752
2753	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
2754	if (unlikely(ret != 0))
2755		return ret;
2756
2757	ret = vmw_execbuf_process(file_priv, dev_priv,
2758				  (void __user *)(unsigned long)arg->commands,
2759				  NULL, arg->command_size, arg->throttle_us,
2760				  (void __user *)(unsigned long)arg->fence_rep,
2761				  NULL);
2762
2763	if (unlikely(ret != 0))
2764		goto out_unlock;
2765
2766	vmw_kms_cursor_post_execbuf(dev_priv);
2767
2768out_unlock:
2769	ttm_read_unlock(&dev_priv->reservation_sem);
2770	return ret;
2771}