   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27#include <linux/sync_file.h>
  28
  29#include "vmwgfx_drv.h"
  30#include "vmwgfx_reg.h"
  31#include <drm/ttm/ttm_bo_api.h>
  32#include <drm/ttm/ttm_placement.h>
  33#include "vmwgfx_so.h"
  34#include "vmwgfx_binding.h"
  35
  36#define VMW_RES_HT_ORDER 12
  37
  38/*
   39 * Helper macro to get dx_ctx_node if available, otherwise print an error
   40 * message. This is for use in command verifier functions where, if dx_ctx_node
   41 * is not set, the command is invalid.
  42 */
  43#define VMW_GET_CTX_NODE(__sw_context)                                        \
  44({                                                                            \
  45	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({            \
  46		VMW_DEBUG_USER("SM context is not set at %s\n", __func__);    \
  47		__sw_context->dx_ctx_node;                                    \
  48	});                                                                   \
  49})
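/*
 * Illustrative usage sketch (mirroring the pattern used by the DX command
 * verifiers later in this file), assuming a verifier that receives a
 * sw_context argument:
 *
 *	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
 *
 *	if (!ctx_node)
 *		return -EINVAL;
 */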
  50
  51#define VMW_DECLARE_CMD_VAR(__var, __type)                                    \
  52	struct {                                                              \
  53		SVGA3dCmdHeader header;                                       \
  54		__type body;                                                  \
  55	} __var
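/*
 * A sketch of what the macro expands to; for example,
 * VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget) declares
 *
 *	struct {
 *		SVGA3dCmdHeader header;
 *		SVGA3dCmdSetRenderTarget body;
 *	} *cmd;
 *
 * which the verifiers below recover from a command header with
 * cmd = container_of(header, typeof(*cmd), header).
 */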
  56
  57/**
  58 * struct vmw_relocation - Buffer object relocation
  59 *
  60 * @head: List head for the command submission context's relocation list
  61 * @vbo: Non ref-counted pointer to buffer object
  62 * @mob_loc: Pointer to location for mob id to be modified
  63 * @location: Pointer to location for guest pointer to be modified
  64 */
  65struct vmw_relocation {
  66	struct list_head head;
  67	struct vmw_buffer_object *vbo;
  68	union {
  69		SVGAMobId *mob_loc;
  70		SVGAGuestPtr *location;
  71	};
  72};
  73
  74/**
  75 * enum vmw_resource_relocation_type - Relocation type for resources
  76 *
  77 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
  78 * command stream is replaced with the actual id after validation.
  79 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
  80 * with a NOP.
  81 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
  82 * validation is -1, the command is replaced with a NOP. Otherwise no action.
  83 */
  84enum vmw_resource_relocation_type {
  85	vmw_res_rel_normal,
  86	vmw_res_rel_nop,
  87	vmw_res_rel_cond_nop,
  88	vmw_res_rel_max
  89};
  90
  91/**
  92 * struct vmw_resource_relocation - Relocation info for resources
  93 *
  94 * @head: List head for the software context's relocation list.
  95 * @res: Non-ref-counted pointer to the resource.
   96 * @offset: Byte offset into the command buffer where the id that needs
   97 * fixup is located.
  98 * @rel_type: Type of relocation.
  99 */
 100struct vmw_resource_relocation {
 101	struct list_head head;
 102	const struct vmw_resource *res;
 103	u32 offset:29;
 104	enum vmw_resource_relocation_type rel_type:3;
 105};
 106
 107/**
 108 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 109 *
 110 * @head: List head of context list
 111 * @ctx: The context resource
 112 * @cur: The context's persistent binding state
 113 * @staged: The binding state changes of this command buffer
 114 */
 115struct vmw_ctx_validation_info {
 116	struct list_head head;
 117	struct vmw_resource *ctx;
 118	struct vmw_ctx_binding_state *cur;
 119	struct vmw_ctx_binding_state *staged;
 120};
 121
 122/**
 123 * struct vmw_cmd_entry - Describe a command for the verifier
 124 *
 125 * @user_allow: Whether allowed from the execbuf ioctl.
 126 * @gb_disable: Whether disabled if guest-backed objects are available.
 127 * @gb_enable: Whether enabled iff guest-backed objects are available.
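 * @func: Call-back used to verify the command.
 * @cmd_name: The command name (stringified command define).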
 128 */
 129struct vmw_cmd_entry {
 130	int (*func) (struct vmw_private *, struct vmw_sw_context *,
 131		     SVGA3dCmdHeader *);
 132	bool user_allow;
 133	bool gb_disable;
 134	bool gb_enable;
 135	const char *cmd_name;
 136};
 137
 138#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
 139	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
 140				       (_gb_disable), (_gb_enable), #_cmd}
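/*
 * Illustrative sketch of how entries of this form populate the command table
 * used by the verifier (the full table appears later in this file and may
 * differ in detail):
 *
 *	static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 *		VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *			    true, false, false),
 *		...
 *	};
 *
 * Each entry is indexed by (_cmd - SVGA_3D_CMD_BASE) and records the verifier
 * callback, the permission flags and the stringified command name.
 */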
 141
 142static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 143					struct vmw_sw_context *sw_context,
 144					struct vmw_resource *ctx);
 145static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 146				 struct vmw_sw_context *sw_context,
 147				 SVGAMobId *id,
 148				 struct vmw_buffer_object **vmw_bo_p);
 149/**
 150 * vmw_ptr_diff - Compute the offset from a to b in bytes
 151 *
 152 * @a: A starting pointer.
 153 * @b: A pointer offset in the same address space.
 154 *
 155 * Returns: The offset in bytes between the two pointers.
 156 */
 157static size_t vmw_ptr_diff(void *a, void *b)
 158{
 159	return (unsigned long) b - (unsigned long) a;
 160}
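/*
 * A small usage sketch: vmw_ptr_diff() is what the verifiers below use to
 * record where in the submitted command stream an id needs patching, e.g.
 * (cf. vmw_cmd_res_check()):
 *
 *	vmw_resource_relocation_add(sw_context, res,
 *				    vmw_ptr_diff(sw_context->buf_start, id_loc),
 *				    vmw_res_rel_normal);
 */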
 161
 162/**
 163 * vmw_execbuf_bindings_commit - Commit modified binding state
 164 *
 165 * @sw_context: The command submission context
 166 * @backoff: Whether this is part of the error path and binding state changes
 167 * should be ignored
 168 */
 169static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
 170					bool backoff)
 171{
 172	struct vmw_ctx_validation_info *entry;
 173
 174	list_for_each_entry(entry, &sw_context->ctx_list, head) {
 175		if (!backoff)
 176			vmw_binding_state_commit(entry->cur, entry->staged);
 177
 178		if (entry->staged != sw_context->staged_bindings)
 179			vmw_binding_state_free(entry->staged);
 180		else
 181			sw_context->staged_bindings_inuse = false;
 182	}
 183
 184	/* List entries are freed with the validation context */
 185	INIT_LIST_HEAD(&sw_context->ctx_list);
 186}
 187
 188/**
 189 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 190 *
 191 * @sw_context: The command submission context
 192 */
 193static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
 194{
 195	if (sw_context->dx_query_mob)
 196		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
 197					  sw_context->dx_query_mob);
 198}
 199
 200/**
 201 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 202 * the validate list.
 203 *
  204 * @dev_priv: Pointer to the device private.
 205 * @sw_context: The command submission context
 206 * @node: The validation node holding the context resource metadata
 207 */
 208static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
 209				   struct vmw_sw_context *sw_context,
 210				   struct vmw_resource *res,
 211				   struct vmw_ctx_validation_info *node)
 212{
 213	int ret;
 214
 215	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
 216	if (unlikely(ret != 0))
 217		goto out_err;
 218
 219	if (!sw_context->staged_bindings) {
 220		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
 221		if (IS_ERR(sw_context->staged_bindings)) {
 222			ret = PTR_ERR(sw_context->staged_bindings);
 223			sw_context->staged_bindings = NULL;
 224			goto out_err;
 225		}
 226	}
 227
 228	if (sw_context->staged_bindings_inuse) {
 229		node->staged = vmw_binding_state_alloc(dev_priv);
 230		if (IS_ERR(node->staged)) {
 231			ret = PTR_ERR(node->staged);
 232			node->staged = NULL;
 233			goto out_err;
 234		}
 235	} else {
 236		node->staged = sw_context->staged_bindings;
 237		sw_context->staged_bindings_inuse = true;
 238	}
 239
 240	node->ctx = res;
 241	node->cur = vmw_context_binding_state(res);
 242	list_add_tail(&node->head, &sw_context->ctx_list);
 243
 244	return 0;
 245
 246out_err:
 247	return ret;
 248}
 249
 250/**
  251 * vmw_execbuf_res_size - Calculate extra size for the resource validation node
 252 *
 253 * @dev_priv: Pointer to the device private struct.
 254 * @res_type: The resource type.
 255 *
 256 * Guest-backed contexts and DX contexts require extra size to store execbuf
 257 * private information in the validation node. Typically the binding manager
 258 * associated data structures.
 259 *
 260 * Returns: The extra size requirement based on resource type.
 261 */
 262static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
 263					 enum vmw_res_type res_type)
 264{
 265	return (res_type == vmw_res_dx_context ||
 266		(res_type == vmw_res_context && dev_priv->has_mob)) ?
 267		sizeof(struct vmw_ctx_validation_info) : 0;
 268}
 269
 270/**
 271 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 272 *
 273 * @rcache: Pointer to the entry to update.
 274 * @res: Pointer to the resource.
 275 * @private: Pointer to the execbuf-private space in the resource validation
 276 * node.
 277 */
 278static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
 279				      struct vmw_resource *res,
 280				      void *private)
 281{
 282	rcache->res = res;
 283	rcache->private = private;
 284	rcache->valid = 1;
 285	rcache->valid_handle = 0;
 286}
 287
 288/**
 289 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
 290 * rcu-protected pointer to the validation list.
 291 *
 292 * @sw_context: Pointer to the software context.
 293 * @res: Unreferenced rcu-protected pointer to the resource.
 294 * @dirty: Whether to change dirty status.
 295 *
 296 * Returns: 0 on success. Negative error code on failure. Typical error codes
 297 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 298 */
 299static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
 300					 struct vmw_resource *res,
 301					 u32 dirty)
 302{
 303	struct vmw_private *dev_priv = res->dev_priv;
 304	int ret;
 305	enum vmw_res_type res_type = vmw_res_type(res);
 306	struct vmw_res_cache_entry *rcache;
 307	struct vmw_ctx_validation_info *ctx_info;
 308	bool first_usage;
 309	unsigned int priv_size;
 310
 311	rcache = &sw_context->res_cache[res_type];
 312	if (likely(rcache->valid && rcache->res == res)) {
 313		if (dirty)
 314			vmw_validation_res_set_dirty(sw_context->ctx,
 315						     rcache->private, dirty);
 316		vmw_user_resource_noref_release();
 317		return 0;
 318	}
 319
 320	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
 321	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
 322					  dirty, (void **)&ctx_info,
 323					  &first_usage);
 324	vmw_user_resource_noref_release();
 325	if (ret)
 326		return ret;
 327
 328	if (priv_size && first_usage) {
 329		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
 330					      ctx_info);
 331		if (ret) {
 332			VMW_DEBUG_USER("Failed first usage context setup.\n");
 333			return ret;
 334		}
 335	}
 336
 337	vmw_execbuf_rcache_update(rcache, res, ctx_info);
 338	return 0;
 339}
 340
 341/**
 342 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 343 * validation list if it's not already on it
 344 *
 345 * @sw_context: Pointer to the software context.
 346 * @res: Pointer to the resource.
 347 * @dirty: Whether to change dirty status.
 348 *
 349 * Returns: Zero on success. Negative error code on failure.
 350 */
 351static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
 352					 struct vmw_resource *res,
 353					 u32 dirty)
 354{
 355	struct vmw_res_cache_entry *rcache;
 356	enum vmw_res_type res_type = vmw_res_type(res);
 357	void *ptr;
 358	int ret;
 359
 360	rcache = &sw_context->res_cache[res_type];
 361	if (likely(rcache->valid && rcache->res == res)) {
 362		if (dirty)
 363			vmw_validation_res_set_dirty(sw_context->ctx,
 364						     rcache->private, dirty);
 365		return 0;
 366	}
 367
 368	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
 369					  &ptr, NULL);
 370	if (ret)
 371		return ret;
 372
 373	vmw_execbuf_rcache_update(rcache, res, ptr);
 374
 375	return 0;
 376}
 377
 378/**
  379 * vmw_view_res_val_add - Add a view and the surface it's pointing to, to the
 380 * validation list
 381 *
 382 * @sw_context: The software context holding the validation list.
 383 * @view: Pointer to the view resource.
 384 *
 385 * Returns 0 if success, negative error code otherwise.
 386 */
 387static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
 388				struct vmw_resource *view)
 389{
 390	int ret;
 391
 392	/*
 393	 * First add the resource the view is pointing to, otherwise it may be
 394	 * swapped out when the view is validated.
 395	 */
 396	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
 397					    vmw_view_dirtying(view));
 398	if (ret)
 399		return ret;
 400
 401	return vmw_execbuf_res_noctx_val_add(sw_context, view,
 402					     VMW_RES_DIRTY_NONE);
 403}
 404
 405/**
 406 * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
  407 * to, to the validation list.
 408 *
 409 * @sw_context: The software context holding the validation list.
 410 * @view_type: The view type to look up.
 411 * @id: view id of the view.
 412 *
 413 * The view is represented by a view id and the DX context it's created on, or
 414 * scheduled for creation on. If there is no DX context set, the function will
 415 * return an -EINVAL error pointer.
 416 *
 417 * Returns: Unreferenced pointer to the resource on success, negative error
 418 * pointer on failure.
 419 */
 420static struct vmw_resource *
 421vmw_view_id_val_add(struct vmw_sw_context *sw_context,
 422		    enum vmw_view_type view_type, u32 id)
 423{
 424	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
 425	struct vmw_resource *view;
 426	int ret;
 427
 428	if (!ctx_node)
 429		return ERR_PTR(-EINVAL);
 430
 431	view = vmw_view_lookup(sw_context->man, view_type, id);
 432	if (IS_ERR(view))
 433		return view;
 434
 435	ret = vmw_view_res_val_add(sw_context, view);
 436	if (ret)
 437		return ERR_PTR(ret);
 438
 439	return view;
 440}
 441
 442/**
 443 * vmw_resource_context_res_add - Put resources previously bound to a context on
 444 * the validation list
 445 *
 446 * @dev_priv: Pointer to a device private structure
 447 * @sw_context: Pointer to a software context used for this command submission
 448 * @ctx: Pointer to the context resource
 449 *
 450 * This function puts all resources that were previously bound to @ctx on the
 451 * resource validation list. This is part of the context state reemission
 452 */
 453static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 454					struct vmw_sw_context *sw_context,
 455					struct vmw_resource *ctx)
 456{
 457	struct list_head *binding_list;
 458	struct vmw_ctx_bindinfo *entry;
 459	int ret = 0;
 460	struct vmw_resource *res;
 461	u32 i;
 462
 463	/* Add all cotables to the validation list. */
 464	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
 465		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
 466			res = vmw_context_cotable(ctx, i);
 467			if (IS_ERR(res))
 468				continue;
 469
 470			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
 471							    VMW_RES_DIRTY_SET);
 472			if (unlikely(ret != 0))
 473				return ret;
 474		}
 475	}
 476
 477	/* Add all resources bound to the context to the validation list */
 478	mutex_lock(&dev_priv->binding_mutex);
 479	binding_list = vmw_context_binding_list(ctx);
 480
 481	list_for_each_entry(entry, binding_list, ctx_list) {
 482		if (vmw_res_type(entry->res) == vmw_res_view)
 483			ret = vmw_view_res_val_add(sw_context, entry->res);
 484		else
 485			ret = vmw_execbuf_res_noctx_val_add
 486				(sw_context, entry->res,
 487				 vmw_binding_dirtying(entry->bt));
 488		if (unlikely(ret != 0))
 489			break;
 490	}
 491
 492	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
 493		struct vmw_buffer_object *dx_query_mob;
 494
 495		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
 496		if (dx_query_mob)
 497			ret = vmw_validation_add_bo(sw_context->ctx,
 498						    dx_query_mob, true, false);
 499	}
 500
 501	mutex_unlock(&dev_priv->binding_mutex);
 502	return ret;
 503}
 504
 505/**
 506 * vmw_resource_relocation_add - Add a relocation to the relocation list
 507 *
  508 * @sw_context: Pointer to the software context holding the relocation list.
 509 * @res: The resource.
 510 * @offset: Offset into the command buffer currently being parsed where the id
 511 * that needs fixup is located. Granularity is one byte.
 512 * @rel_type: Relocation type.
 513 */
 514static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
 515				       const struct vmw_resource *res,
 516				       unsigned long offset,
 517				       enum vmw_resource_relocation_type
 518				       rel_type)
 519{
 520	struct vmw_resource_relocation *rel;
 521
 522	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
 523	if (unlikely(!rel)) {
 524		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
 525		return -ENOMEM;
 526	}
 527
 528	rel->res = res;
 529	rel->offset = offset;
 530	rel->rel_type = rel_type;
 531	list_add_tail(&rel->head, &sw_context->res_relocations);
 532
 533	return 0;
 534}
 535
 536/**
 537 * vmw_resource_relocations_free - Free all relocations on a list
 538 *
 539 * @list: Pointer to the head of the relocation list
 540 */
 541static void vmw_resource_relocations_free(struct list_head *list)
 542{
 543	/* Memory is validation context memory, so no need to free it */
 544	INIT_LIST_HEAD(list);
 545}
 546
 547/**
 548 * vmw_resource_relocations_apply - Apply all relocations on a list
 549 *
  550 * @cb: Pointer to the start of the command buffer being patched. This need not be
 551 * the same buffer as the one being parsed when the relocation list was built,
 552 * but the contents must be the same modulo the resource ids.
 553 * @list: Pointer to the head of the relocation list.
 554 */
 555static void vmw_resource_relocations_apply(uint32_t *cb,
 556					   struct list_head *list)
 557{
 558	struct vmw_resource_relocation *rel;
 559
 560	/* Validate the struct vmw_resource_relocation member size */
 561	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
 562	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
 563
 564	list_for_each_entry(rel, list, head) {
 565		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
 566		switch (rel->rel_type) {
 567		case vmw_res_rel_normal:
 568			*addr = rel->res->id;
 569			break;
 570		case vmw_res_rel_nop:
 571			*addr = SVGA_3D_CMD_NOP;
 572			break;
 573		default:
 574			if (rel->res->id == -1)
 575				*addr = SVGA_3D_CMD_NOP;
 576			break;
 577		}
 578	}
 579}
 580
 581static int vmw_cmd_invalid(struct vmw_private *dev_priv,
 582			   struct vmw_sw_context *sw_context,
 583			   SVGA3dCmdHeader *header)
 584{
 585	return -EINVAL;
 586}
 587
 588static int vmw_cmd_ok(struct vmw_private *dev_priv,
 589		      struct vmw_sw_context *sw_context,
 590		      SVGA3dCmdHeader *header)
 591{
 592	return 0;
 593}
 594
 595/**
 596 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 597 * list.
 598 *
 599 * @sw_context: Pointer to the software context.
 600 *
  601 * Note that since VMware's command submission is currently protected by the
  602 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, as
  603 * only a single thread at a time will attempt this.
 604 */
 605static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 606{
 607	int ret;
 608
 609	ret = vmw_validation_res_reserve(sw_context->ctx, true);
 610	if (ret)
 611		return ret;
 612
 613	if (sw_context->dx_query_mob) {
 614		struct vmw_buffer_object *expected_dx_query_mob;
 615
 616		expected_dx_query_mob =
 617			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
 618		if (expected_dx_query_mob &&
 619		    expected_dx_query_mob != sw_context->dx_query_mob) {
 620			ret = -EINVAL;
 621		}
 622	}
 623
 624	return ret;
 625}
 626
 627/**
 628 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 629 * resource validate list unless it's already there.
 630 *
 631 * @dev_priv: Pointer to a device private structure.
 632 * @sw_context: Pointer to the software context.
 633 * @res_type: Resource type.
 634 * @dirty: Whether to change dirty status.
  635 * @converter: User-space visible type-specific information.
 636 * @id_loc: Pointer to the location in the command buffer currently being parsed
 637 * from where the user-space resource id handle is located.
  638 * @p_res: Pointer to pointer to the resource validation node. Populated on
 639 * exit.
 640 */
 641static int
 642vmw_cmd_res_check(struct vmw_private *dev_priv,
 643		  struct vmw_sw_context *sw_context,
 644		  enum vmw_res_type res_type,
 645		  u32 dirty,
 646		  const struct vmw_user_resource_conv *converter,
 647		  uint32_t *id_loc,
 648		  struct vmw_resource **p_res)
 649{
 650	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
 651	struct vmw_resource *res;
 652	int ret;
 653
 654	if (p_res)
 655		*p_res = NULL;
 656
 657	if (*id_loc == SVGA3D_INVALID_ID) {
 658		if (res_type == vmw_res_context) {
 659			VMW_DEBUG_USER("Illegal context invalid id.\n");
 660			return -EINVAL;
 661		}
 662		return 0;
 663	}
 664
 665	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
 666		res = rcache->res;
 667		if (dirty)
 668			vmw_validation_res_set_dirty(sw_context->ctx,
 669						     rcache->private, dirty);
 670	} else {
 671		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
 672
 673		ret = vmw_validation_preload_res(sw_context->ctx, size);
 674		if (ret)
 675			return ret;
 676
 677		res = vmw_user_resource_noref_lookup_handle
 678			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
 679		if (IS_ERR(res)) {
 680			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
 681				       (unsigned int) *id_loc);
 682			return PTR_ERR(res);
 683		}
 684
 685		ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
 686		if (unlikely(ret != 0))
 687			return ret;
 688
 689		if (rcache->valid && rcache->res == res) {
 690			rcache->valid_handle = true;
 691			rcache->handle = *id_loc;
 692		}
 693	}
 694
 695	ret = vmw_resource_relocation_add(sw_context, res,
 696					  vmw_ptr_diff(sw_context->buf_start,
 697						       id_loc),
 698					  vmw_res_rel_normal);
 699	if (p_res)
 700		*p_res = res;
 701
 702	return 0;
 703}
 704
 705/**
  706 * vmw_rebind_all_dx_query - Rebind the DX query MOB associated with the context
 707 *
 708 * @ctx_res: context the query belongs to
 709 *
 710 * This function assumes binding_mutex is held.
 711 */
 712static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
 713{
 714	struct vmw_private *dev_priv = ctx_res->dev_priv;
 715	struct vmw_buffer_object *dx_query_mob;
 716	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
 717
 718	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
 719
 720	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
 721		return 0;
 722
 723	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
 724	if (cmd == NULL)
 725		return -ENOMEM;
 726
 727	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
 728	cmd->header.size = sizeof(cmd->body);
 729	cmd->body.cid = ctx_res->id;
 730	cmd->body.mobid = dx_query_mob->base.mem.start;
 731	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 732
 733	vmw_context_bind_dx_query(ctx_res, dx_query_mob);
 734
 735	return 0;
 736}
 737
 738/**
 739 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 740 * contexts.
 741 *
 742 * @sw_context: Pointer to the software context.
 743 *
 744 * Rebind context binding points that have been scrubbed because of eviction.
 745 */
 746static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
 747{
 748	struct vmw_ctx_validation_info *val;
 749	int ret;
 750
 751	list_for_each_entry(val, &sw_context->ctx_list, head) {
 752		ret = vmw_binding_rebind_all(val->cur);
 753		if (unlikely(ret != 0)) {
 754			if (ret != -ERESTARTSYS)
 755				VMW_DEBUG_USER("Failed to rebind context.\n");
 756			return ret;
 757		}
 758
 759		ret = vmw_rebind_all_dx_query(val->ctx);
 760		if (ret != 0) {
 761			VMW_DEBUG_USER("Failed to rebind queries.\n");
 762			return ret;
 763		}
 764	}
 765
 766	return 0;
 767}
 768
 769/**
 770 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 771 * state tracker.
 772 *
 773 * @sw_context: The execbuf state used for this command.
 774 * @view_type: View type for the bindings.
 775 * @binding_type: Binding type for the bindings.
  776 * @shader_slot: The shader slot to use for the bindings.
 777 * @view_ids: Array of view ids to be bound.
 778 * @num_views: Number of view ids in @view_ids.
 779 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 780 */
 781static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
 782				 enum vmw_view_type view_type,
 783				 enum vmw_ctx_binding_type binding_type,
 784				 uint32 shader_slot,
 785				 uint32 view_ids[], u32 num_views,
 786				 u32 first_slot)
 787{
 788	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
 789	u32 i;
 790
 791	if (!ctx_node)
 792		return -EINVAL;
 793
 794	for (i = 0; i < num_views; ++i) {
 795		struct vmw_ctx_bindinfo_view binding;
 796		struct vmw_resource *view = NULL;
 797
 798		if (view_ids[i] != SVGA3D_INVALID_ID) {
 799			view = vmw_view_id_val_add(sw_context, view_type,
 800						   view_ids[i]);
 801			if (IS_ERR(view)) {
 802				VMW_DEBUG_USER("View not found.\n");
 803				return PTR_ERR(view);
 804			}
 805		}
 806		binding.bi.ctx = ctx_node->ctx;
 807		binding.bi.res = view;
 808		binding.bi.bt = binding_type;
 809		binding.shader_slot = shader_slot;
 810		binding.slot = first_slot + i;
 811		vmw_binding_add(ctx_node->staged, &binding.bi,
 812				shader_slot, binding.slot);
 813	}
 814
 815	return 0;
 816}
 817
 818/**
 819 * vmw_cmd_cid_check - Check a command header for valid context information.
 820 *
 821 * @dev_priv: Pointer to a device private structure.
 822 * @sw_context: Pointer to the software context.
 823 * @header: A command header with an embedded user-space context handle.
 824 *
 825 * Convenience function: Call vmw_cmd_res_check with the user-space context
 826 * handle embedded in @header.
 827 */
 828static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 829			     struct vmw_sw_context *sw_context,
 830			     SVGA3dCmdHeader *header)
 831{
 832	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
 833		container_of(header, typeof(*cmd), header);
 834
 835	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 836				 VMW_RES_DIRTY_SET, user_context_converter,
 837				 &cmd->body, NULL);
 838}
 839
 840/**
 841 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 842 * recently validated resource
 843 *
 844 * @sw_context: Pointer to the command submission context
 845 * @res: The resource
 846 *
 847 * The resource pointed to by @res needs to be present in the command submission
 848 * context's resource cache and hence the last resource of that type to be
 849 * processed by the validation code.
 850 *
 851 * Return: a pointer to the private metadata of the resource, or NULL if it
 852 * wasn't found
 853 */
 854static struct vmw_ctx_validation_info *
 855vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
 856			  struct vmw_resource *res)
 857{
 858	struct vmw_res_cache_entry *rcache =
 859		&sw_context->res_cache[vmw_res_type(res)];
 860
 861	if (rcache->valid && rcache->res == res)
 862		return rcache->private;
 863
 864	WARN_ON_ONCE(true);
 865	return NULL;
 866}
 867
 868static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 869					   struct vmw_sw_context *sw_context,
 870					   SVGA3dCmdHeader *header)
 871{
 872	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
 873	struct vmw_resource *ctx;
 874	struct vmw_resource *res;
 875	int ret;
 876
 877	cmd = container_of(header, typeof(*cmd), header);
 878
 879	if (cmd->body.type >= SVGA3D_RT_MAX) {
 880		VMW_DEBUG_USER("Illegal render target type %u.\n",
 881			       (unsigned int) cmd->body.type);
 882		return -EINVAL;
 883	}
 884
 885	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 886				VMW_RES_DIRTY_SET, user_context_converter,
 887				&cmd->body.cid, &ctx);
 888	if (unlikely(ret != 0))
 889		return ret;
 890
 891	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 892				VMW_RES_DIRTY_SET, user_surface_converter,
 893				&cmd->body.target.sid, &res);
 894	if (unlikely(ret))
 895		return ret;
 896
 897	if (dev_priv->has_mob) {
 898		struct vmw_ctx_bindinfo_view binding;
 899		struct vmw_ctx_validation_info *node;
 900
 901		node = vmw_execbuf_info_from_res(sw_context, ctx);
 902		if (!node)
 903			return -EINVAL;
 904
 905		binding.bi.ctx = ctx;
 906		binding.bi.res = res;
 907		binding.bi.bt = vmw_ctx_binding_rt;
 908		binding.slot = cmd->body.type;
 909		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
 910	}
 911
 912	return 0;
 913}
 914
 915static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
 916				      struct vmw_sw_context *sw_context,
 917				      SVGA3dCmdHeader *header)
 918{
 919	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
 920	int ret;
 921
 922	cmd = container_of(header, typeof(*cmd), header);
 923
 924	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 925				VMW_RES_DIRTY_NONE, user_surface_converter,
 926				&cmd->body.src.sid, NULL);
 927	if (ret)
 928		return ret;
 929
 930	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 931				 VMW_RES_DIRTY_SET, user_surface_converter,
 932				 &cmd->body.dest.sid, NULL);
 933}
 934
 935static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
 936				     struct vmw_sw_context *sw_context,
 937				     SVGA3dCmdHeader *header)
 938{
 939	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
 940	int ret;
 941
 942	cmd = container_of(header, typeof(*cmd), header);
 943	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 944				VMW_RES_DIRTY_NONE, user_surface_converter,
 945				&cmd->body.src, NULL);
 946	if (ret != 0)
 947		return ret;
 948
 949	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 950				 VMW_RES_DIRTY_SET, user_surface_converter,
 951				 &cmd->body.dest, NULL);
 952}
 953
 954static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
 955				   struct vmw_sw_context *sw_context,
 956				   SVGA3dCmdHeader *header)
 957{
 958	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
 959	int ret;
 960
 961	cmd = container_of(header, typeof(*cmd), header);
 962	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 963				VMW_RES_DIRTY_NONE, user_surface_converter,
 964				&cmd->body.srcSid, NULL);
 965	if (ret != 0)
 966		return ret;
 967
 968	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 969				 VMW_RES_DIRTY_SET, user_surface_converter,
 970				 &cmd->body.dstSid, NULL);
 971}
 972
 973static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
 974				     struct vmw_sw_context *sw_context,
 975				     SVGA3dCmdHeader *header)
 976{
 977	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
 978	int ret;
 979
 980	cmd = container_of(header, typeof(*cmd), header);
 981	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 982				VMW_RES_DIRTY_NONE, user_surface_converter,
 983				&cmd->body.src.sid, NULL);
 984	if (unlikely(ret != 0))
 985		return ret;
 986
 987	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 988				 VMW_RES_DIRTY_SET, user_surface_converter,
 989				 &cmd->body.dest.sid, NULL);
 990}
 991
 992static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
 993					 struct vmw_sw_context *sw_context,
 994					 SVGA3dCmdHeader *header)
 995{
 996	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
 997		container_of(header, typeof(*cmd), header);
 998
 999	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1000				 VMW_RES_DIRTY_NONE, user_surface_converter,
1001				 &cmd->body.srcImage.sid, NULL);
1002}
1003
1004static int vmw_cmd_present_check(struct vmw_private *dev_priv,
1005				 struct vmw_sw_context *sw_context,
1006				 SVGA3dCmdHeader *header)
1007{
1008	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
1009		container_of(header, typeof(*cmd), header);
1010
1011	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1012				 VMW_RES_DIRTY_NONE, user_surface_converter,
1013				 &cmd->body.sid, NULL);
1014}
1015
1016/**
1017 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1018 *
1019 * @dev_priv: The device private structure.
1020 * @new_query_bo: The new buffer holding query results.
1021 * @sw_context: The software context used for this command submission.
1022 *
1023 * This function checks whether @new_query_bo is suitable for holding query
1024 * results, and if another buffer currently is pinned for query results. If so,
1025 * the function prepares the state of @sw_context for switching pinned buffers
1026 * after successful submission of the current command batch.
1027 */
1028static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1029				       struct vmw_buffer_object *new_query_bo,
1030				       struct vmw_sw_context *sw_context)
1031{
1032	struct vmw_res_cache_entry *ctx_entry =
1033		&sw_context->res_cache[vmw_res_context];
1034	int ret;
1035
1036	BUG_ON(!ctx_entry->valid);
1037	sw_context->last_query_ctx = ctx_entry->res;
1038
1039	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1040
1041		if (unlikely(new_query_bo->base.num_pages > 4)) {
1042			VMW_DEBUG_USER("Query buffer too large.\n");
1043			return -EINVAL;
1044		}
1045
1046		if (unlikely(sw_context->cur_query_bo != NULL)) {
1047			sw_context->needs_post_query_barrier = true;
1048			ret = vmw_validation_add_bo(sw_context->ctx,
1049						    sw_context->cur_query_bo,
1050						    dev_priv->has_mob, false);
1051			if (unlikely(ret != 0))
1052				return ret;
1053		}
1054		sw_context->cur_query_bo = new_query_bo;
1055
1056		ret = vmw_validation_add_bo(sw_context->ctx,
1057					    dev_priv->dummy_query_bo,
1058					    dev_priv->has_mob, false);
1059		if (unlikely(ret != 0))
1060			return ret;
1061	}
1062
1063	return 0;
1064}
1065
1066/**
1067 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1068 *
1069 * @dev_priv: The device private structure.
1070 * @sw_context: The software context used for this command submission batch.
1071 *
1072 * This function will check if we're switching query buffers, and will then,
1073 * issue a dummy occlusion query wait used as a query barrier. When the fence
1074 * object following that query wait has signaled, we are sure that all preceding
1075 * queries have finished, and the old query buffer can be unpinned. However,
1076 * since both the new query buffer and the old one are fenced with that fence,
 1077 * we can do an asynchronous unpin now, and be sure that the old query buffer
1078 * won't be moved until the fence has signaled.
1079 *
 1080 * As mentioned above, both the new and old query buffers need to be fenced
1081 * using a sequence emitted *after* calling this function.
1082 */
1083static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1084				     struct vmw_sw_context *sw_context)
1085{
1086	/*
1087	 * The validate list should still hold references to all
1088	 * contexts here.
1089	 */
1090	if (sw_context->needs_post_query_barrier) {
1091		struct vmw_res_cache_entry *ctx_entry =
1092			&sw_context->res_cache[vmw_res_context];
1093		struct vmw_resource *ctx;
1094		int ret;
1095
1096		BUG_ON(!ctx_entry->valid);
1097		ctx = ctx_entry->res;
1098
1099		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
1100
1101		if (unlikely(ret != 0))
1102			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
1103	}
1104
1105	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1106		if (dev_priv->pinned_bo) {
1107			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
1108			vmw_bo_unreference(&dev_priv->pinned_bo);
1109		}
1110
1111		if (!sw_context->needs_post_query_barrier) {
1112			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
1113
1114			/*
1115			 * We pin also the dummy_query_bo buffer so that we
1116			 * don't need to validate it when emitting dummy queries
1117			 * in context destroy paths.
1118			 */
1119			if (!dev_priv->dummy_query_bo_pinned) {
1120				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1121						    true);
1122				dev_priv->dummy_query_bo_pinned = true;
1123			}
1124
1125			BUG_ON(sw_context->last_query_ctx == NULL);
1126			dev_priv->query_cid = sw_context->last_query_ctx->id;
1127			dev_priv->query_cid_valid = true;
1128			dev_priv->pinned_bo =
1129				vmw_bo_reference(sw_context->cur_query_bo);
1130		}
1131	}
1132}
1133
1134/**
 1135 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
1136 * to a MOB id.
1137 *
1138 * @dev_priv: Pointer to a device private structure.
1139 * @sw_context: The software context used for this command batch validation.
1140 * @id: Pointer to the user-space handle to be translated.
1141 * @vmw_bo_p: Points to a location that, on successful return will carry a
1142 * non-reference-counted pointer to the buffer object identified by the
1143 * user-space handle in @id.
1144 *
1145 * This function saves information needed to translate a user-space buffer
1146 * handle to a MOB id. The translation does not take place immediately, but
1147 * during a call to vmw_apply_relocations().
1148 *
1149 * This function builds a relocation list and a list of buffers to validate. The
1150 * former needs to be freed using either vmw_apply_relocations() or
1151 * vmw_free_relocations(). The latter needs to be freed using
1152 * vmw_clear_validations.
1153 */
1154static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1155				 struct vmw_sw_context *sw_context,
1156				 SVGAMobId *id,
1157				 struct vmw_buffer_object **vmw_bo_p)
1158{
1159	struct vmw_buffer_object *vmw_bo;
1160	uint32_t handle = *id;
1161	struct vmw_relocation *reloc;
1162	int ret;
1163
1164	vmw_validation_preload_bo(sw_context->ctx);
1165	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
1166	if (IS_ERR(vmw_bo)) {
1167		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
1168		return PTR_ERR(vmw_bo);
1169	}
1170
1171	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
1172	vmw_user_bo_noref_release();
1173	if (unlikely(ret != 0))
1174		return ret;
1175
1176	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1177	if (!reloc)
1178		return -ENOMEM;
1179
1180	reloc->mob_loc = id;
1181	reloc->vbo = vmw_bo;
1182
1183	*vmw_bo_p = vmw_bo;
1184	list_add_tail(&reloc->head, &sw_context->bo_relocations);
1185
1186	return 0;
1187}
1188
1189/**
 1190 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
1191 * to a valid SVGAGuestPtr
1192 *
1193 * @dev_priv: Pointer to a device private structure.
1194 * @sw_context: The software context used for this command batch validation.
1195 * @ptr: Pointer to the user-space handle to be translated.
1196 * @vmw_bo_p: Points to a location that, on successful return will carry a
1197 * non-reference-counted pointer to the DMA buffer identified by the user-space
 1198 * handle in @ptr.
1199 *
1200 * This function saves information needed to translate a user-space buffer
1201 * handle to a valid SVGAGuestPtr. The translation does not take place
1202 * immediately, but during a call to vmw_apply_relocations().
1203 *
1204 * This function builds a relocation list and a list of buffers to validate.
1205 * The former needs to be freed using either vmw_apply_relocations() or
1206 * vmw_free_relocations(). The latter needs to be freed using
1207 * vmw_clear_validations.
1208 */
1209static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1210				   struct vmw_sw_context *sw_context,
1211				   SVGAGuestPtr *ptr,
1212				   struct vmw_buffer_object **vmw_bo_p)
1213{
1214	struct vmw_buffer_object *vmw_bo;
1215	uint32_t handle = ptr->gmrId;
1216	struct vmw_relocation *reloc;
1217	int ret;
1218
1219	vmw_validation_preload_bo(sw_context->ctx);
1220	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
1221	if (IS_ERR(vmw_bo)) {
1222		VMW_DEBUG_USER("Could not find or use GMR region.\n");
1223		return PTR_ERR(vmw_bo);
1224	}
1225
1226	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
1227	vmw_user_bo_noref_release();
1228	if (unlikely(ret != 0))
1229		return ret;
1230
1231	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
1232	if (!reloc)
1233		return -ENOMEM;
1234
1235	reloc->location = ptr;
1236	reloc->vbo = vmw_bo;
1237	*vmw_bo_p = vmw_bo;
1238	list_add_tail(&reloc->head, &sw_context->bo_relocations);
1239
1240	return 0;
1241}
1242
1243/**
1244 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
1245 *
1246 * @dev_priv: Pointer to a device private struct.
1247 * @sw_context: The software context used for this command submission.
1248 * @header: Pointer to the command header in the command stream.
1249 *
1250 * This function adds the new query into the query COTABLE
1251 */
1252static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1253				   struct vmw_sw_context *sw_context,
1254				   SVGA3dCmdHeader *header)
1255{
1256	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
1257	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
1258	struct vmw_resource *cotable_res;
1259	int ret;
1260
1261	if (!ctx_node)
1262		return -EINVAL;
1263
1264	cmd = container_of(header, typeof(*cmd), header);
1265
1266	if (cmd->body.type <  SVGA3D_QUERYTYPE_MIN ||
1267	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
1268		return -EINVAL;
1269
1270	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
1271	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
1272
1273	return ret;
1274}
1275
1276/**
1277 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
1278 *
1279 * @dev_priv: Pointer to a device private struct.
1280 * @sw_context: The software context used for this command submission.
1281 * @header: Pointer to the command header in the command stream.
1282 *
1283 * The query bind operation will eventually associate the query ID with its
1284 * backing MOB.  In this function, we take the user mode MOB ID and use
1285 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
1286 */
1287static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1288				 struct vmw_sw_context *sw_context,
1289				 SVGA3dCmdHeader *header)
1290{
1291	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
1292	struct vmw_buffer_object *vmw_bo;
1293	int ret;
1294
1295	cmd = container_of(header, typeof(*cmd), header);
1296
1297	/*
1298	 * Look up the buffer pointed to by q.mobid, put it on the relocation
1299	 * list so its kernel mode MOB ID can be filled in later
1300	 */
1301	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1302				    &vmw_bo);
1303
1304	if (ret != 0)
1305		return ret;
1306
1307	sw_context->dx_query_mob = vmw_bo;
1308	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
1309	return 0;
1310}
1311
1312/**
1313 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
1314 *
1315 * @dev_priv: Pointer to a device private struct.
1316 * @sw_context: The software context used for this command submission.
1317 * @header: Pointer to the command header in the command stream.
1318 */
1319static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1320				  struct vmw_sw_context *sw_context,
1321				  SVGA3dCmdHeader *header)
1322{
1323	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
1324		container_of(header, typeof(*cmd), header);
1325
1326	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1327				 VMW_RES_DIRTY_SET, user_context_converter,
1328				 &cmd->body.cid, NULL);
1329}
1330
1331/**
1332 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
1333 *
1334 * @dev_priv: Pointer to a device private struct.
1335 * @sw_context: The software context used for this command submission.
1336 * @header: Pointer to the command header in the command stream.
1337 */
1338static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1339			       struct vmw_sw_context *sw_context,
1340			       SVGA3dCmdHeader *header)
1341{
1342	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
1343		container_of(header, typeof(*cmd), header);
1344
1345	if (unlikely(dev_priv->has_mob)) {
1346		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);
1347
1348		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1349
1350		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1351		gb_cmd.header.size = cmd->header.size;
1352		gb_cmd.body.cid = cmd->body.cid;
1353		gb_cmd.body.type = cmd->body.type;
1354
1355		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1356		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1357	}
1358
1359	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1360				 VMW_RES_DIRTY_SET, user_context_converter,
1361				 &cmd->body.cid, NULL);
1362}
1363
1364/**
1365 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
1366 *
1367 * @dev_priv: Pointer to a device private struct.
1368 * @sw_context: The software context used for this command submission.
1369 * @header: Pointer to the command header in the command stream.
1370 */
1371static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1372				struct vmw_sw_context *sw_context,
1373				SVGA3dCmdHeader *header)
1374{
1375	struct vmw_buffer_object *vmw_bo;
1376	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
1377	int ret;
1378
1379	cmd = container_of(header, typeof(*cmd), header);
1380	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1381	if (unlikely(ret != 0))
1382		return ret;
1383
1384	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1385				    &vmw_bo);
1386	if (unlikely(ret != 0))
1387		return ret;
1388
1389	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1390
1391	return ret;
1392}
1393
1394/**
1395 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
1396 *
1397 * @dev_priv: Pointer to a device private struct.
1398 * @sw_context: The software context used for this command submission.
1399 * @header: Pointer to the command header in the command stream.
1400 */
1401static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1402			     struct vmw_sw_context *sw_context,
1403			     SVGA3dCmdHeader *header)
1404{
1405	struct vmw_buffer_object *vmw_bo;
1406	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
1407	int ret;
1408
1409	cmd = container_of(header, typeof(*cmd), header);
1410	if (dev_priv->has_mob) {
1411		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);
1412
1413		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1414
1415		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1416		gb_cmd.header.size = cmd->header.size;
1417		gb_cmd.body.cid = cmd->body.cid;
1418		gb_cmd.body.type = cmd->body.type;
1419		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1420		gb_cmd.body.offset = cmd->body.guestResult.offset;
1421
1422		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1423		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1424	}
1425
1426	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1427	if (unlikely(ret != 0))
1428		return ret;
1429
1430	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1431				      &cmd->body.guestResult, &vmw_bo);
1432	if (unlikely(ret != 0))
1433		return ret;
1434
1435	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1436
1437	return ret;
1438}
1439
1440/**
1441 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
1442 *
1443 * @dev_priv: Pointer to a device private struct.
1444 * @sw_context: The software context used for this command submission.
1445 * @header: Pointer to the command header in the command stream.
1446 */
1447static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1448				 struct vmw_sw_context *sw_context,
1449				 SVGA3dCmdHeader *header)
1450{
1451	struct vmw_buffer_object *vmw_bo;
1452	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
1453	int ret;
1454
1455	cmd = container_of(header, typeof(*cmd), header);
1456	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1457	if (unlikely(ret != 0))
1458		return ret;
1459
1460	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
1461				    &vmw_bo);
1462	if (unlikely(ret != 0))
1463		return ret;
1464
1465	return 0;
1466}
1467
1468/**
1469 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
1470 *
1471 * @dev_priv: Pointer to a device private struct.
1472 * @sw_context: The software context used for this command submission.
1473 * @header: Pointer to the command header in the command stream.
1474 */
1475static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1476			      struct vmw_sw_context *sw_context,
1477			      SVGA3dCmdHeader *header)
1478{
1479	struct vmw_buffer_object *vmw_bo;
1480	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
1481	int ret;
1482
1483	cmd = container_of(header, typeof(*cmd), header);
1484	if (dev_priv->has_mob) {
1485		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);
1486
1487		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1488
1489		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1490		gb_cmd.header.size = cmd->header.size;
1491		gb_cmd.body.cid = cmd->body.cid;
1492		gb_cmd.body.type = cmd->body.type;
1493		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
1494		gb_cmd.body.offset = cmd->body.guestResult.offset;
1495
1496		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1497		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1498	}
1499
1500	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1501	if (unlikely(ret != 0))
1502		return ret;
1503
1504	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1505				      &cmd->body.guestResult, &vmw_bo);
1506	if (unlikely(ret != 0))
1507		return ret;
1508
1509	return 0;
1510}
1511
1512static int vmw_cmd_dma(struct vmw_private *dev_priv,
1513		       struct vmw_sw_context *sw_context,
1514		       SVGA3dCmdHeader *header)
1515{
1516	struct vmw_buffer_object *vmw_bo = NULL;
1517	struct vmw_surface *srf = NULL;
1518	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
1519	int ret;
1520	SVGA3dCmdSurfaceDMASuffix *suffix;
1521	uint32_t bo_size;
1522	bool dirty;
1523
1524	cmd = container_of(header, typeof(*cmd), header);
1525	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
1526					       header->size - sizeof(*suffix));
1527
 1528	/* Make sure the device and verifier stay in sync. */
1529	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1530		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
1531		return -EINVAL;
1532	}
1533
1534	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1535				      &cmd->body.guest.ptr, &vmw_bo);
1536	if (unlikely(ret != 0))
1537		return ret;
1538
1539	/* Make sure DMA doesn't cross BO boundaries. */
1540	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1541	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
1542		VMW_DEBUG_USER("Invalid DMA offset.\n");
1543		return -EINVAL;
1544	}
1545
1546	bo_size -= cmd->body.guest.ptr.offset;
1547	if (unlikely(suffix->maximumOffset > bo_size))
1548		suffix->maximumOffset = bo_size;
1549
1550	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
1551		VMW_RES_DIRTY_SET : 0;
1552	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1553				dirty, user_surface_converter,
1554				&cmd->body.host.sid, NULL);
1555	if (unlikely(ret != 0)) {
1556		if (unlikely(ret != -ERESTARTSYS))
1557			VMW_DEBUG_USER("could not find surface for DMA.\n");
1558		return ret;
1559	}
1560
1561	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1562
1563	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);
1564
1565	return 0;
1566}
1567
1568static int vmw_cmd_draw(struct vmw_private *dev_priv,
1569			struct vmw_sw_context *sw_context,
1570			SVGA3dCmdHeader *header)
1571{
1572	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
1573	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1574		(unsigned long)header + sizeof(*cmd));
1575	SVGA3dPrimitiveRange *range;
1576	uint32_t i;
1577	uint32_t maxnum;
1578	int ret;
1579
1580	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1581	if (unlikely(ret != 0))
1582		return ret;
1583
1584	cmd = container_of(header, typeof(*cmd), header);
1585	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1586
1587	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1588		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
1589		return -EINVAL;
1590	}
1591
1592	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1593		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1594					VMW_RES_DIRTY_NONE,
1595					user_surface_converter,
1596					&decl->array.surfaceId, NULL);
1597		if (unlikely(ret != 0))
1598			return ret;
1599	}
1600
1601	maxnum = (header->size - sizeof(cmd->body) -
1602		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1603	if (unlikely(cmd->body.numRanges > maxnum)) {
1604		VMW_DEBUG_USER("Illegal number of index ranges.\n");
1605		return -EINVAL;
1606	}
1607
1608	range = (SVGA3dPrimitiveRange *) decl;
1609	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1610		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1611					VMW_RES_DIRTY_NONE,
1612					user_surface_converter,
1613					&range->indexArray.surfaceId, NULL);
1614		if (unlikely(ret != 0))
1615			return ret;
1616	}
1617	return 0;
1618}
1619
1620static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1621			     struct vmw_sw_context *sw_context,
1622			     SVGA3dCmdHeader *header)
1623{
1624	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
1625	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1626	  ((unsigned long) header + header->size + sizeof(header));
1627	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1628		((unsigned long) header + sizeof(*cmd));
1629	struct vmw_resource *ctx;
1630	struct vmw_resource *res;
1631	int ret;
1632
1633	cmd = container_of(header, typeof(*cmd), header);
1634
1635	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1636				VMW_RES_DIRTY_SET, user_context_converter,
1637				&cmd->body.cid, &ctx);
1638	if (unlikely(ret != 0))
1639		return ret;
1640
1641	for (; cur_state < last_state; ++cur_state) {
1642		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1643			continue;
1644
1645		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1646			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
1647				       (unsigned int) cur_state->stage);
1648			return -EINVAL;
1649		}
1650
1651		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1652					VMW_RES_DIRTY_NONE,
1653					user_surface_converter,
1654					&cur_state->value, &res);
1655		if (unlikely(ret != 0))
1656			return ret;
1657
1658		if (dev_priv->has_mob) {
1659			struct vmw_ctx_bindinfo_tex binding;
1660			struct vmw_ctx_validation_info *node;
1661
1662			node = vmw_execbuf_info_from_res(sw_context, ctx);
1663			if (!node)
1664				return -EINVAL;
1665
1666			binding.bi.ctx = ctx;
1667			binding.bi.res = res;
1668			binding.bi.bt = vmw_ctx_binding_tex;
1669			binding.texture_stage = cur_state->stage;
1670			vmw_binding_add(node->staged, &binding.bi, 0,
1671					binding.texture_stage);
1672		}
1673	}
1674
1675	return 0;
1676}
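/*
 * The loop above walks a flat array of SVGA3dTextureState entries that
 * fills the remainder of the command after the SVGA3dCmdSetTextureState
 * body. Only SVGA3D_TS_BIND_TEXTURE entries carry a surface id, so all
 * other state names are skipped; on guest-backed (MOB) hardware the
 * resulting per-stage texture binding is also recorded in the context's
 * staged binding state.
 */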
1677
1678static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1679				      struct vmw_sw_context *sw_context,
1680				      void *buf)
1681{
1682	struct vmw_buffer_object *vmw_bo;
1683
1684	struct {
1685		uint32_t header;
1686		SVGAFifoCmdDefineGMRFB body;
1687	} *cmd = buf;
1688
1689	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
1690				       &vmw_bo);
1691}
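/*
 * DEFINE_GMRFB is a legacy 2D command, so it starts with a plain 32-bit
 * command id rather than an SVGA3dCmdHeader. The only fixup needed is the
 * guest pointer to the framebuffer backing store, which
 * vmw_translate_guest_ptr() resolves to a validated buffer object so the
 * pointer can be patched before the command reaches the device.
 */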
1692
1693/**
1694 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1695 * switching
1696 *
1697 * @dev_priv: Pointer to a device private struct.
1698 * @sw_context: The software context being used for this batch.
1699 * @res: Pointer to the resource whose backup buffer is to be switched.
1700 * @buf_id: Pointer to the user-space backup buffer handle in the command
1701 * stream.
1702 * @backup_offset: Offset of backup into MOB.
1703 *
1704 * This function prepares for registering a switch of backup buffers in the
1705 * resource metadata just prior to unreserving. Unlike vmw_cmd_switch_backup,
1706 * the resource is passed in directly instead of being looked up from a handle.
1707 */
1708static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1709				     struct vmw_sw_context *sw_context,
1710				     struct vmw_resource *res, uint32_t *buf_id,
1711				     unsigned long backup_offset)
1712{
1713	struct vmw_buffer_object *vbo;
1714	void *info;
1715	int ret;
1716
1717	info = vmw_execbuf_info_from_res(sw_context, res);
1718	if (!info)
1719		return -EINVAL;
1720
1721	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1722	if (ret)
1723		return ret;
1724
1725	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1726					 backup_offset);
1727	return 0;
1728}
1729
1730/**
1731 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1732 *
1733 * @dev_priv: Pointer to a device private struct.
1734 * @sw_context: The software context being used for this batch.
1735 * @res_type: The resource type.
1736 * @converter: Information about user-space binding for this resource type.
1737 * @res_id: Pointer to the user-space resource handle in the command stream.
1738 * @buf_id: Pointer to the user-space backup buffer handle in the command
1739 * stream.
1740 * @backup_offset: Offset of backup into MOB.
1741 *
1742 * This function prepares for registering a switch of backup buffers in the
1743 * resource metadata just prior to unreserving. It's basically a wrapper around
1744 * vmw_cmd_res_switch_backup with a different interface.
1745 */
1746static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1747				 struct vmw_sw_context *sw_context,
1748				 enum vmw_res_type res_type,
1749				 const struct vmw_user_resource_conv
1750				 *converter, uint32_t *res_id, uint32_t *buf_id,
1751				 unsigned long backup_offset)
1752{
1753	struct vmw_resource *res;
1754	int ret;
1755
1756	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1757				VMW_RES_DIRTY_NONE, converter, res_id, &res);
1758	if (ret)
1759		return ret;
1760
1761	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1762					 backup_offset);
1763}
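/*
 * Typical usage (see the guest-backed bind verifiers below): a command
 * verifier that only has user-space handles calls vmw_cmd_switch_backup(),
 * which looks up the resource and then defers to vmw_cmd_res_switch_backup()
 * to translate the MOB id and register the backup-buffer switch with the
 * validation machinery.
 */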
1764
1765/**
1766 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1767 *
1768 * @dev_priv: Pointer to a device private struct.
1769 * @sw_context: The software context being used for this batch.
1770 * @header: Pointer to the command header in the command stream.
1771 */
1772static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1773				   struct vmw_sw_context *sw_context,
1774				   SVGA3dCmdHeader *header)
1775{
1776	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1777		container_of(header, typeof(*cmd), header);
1778
1779	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1780				     user_surface_converter, &cmd->body.sid,
1781				     &cmd->body.mobid, 0);
1782}
1783
1784/**
1785 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1786 *
1787 * @dev_priv: Pointer to a device private struct.
1788 * @sw_context: The software context being used for this batch.
1789 * @header: Pointer to the command header in the command stream.
1790 */
1791static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1792				   struct vmw_sw_context *sw_context,
1793				   SVGA3dCmdHeader *header)
1794{
1795	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1796		container_of(header, typeof(*cmd), header);
1797
1798	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1799				 VMW_RES_DIRTY_NONE, user_surface_converter,
1800				 &cmd->body.image.sid, NULL);
1801}
1802
1803/**
1804 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1805 *
1806 * @dev_priv: Pointer to a device private struct.
1807 * @sw_context: The software context being used for this batch.
1808 * @header: Pointer to the command header in the command stream.
1809 */
1810static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1811				     struct vmw_sw_context *sw_context,
1812				     SVGA3dCmdHeader *header)
1813{
1814	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1815		container_of(header, typeof(*cmd), header);
1816
1817	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1818				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1819				 &cmd->body.sid, NULL);
1820}
1821
1822/**
1823 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1824 *
1825 * @dev_priv: Pointer to a device private struct.
1826 * @sw_context: The software context being used for this batch.
1827 * @header: Pointer to the command header in the command stream.
1828 */
1829static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1830				     struct vmw_sw_context *sw_context,
1831				     SVGA3dCmdHeader *header)
1832{
1833	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1834		container_of(header, typeof(*cmd), header);
1835
1836	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1837				 VMW_RES_DIRTY_NONE, user_surface_converter,
1838				 &cmd->body.image.sid, NULL);
1839}
1840
1841/**
1842 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1843 * command
1844 *
1845 * @dev_priv: Pointer to a device private struct.
1846 * @sw_context: The software context being used for this batch.
1847 * @header: Pointer to the command header in the command stream.
1848 */
1849static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1850				       struct vmw_sw_context *sw_context,
1851				       SVGA3dCmdHeader *header)
1852{
1853	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1854		container_of(header, typeof(*cmd), header);
1855
1856	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1857				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1858				 &cmd->body.sid, NULL);
1859}
1860
1861/**
1862 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1863 * command
1864 *
1865 * @dev_priv: Pointer to a device private struct.
1866 * @sw_context: The software context being used for this batch.
1867 * @header: Pointer to the command header in the command stream.
1868 */
1869static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1870				       struct vmw_sw_context *sw_context,
1871				       SVGA3dCmdHeader *header)
1872{
1873	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1874		container_of(header, typeof(*cmd), header);
1875
1876	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1877				 VMW_RES_DIRTY_NONE, user_surface_converter,
1878				 &cmd->body.image.sid, NULL);
1879}
1880
1881/**
1882 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1883 * command
1884 *
1885 * @dev_priv: Pointer to a device private struct.
1886 * @sw_context: The software context being used for this batch.
1887 * @header: Pointer to the command header in the command stream.
1888 */
1889static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1890					 struct vmw_sw_context *sw_context,
1891					 SVGA3dCmdHeader *header)
1892{
1893	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1894		container_of(header, typeof(*cmd), header);
1895
1896	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1897				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1898				 &cmd->body.sid, NULL);
1899}
1900
1901/**
1902 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1903 *
1904 * @dev_priv: Pointer to a device private struct.
1905 * @sw_context: The software context being used for this batch.
1906 * @header: Pointer to the command header in the command stream.
1907 */
1908static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1909				 struct vmw_sw_context *sw_context,
1910				 SVGA3dCmdHeader *header)
1911{
1912	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1913	int ret;
1914	size_t size;
1915	struct vmw_resource *ctx;
1916
1917	cmd = container_of(header, typeof(*cmd), header);
1918
1919	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1920				VMW_RES_DIRTY_SET, user_context_converter,
1921				&cmd->body.cid, &ctx);
1922	if (unlikely(ret != 0))
1923		return ret;
1924
1925	if (unlikely(!dev_priv->has_mob))
1926		return 0;
1927
1928	size = cmd->header.size - sizeof(cmd->body);
1929	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1930				    cmd->body.shid, cmd + 1, cmd->body.type,
1931				    size, &sw_context->staged_cmd_res);
1932	if (unlikely(ret != 0))
1933		return ret;
1934
1935	return vmw_resource_relocation_add(sw_context, NULL,
1936					   vmw_ptr_diff(sw_context->buf_start,
1937							&cmd->header.id),
1938					   vmw_res_rel_nop);
1939}
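/*
 * On guest-backed hardware the legacy SHADER_DEFINE payload has already been
 * captured as a compat shader above, so the command itself must not reach
 * the device. The vmw_res_rel_nop relocation registered here allows the
 * command id to be rewritten to a NOP at submission time, keeping the
 * command stream layout intact instead of having to remove the bytes.
 */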
1940
1941/**
1942 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1943 *
1944 * @dev_priv: Pointer to a device private struct.
1945 * @sw_context: The software context being used for this batch.
1946 * @header: Pointer to the command header in the command stream.
1947 */
1948static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1949				  struct vmw_sw_context *sw_context,
1950				  SVGA3dCmdHeader *header)
1951{
1952	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1953	int ret;
1954	struct vmw_resource *ctx;
1955
1956	cmd = container_of(header, typeof(*cmd), header);
1957
1958	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1959				VMW_RES_DIRTY_SET, user_context_converter,
1960				&cmd->body.cid, &ctx);
1961	if (unlikely(ret != 0))
1962		return ret;
1963
1964	if (unlikely(!dev_priv->has_mob))
1965		return 0;
1966
1967	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1968				cmd->body.type, &sw_context->staged_cmd_res);
1969	if (unlikely(ret != 0))
1970		return ret;
1971
1972	return vmw_resource_relocation_add(sw_context, NULL,
1973					   vmw_ptr_diff(sw_context->buf_start,
1974							&cmd->header.id),
1975					   vmw_res_rel_nop);
1976}
1977
1978/**
1979 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1980 *
1981 * @dev_priv: Pointer to a device private struct.
1982 * @sw_context: The software context being used for this batch.
1983 * @header: Pointer to the command header in the command stream.
1984 */
1985static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1986			      struct vmw_sw_context *sw_context,
1987			      SVGA3dCmdHeader *header)
1988{
1989	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1990	struct vmw_ctx_bindinfo_shader binding;
1991	struct vmw_resource *ctx, *res = NULL;
1992	struct vmw_ctx_validation_info *ctx_info;
1993	int ret;
1994
1995	cmd = container_of(header, typeof(*cmd), header);
1996
1997	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
1998		VMW_DEBUG_USER("Illegal shader type %u.\n",
1999			       (unsigned int) cmd->body.type);
2000		return -EINVAL;
2001	}
2002
2003	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2004				VMW_RES_DIRTY_SET, user_context_converter,
2005				&cmd->body.cid, &ctx);
2006	if (unlikely(ret != 0))
2007		return ret;
2008
2009	if (!dev_priv->has_mob)
2010		return 0;
2011
2012	if (cmd->body.shid != SVGA3D_INVALID_ID) {
2013		/*
2014		 * This is the compat shader path - per-device guest-backed
2015		 * shaders, while user-space believes it is using per-context
2016		 * host-backed shaders.
2017		 */
2018		res = vmw_shader_lookup(vmw_context_res_man(ctx),
2019					cmd->body.shid, cmd->body.type);
2020		if (!IS_ERR(res)) {
2021			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2022							    VMW_RES_DIRTY_NONE);
2023			if (unlikely(ret != 0))
2024				return ret;
2025
2026			ret = vmw_resource_relocation_add
2027				(sw_context, res,
2028				 vmw_ptr_diff(sw_context->buf_start,
2029					      &cmd->body.shid),
2030				 vmw_res_rel_normal);
2031			if (unlikely(ret != 0))
2032				return ret;
2033		}
2034	}
2035
2036	if (IS_ERR_OR_NULL(res)) {
2037		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2038					VMW_RES_DIRTY_NONE,
2039					user_shader_converter, &cmd->body.shid,
2040					&res);
2041		if (unlikely(ret != 0))
2042			return ret;
2043	}
2044
2045	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2046	if (!ctx_info)
2047		return -EINVAL;
2048
2049	binding.bi.ctx = ctx;
2050	binding.bi.res = res;
2051	binding.bi.bt = vmw_ctx_binding_shader;
2052	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2053	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2054
2055	return 0;
2056}
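/*
 * Sketch of the two lookup paths above: with a valid shid the shader is
 * first tried as a per-context compat shader (a guest-backed shader that
 * user-space still addresses through legacy ids); only if that fails is the
 * id resolved through the regular user_shader_converter path. In the compat
 * case a vmw_res_rel_normal relocation is added so the legacy id in the
 * command stream gets replaced with the device id of the shader that was
 * actually found.
 */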
2057
2058/**
2059 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2060 *
2061 * @dev_priv: Pointer to a device private struct.
2062 * @sw_context: The software context being used for this batch.
2063 * @header: Pointer to the command header in the command stream.
2064 */
2065static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2066				    struct vmw_sw_context *sw_context,
2067				    SVGA3dCmdHeader *header)
2068{
2069	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2070	int ret;
2071
2072	cmd = container_of(header, typeof(*cmd), header);
2073
2074	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2075				VMW_RES_DIRTY_SET, user_context_converter,
2076				&cmd->body.cid, NULL);
2077	if (unlikely(ret != 0))
2078		return ret;
2079
2080	if (dev_priv->has_mob)
2081		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2082
2083	return 0;
2084}
2085
2086/**
2087 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2088 *
2089 * @dev_priv: Pointer to a device private struct.
2090 * @sw_context: The software context being used for this batch.
2091 * @header: Pointer to the command header in the command stream.
2092 */
2093static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2094				  struct vmw_sw_context *sw_context,
2095				  SVGA3dCmdHeader *header)
2096{
2097	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2098		container_of(header, typeof(*cmd), header);
2099
2100	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2101				     user_shader_converter, &cmd->body.shid,
2102				     &cmd->body.mobid, cmd->body.offsetInBytes);
2103}
2104
2105/**
2106 * vmw_cmd_dx_set_single_constant_buffer - Validate
2107 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2108 *
2109 * @dev_priv: Pointer to a device private struct.
2110 * @sw_context: The software context being used for this batch.
2111 * @header: Pointer to the command header in the command stream.
2112 */
2113static int
2114vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2115				      struct vmw_sw_context *sw_context,
2116				      SVGA3dCmdHeader *header)
2117{
2118	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2119	struct vmw_resource *res = NULL;
2120	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2121	struct vmw_ctx_bindinfo_cb binding;
2122	int ret;
2123
2124	if (!ctx_node)
2125		return -EINVAL;
2126
2127	cmd = container_of(header, typeof(*cmd), header);
2128	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2129				VMW_RES_DIRTY_NONE, user_surface_converter,
2130				&cmd->body.sid, &res);
2131	if (unlikely(ret != 0))
2132		return ret;
2133
2134	binding.bi.ctx = ctx_node->ctx;
2135	binding.bi.res = res;
2136	binding.bi.bt = vmw_ctx_binding_cb;
2137	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2138	binding.offset = cmd->body.offsetInBytes;
2139	binding.size = cmd->body.sizeInBytes;
2140	binding.slot = cmd->body.slot;
2141
2142	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2143	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2144		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2145			       (unsigned int) cmd->body.type,
2146			       (unsigned int) binding.slot);
2147		return -EINVAL;
2148	}
2149
2150	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2151			binding.slot);
2152
2153	return 0;
2154}
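/*
 * The constant-buffer binding recorded above is keyed by both the shader
 * stage (shader_slot, derived from body.type) and the buffer slot, which is
 * why both indices are range checked before vmw_binding_add(). The
 * offset/size pair describes the region of the backing surface that the
 * constant buffer occupies.
 */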
2155
2156/**
2157 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2158 * command
2159 *
2160 * @dev_priv: Pointer to a device private struct.
2161 * @sw_context: The software context being used for this batch.
2162 * @header: Pointer to the command header in the command stream.
2163 */
2164static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2165				     struct vmw_sw_context *sw_context,
2166				     SVGA3dCmdHeader *header)
2167{
2168	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2169		container_of(header, typeof(*cmd), header);
2170	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2171		sizeof(SVGA3dShaderResourceViewId);
2172
2173	if ((u64) cmd->body.startView + (u64) num_sr_view >
2174	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
2175	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2176		VMW_DEBUG_USER("Invalid shader binding.\n");
2177		return -EINVAL;
2178	}
2179
2180	return vmw_view_bindings_add(sw_context, vmw_view_sr,
2181				     vmw_ctx_binding_sr,
2182				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2183				     (void *) &cmd[1], num_sr_view,
2184				     cmd->body.startView);
2185}
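/*
 * Worked example of the variable-length check above: if header.size equals
 * sizeof(body) + 4 * sizeof(SVGA3dShaderResourceViewId), then num_sr_view is
 * 4 and startView + 4 must still fall within SVGA3D_DX_MAX_SRVIEWS. The
 * additions are performed in u64 so untrusted 32-bit values cannot wrap
 * around the check.
 */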
2186
2187/**
2188 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2189 *
2190 * @dev_priv: Pointer to a device private struct.
2191 * @sw_context: The software context being used for this batch.
2192 * @header: Pointer to the command header in the command stream.
2193 */
2194static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2195				 struct vmw_sw_context *sw_context,
2196				 SVGA3dCmdHeader *header)
2197{
2198	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2199	struct vmw_resource *res = NULL;
2200	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2201	struct vmw_ctx_bindinfo_shader binding;
2202	int ret = 0;
2203
2204	if (!ctx_node)
2205		return -EINVAL;
2206
2207	cmd = container_of(header, typeof(*cmd), header);
2208
2209	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
2210	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
2211		VMW_DEBUG_USER("Illegal shader type %u.\n",
2212			       (unsigned int) cmd->body.type);
2213		return -EINVAL;
2214	}
2215
2216	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2217		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2218		if (IS_ERR(res)) {
2219			VMW_DEBUG_USER("Could not find shader for binding.\n");
2220			return PTR_ERR(res);
2221		}
2222
2223		ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2224						    VMW_RES_DIRTY_NONE);
2225		if (ret)
2226			return ret;
2227	}
2228
2229	binding.bi.ctx = ctx_node->ctx;
2230	binding.bi.res = res;
2231	binding.bi.bt = vmw_ctx_binding_dx_shader;
2232	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2233
2234	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2235
2236	return 0;
2237}
2238
2239/**
2240 * vmw_cmd_dx_set_vertex_buffers - Validate SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2241 * command
2242 *
2243 * @dev_priv: Pointer to a device private struct.
2244 * @sw_context: The software context being used for this batch.
2245 * @header: Pointer to the command header in the command stream.
2246 */
2247static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2248					 struct vmw_sw_context *sw_context,
2249					 SVGA3dCmdHeader *header)
2250{
2251	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2252	struct vmw_ctx_bindinfo_vb binding;
2253	struct vmw_resource *res;
2254	struct {
2255		SVGA3dCmdHeader header;
2256		SVGA3dCmdDXSetVertexBuffers body;
2257		SVGA3dVertexBuffer buf[];
2258	} *cmd;
2259	int i, ret, num;
2260
2261	if (!ctx_node)
2262		return -EINVAL;
2263
2264	cmd = container_of(header, typeof(*cmd), header);
2265	num = (cmd->header.size - sizeof(cmd->body)) /
2266		sizeof(SVGA3dVertexBuffer);
2267	if ((u64)num + (u64)cmd->body.startBuffer >
2268	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2269		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2270		return -EINVAL;
2271	}
2272
2273	for (i = 0; i < num; i++) {
2274		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2275					VMW_RES_DIRTY_NONE,
2276					user_surface_converter,
2277					&cmd->buf[i].sid, &res);
2278		if (unlikely(ret != 0))
2279			return ret;
2280
2281		binding.bi.ctx = ctx_node->ctx;
2282		binding.bi.bt = vmw_ctx_binding_vb;
2283		binding.bi.res = res;
2284		binding.offset = cmd->buf[i].offset;
2285		binding.stride = cmd->buf[i].stride;
2286		binding.slot = i + cmd->body.startBuffer;
2287
2288		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2289	}
2290
2291	return 0;
2292}
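/*
 * The command is declared with a flexible array member (buf[]) so the
 * verifier can walk the SVGA3dVertexBuffer entries straight out of the
 * command stream. Each entry yields one vertex-buffer binding at slot
 * startBuffer + i; as above, the (u64) casts keep the bounds check safe
 * against integer wrap-around.
 */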
2293
2294/**
2295 * vmw_cmd_dx_set_index_buffer - Validate
2296 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2297 *
2298 * @dev_priv: Pointer to a device private struct.
2299 * @sw_context: The software context being used for this batch.
2300 * @header: Pointer to the command header in the command stream.
2301 */
2302static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2303				       struct vmw_sw_context *sw_context,
2304				       SVGA3dCmdHeader *header)
2305{
2306	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2307	struct vmw_ctx_bindinfo_ib binding;
2308	struct vmw_resource *res;
2309	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2310	int ret;
2311
2312	if (!ctx_node)
2313		return -EINVAL;
2314
2315	cmd = container_of(header, typeof(*cmd), header);
2316	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2317				VMW_RES_DIRTY_NONE, user_surface_converter,
2318				&cmd->body.sid, &res);
2319	if (unlikely(ret != 0))
2320		return ret;
2321
2322	binding.bi.ctx = ctx_node->ctx;
2323	binding.bi.res = res;
2324	binding.bi.bt = vmw_ctx_binding_ib;
2325	binding.offset = cmd->body.offset;
2326	binding.format = cmd->body.format;
2327
2328	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2329
2330	return 0;
2331}
2332
2333/**
2334 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2335 * command
2336 *
2337 * @dev_priv: Pointer to a device private struct.
2338 * @sw_context: The software context being used for this batch.
2339 * @header: Pointer to the command header in the command stream.
2340 */
2341static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2342					struct vmw_sw_context *sw_context,
2343					SVGA3dCmdHeader *header)
2344{
2345	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2346		container_of(header, typeof(*cmd), header);
2347	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2348		sizeof(SVGA3dRenderTargetViewId);
2349	int ret;
2350
2351	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2352		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2353		return -EINVAL;
2354	}
2355
2356	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2357				    0, &cmd->body.depthStencilViewId, 1, 0);
2358	if (ret)
2359		return ret;
2360
2361	return vmw_view_bindings_add(sw_context, vmw_view_rt,
2362				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2363				     num_rt_view, 0);
2364}
2365
2366/**
2367 * vmw_cmd_dx_clear_rendertarget_view - Validate
2368 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2369 *
2370 * @dev_priv: Pointer to a device private struct.
2371 * @sw_context: The software context being used for this batch.
2372 * @header: Pointer to the command header in the command stream.
2373 */
2374static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2375					      struct vmw_sw_context *sw_context,
2376					      SVGA3dCmdHeader *header)
2377{
2378	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2379		container_of(header, typeof(*cmd), header);
2380
2381	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
2382					   cmd->body.renderTargetViewId));
2383}
2384
2385/**
2386 * vmw_cmd_dx_clear_depthstencil_view - Validate
2387 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2388 *
2389 * @dev_priv: Pointer to a device private struct.
2390 * @sw_context: The software context being used for this batch.
2391 * @header: Pointer to the command header in the command stream.
2392 */
2393static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2394					      struct vmw_sw_context *sw_context,
2395					      SVGA3dCmdHeader *header)
2396{
2397	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2398		container_of(header, typeof(*cmd), header);
2399
2400	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
2401					   cmd->body.depthStencilViewId));
2402}
2403
2404static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2405				  struct vmw_sw_context *sw_context,
2406				  SVGA3dCmdHeader *header)
2407{
2408	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2409	struct vmw_resource *srf;
2410	struct vmw_resource *res;
2411	enum vmw_view_type view_type;
2412	int ret;
2413	/*
2414	 * This is based on the fact that all affected define commands have the
2415	 * same initial command body layout.
2416	 */
2417	struct {
2418		SVGA3dCmdHeader header;
2419		uint32 defined_id;
2420		uint32 sid;
2421	} *cmd;
2422
2423	if (!ctx_node)
2424		return -EINVAL;
2425
2426	view_type = vmw_view_cmd_to_type(header->id);
2427	if (view_type == vmw_view_max)
2428		return -EINVAL;
2429
2430	cmd = container_of(header, typeof(*cmd), header);
2431	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2432		VMW_DEBUG_USER("Invalid surface id.\n");
2433		return -EINVAL;
2434	}
2435	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2436				VMW_RES_DIRTY_NONE, user_surface_converter,
2437				&cmd->sid, &srf);
2438	if (unlikely(ret != 0))
2439		return ret;
2440
2441	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2442	ret = vmw_cotable_notify(res, cmd->defined_id);
2443	if (unlikely(ret != 0))
2444		return ret;
2445
2446	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2447			    cmd->defined_id, header,
2448			    header->size + sizeof(*header),
2449			    &sw_context->staged_cmd_res);
2450}
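/*
 * All the view-define commands (shader-resource, render-target and
 * depth-stencil views) share the same two leading fields, which is why the
 * anonymous struct above only declares defined_id and sid and the concrete
 * view type is derived from header->id. The cotable is notified so the
 * context's view table can be resized before vmw_view_add() records the
 * view together with a copy of the defining command.
 */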
2451
2452/**
2453 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2454 *
2455 * @dev_priv: Pointer to a device private struct.
2456 * @sw_context: The software context being used for this batch.
2457 * @header: Pointer to the command header in the command stream.
2458 */
2459static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2460				     struct vmw_sw_context *sw_context,
2461				     SVGA3dCmdHeader *header)
2462{
2463	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2464	struct vmw_ctx_bindinfo_so binding;
2465	struct vmw_resource *res;
2466	struct {
2467		SVGA3dCmdHeader header;
2468		SVGA3dCmdDXSetSOTargets body;
2469		SVGA3dSoTarget targets[];
2470	} *cmd;
2471	int i, ret, num;
2472
2473	if (!ctx_node)
2474		return -EINVAL;
2475
2476	cmd = container_of(header, typeof(*cmd), header);
2477	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2478
2479	if (num > SVGA3D_DX_MAX_SOTARGETS) {
2480		VMW_DEBUG_USER("Invalid DX SO binding.\n");
2481		return -EINVAL;
2482	}
2483
2484	for (i = 0; i < num; i++) {
2485		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2486					VMW_RES_DIRTY_SET,
2487					user_surface_converter,
2488					&cmd->targets[i].sid, &res);
2489		if (unlikely(ret != 0))
2490			return ret;
2491
2492		binding.bi.ctx = ctx_node->ctx;
2493		binding.bi.res = res;
2494		binding.bi.bt = vmw_ctx_binding_so;
2495		binding.offset = cmd->targets[i].offset;
2496		binding.size = cmd->targets[i].sizeInBytes;
2497		binding.slot = i;
2498
2499		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2500	}
2501
2502	return 0;
2503}
2504
2505static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2506				struct vmw_sw_context *sw_context,
2507				SVGA3dCmdHeader *header)
2508{
2509	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2510	struct vmw_resource *res;
2511	/*
2512	 * This is based on the fact that all affected define commands have
2513	 * the same initial command body layout.
2514	 */
2515	struct {
2516		SVGA3dCmdHeader header;
2517		uint32 defined_id;
2518	} *cmd;
2519	enum vmw_so_type so_type;
2520	int ret;
2521
2522	if (!ctx_node)
2523		return -EINVAL;
2524
2525	so_type = vmw_so_cmd_to_type(header->id);
2526	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2527	cmd = container_of(header, typeof(*cmd), header);
2528	ret = vmw_cotable_notify(res, cmd->defined_id);
2529
2530	return ret;
2531}
2532
2533/**
2534 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2535 * command
2536 *
2537 * @dev_priv: Pointer to a device private struct.
2538 * @sw_context: The software context being used for this batch.
2539 * @header: Pointer to the command header in the command stream.
2540 */
2541static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2542					struct vmw_sw_context *sw_context,
2543					SVGA3dCmdHeader *header)
2544{
2545	struct {
2546		SVGA3dCmdHeader header;
2547		union {
2548			SVGA3dCmdDXReadbackSubResource r_body;
2549			SVGA3dCmdDXInvalidateSubResource i_body;
2550			SVGA3dCmdDXUpdateSubResource u_body;
2551			SVGA3dSurfaceId sid;
2552		};
2553	} *cmd;
2554
2555	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2556		     offsetof(typeof(*cmd), sid));
2557	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2558		     offsetof(typeof(*cmd), sid));
2559	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2560		     offsetof(typeof(*cmd), sid));
2561
2562	cmd = container_of(header, typeof(*cmd), header);
2563
2564	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2565				 VMW_RES_DIRTY_NONE, user_surface_converter,
2566				 &cmd->sid, NULL);
2567}
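/*
 * The union plus BUILD_BUG_ON() construction above lets a single verifier
 * cover the readback, invalidate and update sub-resource commands: as long
 * as the sid field sits at the same offset in every body layout, the
 * surface id can be checked through the shared cmd->sid member no matter
 * which of the three commands is being processed.
 */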
2568
2569static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2570				struct vmw_sw_context *sw_context,
2571				SVGA3dCmdHeader *header)
2572{
2573	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2574
2575	if (!ctx_node)
2576		return -EINVAL;
2577
2578	return 0;
2579}
2580
2581/**
2582 * vmw_cmd_dx_view_remove - Validate a view remove command and schedule the view
2583 * resource for removal.
2584 *
2585 * @dev_priv: Pointer to a device private struct.
2586 * @sw_context: The software context being used for this batch.
2587 * @header: Pointer to the command header in the command stream.
2588 *
2589 * Check that the view exists, and if it was not created using this command
2590 * batch, conditionally make this command a NOP.
2591 */
2592static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2593				  struct vmw_sw_context *sw_context,
2594				  SVGA3dCmdHeader *header)
2595{
2596	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2597	struct {
2598		SVGA3dCmdHeader header;
2599		union vmw_view_destroy body;
2600	} *cmd = container_of(header, typeof(*cmd), header);
2601	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2602	struct vmw_resource *view;
2603	int ret;
2604
2605	if (!ctx_node)
2606		return -EINVAL;
2607
2608	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2609			      &sw_context->staged_cmd_res, &view);
2610	if (ret || !view)
2611		return ret;
2612
2613	/*
2614	 * If the view wasn't created during this command batch, it might
2615	 * have been removed due to a context swapout, so add a
2616	 * relocation to conditionally make this command a NOP to avoid
2617	 * device errors.
2618	 */
2619	return vmw_resource_relocation_add(sw_context, view,
2620					   vmw_ptr_diff(sw_context->buf_start,
2621							&cmd->header.id),
2622					   vmw_res_rel_cond_nop);
2623}
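/*
 * vmw_res_rel_cond_nop differs from the unconditional NOP relocation used
 * for the legacy shader commands: as the comment above explains, the
 * destroy command is only neutralized if the view may no longer be known to
 * the device at submission time; otherwise it is sent through unchanged.
 */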
2624
2625/**
2626 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2627 *
2628 * @dev_priv: Pointer to a device private struct.
2629 * @sw_context: The software context being used for this batch.
2630 * @header: Pointer to the command header in the command stream.
2631 */
2632static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2633				    struct vmw_sw_context *sw_context,
2634				    SVGA3dCmdHeader *header)
2635{
2636	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2637	struct vmw_resource *res;
2638	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2639		container_of(header, typeof(*cmd), header);
2640	int ret;
2641
2642	if (!ctx_node)
2643		return -EINVAL;
2644
2645	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2646	ret = vmw_cotable_notify(res, cmd->body.shaderId);
2647	if (ret)
2648		return ret;
2649
2650	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2651				 cmd->body.shaderId, cmd->body.type,
2652				 &sw_context->staged_cmd_res);
2653}
2654
2655/**
2656 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2657 *
2658 * @dev_priv: Pointer to a device private struct.
2659 * @sw_context: The software context being used for this batch.
2660 * @header: Pointer to the command header in the command stream.
2661 */
2662static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2663				     struct vmw_sw_context *sw_context,
2664				     SVGA3dCmdHeader *header)
2665{
2666	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2667	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2668		container_of(header, typeof(*cmd), header);
2669	int ret;
2670
2671	if (!ctx_node)
2672		return -EINVAL;
2673
2674	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2675				&sw_context->staged_cmd_res);
2676
2677	return ret;
2678}
2679
2680/**
2681 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2682 *
2683 * @dev_priv: Pointer to a device private struct.
2684 * @sw_context: The software context being used for this batch.
2685 * @header: Pointer to the command header in the command stream.
2686 */
2687static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2688				  struct vmw_sw_context *sw_context,
2689				  SVGA3dCmdHeader *header)
2690{
2691	struct vmw_resource *ctx;
2692	struct vmw_resource *res;
2693	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2694		container_of(header, typeof(*cmd), header);
2695	int ret;
2696
2697	if (cmd->body.cid != SVGA3D_INVALID_ID) {
2698		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2699					VMW_RES_DIRTY_SET,
2700					user_context_converter, &cmd->body.cid,
2701					&ctx);
2702		if (ret)
2703			return ret;
2704	} else {
2705		struct vmw_ctx_validation_info *ctx_node =
2706			VMW_GET_CTX_NODE(sw_context);
2707
2708		if (!ctx_node)
2709			return -EINVAL;
2710
2711		ctx = ctx_node->ctx;
2712	}
2713
2714	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2715	if (IS_ERR(res)) {
2716		VMW_DEBUG_USER("Could not find shader to bind.\n");
2717		return PTR_ERR(res);
2718	}
2719
2720	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2721					    VMW_RES_DIRTY_NONE);
2722	if (ret) {
2723		VMW_DEBUG_USER("Error creating resource validation node.\n");
2724		return ret;
2725	}
2726
2727	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2728					 &cmd->body.mobid,
2729					 cmd->body.offsetInBytes);
2730}
2731
2732/**
2733 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2734 *
2735 * @dev_priv: Pointer to a device private struct.
2736 * @sw_context: The software context being used for this batch.
2737 * @header: Pointer to the command header in the command stream.
2738 */
2739static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2740			      struct vmw_sw_context *sw_context,
2741			      SVGA3dCmdHeader *header)
2742{
2743	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2744		container_of(header, typeof(*cmd), header);
2745
2746	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
2747					   cmd->body.shaderResourceViewId));
2748}
2749
2750/**
2751 * vmw_cmd_dx_transfer_from_buffer - Validate
2752 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2753 *
2754 * @dev_priv: Pointer to a device private struct.
2755 * @sw_context: The software context being used for this batch.
2756 * @header: Pointer to the command header in the command stream.
2757 */
2758static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2759					   struct vmw_sw_context *sw_context,
2760					   SVGA3dCmdHeader *header)
2761{
2762	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2763		container_of(header, typeof(*cmd), header);
2764	int ret;
2765
2766	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2767				VMW_RES_DIRTY_NONE, user_surface_converter,
2768				&cmd->body.srcSid, NULL);
2769	if (ret != 0)
2770		return ret;
2771
2772	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2773				 VMW_RES_DIRTY_SET, user_surface_converter,
2774				 &cmd->body.destSid, NULL);
2775}
2776
2777/**
2778 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2779 *
2780 * @dev_priv: Pointer to a device private struct.
2781 * @sw_context: The software context being used for this batch.
2782 * @header: Pointer to the command header in the command stream.
2783 */
2784static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2785					   struct vmw_sw_context *sw_context,
2786					   SVGA3dCmdHeader *header)
2787{
2788	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2789		container_of(header, typeof(*cmd), header);
2790
2791	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2792		return -EINVAL;
2793
2794	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2795				 VMW_RES_DIRTY_SET, user_surface_converter,
2796				 &cmd->body.surface.sid, NULL);
2797}
2798
2799static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
2800				struct vmw_sw_context *sw_context,
2801				void *buf, uint32_t *size)
2802{
2803	uint32_t size_remaining = *size;
2804	uint32_t cmd_id;
2805
2806	cmd_id = ((uint32_t *)buf)[0];
2807	switch (cmd_id) {
2808	case SVGA_CMD_UPDATE:
2809		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
2810		break;
2811	case SVGA_CMD_DEFINE_GMRFB:
2812		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
2813		break;
2814	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
2815		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
2816		break;
2817	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
2818		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
2819		break;
2820	default:
2821		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
2822		return -EINVAL;
2823	}
2824
2825	if (*size > size_remaining) {
2826		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
2827			       cmd_id);
2828		return -EINVAL;
2829	}
2830
2831	if (unlikely(!sw_context->kernel)) {
2832		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
2833		return -EPERM;
2834	}
2835
2836	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
2837		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
2838
2839	return 0;
2840}
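/*
 * Rough shape of the legacy 2D commands handled here, in contrast to the
 * SVGA3dCmdHeader-based 3D path:
 *
 *	uint32_t	cmd_id;		// SVGA_CMD_*
 *	<fixed-size body>		// e.g. SVGAFifoCmdUpdate
 *
 * The size is implied by the command id rather than carried in a header,
 * and all of these commands are restricted to kernel-generated command
 * streams (sw_context->kernel).
 */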
2841
2842static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
2843	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
2844		    false, false, false),
2845	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
2846		    false, false, false),
2847	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
2848		    true, false, false),
2849	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
2850		    true, false, false),
2851	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
2852		    true, false, false),
2853	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
2854		    false, false, false),
2855	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
2856		    false, false, false),
2857	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
2858		    true, false, false),
2859	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
2860		    true, false, false),
2861	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
2862		    true, false, false),
2863	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
2864		    &vmw_cmd_set_render_target_check, true, false, false),
2865	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
2866		    true, false, false),
2867	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
2868		    true, false, false),
2869	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
2870		    true, false, false),
2871	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
2872		    true, false, false),
2873	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
2874		    true, false, false),
2875	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
2876		    true, false, false),
2877	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
2878		    true, false, false),
2879	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
2880		    false, false, false),
2881	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
2882		    true, false, false),
2883	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
2884		    true, false, false),
2885	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
2886		    true, false, false),
2887	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
2888		    true, false, false),
2889	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
2890		    true, false, false),
2891	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
2892		    true, false, false),
2893	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
2894		    true, false, false),
2895	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
2896		    true, false, false),
2897	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
2898		    true, false, false),
2899	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
2900		    true, false, false),
2901	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
2902		    &vmw_cmd_blt_surf_screen_check, false, false, false),
2903	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
2904		    false, false, false),
2905	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
2906		    false, false, false),
2907	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
2908		    false, false, false),
2909	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
2910		    false, false, false),
2911	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
2912		    false, false, false),
2913	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
2914		    false, false, false),
2915	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
2916		    false, false, false),
2917	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
2918		    false, false, false),
2919	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
2920		    false, false, false),
2921	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
2922		    false, false, false),
2923	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
2924		    false, false, false),
2925	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
2926		    false, false, false),
2927	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
2928		    false, false, false),
2929	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
2930		    false, false, true),
2931	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
2932		    false, false, true),
2933	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
2934		    false, false, true),
2935	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
2936		    false, false, true),
2937	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
2938		    false, false, true),
2939	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
2940		    false, false, true),
2941	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
2942		    false, false, true),
2943	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
2944		    false, false, true),
2945	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
2946		    true, false, true),
2947	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
2948		    false, false, true),
2949	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
2950		    true, false, true),
2951	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
2952		    &vmw_cmd_update_gb_surface, true, false, true),
2953	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
2954		    &vmw_cmd_readback_gb_image, true, false, true),
2955	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
2956		    &vmw_cmd_readback_gb_surface, true, false, true),
2957	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
2958		    &vmw_cmd_invalidate_gb_image, true, false, true),
2959	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
2960		    &vmw_cmd_invalidate_gb_surface, true, false, true),
2961	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
2962		    false, false, true),
2963	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
2964		    false, false, true),
2965	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
2966		    false, false, true),
2967	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
2968		    false, false, true),
2969	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
2970		    false, false, true),
2971	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
2972		    false, false, true),
2973	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
2974		    true, false, true),
2975	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
2976		    false, false, true),
2977	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
2978		    false, false, false),
2979	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
2980		    true, false, true),
2981	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2982		    true, false, true),
2983	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
2984		    true, false, true),
2985	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
2986		    true, false, true),
2987	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
2988		    true, false, true),
2989	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
2990		    false, false, true),
2991	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
2992		    false, false, true),
2993	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
2994		    false, false, true),
2995	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
2996		    false, false, true),
2997	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
2998		    false, false, true),
2999	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3000		    false, false, true),
3001	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3002		    false, false, true),
3003	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3004		    false, false, true),
3005	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3006		    false, false, true),
3007	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3008		    false, false, true),
3009	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3010		    true, false, true),
3011	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3012		    false, false, true),
3013	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3014		    false, false, true),
3015	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3016		    false, false, true),
3017	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3018		    false, false, true),
3019
3020	/* SM commands */
3021	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3022		    false, false, true),
3023	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3024		    false, false, true),
3025	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3026		    false, false, true),
3027	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3028		    false, false, true),
3029	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3030		    false, false, true),
3031	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3032		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3033	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3034		    &vmw_cmd_dx_set_shader_res, true, false, true),
3035	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3036		    true, false, true),
3037	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3038		    true, false, true),
3039	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3040		    true, false, true),
3041	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3042		    true, false, true),
3043	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3044		    true, false, true),
3045	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3046		    &vmw_cmd_dx_cid_check, true, false, true),
3047	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3048		    true, false, true),
3049	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3050		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3051	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3052		    &vmw_cmd_dx_set_index_buffer, true, false, true),
3053	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3054		    &vmw_cmd_dx_set_rendertargets, true, false, true),
3055	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3056		    true, false, true),
3057	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3058		    &vmw_cmd_dx_cid_check, true, false, true),
3059	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3060		    &vmw_cmd_dx_cid_check, true, false, true),
3061	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3062		    true, false, true),
3063	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3064		    true, false, true),
3065	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3066		    true, false, true),
3067	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3068		    &vmw_cmd_dx_cid_check, true, false, true),
3069	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3070		    true, false, true),
3071	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3072		    true, false, true),
3073	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3074		    true, false, true),
3075	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3076		    true, false, true),
3077	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3078		    true, false, true),
3079	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3080		    true, false, true),
3081	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3082		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3083	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3084		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3085	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3086		    true, false, true),
3087	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3088		    true, false, true),
3089	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3090		    &vmw_cmd_dx_check_subresource, true, false, true),
3091	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3092		    &vmw_cmd_dx_check_subresource, true, false, true),
3093	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3094		    &vmw_cmd_dx_check_subresource, true, false, true),
3095	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3096		    &vmw_cmd_dx_view_define, true, false, true),
3097	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3098		    &vmw_cmd_dx_view_remove, true, false, true),
3099	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3100		    &vmw_cmd_dx_view_define, true, false, true),
3101	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3102		    &vmw_cmd_dx_view_remove, true, false, true),
3103	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3104		    &vmw_cmd_dx_view_define, true, false, true),
3105	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3106		    &vmw_cmd_dx_view_remove, true, false, true),
3107	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3108		    &vmw_cmd_dx_so_define, true, false, true),
3109	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3110		    &vmw_cmd_dx_cid_check, true, false, true),
3111	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3112		    &vmw_cmd_dx_so_define, true, false, true),
3113	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3114		    &vmw_cmd_dx_cid_check, true, false, true),
3115	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3116		    &vmw_cmd_dx_so_define, true, false, true),
3117	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3118		    &vmw_cmd_dx_cid_check, true, false, true),
3119	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3120		    &vmw_cmd_dx_so_define, true, false, true),
3121	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3122		    &vmw_cmd_dx_cid_check, true, false, true),
3123	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3124		    &vmw_cmd_dx_so_define, true, false, true),
3125	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3126		    &vmw_cmd_dx_cid_check, true, false, true),
3127	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3128		    &vmw_cmd_dx_define_shader, true, false, true),
3129	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3130		    &vmw_cmd_dx_destroy_shader, true, false, true),
3131	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3132		    &vmw_cmd_dx_bind_shader, true, false, true),
3133	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3134		    &vmw_cmd_dx_so_define, true, false, true),
3135	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3136		    &vmw_cmd_dx_cid_check, true, false, true),
3137	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3138		    true, false, true),
3139	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3140		    &vmw_cmd_dx_set_so_targets, true, false, true),
3141	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3142		    &vmw_cmd_dx_cid_check, true, false, true),
3143	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3144		    &vmw_cmd_dx_cid_check, true, false, true),
3145	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3146		    &vmw_cmd_buffer_copy_check, true, false, true),
3147	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3148		    &vmw_cmd_pred_copy_check, true, false, true),
3149	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3150		    &vmw_cmd_dx_transfer_from_buffer,
3151		    true, false, true),
3152	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3153		    true, false, true),
3154};
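/*
 * Reading the table above: judging by how vmw_cmd_check() consumes the
 * entries, the three booleans passed to VMW_CMD_DEF are, in order,
 * user_allow (may appear in user-space command streams), gb_disable
 * (rejected when guest-backed objects are available) and gb_enable
 * (requires guest-backed objects). Commands mapped to vmw_cmd_invalid are
 * never accepted through this path.
 */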
3155
3156bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3157{
3158	u32 cmd_id = ((u32 *) buf)[0];
3159
3160	if (cmd_id >= SVGA_CMD_MAX) {
3161		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3162		const struct vmw_cmd_entry *entry;
3163
3164		*size = header->size + sizeof(SVGA3dCmdHeader);
3165		cmd_id = header->id;
3166		if (cmd_id >= SVGA_3D_CMD_MAX)
3167			return false;
3168
3169		cmd_id -= SVGA_3D_CMD_BASE;
3170		entry = &vmw_cmd_entries[cmd_id];
3171		*cmd = entry->cmd_name;
3172		return true;
3173	}
3174
3175	switch (cmd_id) {
3176	case SVGA_CMD_UPDATE:
3177		*cmd = "SVGA_CMD_UPDATE";
3178		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3179		break;
3180	case SVGA_CMD_DEFINE_GMRFB:
3181		*cmd = "SVGA_CMD_DEFINE_GMRFB";
3182		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3183		break;
3184	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3185		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3186		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3187		break;
3188	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3189		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3190		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3191		break;
3192	default:
3193		*cmd = "UNKNOWN";
3194		*size = 0;
3195		return false;
3196	}
3197
3198	return true;
3199}
3200
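/*
 * Illustrative sketch (not part of the driver): vmw_cmd_describe() can be
 * used to walk a submitted command stream and log the name and size of each
 * command, e.g. from a debugging aid. The helper below is hypothetical and
 * only relies on the signature visible above.
 *
 *	static void vmw_dump_cmd_stream(const void *buf, u32 bytes)
 *	{
 *		const char *name;
 *		u32 size;
 *
 *		while (bytes >= sizeof(u32)) {
 *			if (!vmw_cmd_describe(buf, &size, &name) || !size ||
 *			    size > bytes)
 *				break;
 *			pr_debug("cmd %s, %u bytes\n", name, size);
 *			buf += size;
 *			bytes -= size;
 *		}
 *	}
 */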
3201static int vmw_cmd_check(struct vmw_private *dev_priv,
3202			 struct vmw_sw_context *sw_context, void *buf,
3203			 uint32_t *size)
3204{
3205	uint32_t cmd_id;
3206	uint32_t size_remaining = *size;
3207	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3208	int ret;
3209	const struct vmw_cmd_entry *entry;
3210	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3211
3212	cmd_id = ((uint32_t *)buf)[0];
3213	/* Handle any non-3D commands */
3214	if (unlikely(cmd_id < SVGA_CMD_MAX))
3215		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3216
3217
3218	cmd_id = header->id;
3219	*size = header->size + sizeof(SVGA3dCmdHeader);
3220
3221	cmd_id -= SVGA_3D_CMD_BASE;
3222	if (unlikely(*size > size_remaining))
3223		goto out_invalid;
3224
3225	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3226		goto out_invalid;
3227
3228	entry = &vmw_cmd_entries[cmd_id];
3229	if (unlikely(!entry->func))
3230		goto out_invalid;
3231
3232	if (unlikely(!entry->user_allow && !sw_context->kernel))
3233		goto out_privileged;
3234
3235	if (unlikely(entry->gb_disable && gb))
3236		goto out_old;
3237
3238	if (unlikely(entry->gb_enable && !gb))
3239		goto out_new;
3240
3241	ret = entry->func(dev_priv, sw_context, header);
3242	if (unlikely(ret != 0)) {
3243		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3244			       cmd_id + SVGA_3D_CMD_BASE, ret);
3245		return ret;
3246	}
3247
3248	return 0;
3249out_invalid:
3250	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3251		       cmd_id + SVGA_3D_CMD_BASE);
3252	return -EINVAL;
3253out_privileged:
3254	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3255		       cmd_id + SVGA_3D_CMD_BASE);
3256	return -EPERM;
3257out_old:
3258	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3259		       cmd_id + SVGA_3D_CMD_BASE);
3260	return -EINVAL;
3261out_new:
3262	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3263		       cmd_id + SVGA_3D_CMD_BASE);
3264	return -EINVAL;
3265}
3266
3267static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3268			     struct vmw_sw_context *sw_context, void *buf,
3269			     uint32_t size)
3270{
3271	int32_t cur_size = size;
3272	int ret;
3273
3274	sw_context->buf_start = buf;
3275
3276	while (cur_size > 0) {
3277		size = cur_size;
3278		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3279		if (unlikely(ret != 0))
3280			return ret;
3281		buf = (void *)((unsigned long) buf + size);
3282		cur_size -= size;
3283	}
3284
3285	if (unlikely(cur_size != 0)) {
3286		VMW_DEBUG_USER("Command verifier out of sync.\n");
3287		return -EINVAL;
3288	}
3289
3290	return 0;
3291}
3292
3293static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3294{
3295	/* Memory is validation context memory, so no need to free it */
3296	INIT_LIST_HEAD(&sw_context->bo_relocations);
3297}
3298
3299static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3300{
3301	struct vmw_relocation *reloc;
3302	struct ttm_buffer_object *bo;
3303
3304	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3305		bo = &reloc->vbo->base;
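		/*
		 * Patch the relocation based on where the buffer ended up:
		 * VRAM placements become an offset into the framebuffer GMR,
		 * GMR placements use the GMR id directly, and MOB placements
		 * store the MOB id.
		 */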
3306		switch (bo->mem.mem_type) {
3307		case TTM_PL_VRAM:
3308			reloc->location->offset += bo->offset;
3309			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3310			break;
3311		case VMW_PL_GMR:
3312			reloc->location->gmrId = bo->mem.start;
3313			break;
3314		case VMW_PL_MOB:
3315			*reloc->mob_loc = bo->mem.start;
3316			break;
3317		default:
3318			BUG();
3319		}
3320	}
3321	vmw_free_relocations(sw_context);
3322}
3323
3324static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3325				 uint32_t size)
3326{
3327	if (likely(sw_context->cmd_bounce_size >= size))
3328		return 0;
3329
3330	if (sw_context->cmd_bounce_size == 0)
3331		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3332
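	/* Grow by roughly 1.5x per iteration, page-aligned, until it fits. */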
3333	while (sw_context->cmd_bounce_size < size) {
3334		sw_context->cmd_bounce_size =
3335			PAGE_ALIGN(sw_context->cmd_bounce_size +
3336				   (sw_context->cmd_bounce_size >> 1));
3337	}
3338
3339	vfree(sw_context->cmd_bounce);
3340	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3341
3342	if (sw_context->cmd_bounce == NULL) {
3343		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3344		sw_context->cmd_bounce_size = 0;
3345		return -ENOMEM;
3346	}
3347
3348	return 0;
3349}
3350
3351/**
3352 * vmw_execbuf_fence_commands - create and submit a command stream fence
3353 *
3354 * Creates a fence object and submits a command stream marker.
3355 * If this fails for some reason, we sync the fifo and return NULL.
3356 * It is then safe to fence buffers with a NULL pointer.
3357 *
3358 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3359 * user-space handle is created; otherwise no user-space handle is created.
3360 */
3361
3362int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3363			       struct vmw_private *dev_priv,
3364			       struct vmw_fence_obj **p_fence,
3365			       uint32_t *p_handle)
3366{
3367	uint32_t sequence;
3368	int ret;
3369	bool synced = false;
3370
3371	/* p_handle implies file_priv. */
3372	BUG_ON(p_handle != NULL && file_priv == NULL);
3373
3374	ret = vmw_fifo_send_fence(dev_priv, &sequence);
3375	if (unlikely(ret != 0)) {
3376		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3377		synced = true;
3378	}
3379
3380	if (p_handle != NULL)
3381		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3382					    sequence, p_fence, p_handle);
3383	else
3384		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3385
3386	if (unlikely(ret != 0 && !synced)) {
3387		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
3388					 false, VMW_FENCE_WAIT_TIMEOUT);
3389		*p_fence = NULL;
3390	}
3391
3392	return ret;
3393}
3394
3395/**
3396 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3397 *
3398 * @dev_priv: Pointer to a vmw_private struct.
3399 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3400 * @ret: Return value from fence object creation.
3401 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3402 * the information should be copied.
3403 * @fence: Pointer to the fence object.
3404 * @fence_handle: User-space fence handle.
3405 * @out_fence_fd: exported file descriptor for the fence.  -1 if not used
3406 * @sync_file:  Only used to clean up in case of an error in this function.
3407 *
3408 * This function copies fence information to user-space. If copying fails, the
3409 * user-space struct drm_vmw_fence_rep::error member is, with luck, left
3410 * untouched; if user-space has pre-set that member to -EFAULT, the failure
3411 * can then be detected there.
3412 *
3413 * Also, if copying fails, user-space will be unable to signal the fence object,
3414 * so we wait for it immediately and then unreference the user-space reference.
3415 */
3416void
3417vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3418			    struct vmw_fpriv *vmw_fp, int ret,
3419			    struct drm_vmw_fence_rep __user *user_fence_rep,
3420			    struct vmw_fence_obj *fence, uint32_t fence_handle,
3421			    int32_t out_fence_fd, struct sync_file *sync_file)
3422{
3423	struct drm_vmw_fence_rep fence_rep;
3424
3425	if (user_fence_rep == NULL)
3426		return;
3427
3428	memset(&fence_rep, 0, sizeof(fence_rep));
3429
3430	fence_rep.error = ret;
3431	fence_rep.fd = out_fence_fd;
3432	if (ret == 0) {
3433		BUG_ON(fence == NULL);
3434
3435		fence_rep.handle = fence_handle;
3436		fence_rep.seqno = fence->base.seqno;
3437		vmw_update_seqno(dev_priv, &dev_priv->fifo);
3438		fence_rep.passed_seqno = dev_priv->last_read_seqno;
3439	}
3440
3441	/*
3442	 * copy_to_user errors will be detected by user space not seeing
3443	 * fence_rep::error filled in. Typically user-space would have pre-set
3444	 * that member to -EFAULT.
3445	 */
3446	ret = copy_to_user(user_fence_rep, &fence_rep,
3447			   sizeof(fence_rep));
3448
3449	/*
3450	 * User-space lost the fence object. We need to sync and unreference the
3451	 * handle.
3452	 */
3453	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3454		if (sync_file)
3455			fput(sync_file->file);
3456
3457		if (fence_rep.fd != -1) {
3458			put_unused_fd(fence_rep.fd);
3459			fence_rep.fd = -1;
3460		}
3461
3462		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
3463					  TTM_REF_USAGE);
3464		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3465		(void) vmw_fence_obj_wait(fence, false, false,
3466					  VMW_FENCE_WAIT_TIMEOUT);
3467	}
3468}
3469
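/*
 * Illustrative user-space sketch (not part of the driver): the convention
 * described above means a caller would pre-set the error member before the
 * execbuf ioctl and inspect it afterwards. Field names follow the
 * drm_vmw_fence_rep usage in this file; everything else is hypothetical.
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *
 *	arg.fence_rep = (unsigned long) &rep;
 *	if (vmw_submit(fd, &arg) == 0 && rep.error == 0) {
 *		// The kernel filled in rep; the fence handle is usable.
 *	} else {
 *		// Either the ioctl failed or the copy back to user-space
 *		// failed; the kernel has already synced in the latter case.
 *	}
 */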
3470/**
3471 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3472 *
3473 * @dev_priv: Pointer to a device private structure.
3474 * @kernel_commands: Pointer to the unpatched command batch.
3475 * @command_size: Size of the unpatched command batch.
3476 * @sw_context: Structure holding the relocation lists.
3477 *
3478 * Side effects: If this function returns 0, then the command batch pointed to
3479 * by @kernel_commands will have been modified.
3480 */
3481static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3482				   void *kernel_commands, u32 command_size,
3483				   struct vmw_sw_context *sw_context)
3484{
3485	void *cmd;
3486
3487	if (sw_context->dx_ctx_node)
3488		cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
3489					  sw_context->dx_ctx_node->ctx->id);
3490	else
3491		cmd = VMW_FIFO_RESERVE(dev_priv, command_size);
3492
3493	if (!cmd)
3494		return -ENOMEM;
3495
3496	vmw_apply_relocations(sw_context);
3497	memcpy(cmd, kernel_commands, command_size);
3498	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3499	vmw_resource_relocations_free(&sw_context->res_relocations);
3500	vmw_fifo_commit(dev_priv, command_size);
3501
3502	return 0;
3503}
3504
3505/**
3506 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3507 * command buffer manager.
3508 *
3509 * @dev_priv: Pointer to a device private structure.
3510 * @header: Opaque handle to the command buffer allocation.
3511 * @command_size: Size of the unpatched command batch.
3512 * @sw_context: Structure holding the relocation lists.
3513 *
3514 * Side effects: If this function returns 0, then the command buffer represented
3515 * by @header will have been modified.
3516 */
3517static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3518				     struct vmw_cmdbuf_header *header,
3519				     u32 command_size,
3520				     struct vmw_sw_context *sw_context)
3521{
3522	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3523		  SVGA3D_INVALID_ID);
3524	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3525				       header);
3526
3527	vmw_apply_relocations(sw_context);
3528	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3529	vmw_resource_relocations_free(&sw_context->res_relocations);
3530	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3531
3532	return 0;
3533}
3534
3535/**
3536 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3537 * submission using a command buffer.
3538 *
3539 * @dev_priv: Pointer to a device private structure.
3540 * @user_commands: User-space pointer to the commands to be submitted.
3541 * @command_size: Size of the unpatched command batch.
3542 * @header: Out parameter returning the opaque pointer to the command buffer.
3543 *
3544 * This function checks whether we can use the command buffer manager for
3545 * submission and if so, creates a command buffer of suitable size and copies
3546 * the user data into that buffer.
3547 *
3548 * On successful return, the function returns a pointer to the data in the
3549 * command buffer and *@header is set to non-NULL.
3550 *
3551 * If command buffers could not be used, the function will return the value of
3552 * @kernel_commands on function call. That value may be NULL. In that case, the
3553 * value of *@header will be set to NULL.
3554 *
3555 * If an error is encountered, the function will return a pointer error value.
3556 * If the function is interrupted by a signal while sleeping, it will return
3557 * -ERESTARTSYS cast to a pointer error value.
3558 */
3559static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3560				void __user *user_commands,
3561				void *kernel_commands, u32 command_size,
3562				struct vmw_cmdbuf_header **header)
3563{
3564	size_t cmdbuf_size;
3565	int ret;
3566
3567	*header = NULL;
3568	if (command_size > SVGA_CB_MAX_SIZE) {
3569		VMW_DEBUG_USER("Command buffer is too large.\n");
3570		return ERR_PTR(-EINVAL);
3571	}
3572
3573	if (!dev_priv->cman || kernel_commands)
3574		return kernel_commands;
3575
3576	/* If possible, add a little space for fencing. */
3577	cmdbuf_size = command_size + 512;
3578	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3579	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
3580					   header);
3581	if (IS_ERR(kernel_commands))
3582		return kernel_commands;
3583
3584	ret = copy_from_user(kernel_commands, user_commands, command_size);
3585	if (ret) {
3586		VMW_DEBUG_USER("Failed copying commands.\n");
3587		vmw_cmdbuf_header_free(*header);
3588		*header = NULL;
3589		return ERR_PTR(-EFAULT);
3590	}
3591
3592	return kernel_commands;
3593}
3594
3595static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3596				   struct vmw_sw_context *sw_context,
3597				   uint32_t handle)
3598{
3599	struct vmw_resource *res;
3600	int ret;
3601	unsigned int size;
3602
3603	if (handle == SVGA3D_INVALID_ID)
3604		return 0;
3605
3606	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
3607	ret = vmw_validation_preload_res(sw_context->ctx, size);
3608	if (ret)
3609		return ret;
3610
3611	res = vmw_user_resource_noref_lookup_handle
3612		(dev_priv, sw_context->fp->tfile, handle,
3613		 user_context_converter);
3614	if (IS_ERR(res)) {
3615		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
3616			       (unsigned int) handle);
3617		return PTR_ERR(res);
3618	}
3619
3620	ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
3621	if (unlikely(ret != 0))
3622		return ret;
3623
3624	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
3625	sw_context->man = vmw_context_res_man(res);
3626
3627	return 0;
3628}
3629
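/*
 * vmw_execbuf_process - Validate and submit a command batch.
 *
 * In outline: the batch is copied into kernel memory (a command buffer or the
 * bounce buffer), verified command by command, its resources and buffer
 * objects are reserved and validated, relocations are applied, the patched
 * batch is submitted over the fifo or the command buffer manager, and finally
 * a fence is created and reported back to user-space.
 */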
3630int vmw_execbuf_process(struct drm_file *file_priv,
3631			struct vmw_private *dev_priv,
3632			void __user *user_commands, void *kernel_commands,
3633			uint32_t command_size, uint64_t throttle_us,
3634			uint32_t dx_context_handle,
3635			struct drm_vmw_fence_rep __user *user_fence_rep,
3636			struct vmw_fence_obj **out_fence, uint32_t flags)
3637{
3638	struct vmw_sw_context *sw_context = &dev_priv->ctx;
3639	struct vmw_fence_obj *fence = NULL;
3640	struct vmw_cmdbuf_header *header;
3641	uint32_t handle = 0;
3642	int ret;
3643	int32_t out_fence_fd = -1;
3644	struct sync_file *sync_file = NULL;
3645	DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
3646
3647	vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
3648
3649	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3650		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
3651		if (out_fence_fd < 0) {
3652			VMW_DEBUG_USER("Failed to get a fence fd.\n");
3653			return out_fence_fd;
3654		}
3655	}
3656
3657	if (throttle_us) {
3658		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
3659				   throttle_us);
3660
3661		if (ret)
3662			goto out_free_fence_fd;
3663	}
3664
3665	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
3666					     kernel_commands, command_size,
3667					     &header);
3668	if (IS_ERR(kernel_commands)) {
3669		ret = PTR_ERR(kernel_commands);
3670		goto out_free_fence_fd;
3671	}
3672
3673	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
3674	if (ret) {
3675		ret = -ERESTARTSYS;
3676		goto out_free_header;
3677	}
3678
3679	sw_context->kernel = false;
3680	if (kernel_commands == NULL) {
3681		ret = vmw_resize_cmd_bounce(sw_context, command_size);
3682		if (unlikely(ret != 0))
3683			goto out_unlock;
3684
3685		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
3686				     command_size);
3687		if (unlikely(ret != 0)) {
3688			ret = -EFAULT;
3689			VMW_DEBUG_USER("Failed copying commands.\n");
3690			goto out_unlock;
3691		}
3692
3693		kernel_commands = sw_context->cmd_bounce;
3694	} else if (!header) {
3695		sw_context->kernel = true;
3696	}
3697
3698	sw_context->fp = vmw_fpriv(file_priv);
3699	INIT_LIST_HEAD(&sw_context->ctx_list);
3700	sw_context->cur_query_bo = dev_priv->pinned_bo;
3701	sw_context->last_query_ctx = NULL;
3702	sw_context->needs_post_query_barrier = false;
3703	sw_context->dx_ctx_node = NULL;
3704	sw_context->dx_query_mob = NULL;
3705	sw_context->dx_query_ctx = NULL;
3706	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
 
3707	INIT_LIST_HEAD(&sw_context->res_relocations);
3708	INIT_LIST_HEAD(&sw_context->bo_relocations);
3709
3710	if (sw_context->staged_bindings)
3711		vmw_binding_state_reset(sw_context->staged_bindings);
3712
3713	if (!sw_context->res_ht_initialized) {
3714		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
3715		if (unlikely(ret != 0))
3716			goto out_unlock;
3717
3718		sw_context->res_ht_initialized = true;
3719	}
 
3720
3721	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
3722	sw_context->ctx = &val_ctx;
3723	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
3724	if (unlikely(ret != 0))
3725		goto out_err_nores;
3726
3727	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
3728				command_size);
3729	if (unlikely(ret != 0))
3730		goto out_err_nores;
3731
3732	ret = vmw_resources_reserve(sw_context);
3733	if (unlikely(ret != 0))
3734		goto out_err_nores;
3735
3736	ret = vmw_validation_bo_reserve(&val_ctx, true);
3737	if (unlikely(ret != 0))
3738		goto out_err_nores;
3739
3740	ret = vmw_validation_bo_validate(&val_ctx, true);
3741	if (unlikely(ret != 0))
3742		goto out_err;
3743
3744	ret = vmw_validation_res_validate(&val_ctx, true);
3745	if (unlikely(ret != 0))
3746		goto out_err;
3747
3748	vmw_validation_drop_ht(&val_ctx);
3749
3750	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
3751	if (unlikely(ret != 0)) {
3752		ret = -ERESTARTSYS;
3753		goto out_err;
3754	}
3755
3756	if (dev_priv->has_mob) {
3757		ret = vmw_rebind_contexts(sw_context);
3758		if (unlikely(ret != 0))
3759			goto out_unlock_binding;
3760	}
3761
3762	if (!header) {
3763		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
3764					      command_size, sw_context);
3765	} else {
3766		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
3767						sw_context);
3768		header = NULL;
3769	}
3770	mutex_unlock(&dev_priv->binding_mutex);
3771	if (ret)
3772		goto out_err;
3773
3774	vmw_query_bo_switch_commit(dev_priv, sw_context);
3775	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
3776					 (user_fence_rep) ? &handle : NULL);
3777	/*
3778	 * This error is harmless, because if fence submission fails,
3779	 * vmw_fifo_send_fence will sync. The error will be propagated to
3780	 * user-space in @user_fence_rep.
3781	 */
 
3782	if (ret != 0)
3783		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3784
3785	vmw_execbuf_bindings_commit(sw_context, false);
3786	vmw_bind_dx_query_mob(sw_context);
3787	vmw_validation_res_unreserve(&val_ctx, false);
3788
3789	vmw_validation_bo_fence(sw_context->ctx, fence);
 
3790
3791	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
3792		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
3793
3794	/*
3795	 * If anything fails here, give up trying to export the fence and do a
3796	 * sync since the user mode will not be able to sync the fence itself.
3797	 * This ensures we are still functionally correct.
3798	 */
3799	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3800
3801		sync_file = sync_file_create(&fence->base);
3802		if (!sync_file) {
3803			VMW_DEBUG_USER("Sync file create failed for fence\n");
3804			put_unused_fd(out_fence_fd);
3805			out_fence_fd = -1;
3806
3807			(void) vmw_fence_obj_wait(fence, false, false,
3808						  VMW_FENCE_WAIT_TIMEOUT);
3809		} else {
3810			/* Link the fence with the FD created earlier */
3811			fd_install(out_fence_fd, sync_file->file);
3812		}
3813	}
3814
3815	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
3816				    user_fence_rep, fence, handle, out_fence_fd,
3817				    sync_file);
3818
3819	/* Don't unreference when handing fence out */
3820	if (unlikely(out_fence != NULL)) {
3821		*out_fence = fence;
3822		fence = NULL;
3823	} else if (likely(fence != NULL)) {
3824		vmw_fence_obj_unreference(&fence);
3825	}
3826
3827	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
3828	mutex_unlock(&dev_priv->cmdbuf_mutex);
3829
3830	/*
3831	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
3832	 * in resource destruction paths.
3833	 */
3834	vmw_validation_unref_lists(&val_ctx);
3835
3836	return 0;
3837
3838out_unlock_binding:
3839	mutex_unlock(&dev_priv->binding_mutex);
3840out_err:
3841	vmw_validation_bo_backoff(&val_ctx);
3842out_err_nores:
3843	vmw_execbuf_bindings_commit(sw_context, true);
3844	vmw_validation_res_unreserve(&val_ctx, true);
3845	vmw_resource_relocations_free(&sw_context->res_relocations);
3846	vmw_free_relocations(sw_context);
3847	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
3848		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
3849out_unlock:
3850	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
3851	vmw_validation_drop_ht(&val_ctx);
3852	WARN_ON(!list_empty(&sw_context->ctx_list));
3853	mutex_unlock(&dev_priv->cmdbuf_mutex);
3854
3855	/*
3856	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
3857	 * in resource destruction paths.
3858	 */
3859	vmw_validation_unref_lists(&val_ctx);
3860out_free_header:
3861	if (header)
3862		vmw_cmdbuf_header_free(header);
3863out_free_fence_fd:
3864	if (out_fence_fd >= 0)
3865		put_unused_fd(out_fence_fd);
3866
3867	return ret;
3868}
3869
3870/**
3871 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
3872 *
3873 * @dev_priv: The device private structure.
3874 *
3875 * This function is called to idle the fifo and unpin the query buffer if the
3876 * normal way to do this hits an error, which should typically be extremely
3877 * rare.
3878 */
3879static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
3880{
3881	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
3882
3883	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
3884	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
3885	if (dev_priv->dummy_query_bo_pinned) {
3886		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
3887		dev_priv->dummy_query_bo_pinned = false;
3888	}
3889}
3890
3891
3892/**
3893 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
3894 * bo.
3895 *
3896 * @dev_priv: The device private structure.
3897 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
3898 * query barrier that flushes all queries touching the current buffer pointed to
3899 * by @dev_priv->pinned_bo
3900 *
3901 * This function should be used to unpin the pinned query bo, or as a query
3902 * barrier when we need to make sure that all queries have finished before the
3903 * next fifo command. (For example on hardware context destructions where the
3904 * hardware may otherwise leak unfinished queries).
3905 *
3906 * This function does not return any failure codes, but makes an attempt to do
3907 * safe unpinning in case of errors.
3908 *
3909 * The function will synchronize on the previous query barrier, and will thus
3910 * not finish until that barrier has executed.
3911 *
3912 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
3913 * calling this function.
3914 */
3915void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
3916				     struct vmw_fence_obj *fence)
3917{
3918	int ret = 0;
3919	struct vmw_fence_obj *lfence = NULL;
3920	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
3921
3922	if (dev_priv->pinned_bo == NULL)
3923		goto out_unlock;
3924
3925	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
3926				    false);
3927	if (ret)
3928		goto out_no_reserve;
3929
3930	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
3931				    false);
3932	if (ret)
3933		goto out_no_reserve;
3934
3935	ret = vmw_validation_bo_reserve(&val_ctx, false);
3936	if (ret)
3937		goto out_no_reserve;
 
3938
3939	if (dev_priv->query_cid_valid) {
3940		BUG_ON(fence != NULL);
3941		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
3942		if (ret)
3943			goto out_no_emit;
3944		dev_priv->query_cid_valid = false;
3945	}
3946
3947	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
3948	if (dev_priv->dummy_query_bo_pinned) {
3949		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
3950		dev_priv->dummy_query_bo_pinned = false;
3951	}
3952	if (fence == NULL) {
3953		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
3954						  NULL);
3955		fence = lfence;
3956	}
3957	vmw_validation_bo_fence(&val_ctx, fence);
3958	if (lfence != NULL)
3959		vmw_fence_obj_unreference(&lfence);
3960
3961	vmw_validation_unref_lists(&val_ctx);
3962	vmw_bo_unreference(&dev_priv->pinned_bo);
 
3963
3964out_unlock:
3965	return;
 
3966out_no_emit:
3967	vmw_validation_bo_backoff(&val_ctx);
3968out_no_reserve:
3969	vmw_validation_unref_lists(&val_ctx);
3970	vmw_execbuf_unpin_panic(dev_priv);
3971	vmw_bo_unreference(&dev_priv->pinned_bo);
3972}
3973
3974/**
3975 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
3976 *
3977 * @dev_priv: The device private structure.
3978 *
3979 * This function should be used to unpin the pinned query bo, or as a query
3980 * barrier when we need to make sure that all queries have finished before the
3981 * next fifo command. (For example on hardware context destructions where the
3982 * hardware may otherwise leak unfinished queries).
3983 *
3984 * This function does not return any failure codes, but makes an attempt to do
3985 * safe unpinning in case of errors.
3986 *
3987 * The function will synchronize on the previous query barrier, and will thus
3988 * not finish until that barrier has executed.
3989 */
3990void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
3991{
3992	mutex_lock(&dev_priv->cmdbuf_mutex);
3993	if (dev_priv->query_cid_valid)
3994		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
3995	mutex_unlock(&dev_priv->cmdbuf_mutex);
3996}
3997
 
3998int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
3999		      struct drm_file *file_priv)
4000{
4001	struct vmw_private *dev_priv = vmw_priv(dev);
4002	struct drm_vmw_execbuf_arg *arg = data;
4003	int ret;
4004	struct dma_fence *in_fence = NULL;
4005
4006	/*
4007	 * Extend the ioctl argument while maintaining backwards compatibility:
4008	 * We take different code paths depending on the value of arg->version.
4009	 *
4010	 * Note: The ioctl argument is extended and zero-padded by core DRM.
4011	 */
4012	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
4013		     arg->version == 0)) {
4014		VMW_DEBUG_USER("Incorrect execbuf version.\n");
4015		return -EINVAL;
4016	}
4017
4018	switch (arg->version) {
4019	case 1:
4020		/* For v1, core DRM has extended + zero-padded the data */
4021		arg->context_handle = (uint32_t) -1;
4022		break;
4023	case 2:
4024	default:
4025		/* For v2 and later core DRM would have correctly copied it */
4026		break;
4027	}
4028
4029	/* If imported a fence FD from elsewhere, then wait on it */
4030	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4031		in_fence = sync_file_get_fence(arg->imported_fence_fd);
4032
4033		if (!in_fence) {
4034			VMW_DEBUG_USER("Cannot get imported fence\n");
4035			return -EINVAL;
4036		}
4037
4038		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
4039		if (ret)
4040			goto out;
4041	}
4042
4043	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4044	if (unlikely(ret != 0))
4045		return ret;
4046
4047	ret = vmw_execbuf_process(file_priv, dev_priv,
4048				  (void __user *)(unsigned long)arg->commands,
4049				  NULL, arg->command_size, arg->throttle_us,
4050				  arg->context_handle,
4051				  (void __user *)(unsigned long)arg->fence_rep,
4052				  NULL, arg->flags);
4053
4054	ttm_read_unlock(&dev_priv->reservation_sem);
4055	if (unlikely(ret != 0))
4056		goto out;
4057
4058	vmw_kms_cursor_post_execbuf(dev_priv);
4059
4060out:
4061	if (in_fence)
4062		dma_fence_put(in_fence);
4063	return ret;
4064}
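/*
 * Illustrative user-space sketch (not part of the driver): a minimal execbuf
 * submission that exports the resulting fence as a sync_file fd. The
 * drmCommandWrite() wrapper and the DRM_VMW_EXECBUF command number are
 * assumed to come from libdrm / vmwgfx_drm.h; error handling is omitted.
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (unsigned long) cmds,
 *		.command_size = cmd_bytes,
 *		.throttle_us = 0,
 *		.fence_rep = (unsigned long) &rep,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *		.flags = DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD,
 *		.context_handle = SVGA3D_INVALID_ID,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (rep.error == 0 && rep.fd >= 0)
 *		; // rep.fd is a sync_file fd usable with other drivers.
 */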
v3.15
 
   1/**************************************************************************
   2 *
   3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
 
  27
  28#include "vmwgfx_drv.h"
  29#include "vmwgfx_reg.h"
  30#include <drm/ttm/ttm_bo_api.h>
  31#include <drm/ttm/ttm_placement.h>
 
 
  32
  33#define VMW_RES_HT_ORDER 12
  34
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  35/**
  36 * struct vmw_resource_relocation - Relocation info for resources
  37 *
  38 * @head: List head for the software context's relocation list.
  39 * @res: Non-ref-counted pointer to the resource.
  40 * @offset: Offset of 4 byte entries into the command buffer where the
  41 * id that needs fixup is located.
 
  42 */
  43struct vmw_resource_relocation {
  44	struct list_head head;
  45	const struct vmw_resource *res;
  46	unsigned long offset;
 
  47};
  48
  49/**
  50 * struct vmw_resource_val_node - Validation info for resources
  51 *
  52 * @head: List head for the software context's resource list.
  53 * @hash: Hash entry for quick resouce to val_node lookup.
  54 * @res: Ref-counted pointer to the resource.
  55 * @switch_backup: Boolean whether to switch backup buffer on unreserve.
  56 * @new_backup: Refcounted pointer to the new backup buffer.
  57 * @staged_bindings: If @res is a context, tracks bindings set up during
  58 * the command batch. Otherwise NULL.
  59 * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll.
  60 * @first_usage: Set to true the first time the resource is referenced in
  61 * the command stream.
  62 * @no_buffer_needed: Resources do not need to allocate buffer backup on
  63 * reservation. The command stream will provide one.
  64 */
  65struct vmw_resource_val_node {
  66	struct list_head head;
  67	struct drm_hash_item hash;
  68	struct vmw_resource *res;
  69	struct vmw_dma_buffer *new_backup;
  70	struct vmw_ctx_binding_state *staged_bindings;
  71	unsigned long new_backup_offset;
  72	bool first_usage;
  73	bool no_buffer_needed;
  74};
  75
  76/**
  77 * struct vmw_cmd_entry - Describe a command for the verifier
  78 *
  79 * @user_allow: Whether allowed from the execbuf ioctl.
  80 * @gb_disable: Whether disabled if guest-backed objects are available.
  81 * @gb_enable: Whether enabled iff guest-backed objects are available.
  82 */
  83struct vmw_cmd_entry {
  84	int (*func) (struct vmw_private *, struct vmw_sw_context *,
  85		     SVGA3dCmdHeader *);
  86	bool user_allow;
  87	bool gb_disable;
  88	bool gb_enable;
 
  89};
  90
  91#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
  92	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
  93				       (_gb_disable), (_gb_enable)}
  94
 
 
 
 
 
 
 
  95/**
  96 * vmw_resource_unreserve - unreserve resources previously reserved for
  97 * command submission.
  98 *
  99 * @list_head: list of resources to unreserve.
 100 * @backoff: Whether command submission failed.
 
 
 101 */
 102static void vmw_resource_list_unreserve(struct list_head *list,
 
 
 
 
 
 
 
 
 
 
 
 
 103					bool backoff)
 104{
 105	struct vmw_resource_val_node *val;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 106
 107	list_for_each_entry(val, list, head) {
 108		struct vmw_resource *res = val->res;
 109		struct vmw_dma_buffer *new_backup =
 110			backoff ? NULL : val->new_backup;
 
 
 
 
 
 
 
 111
 112		/*
 113		 * Transfer staged context bindings to the
 114		 * persistent context binding tracker.
 115		 */
 116		if (unlikely(val->staged_bindings)) {
 117			if (!backoff) {
 118				vmw_context_binding_state_transfer
 119					(val->res, val->staged_bindings);
 120			}
 121			kfree(val->staged_bindings);
 122			val->staged_bindings = NULL;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 123		}
 124		vmw_resource_unreserve(res, new_backup,
 125			val->new_backup_offset);
 126		vmw_dmabuf_unreference(&val->new_backup);
 127	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 128}
 129
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 130
 131/**
 132 * vmw_resource_val_add - Add a resource to the software context's
 133 * resource list if it's not already on it.
 134 *
 135 * @sw_context: Pointer to the software context.
 136 * @res: Pointer to the resource.
 137 * @p_node On successful return points to a valid pointer to a
 138 * struct vmw_resource_val_node, if non-NULL on entry.
 
 
 139 */
 140static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
 141				struct vmw_resource *res,
 142				struct vmw_resource_val_node **p_node)
 143{
 144	struct vmw_resource_val_node *node;
 145	struct drm_hash_item *hash;
 146	int ret;
 147
 148	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
 149				    &hash) == 0)) {
 150		node = container_of(hash, struct vmw_resource_val_node, hash);
 151		node->first_usage = false;
 152		if (unlikely(p_node != NULL))
 153			*p_node = node;
 
 
 
 
 154		return 0;
 155	}
 156
 157	node = kzalloc(sizeof(*node), GFP_KERNEL);
 158	if (unlikely(node == NULL)) {
 159		DRM_ERROR("Failed to allocate a resource validation "
 160			  "entry.\n");
 161		return -ENOMEM;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 162	}
 163
 164	node->hash.key = (unsigned long) res;
 165	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
 166	if (unlikely(ret != 0)) {
 167		DRM_ERROR("Failed to initialize a resource validation "
 168			  "entry.\n");
 169		kfree(node);
 170		return ret;
 171	}
 172	list_add_tail(&node->head, &sw_context->resource_list);
 173	node->res = vmw_resource_reference(res);
 174	node->first_usage = true;
 175
 176	if (unlikely(p_node != NULL))
 177		*p_node = node;
 178
 179	return 0;
 180}
 181
 182/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 183 * vmw_resource_context_res_add - Put resources previously bound to a context on
 184 * the validation list
 185 *
 186 * @dev_priv: Pointer to a device private structure
 187 * @sw_context: Pointer to a software context used for this command submission
 188 * @ctx: Pointer to the context resource
 189 *
 190 * This function puts all resources that were previously bound to @ctx on
 191 * the resource validation list. This is part of the context state reemission
 192 */
 193static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 194					struct vmw_sw_context *sw_context,
 195					struct vmw_resource *ctx)
 196{
 197	struct list_head *binding_list;
 198	struct vmw_ctx_binding *entry;
 199	int ret = 0;
 200	struct vmw_resource *res;
 
 201
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 202	mutex_lock(&dev_priv->binding_mutex);
 203	binding_list = vmw_context_binding_list(ctx);
 204
 205	list_for_each_entry(entry, binding_list, ctx_list) {
 206		res = vmw_resource_reference_unless_doomed(entry->bi.res);
 207		if (unlikely(res == NULL))
 208			continue;
 209
 210		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
 211		vmw_resource_unreference(&res);
 212		if (unlikely(ret != 0))
 213			break;
 214	}
 215
 
 
 
 
 
 
 
 
 
 216	mutex_unlock(&dev_priv->binding_mutex);
 217	return ret;
 218}
 219
 220/**
 221 * vmw_resource_relocation_add - Add a relocation to the relocation list
 222 *
 223 * @list: Pointer to head of relocation list.
 224 * @res: The resource.
 225 * @offset: Offset into the command buffer currently being parsed where the
 226 * id that needs fixup is located. Granularity is 4 bytes.
 
 227 */
 228static int vmw_resource_relocation_add(struct list_head *list,
 229				       const struct vmw_resource *res,
 230				       unsigned long offset)
 
 
 231{
 232	struct vmw_resource_relocation *rel;
 233
 234	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
 235	if (unlikely(rel == NULL)) {
 236		DRM_ERROR("Failed to allocate a resource relocation.\n");
 237		return -ENOMEM;
 238	}
 239
 240	rel->res = res;
 241	rel->offset = offset;
 242	list_add_tail(&rel->head, list);
 
 243
 244	return 0;
 245}
 246
 247/**
 248 * vmw_resource_relocations_free - Free all relocations on a list
 249 *
 250 * @list: Pointer to the head of the relocation list.
 251 */
 252static void vmw_resource_relocations_free(struct list_head *list)
 253{
 254	struct vmw_resource_relocation *rel, *n;
 255
 256	list_for_each_entry_safe(rel, n, list, head) {
 257		list_del(&rel->head);
 258		kfree(rel);
 259	}
 260}
 261
 262/**
 263 * vmw_resource_relocations_apply - Apply all relocations on a list
 264 *
 265 * @cb: Pointer to the start of the command buffer bein patch. This need
 266 * not be the same buffer as the one being parsed when the relocation
 267 * list was built, but the contents must be the same modulo the
 268 * resource ids.
 269 * @list: Pointer to the head of the relocation list.
 270 */
 271static void vmw_resource_relocations_apply(uint32_t *cb,
 272					   struct list_head *list)
 273{
 274	struct vmw_resource_relocation *rel;
 275
 
 
 
 
 276	list_for_each_entry(rel, list, head) {
 277		if (likely(rel->res != NULL))
 278			cb[rel->offset] = rel->res->id;
 279		else
 280			cb[rel->offset] = SVGA_3D_CMD_NOP;
 
 
 
 
 
 
 
 
 
 281	}
 282}
 283
 284static int vmw_cmd_invalid(struct vmw_private *dev_priv,
 285			   struct vmw_sw_context *sw_context,
 286			   SVGA3dCmdHeader *header)
 287{
 288	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
 289}
 290
 291static int vmw_cmd_ok(struct vmw_private *dev_priv,
 292		      struct vmw_sw_context *sw_context,
 293		      SVGA3dCmdHeader *header)
 294{
 295	return 0;
 296}
 297
 298/**
 299 * vmw_bo_to_validate_list - add a bo to a validate list
 300 *
 301 * @sw_context: The software context used for this command submission batch.
 302 * @bo: The buffer object to add.
 303 * @validate_as_mob: Validate this buffer as a MOB.
 304 * @p_val_node: If non-NULL Will be updated with the validate node number
 305 * on return.
 306 *
 307 * Returns -EINVAL if the limit of number of buffer objects per command
 308 * submission is reached.
 309 */
 310static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
 311				   struct ttm_buffer_object *bo,
 312				   bool validate_as_mob,
 313				   uint32_t *p_val_node)
 314{
 315	uint32_t val_node;
 316	struct vmw_validate_buffer *vval_buf;
 317	struct ttm_validate_buffer *val_buf;
 318	struct drm_hash_item *hash;
 319	int ret;
 320
 321	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
 322				    &hash) == 0)) {
 323		vval_buf = container_of(hash, struct vmw_validate_buffer,
 324					hash);
 325		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
 326			DRM_ERROR("Inconsistent buffer usage.\n");
 327			return -EINVAL;
 328		}
 329		val_buf = &vval_buf->base;
 330		val_node = vval_buf - sw_context->val_bufs;
 331	} else {
 332		val_node = sw_context->cur_val_buf;
 333		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
 334			DRM_ERROR("Max number of DMA buffers per submission "
 335				  "exceeded.\n");
 336			return -EINVAL;
 337		}
 338		vval_buf = &sw_context->val_bufs[val_node];
 339		vval_buf->hash.key = (unsigned long) bo;
 340		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
 341		if (unlikely(ret != 0)) {
 342			DRM_ERROR("Failed to initialize a buffer validation "
 343				  "entry.\n");
 344			return ret;
 345		}
 346		++sw_context->cur_val_buf;
 347		val_buf = &vval_buf->base;
 348		val_buf->bo = ttm_bo_reference(bo);
 349		val_buf->reserved = false;
 350		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
 351		vval_buf->validate_as_mob = validate_as_mob;
 352	}
 353
 354	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
 355
 356	if (p_val_node)
 357		*p_val_node = val_node;
 358
 359	return 0;
 360}
 361
 362/**
 363 * vmw_resources_reserve - Reserve all resources on the sw_context's
 364 * resource list.
 365 *
 366 * @sw_context: Pointer to the software context.
 367 *
 368 * Note that since vmware's command submission currently is protected by
 369 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 370 * since only a single thread at once will attempt this.
 371 */
 372static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 373{
 374	struct vmw_resource_val_node *val;
 375	int ret;
 376
 377	list_for_each_entry(val, &sw_context->resource_list, head) {
 378		struct vmw_resource *res = val->res;
 379
 380		ret = vmw_resource_reserve(res, val->no_buffer_needed);
 381		if (unlikely(ret != 0))
 382			return ret;
 383
 384		if (res->backup) {
 385			struct ttm_buffer_object *bo = &res->backup->base;
 386
 387			ret = vmw_bo_to_validate_list
 388				(sw_context, bo,
 389				 vmw_resource_needs_backup(res), NULL);
 390
 391			if (unlikely(ret != 0))
 392				return ret;
 
 
 
 393		}
 394	}
 395	return 0;
 396}
 397
 398/**
 399 * vmw_resources_validate - Validate all resources on the sw_context's
 400 * resource list.
 401 *
 402 * @sw_context: Pointer to the software context.
 403 *
 404 * Before this function is called, all resource backup buffers must have
 405 * been validated.
 406 */
 407static int vmw_resources_validate(struct vmw_sw_context *sw_context)
 408{
 409	struct vmw_resource_val_node *val;
 410	int ret;
 411
 412	list_for_each_entry(val, &sw_context->resource_list, head) {
 413		struct vmw_resource *res = val->res;
 414
 415		ret = vmw_resource_validate(res);
 416		if (unlikely(ret != 0)) {
 417			if (ret != -ERESTARTSYS)
 418				DRM_ERROR("Failed to validate resource.\n");
 419			return ret;
 420		}
 421	}
 422	return 0;
 423}
 424
 425/**
 426 * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
 427 * on the resource validate list unless it's already there.
 428 *
 429 * @dev_priv: Pointer to a device private structure.
 430 * @sw_context: Pointer to the software context.
 431 * @res_type: Resource type.
 
 432 * @converter: User-space visisble type specific information.
 433 * @id: user-space resource id handle.
 434 * @id_loc: Pointer to the location in the command buffer currently being
 435 * parsed from where the user-space resource id handle is located.
 436 * @p_val: Pointer to pointer to resource validalidation node. Populated
 437 * on exit.
 438 */
 439static int
 440vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
 441			 struct vmw_sw_context *sw_context,
 442			 enum vmw_res_type res_type,
 443			 const struct vmw_user_resource_conv *converter,
 444			 uint32_t id,
 445			 uint32_t *id_loc,
 446			 struct vmw_resource_val_node **p_val)
 447{
 448	struct vmw_res_cache_entry *rcache =
 449		&sw_context->res_cache[res_type];
 450	struct vmw_resource *res;
 451	struct vmw_resource_val_node *node;
 452	int ret;
 453
 454	if (id == SVGA3D_INVALID_ID) {
 455		if (p_val)
 456			*p_val = NULL;
 
 457		if (res_type == vmw_res_context) {
 458			DRM_ERROR("Illegal context invalid id.\n");
 459			return -EINVAL;
 460		}
 461		return 0;
 462	}
 463
 464	/*
 465	 * Fastpath in case of repeated commands referencing the same
 466	 * resource
 467	 */
 
 
 
 468
 469	if (likely(rcache->valid && id == rcache->handle)) {
 470		const struct vmw_resource *res = rcache->res;
 
 471
 472		rcache->node->first_usage = false;
 473		if (p_val)
 474			*p_val = rcache->node;
 475
 476		return vmw_resource_relocation_add
 477			(&sw_context->res_relocations, res,
 478			 id_loc - sw_context->buf_start);
 479	}
 480
 481	ret = vmw_user_resource_lookup_handle(dev_priv,
 482					      sw_context->fp->tfile,
 483					      id,
 484					      converter,
 485					      &res);
 486	if (unlikely(ret != 0)) {
 487		DRM_ERROR("Could not find or use resource 0x%08x.\n",
 488			  (unsigned) id);
 489		dump_stack();
 490		return ret;
 491	}
 492
 493	rcache->valid = true;
 494	rcache->res = res;
 495	rcache->handle = id;
 496
 497	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
 498					  res,
 499					  id_loc - sw_context->buf_start);
 500	if (unlikely(ret != 0))
 501		goto out_no_reloc;
 502
 503	ret = vmw_resource_val_add(sw_context, res, &node);
 504	if (unlikely(ret != 0))
 505		goto out_no_reloc;
 506
 507	rcache->node = node;
 508	if (p_val)
 509		*p_val = node;
 510
 511	if (dev_priv->has_mob && node->first_usage &&
 512	    res_type == vmw_res_context) {
 513		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
 514		if (unlikely(ret != 0))
 515			goto out_no_reloc;
 516		node->staged_bindings =
 517			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
 518		if (node->staged_bindings == NULL) {
 519			DRM_ERROR("Failed to allocate context binding "
 520				  "information.\n");
 521			goto out_no_reloc;
 522		}
 523		INIT_LIST_HEAD(&node->staged_bindings->list);
 524	}
 525
 526	vmw_resource_unreference(&res);
 
 
 
 
 
 
 527	return 0;
 528
 529out_no_reloc:
 530	BUG_ON(sw_context->error_resource != NULL);
 531	sw_context->error_resource = res;
 532
 533	return ret;
 534}
 535
 536/**
 537 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 538 * on the resource validate list unless it's already there.
 539 *
 540 * @dev_priv: Pointer to a device private structure.
 541 * @sw_context: Pointer to the software context.
 542 * @res_type: Resource type.
 543 * @converter: User-space visisble type specific information.
 544 * @id_loc: Pointer to the location in the command buffer currently being
 545 * parsed from where the user-space resource id handle is located.
 546 * @p_val: Pointer to pointer to resource validalidation node. Populated
 547 * on exit.
 548 */
 549static int
 550vmw_cmd_res_check(struct vmw_private *dev_priv,
 551		  struct vmw_sw_context *sw_context,
 552		  enum vmw_res_type res_type,
 553		  const struct vmw_user_resource_conv *converter,
 554		  uint32_t *id_loc,
 555		  struct vmw_resource_val_node **p_val)
 556{
 557	return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
 558					converter, *id_loc, id_loc, p_val);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 559}
 560
 561/**
 562 * vmw_rebind_contexts - Rebind all resources previously bound to
 563 * referenced contexts.
 564 *
 565 * @sw_context: Pointer to the software context.
 566 *
 567 * Rebind context binding points that have been scrubbed because of eviction.
 568 */
 569static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
 570{
 571	struct vmw_resource_val_node *val;
 572	int ret;
 573
 574	list_for_each_entry(val, &sw_context->resource_list, head) {
 575		if (likely(!val->staged_bindings))
 576			continue;
 577
 578		ret = vmw_context_rebind_all(val->res);
 579		if (unlikely(ret != 0)) {
 580			if (ret != -ERESTARTSYS)
 581				DRM_ERROR("Failed to rebind context.\n");
 
 
 
 
 
 
 582			return ret;
 583		}
 584	}
 585
 586	return 0;
 587}
 588
 589/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 590 * vmw_cmd_cid_check - Check a command header for valid context information.
 591 *
 592 * @dev_priv: Pointer to a device private structure.
 593 * @sw_context: Pointer to the software context.
 594 * @header: A command header with an embedded user-space context handle.
 595 *
 596 * Convenience function: Call vmw_cmd_res_check with the user-space context
 597 * handle embedded in @header.
 598 */
 599static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 600			     struct vmw_sw_context *sw_context,
 601			     SVGA3dCmdHeader *header)
 602{
 603	struct vmw_cid_cmd {
 604		SVGA3dCmdHeader header;
 605		uint32_t cid;
 606	} *cmd;
 607
 608	cmd = container_of(header, struct vmw_cid_cmd, header);
 609	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 610				 user_context_converter, &cmd->cid, NULL);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 611}
 612
 613static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
 614					   struct vmw_sw_context *sw_context,
 615					   SVGA3dCmdHeader *header)
 616{
 617	struct vmw_sid_cmd {
 618		SVGA3dCmdHeader header;
 619		SVGA3dCmdSetRenderTarget body;
 620	} *cmd;
 621	struct vmw_resource_val_node *ctx_node;
 622	struct vmw_resource_val_node *res_node;
 623	int ret;
 624
 625	cmd = container_of(header, struct vmw_sid_cmd, header);
 
 
 
 
 
 
 626
 627	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 628				user_context_converter, &cmd->body.cid,
 629				&ctx_node);
 630	if (unlikely(ret != 0))
 631		return ret;
 632
 633	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 634				user_surface_converter,
 635				&cmd->body.target.sid, &res_node);
 636	if (unlikely(ret != 0))
 637		return ret;
 638
 639	if (dev_priv->has_mob) {
 640		struct vmw_ctx_bindinfo bi;
 
 
 
 
 
 641
 642		bi.ctx = ctx_node->res;
 643		bi.res = res_node ? res_node->res : NULL;
 644		bi.bt = vmw_ctx_binding_rt;
 645		bi.i1.rt_type = cmd->body.type;
 646		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
 647	}
 648
 649	return 0;
 650}
 651
 652static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
 653				      struct vmw_sw_context *sw_context,
 654				      SVGA3dCmdHeader *header)
 655{
 656	struct vmw_sid_cmd {
 657		SVGA3dCmdHeader header;
 658		SVGA3dCmdSurfaceCopy body;
 659	} *cmd;
 660	int ret;
 661
 662	cmd = container_of(header, struct vmw_sid_cmd, header);
 
 663	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 664				user_surface_converter,
 665				&cmd->body.src.sid, NULL);
 666	if (unlikely(ret != 0))
 667		return ret;
 
 668	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 669				 user_surface_converter,
 670				 &cmd->body.dest.sid, NULL);
 671}
 672
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 673static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
 674				     struct vmw_sw_context *sw_context,
 675				     SVGA3dCmdHeader *header)
 676{
 677	struct vmw_sid_cmd {
 678		SVGA3dCmdHeader header;
 679		SVGA3dCmdSurfaceStretchBlt body;
 680	} *cmd;
 681	int ret;
 682
 683	cmd = container_of(header, struct vmw_sid_cmd, header);
 684	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 685				user_surface_converter,
 686				&cmd->body.src.sid, NULL);
 687	if (unlikely(ret != 0))
 688		return ret;
 
 689	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 690				 user_surface_converter,
 691				 &cmd->body.dest.sid, NULL);
 692}
 693
 694static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
 695					 struct vmw_sw_context *sw_context,
 696					 SVGA3dCmdHeader *header)
 697{
 698	struct vmw_sid_cmd {
 699		SVGA3dCmdHeader header;
 700		SVGA3dCmdBlitSurfaceToScreen body;
 701	} *cmd;
 702
 703	cmd = container_of(header, struct vmw_sid_cmd, header);
 704
 705	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 706				 user_surface_converter,
 707				 &cmd->body.srcImage.sid, NULL);
 708}
 709
 710static int vmw_cmd_present_check(struct vmw_private *dev_priv,
 711				 struct vmw_sw_context *sw_context,
 712				 SVGA3dCmdHeader *header)
 713{
 714	struct vmw_sid_cmd {
 715		SVGA3dCmdHeader header;
 716		SVGA3dCmdPresent body;
 717	} *cmd;
 718
 719
 720	cmd = container_of(header, struct vmw_sid_cmd, header);
 721
 722	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
 723				 user_surface_converter, &cmd->body.sid,
 724				 NULL);
 725}
 726
 727/**
 728 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 729 *
 730 * @dev_priv: The device private structure.
 731 * @new_query_bo: The new buffer holding query results.
 732 * @sw_context: The software context used for this command submission.
 733 *
 734 * This function checks whether @new_query_bo is suitable for holding
 735 * query results, and if another buffer currently is pinned for query
 736 * results. If so, the function prepares the state of @sw_context for
 737 * switching pinned buffers after successful submission of the current
 738 * command batch.
 739 */
 740static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 741				       struct ttm_buffer_object *new_query_bo,
 742				       struct vmw_sw_context *sw_context)
 743{
 744	struct vmw_res_cache_entry *ctx_entry =
 745		&sw_context->res_cache[vmw_res_context];
 746	int ret;
 747
 748	BUG_ON(!ctx_entry->valid);
 749	sw_context->last_query_ctx = ctx_entry->res;
 750
 751	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
 752
 753		if (unlikely(new_query_bo->num_pages > 4)) {
 754			DRM_ERROR("Query buffer too large.\n");
 755			return -EINVAL;
 756		}
 757
 758		if (unlikely(sw_context->cur_query_bo != NULL)) {
 759			sw_context->needs_post_query_barrier = true;
 760			ret = vmw_bo_to_validate_list(sw_context,
 761						      sw_context->cur_query_bo,
 762						      dev_priv->has_mob, NULL);
 763			if (unlikely(ret != 0))
 764				return ret;
 765		}
 766		sw_context->cur_query_bo = new_query_bo;
 767
 768		ret = vmw_bo_to_validate_list(sw_context,
 769					      dev_priv->dummy_query_bo,
 770					      dev_priv->has_mob, NULL);
 771		if (unlikely(ret != 0))
 772			return ret;
 773
 774	}
 775
 776	return 0;
 777}
 778
 779
 780/**
 781 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 782 *
 783 * @dev_priv: The device private structure.
 784 * @sw_context: The software context used for this command submission batch.
 785 *
 786 * This function will check if we're switching query buffers, and will then,
 787 * issue a dummy occlusion query wait used as a query barrier. When the fence
 788 * object following that query wait has signaled, we are sure that all
 789 * preceding queries have finished, and the old query buffer can be unpinned.
 790 * However, since both the new query buffer and the old one are fenced with
 791 * that fence, we can do an asynchronus unpin now, and be sure that the
 792 * old query buffer won't be moved until the fence has signaled.
 793 *
 794 * As mentioned above, both the new - and old query buffers need to be fenced
 795 * using a sequence emitted *after* calling this function.
 796 */
 797static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 798				     struct vmw_sw_context *sw_context)
 799{
 800	/*
 801	 * The validate list should still hold references to all
 802	 * contexts here.
 803	 */
 804
 805	if (sw_context->needs_post_query_barrier) {
 806		struct vmw_res_cache_entry *ctx_entry =
 807			&sw_context->res_cache[vmw_res_context];
 808		struct vmw_resource *ctx;
 809		int ret;
 810
 811		BUG_ON(!ctx_entry->valid);
 812		ctx = ctx_entry->res;
 813
 814		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
 815
 816		if (unlikely(ret != 0))
 817			DRM_ERROR("Out of fifo space for dummy query.\n");
 818	}
 819
 820	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
 821		if (dev_priv->pinned_bo) {
 822			vmw_bo_pin(dev_priv->pinned_bo, false);
 823			ttm_bo_unref(&dev_priv->pinned_bo);
 824		}
 825
 826		if (!sw_context->needs_post_query_barrier) {
 827			vmw_bo_pin(sw_context->cur_query_bo, true);
 828
 829			/*
 830			 * We pin also the dummy_query_bo buffer so that we
 831			 * don't need to validate it when emitting
 832			 * dummy queries in context destroy paths.
 833			 */
 834
 835			vmw_bo_pin(dev_priv->dummy_query_bo, true);
 836			dev_priv->dummy_query_bo_pinned = true;
 837
 838			BUG_ON(sw_context->last_query_ctx == NULL);
 839			dev_priv->query_cid = sw_context->last_query_ctx->id;
 840			dev_priv->query_cid_valid = true;
 841			dev_priv->pinned_bo =
 842				ttm_bo_reference(sw_context->cur_query_bo);
 843		}
 844	}
 845}
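/*
 * A minimal sketch of the expected caller ordering for the two query-switch
 * helpers above, assuming a submission path similar to vmw_execbuf_process()
 * later in this file (illustrative only, not taken verbatim from a caller):
 *
 *	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
 *	...				validate buffers, reserve fifo space
 *	vmw_fifo_commit(dev_priv, command_size);
 *	vmw_query_bo_switch_commit(dev_priv, sw_context);
 *	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence, NULL);
 *	...				fence all validated buffers
 *
 * As the kernel-doc above notes, both the old and the new query buffer must
 * be fenced with a sequence emitted after vmw_query_bo_switch_commit().
 */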
 846
 847/**
 848 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 849 * handle to a MOB id.
 850 *
 851 * @dev_priv: Pointer to a device private structure.
 852 * @sw_context: The software context used for this command batch validation.
 853 * @id: Pointer to the user-space handle to be translated.
 854 * @vmw_bo_p: Points to a location that, on successful return, will carry
 855 * a reference-counted pointer to the DMA buffer identified by the
 856 * user-space handle in @id.
 857 *
 858 * This function saves information needed to translate a user-space buffer
 859 * handle to a MOB id. The translation does not take place immediately, but
 860 * during a call to vmw_apply_relocations(). This function builds a relocation
 861 * list and a list of buffers to validate. The former needs to be freed using
 862 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 863 * needs to be freed using vmw_clear_validations.
 864 */
 865static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 866				 struct vmw_sw_context *sw_context,
 867				 SVGAMobId *id,
 868				 struct vmw_dma_buffer **vmw_bo_p)
 869{
 870	struct vmw_dma_buffer *vmw_bo = NULL;
 871	struct ttm_buffer_object *bo;
 872	uint32_t handle = *id;
 873	struct vmw_relocation *reloc;
 874	int ret;
 875
 876	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 877	if (unlikely(ret != 0)) {
 878		DRM_ERROR("Could not find or use MOB buffer.\n");
 879		return -EINVAL;
 880	}
 881	bo = &vmw_bo->base;
 882
 883	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 884		DRM_ERROR("Max number of relocations per submission"
 885			  " exceeded.\n");
 886		ret = -EINVAL;
 887		goto out_no_reloc;
 888	}
 889
 890	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 891	reloc->mob_loc = id;
 892	reloc->location = NULL;
 893
 894	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
 895	if (unlikely(ret != 0))
 896		goto out_no_reloc;
 897
 898	*vmw_bo_p = vmw_bo;
 899	return 0;
 900
 901out_no_reloc:
 902	vmw_dmabuf_unreference(&vmw_bo);
 903	*vmw_bo_p = NULL;
 904	return ret;
 905}
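/*
 * A worked example of the deferred translation above (a sketch, assuming the
 * buffer ends up validated into the VMW_PL_MOB placement): nothing is patched
 * here; only later, in vmw_apply_relocations() further down in this file, the
 * saved @id location is overwritten with the validated buffer's mob id:
 *
 *	*reloc->mob_loc = bo->mem.start;
 *
 * so the command stream that is finally copied to the fifo refers to the
 * actual MOB rather than to the user-space handle.
 */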
 906
 907/**
 908 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 909 * handle to a valid SVGAGuestPtr
 910 *
 911 * @dev_priv: Pointer to a device private structure.
 912 * @sw_context: The software context used for this command batch validation.
 913 * @ptr: Pointer to the user-space handle to be translated.
 914 * @vmw_bo_p: Points to a location that, on successful return, will carry
 915 * a reference-counted pointer to the DMA buffer identified by the
 916 * user-space handle in @ptr.
 917 *
 918 * This function saves information needed to translate a user-space buffer
 919 * handle to a valid SVGAGuestPtr. The translation does not take place
 920 * immediately, but during a call to vmw_apply_relocations().
 921 * This function builds a relocation list and a list of buffers to validate.
 922 * The former needs to be freed using either vmw_apply_relocations() or
 923 * vmw_free_relocations(). The latter needs to be freed using
 924 * vmw_clear_validations.
 925 */
 926static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 927				   struct vmw_sw_context *sw_context,
 928				   SVGAGuestPtr *ptr,
 929				   struct vmw_dma_buffer **vmw_bo_p)
 930{
 931	struct vmw_dma_buffer *vmw_bo = NULL;
 932	struct ttm_buffer_object *bo;
 933	uint32_t handle = ptr->gmrId;
 934	struct vmw_relocation *reloc;
 935	int ret;
 936
 937	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 938	if (unlikely(ret != 0)) {
 939		DRM_ERROR("Could not find or use GMR region.\n");
 940		return -EINVAL;
 941	}
 942	bo = &vmw_bo->base;
 943
 944	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
 945		DRM_ERROR("Max number of relocations per submission"
 946			  " exceeded.\n");
 947		ret = -EINVAL;
 948		goto out_no_reloc;
 949	}
 950
 951	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 952	reloc->location = ptr;
 953
 954	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
 955	if (unlikely(ret != 0))
 956		goto out_no_reloc;
 957
 958	*vmw_bo_p = vmw_bo;
 959	return 0;
 960
 961out_no_reloc:
 962	vmw_dmabuf_unreference(&vmw_bo);
 963	*vmw_bo_p = NULL;
 964	return ret;
 965}
 966
 967/**
 968 * vmw_cmd_begin_gb_query - validate an SVGA_3D_CMD_BEGIN_GB_QUERY command.
 969 *
 970 * @dev_priv: Pointer to a device private struct.
 971 * @sw_context: The software context used for this command submission.
 972 * @header: Pointer to the command header in the command stream.
 973 */
 974static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
 975				  struct vmw_sw_context *sw_context,
 976				  SVGA3dCmdHeader *header)
 977{
 978	struct vmw_begin_gb_query_cmd {
 979		SVGA3dCmdHeader header;
 980		SVGA3dCmdBeginGBQuery q;
 981	} *cmd;
 982
 983	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
 984			   header);
 985
 986	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 987				 user_context_converter, &cmd->q.cid,
 988				 NULL);
 989}
 990
 991/**
 992 * vmw_cmd_begin_query - validate an SVGA_3D_CMD_BEGIN_QUERY command.
 993 *
 994 * @dev_priv: Pointer to a device private struct.
 995 * @sw_context: The software context used for this command submission.
 996 * @header: Pointer to the command header in the command stream.
 997 */
 998static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
 999			       struct vmw_sw_context *sw_context,
1000			       SVGA3dCmdHeader *header)
1001{
1002	struct vmw_begin_query_cmd {
1003		SVGA3dCmdHeader header;
1004		SVGA3dCmdBeginQuery q;
1005	} *cmd;
1006
1007	cmd = container_of(header, struct vmw_begin_query_cmd,
1008			   header);
1009
1010	if (unlikely(dev_priv->has_mob)) {
1011		struct {
1012			SVGA3dCmdHeader header;
1013			SVGA3dCmdBeginGBQuery q;
1014		} gb_cmd;
1015
1016		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1017
1018		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1019		gb_cmd.header.size = cmd->header.size;
1020		gb_cmd.q.cid = cmd->q.cid;
1021		gb_cmd.q.type = cmd->q.type;
1022
1023		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1024		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1025	}
1026
1027	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1028				 user_context_converter, &cmd->q.cid,
1029				 NULL);
1030}
1031
1032/**
1033 * vmw_cmd_end_gb_query - validate an SVGA_3D_CMD_END_GB_QUERY command.
1034 *
1035 * @dev_priv: Pointer to a device private struct.
1036 * @sw_context: The software context used for this command submission.
1037 * @header: Pointer to the command header in the command stream.
1038 */
1039static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1040				struct vmw_sw_context *sw_context,
1041				SVGA3dCmdHeader *header)
1042{
1043	struct vmw_dma_buffer *vmw_bo;
1044	struct vmw_query_cmd {
1045		SVGA3dCmdHeader header;
1046		SVGA3dCmdEndGBQuery q;
1047	} *cmd;
1048	int ret;
1049
1050	cmd = container_of(header, struct vmw_query_cmd, header);
1051	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1052	if (unlikely(ret != 0))
1053		return ret;
1054
1055	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1056				    &cmd->q.mobid,
1057				    &vmw_bo);
1058	if (unlikely(ret != 0))
1059		return ret;
1060
1061	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
1062
1063	vmw_dmabuf_unreference(&vmw_bo);
1064	return ret;
1065}
1066
1067/**
1068 * vmw_cmd_end_query - validate an SVGA_3D_CMD_END_QUERY command.
1069 *
1070 * @dev_priv: Pointer to a device private struct.
1071 * @sw_context: The software context used for this command submission.
1072 * @header: Pointer to the command header in the command stream.
1073 */
1074static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1075			     struct vmw_sw_context *sw_context,
1076			     SVGA3dCmdHeader *header)
1077{
1078	struct vmw_dma_buffer *vmw_bo;
1079	struct vmw_query_cmd {
1080		SVGA3dCmdHeader header;
1081		SVGA3dCmdEndQuery q;
1082	} *cmd;
1083	int ret;
1084
1085	cmd = container_of(header, struct vmw_query_cmd, header);
1086	if (dev_priv->has_mob) {
1087		struct {
1088			SVGA3dCmdHeader header;
1089			SVGA3dCmdEndGBQuery q;
1090		} gb_cmd;
1091
1092		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1093
1094		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1095		gb_cmd.header.size = cmd->header.size;
1096		gb_cmd.q.cid = cmd->q.cid;
1097		gb_cmd.q.type = cmd->q.type;
1098		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1099		gb_cmd.q.offset = cmd->q.guestResult.offset;
1100
1101		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1102		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1103	}
1104
1105	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1106	if (unlikely(ret != 0))
1107		return ret;
1108
1109	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1110				      &cmd->q.guestResult,
1111				      &vmw_bo);
1112	if (unlikely(ret != 0))
1113		return ret;
1114
1115	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
1116
1117	vmw_dmabuf_unreference(&vmw_bo);
1118	return ret;
1119}
1120
1121/**
1122 * vmw_cmd_wait_gb_query - validate an SVGA_3D_CMD_WAIT_GB_QUERY command.
1123 *
1124 * @dev_priv: Pointer to a device private struct.
1125 * @sw_context: The software context used for this command submission.
1126 * @header: Pointer to the command header in the command stream.
1127 */
1128static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1129				 struct vmw_sw_context *sw_context,
1130				 SVGA3dCmdHeader *header)
1131{
1132	struct vmw_dma_buffer *vmw_bo;
1133	struct vmw_query_cmd {
1134		SVGA3dCmdHeader header;
1135		SVGA3dCmdWaitForGBQuery q;
1136	} *cmd;
1137	int ret;
1138
1139	cmd = container_of(header, struct vmw_query_cmd, header);
1140	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1141	if (unlikely(ret != 0))
1142		return ret;
1143
1144	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1145				    &cmd->q.mobid,
1146				    &vmw_bo);
1147	if (unlikely(ret != 0))
1148		return ret;
1149
1150	vmw_dmabuf_unreference(&vmw_bo);
1151	return 0;
1152}
1153
1154/**
1155 * vmw_cmd_wait_query - validate an SVGA_3D_CMD_WAIT_QUERY command.
1156 *
1157 * @dev_priv: Pointer to a device private struct.
1158 * @sw_context: The software context used for this command submission.
1159 * @header: Pointer to the command header in the command stream.
1160 */
1161static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1162			      struct vmw_sw_context *sw_context,
1163			      SVGA3dCmdHeader *header)
1164{
1165	struct vmw_dma_buffer *vmw_bo;
1166	struct vmw_query_cmd {
1167		SVGA3dCmdHeader header;
1168		SVGA3dCmdWaitForQuery q;
1169	} *cmd;
1170	int ret;
1171
1172	cmd = container_of(header, struct vmw_query_cmd, header);
1173	if (dev_priv->has_mob) {
1174		struct {
1175			SVGA3dCmdHeader header;
1176			SVGA3dCmdWaitForGBQuery q;
1177		} gb_cmd;
1178
1179		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1180
1181		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1182		gb_cmd.header.size = cmd->header.size;
1183		gb_cmd.q.cid = cmd->q.cid;
1184		gb_cmd.q.type = cmd->q.type;
1185		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1186		gb_cmd.q.offset = cmd->q.guestResult.offset;
1187
1188		memcpy(cmd, &gb_cmd, sizeof(*cmd));
1189		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1190	}
1191
1192	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1193	if (unlikely(ret != 0))
1194		return ret;
1195
1196	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1197				      &cmd->q.guestResult,
1198				      &vmw_bo);
1199	if (unlikely(ret != 0))
1200		return ret;
1201
1202	vmw_dmabuf_unreference(&vmw_bo);
1203	return 0;
1204}
1205
1206static int vmw_cmd_dma(struct vmw_private *dev_priv,
1207		       struct vmw_sw_context *sw_context,
1208		       SVGA3dCmdHeader *header)
1209{
1210	struct vmw_dma_buffer *vmw_bo = NULL;
1211	struct vmw_surface *srf = NULL;
1212	struct vmw_dma_cmd {
1213		SVGA3dCmdHeader header;
1214		SVGA3dCmdSurfaceDMA dma;
1215	} *cmd;
1216	int ret;
1217	SVGA3dCmdSurfaceDMASuffix *suffix;
1218	uint32_t bo_size;
1219
1220	cmd = container_of(header, struct vmw_dma_cmd, header);
1221	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1222					       header->size - sizeof(*suffix));
1223
1224	/* Make sure device and verifier stay in sync. */
1225	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1226		DRM_ERROR("Invalid DMA suffix size.\n");
1227		return -EINVAL;
1228	}
1229
1230	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1231				      &cmd->dma.guest.ptr,
1232				      &vmw_bo);
1233	if (unlikely(ret != 0))
1234		return ret;
1235
1236	/* Make sure DMA doesn't cross BO boundaries. */
1237	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1238	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1239		DRM_ERROR("Invalid DMA offset.\n");
1240		return -EINVAL;
1241	}
1242
1243	bo_size -= cmd->dma.guest.ptr.offset;
1244	if (unlikely(suffix->maximumOffset > bo_size))
1245		suffix->maximumOffset = bo_size;
1246
1247	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1248				user_surface_converter, &cmd->dma.host.sid,
1249				NULL);
1250	if (unlikely(ret != 0)) {
1251		if (unlikely(ret != -ERESTARTSYS))
1252			DRM_ERROR("could not find surface for DMA.\n");
1253		goto out_no_surface;
1254	}
1255
1256	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1257
1258	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1259			     header);
1260
1261out_no_surface:
1262	vmw_dmabuf_unreference(&vmw_bo);
1263	return ret;
1264}
1265
1266static int vmw_cmd_draw(struct vmw_private *dev_priv,
1267			struct vmw_sw_context *sw_context,
1268			SVGA3dCmdHeader *header)
1269{
1270	struct vmw_draw_cmd {
1271		SVGA3dCmdHeader header;
1272		SVGA3dCmdDrawPrimitives body;
1273	} *cmd;
1274	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1275		(unsigned long)header + sizeof(*cmd));
1276	SVGA3dPrimitiveRange *range;
1277	uint32_t i;
1278	uint32_t maxnum;
1279	int ret;
1280
1281	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1282	if (unlikely(ret != 0))
1283		return ret;
1284
1285	cmd = container_of(header, struct vmw_draw_cmd, header);
1286	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1287
1288	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1289		DRM_ERROR("Illegal number of vertex declarations.\n");
1290		return -EINVAL;
1291	}
1292
1293	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1294		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1295					user_surface_converter,
1296					&decl->array.surfaceId, NULL);
1297		if (unlikely(ret != 0))
1298			return ret;
1299	}
1300
1301	maxnum = (header->size - sizeof(cmd->body) -
1302		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1303	if (unlikely(cmd->body.numRanges > maxnum)) {
1304		DRM_ERROR("Illegal number of index ranges.\n");
1305		return -EINVAL;
1306	}
1307
1308	range = (SVGA3dPrimitiveRange *) decl;
1309	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1310		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1311					user_surface_converter,
1312					&range->indexArray.surfaceId, NULL);
1313		if (unlikely(ret != 0))
1314			return ret;
1315	}
1316	return 0;
1317}
1318
1319
1320static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1321			     struct vmw_sw_context *sw_context,
1322			     SVGA3dCmdHeader *header)
1323{
1324	struct vmw_tex_state_cmd {
1325		SVGA3dCmdHeader header;
1326		SVGA3dCmdSetTextureState state;
1327	} *cmd;
1328
1329	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1330	  ((unsigned long) header + header->size + sizeof(header));
1331	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1332		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1333	struct vmw_resource_val_node *ctx_node;
1334	struct vmw_resource_val_node *res_node;
1335	int ret;
1336
1337	cmd = container_of(header, struct vmw_tex_state_cmd,
1338			   header);
1339
1340	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1341				user_context_converter, &cmd->state.cid,
1342				&ctx_node);
1343	if (unlikely(ret != 0))
1344		return ret;
1345
1346	for (; cur_state < last_state; ++cur_state) {
1347		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1348			continue;
1349
1350		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1351					user_surface_converter,
1352					&cur_state->value, &res_node);
1353		if (unlikely(ret != 0))
1354			return ret;
1355
1356		if (dev_priv->has_mob) {
1357			struct vmw_ctx_bindinfo bi;
1358
1359			bi.ctx = ctx_node->res;
1360			bi.res = res_node ? res_node->res : NULL;
1361			bi.bt = vmw_ctx_binding_tex;
1362			bi.i1.texture_stage = cur_state->stage;
1363			vmw_context_binding_add(ctx_node->staged_bindings,
1364						&bi);
1365		}
1366	}
1367
1368	return 0;
1369}
1370
1371static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1372				      struct vmw_sw_context *sw_context,
1373				      void *buf)
1374{
1375	struct vmw_dma_buffer *vmw_bo;
1376	int ret;
1377
1378	struct {
1379		uint32_t header;
1380		SVGAFifoCmdDefineGMRFB body;
1381	} *cmd = buf;
1382
1383	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1384				      &cmd->body.ptr,
1385				      &vmw_bo);
1386	if (unlikely(ret != 0))
1387		return ret;
1388
1389	vmw_dmabuf_unreference(&vmw_bo);
1390
1391	return ret;
1392}
1393
1394/**
1395 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1396 *
1397 * @dev_priv: Pointer to a device private struct.
1398 * @sw_context: The software context being used for this batch.
1399 * @res_type: The resource type.
1400 * @converter: Information about user-space binding for this resource type.
1401 * @res_id: Pointer to the user-space resource handle in the command stream.
1402 * @buf_id: Pointer to the user-space backup buffer handle in the command
1403 * stream.
1404 * @backup_offset: Offset of backup into MOB.
1405 *
1406 * This function prepares for registering a switch of backup buffers
1407 * in the resource metadata just prior to unreserving.
1408 */
1409static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1410				 struct vmw_sw_context *sw_context,
1411				 enum vmw_res_type res_type,
1412				 const struct vmw_user_resource_conv
1413				 *converter,
1414				 uint32_t *res_id,
1415				 uint32_t *buf_id,
1416				 unsigned long backup_offset)
1417{
1418	int ret;
1419	struct vmw_dma_buffer *dma_buf;
1420	struct vmw_resource_val_node *val_node;
1421
1422	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1423				converter, res_id, &val_node);
1424	if (unlikely(ret != 0))
1425		return ret;
1426
1427	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1428	if (unlikely(ret != 0))
1429		return ret;
1430
1431	if (val_node->first_usage)
1432		val_node->no_buffer_needed = true;
1433
1434	vmw_dmabuf_unreference(&val_node->new_backup);
1435	val_node->new_backup = dma_buf;
1436	val_node->new_backup_offset = backup_offset;
1437
1438	return 0;
1439}
1440
1441/**
1442 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1443 * command
1444 *
1445 * @dev_priv: Pointer to a device private struct.
1446 * @sw_context: The software context being used for this batch.
1447 * @header: Pointer to the command header in the command stream.
1448 */
1449static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1450				   struct vmw_sw_context *sw_context,
1451				   SVGA3dCmdHeader *header)
1452{
1453	struct vmw_bind_gb_surface_cmd {
1454		SVGA3dCmdHeader header;
1455		SVGA3dCmdBindGBSurface body;
1456	} *cmd;
1457
1458	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1459
1460	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1461				     user_surface_converter,
1462				     &cmd->body.sid, &cmd->body.mobid,
1463				     0);
1464}
1465
1466/**
1467 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1468 * command
1469 *
1470 * @dev_priv: Pointer to a device private struct.
1471 * @sw_context: The software context being used for this batch.
1472 * @header: Pointer to the command header in the command stream.
1473 */
1474static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1475				   struct vmw_sw_context *sw_context,
1476				   SVGA3dCmdHeader *header)
1477{
1478	struct vmw_gb_surface_cmd {
1479		SVGA3dCmdHeader header;
1480		SVGA3dCmdUpdateGBImage body;
1481	} *cmd;
1482
1483	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1484
1485	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1486				 user_surface_converter,
1487				 &cmd->body.image.sid, NULL);
1488}
1489
1490/**
1491 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1492 * command
1493 *
1494 * @dev_priv: Pointer to a device private struct.
1495 * @sw_context: The software context being used for this batch.
1496 * @header: Pointer to the command header in the command stream.
1497 */
1498static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1499				     struct vmw_sw_context *sw_context,
1500				     SVGA3dCmdHeader *header)
1501{
1502	struct vmw_gb_surface_cmd {
1503		SVGA3dCmdHeader header;
1504		SVGA3dCmdUpdateGBSurface body;
1505	} *cmd;
1506
1507	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1508
1509	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1510				 user_surface_converter,
1511				 &cmd->body.sid, NULL);
1512}
1513
1514/**
1515 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
1516 * command
1517 *
1518 * @dev_priv: Pointer to a device private struct.
1519 * @sw_context: The software context being used for this batch.
1520 * @header: Pointer to the command header in the command stream.
1521 */
1522static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1523				     struct vmw_sw_context *sw_context,
1524				     SVGA3dCmdHeader *header)
1525{
1526	struct vmw_gb_surface_cmd {
1527		SVGA3dCmdHeader header;
1528		SVGA3dCmdReadbackGBImage body;
1529	} *cmd;
1530
1531	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1532
1533	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1534				 user_surface_converter,
1535				 &cmd->body.image.sid, NULL);
1536}
1537
1538/**
1539 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
1540 * command
1541 *
1542 * @dev_priv: Pointer to a device private struct.
1543 * @sw_context: The software context being used for this batch.
1544 * @header: Pointer to the command header in the command stream.
1545 */
1546static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1547				       struct vmw_sw_context *sw_context,
1548				       SVGA3dCmdHeader *header)
1549{
1550	struct vmw_gb_surface_cmd {
1551		SVGA3dCmdHeader header;
1552		SVGA3dCmdReadbackGBSurface body;
1553	} *cmd;
1554
1555	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1556
1557	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1558				 user_surface_converter,
1559				 &cmd->body.sid, NULL);
1560}
1561
1562/**
1563 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1564 * command
1565 *
1566 * @dev_priv: Pointer to a device private struct.
1567 * @sw_context: The software context being used for this batch.
1568 * @header: Pointer to the command header in the command stream.
1569 */
1570static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1571				       struct vmw_sw_context *sw_context,
1572				       SVGA3dCmdHeader *header)
1573{
1574	struct vmw_gb_surface_cmd {
1575		SVGA3dCmdHeader header;
1576		SVGA3dCmdInvalidateGBImage body;
1577	} *cmd;
1578
1579	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1580
1581	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1582				 user_surface_converter,
1583				 &cmd->body.image.sid, NULL);
1584}
1585
1586/**
1587 * vmw_cmd_invalidate_gb_surface - Validate an
1588 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
1589 *
1590 * @dev_priv: Pointer to a device private struct.
1591 * @sw_context: The software context being used for this batch.
1592 * @header: Pointer to the command header in the command stream.
1593 */
1594static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1595					 struct vmw_sw_context *sw_context,
1596					 SVGA3dCmdHeader *header)
1597{
1598	struct vmw_gb_surface_cmd {
1599		SVGA3dCmdHeader header;
1600		SVGA3dCmdInvalidateGBSurface body;
1601	} *cmd;
1602
1603	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1604
1605	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1606				 user_surface_converter,
1607				 &cmd->body.sid, NULL);
1608}
1609
1610
1611/**
1612 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
1613 * command
1614 *
1615 * @dev_priv: Pointer to a device private struct.
1616 * @sw_context: The software context being used for this batch.
1617 * @header: Pointer to the command header in the command stream.
1618 */
1619static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1620				 struct vmw_sw_context *sw_context,
1621				 SVGA3dCmdHeader *header)
1622{
1623	struct vmw_shader_define_cmd {
1624		SVGA3dCmdHeader header;
1625		SVGA3dCmdDefineShader body;
1626	} *cmd;
1627	int ret;
1628	size_t size;
1629
1630	cmd = container_of(header, struct vmw_shader_define_cmd,
1631			   header);
1632
1633	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1634				user_context_converter, &cmd->body.cid,
1635				NULL);
1636	if (unlikely(ret != 0))
1637		return ret;
1638
1639	if (unlikely(!dev_priv->has_mob))
1640		return 0;
1641
1642	size = cmd->header.size - sizeof(cmd->body);
1643	ret = vmw_compat_shader_add(sw_context->fp->shman,
1644				    cmd->body.shid, cmd + 1,
1645				    cmd->body.type, size,
1646				    sw_context->fp->tfile,
1647				    &sw_context->staged_shaders);
1648	if (unlikely(ret != 0))
1649		return ret;
1650
1651	return vmw_resource_relocation_add(&sw_context->res_relocations,
1652					   NULL, &cmd->header.id -
1653					   sw_context->buf_start);
1656}
1657
1658/**
1659 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
1660 * command
1661 *
1662 * @dev_priv: Pointer to a device private struct.
1663 * @sw_context: The software context being used for this batch.
1664 * @header: Pointer to the command header in the command stream.
1665 */
1666static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1667				  struct vmw_sw_context *sw_context,
1668				  SVGA3dCmdHeader *header)
1669{
1670	struct vmw_shader_destroy_cmd {
1671		SVGA3dCmdHeader header;
1672		SVGA3dCmdDestroyShader body;
1673	} *cmd;
1674	int ret;
1675
1676	cmd = container_of(header, struct vmw_shader_destroy_cmd,
1677			   header);
1678
1679	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1680				user_context_converter, &cmd->body.cid,
1681				NULL);
1682	if (unlikely(ret != 0))
1683		return ret;
1684
1685	if (unlikely(!dev_priv->has_mob))
1686		return 0;
1687
1688	ret = vmw_compat_shader_remove(sw_context->fp->shman,
1689				       cmd->body.shid,
1690				       cmd->body.type,
1691				       &sw_context->staged_shaders);
1692	if (unlikely(ret != 0))
1693		return ret;
1694
1695	return vmw_resource_relocation_add(&sw_context->res_relocations,
1696					   NULL, &cmd->header.id -
1697					   sw_context->buf_start);
1700}
1701
1702/**
1703 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
1704 * command
1705 *
1706 * @dev_priv: Pointer to a device private struct.
1707 * @sw_context: The software context being used for this batch.
1708 * @header: Pointer to the command header in the command stream.
1709 */
1710static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1711			      struct vmw_sw_context *sw_context,
1712			      SVGA3dCmdHeader *header)
1713{
1714	struct vmw_set_shader_cmd {
1715		SVGA3dCmdHeader header;
1716		SVGA3dCmdSetShader body;
1717	} *cmd;
1718	struct vmw_resource_val_node *ctx_node;
1719	int ret;
1720
1721	cmd = container_of(header, struct vmw_set_shader_cmd,
1722			   header);
1723
1724	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1725				user_context_converter, &cmd->body.cid,
1726				&ctx_node);
1727	if (unlikely(ret != 0))
1728		return ret;
1729
1730	if (dev_priv->has_mob) {
1731		struct vmw_ctx_bindinfo bi;
1732		struct vmw_resource_val_node *res_node;
1733		u32 shid = cmd->body.shid;
1734
1735		if (shid != SVGA3D_INVALID_ID)
1736			(void) vmw_compat_shader_lookup(sw_context->fp->shman,
1737							cmd->body.type,
1738							&shid);
1739
1740		ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
1741					       vmw_res_shader,
1742					       user_shader_converter,
1743					       shid,
1744					       &cmd->body.shid, &res_node);
1745		if (unlikely(ret != 0))
1746			return ret;
1747
1748		bi.ctx = ctx_node->res;
1749		bi.res = res_node ? res_node->res : NULL;
1750		bi.bt = vmw_ctx_binding_shader;
1751		bi.i1.shader_type = cmd->body.type;
1752		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
1753	}
1754
1755	return 0;
1756}
1757
1758/**
1759 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
1760 * command
1761 *
1762 * @dev_priv: Pointer to a device private struct.
1763 * @sw_context: The software context being used for this batch.
1764 * @header: Pointer to the command header in the command stream.
1765 */
1766static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
1767				    struct vmw_sw_context *sw_context,
1768				    SVGA3dCmdHeader *header)
1769{
1770	struct vmw_set_shader_const_cmd {
1771		SVGA3dCmdHeader header;
1772		SVGA3dCmdSetShaderConst body;
1773	} *cmd;
1774	int ret;
1775
1776	cmd = container_of(header, struct vmw_set_shader_const_cmd,
1777			   header);
1778
1779	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1780				user_context_converter, &cmd->body.cid,
1781				NULL);
1782	if (unlikely(ret != 0))
1783		return ret;
1784
1785	if (dev_priv->has_mob)
1786		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
1787
1788	return 0;
1789}
1790
1791/**
1792 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
1793 * command
1794 *
1795 * @dev_priv: Pointer to a device private struct.
1796 * @sw_context: The software context being used for this batch.
1797 * @header: Pointer to the command header in the command stream.
1798 */
1799static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
1800				  struct vmw_sw_context *sw_context,
1801				  SVGA3dCmdHeader *header)
1802{
1803	struct vmw_bind_gb_shader_cmd {
1804		SVGA3dCmdHeader header;
1805		SVGA3dCmdBindGBShader body;
1806	} *cmd;
1807
1808	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
1809			   header);
1810
1811	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
1812				     user_shader_converter,
1813				     &cmd->body.shid, &cmd->body.mobid,
1814				     cmd->body.offsetInBytes);
1815}
1816
1817static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1818				struct vmw_sw_context *sw_context,
1819				void *buf, uint32_t *size)
1820{
1821	uint32_t size_remaining = *size;
1822	uint32_t cmd_id;
1823
1824	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1825	switch (cmd_id) {
1826	case SVGA_CMD_UPDATE:
1827		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
1828		break;
1829	case SVGA_CMD_DEFINE_GMRFB:
1830		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
1831		break;
1832	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
1833		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1834		break;
1835	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
1836		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1837		break;
1838	default:
1839		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
1840		return -EINVAL;
1841	}
1842
1843	if (*size > size_remaining) {
1844		DRM_ERROR("Invalid SVGA command (size mismatch):"
1845			  " %u.\n", cmd_id);
1846		return -EINVAL;
1847	}
1848
1849	if (unlikely(!sw_context->kernel)) {
1850		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
1851		return -EPERM;
1852	}
1853
1854	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
1855		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
1856
1857	return 0;
1858}
1859
1860static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1861	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
1862		    false, false, false),
1863	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
1864		    false, false, false),
1865	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
1866		    true, false, false),
1867	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
1868		    true, false, false),
1869	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
1870		    true, false, false),
1871	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
1872		    false, false, false),
1873	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
1874		    false, false, false),
1875	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
1876		    true, false, false),
1877	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
1878		    true, false, false),
1879	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
1880		    true, false, false),
1881	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
1882		    &vmw_cmd_set_render_target_check, true, false, false),
1883	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
1884		    true, false, false),
1885	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
1886		    true, false, false),
1887	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
1888		    true, false, false),
1889	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
1890		    true, false, false),
1891	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
1892		    true, false, false),
1893	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
1894		    true, false, false),
1895	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
1896		    true, false, false),
1897	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
1898		    false, false, false),
1899	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
1900		    true, false, false),
1901	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
1902		    true, false, false),
1903	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
1904		    true, false, false),
1905	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
1906		    true, false, false),
1907	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
1908		    true, false, false),
1909	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
1910		    true, false, false),
1911	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
1912		    true, false, false),
1913	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
1914		    true, false, false),
1915	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
1916		    true, false, false),
1917	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
1918		    true, false, false),
1919	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
1920		    &vmw_cmd_blt_surf_screen_check, false, false, false),
1921	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
1922		    false, false, false),
1923	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
1924		    false, false, false),
1925	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
1926		    false, false, false),
1927	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
1928		    false, false, false),
1929	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
1930		    false, false, false),
1931	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
1932		    false, false, false),
1933	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
1934		    false, false, false),
1935	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
1936		    false, false, false),
1937	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
1938		    false, false, false),
1939	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
1940		    false, false, false),
1941	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
1942		    false, false, false),
1943	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
1944		    false, false, false),
1945	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
1946		    false, false, false),
1947	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
1948		    false, false, true),
1949	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
1950		    false, false, true),
1951	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
1952		    false, false, true),
1953	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
1954		    false, false, true),
1955	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
1956		    false, false, true),
1957	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
1958		    false, false, true),
1959	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
1960		    false, false, true),
1961	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
1962		    false, false, true),
1963	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
1964		    true, false, true),
1965	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
1966		    false, false, true),
1967	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
1968		    true, false, true),
1969	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
1970		    &vmw_cmd_update_gb_surface, true, false, true),
1971	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
1972		    &vmw_cmd_readback_gb_image, true, false, true),
1973	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
1974		    &vmw_cmd_readback_gb_surface, true, false, true),
1975	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
1976		    &vmw_cmd_invalidate_gb_image, true, false, true),
1977	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
1978		    &vmw_cmd_invalidate_gb_surface, true, false, true),
1979	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
1980		    false, false, true),
1981	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
1982		    false, false, true),
1983	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
1984		    false, false, true),
1985	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
1986		    false, false, true),
1987	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
1988		    false, false, true),
1989	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
1990		    false, false, true),
1991	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
1992		    true, false, true),
1993	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
1994		    false, false, true),
1995	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
1996		    false, false, false),
1997	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
1998		    true, false, true),
1999	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2000		    true, false, true),
2001	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
2002		    true, false, true),
2003	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
2004		    true, false, true),
2005	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
2006		    false, false, true),
2007	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
2008		    false, false, true),
2009	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
2010		    false, false, true),
2011	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
2012		    false, false, true),
2013	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
2014		    false, false, true),
2015	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
2016		    false, false, true),
2017	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
2018		    false, false, true),
2019	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
2020		    false, false, true),
2021	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2022		    false, false, true),
2023	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2024		    false, false, true),
2025	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
2026		    true, false, true)
2027};
2028
2029static int vmw_cmd_check(struct vmw_private *dev_priv,
2030			 struct vmw_sw_context *sw_context,
2031			 void *buf, uint32_t *size)
2032{
2033	uint32_t cmd_id;
2034	uint32_t size_remaining = *size;
2035	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
2036	int ret;
2037	const struct vmw_cmd_entry *entry;
2038	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
2039
2040	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
2041	/* Handle any non-3D commands */
2042	if (unlikely(cmd_id < SVGA_CMD_MAX))
2043		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
2044
2045
2046	cmd_id = le32_to_cpu(header->id);
2047	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
2048
2049	cmd_id -= SVGA_3D_CMD_BASE;
2050	if (unlikely(*size > size_remaining))
2051		goto out_invalid;
2052
2053	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
2054		goto out_invalid;
2055
2056	entry = &vmw_cmd_entries[cmd_id];
2057	if (unlikely(!entry->func))
2058		goto out_invalid;
2059
2060	if (unlikely(!entry->user_allow && !sw_context->kernel))
2061		goto out_privileged;
2062
2063	if (unlikely(entry->gb_disable && gb))
2064		goto out_old;
2065
2066	if (unlikely(entry->gb_enable && !gb))
2067		goto out_new;
2068
2069	ret = entry->func(dev_priv, sw_context, header);
2070	if (unlikely(ret != 0))
2071		goto out_invalid;
2072
2073	return 0;
2074out_invalid:
2075	DRM_ERROR("Invalid SVGA3D command: %d\n",
2076		  cmd_id + SVGA_3D_CMD_BASE);
2077	return -EINVAL;
2078out_privileged:
2079	DRM_ERROR("Privileged SVGA3D command: %d\n",
2080		  cmd_id + SVGA_3D_CMD_BASE);
2081	return -EPERM;
2082out_old:
2083	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
2084		  cmd_id + SVGA_3D_CMD_BASE);
2085	return -EINVAL;
2086out_new:
2087	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
2088		  cmd_id + SVGA_3D_CMD_BASE);
2089	return -EINVAL;
2090}
2091
2092static int vmw_cmd_check_all(struct vmw_private *dev_priv,
2093			     struct vmw_sw_context *sw_context,
2094			     void *buf,
2095			     uint32_t size)
2096{
2097	int32_t cur_size = size;
2098	int ret;
2099
2100	sw_context->buf_start = buf;
2101
2102	while (cur_size > 0) {
2103		size = cur_size;
2104		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
2105		if (unlikely(ret != 0))
2106			return ret;
2107		buf = (void *)((unsigned long) buf + size);
2108		cur_size -= size;
2109	}
2110
2111	if (unlikely(cur_size != 0)) {
2112		DRM_ERROR("Command verifier out of sync.\n");
2113		return -EINVAL;
2114	}
2115
2116	return 0;
2117}
2118
2119static void vmw_free_relocations(struct vmw_sw_context *sw_context)
2120{
2121	sw_context->cur_reloc = 0;
2122}
2123
2124static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
2125{
2126	uint32_t i;
2127	struct vmw_relocation *reloc;
2128	struct ttm_validate_buffer *validate;
2129	struct ttm_buffer_object *bo;
2130
2131	for (i = 0; i < sw_context->cur_reloc; ++i) {
2132		reloc = &sw_context->relocs[i];
2133		validate = &sw_context->val_bufs[reloc->index].base;
2134		bo = validate->bo;
2135		switch (bo->mem.mem_type) {
2136		case TTM_PL_VRAM:
2137			reloc->location->offset += bo->offset;
2138			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
2139			break;
2140		case VMW_PL_GMR:
2141			reloc->location->gmrId = bo->mem.start;
2142			break;
2143		case VMW_PL_MOB:
2144			*reloc->mob_loc = bo->mem.start;
2145			break;
2146		default:
2147			BUG();
2148		}
2149	}
2150	vmw_free_relocations(sw_context);
2151}
2152
2153/**
2154 * vmw_resource_list_unreference - Free up a resource list and unreference
2155 * all resources referenced by it.
2156 *
2157 * @list: The resource list.
2158 */
2159static void vmw_resource_list_unreference(struct list_head *list)
2160{
2161	struct vmw_resource_val_node *val, *val_next;
2162
2163	/*
2164	 * Drop references to resources held during command submission.
2165	 */
2166
2167	list_for_each_entry_safe(val, val_next, list, head) {
2168		list_del_init(&val->head);
2169		vmw_resource_unreference(&val->res);
2170		if (unlikely(val->staged_bindings))
2171			kfree(val->staged_bindings);
2172		kfree(val);
2173	}
2174}
2175
2176static void vmw_clear_validations(struct vmw_sw_context *sw_context)
2177{
2178	struct vmw_validate_buffer *entry, *next;
2179	struct vmw_resource_val_node *val;
2180
2181	/*
2182	 * Drop references to DMA buffers held during command submission.
2183	 */
2184	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
2185				 base.head) {
2186		list_del(&entry->base.head);
2187		ttm_bo_unref(&entry->base.bo);
2188		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
2189		sw_context->cur_val_buf--;
2190	}
2191	BUG_ON(sw_context->cur_val_buf != 0);
2192
2193	list_for_each_entry(val, &sw_context->resource_list, head)
2194		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
2195}
2196
2197static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
2198				      struct ttm_buffer_object *bo,
2199				      bool validate_as_mob)
2200{
2201	int ret;
2202
2203
2204	/*
2205	 * Don't validate pinned buffers.
2206	 */
2207
2208	if (bo == dev_priv->pinned_bo ||
2209	    (bo == dev_priv->dummy_query_bo &&
2210	     dev_priv->dummy_query_bo_pinned))
2211		return 0;
2212
2213	if (validate_as_mob)
2214		return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
2215
2216	/**
2217	 * Put BO in VRAM if there is space, otherwise as a GMR.
2218	 * If there is no space in VRAM and GMR ids are all used up,
2219	 * start evicting GMRs to make room. If the DMA buffer can't be
2220	 * used as a GMR, this will return -ENOMEM.
2221	 */
2222
2223	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
2224	if (likely(ret == 0 || ret == -ERESTARTSYS))
2225		return ret;
2226
2227	/**
2228	 * If that failed, try VRAM again, this time evicting
2229	 * previous contents.
2230	 */
2231
2232	DRM_INFO("Falling through to VRAM.\n");
2233	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
2234	return ret;
2235}
2236
2237static int vmw_validate_buffers(struct vmw_private *dev_priv,
2238				struct vmw_sw_context *sw_context)
2239{
2240	struct vmw_validate_buffer *entry;
2241	int ret;
2242
2243	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
2244		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
2245						 entry->validate_as_mob);
2246		if (unlikely(ret != 0))
2247			return ret;
2248	}
2249	return 0;
2250}
2251
2252static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
2253				 uint32_t size)
2254{
2255	if (likely(sw_context->cmd_bounce_size >= size))
2256		return 0;
2257
2258	if (sw_context->cmd_bounce_size == 0)
2259		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
2260
2261	while (sw_context->cmd_bounce_size < size) {
2262		sw_context->cmd_bounce_size =
2263			PAGE_ALIGN(sw_context->cmd_bounce_size +
2264				   (sw_context->cmd_bounce_size >> 1));
2265	}
2266
2267	if (sw_context->cmd_bounce != NULL)
2268		vfree(sw_context->cmd_bounce);
2269
2270	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
2271
2272	if (sw_context->cmd_bounce == NULL) {
2273		DRM_ERROR("Failed to allocate command bounce buffer.\n");
2274		sw_context->cmd_bounce_size = 0;
2275		return -ENOMEM;
2276	}
2277
2278	return 0;
2279}
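/*
 * Worked example of the growth policy above (illustrative; assuming
 * VMWGFX_CMD_BOUNCE_INIT_SIZE is 32 KiB - the real value is defined in
 * vmwgfx_drv.h): a request for a 100 KiB command stream grows
 * cmd_bounce_size as
 *
 *	32K -> PAGE_ALIGN(48K) = 48K -> PAGE_ALIGN(72K) = 72K
 *	    -> PAGE_ALIGN(108K) = 108K  (>= 100K, loop stops)
 *
 * and only then performs a single vmalloc() of the final size, so later
 * submissions of similar size reuse the bounce buffer without reallocating.
 */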
2280
2281/**
2282 * vmw_execbuf_fence_commands - create and submit a command stream fence
2283 *
2284 * Creates a fence object and submits a command stream marker.
2285 * If this fails for some reason, we sync the fifo and return NULL.
2286 * It is then safe to fence buffers with a NULL pointer.
2287 *
2288 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
2289 * user-space handle is created; otherwise no handle is created.
2290 */
2291
2292int vmw_execbuf_fence_commands(struct drm_file *file_priv,
2293			       struct vmw_private *dev_priv,
2294			       struct vmw_fence_obj **p_fence,
2295			       uint32_t *p_handle)
2296{
2297	uint32_t sequence;
2298	int ret;
2299	bool synced = false;
2300
2301	/* p_handle implies file_priv. */
2302	BUG_ON(p_handle != NULL && file_priv == NULL);
2303
2304	ret = vmw_fifo_send_fence(dev_priv, &sequence);
2305	if (unlikely(ret != 0)) {
2306		DRM_ERROR("Fence submission error. Syncing.\n");
2307		synced = true;
2308	}
2309
2310	if (p_handle != NULL)
2311		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
2312					    sequence,
2313					    DRM_VMW_FENCE_FLAG_EXEC,
2314					    p_fence, p_handle);
2315	else
2316		ret = vmw_fence_create(dev_priv->fman, sequence,
2317				       DRM_VMW_FENCE_FLAG_EXEC,
2318				       p_fence);
2319
2320	if (unlikely(ret != 0 && !synced)) {
2321		(void) vmw_fallback_wait(dev_priv, false, false,
2322					 sequence, false,
2323					 VMW_FENCE_WAIT_TIMEOUT);
2324		*p_fence = NULL;
2325	}
2326
2327	return 0;
2328}
2329
2330/**
2331 * vmw_execbuf_copy_fence_user - copy fence object information to
2332 * user-space.
2333 *
2334 * @dev_priv: Pointer to a vmw_private struct.
2335 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
2336 * @ret: Return value from fence object creation.
2337 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
2338 * which the information should be copied.
2339 * @fence: Pointer to the fence object.
2340 * @fence_handle: User-space fence handle.
2341 *
2342 * This function copies fence information to user-space. If copying fails,
2343 * the user-space struct drm_vmw_fence_rep::error member is hopefully
2344 * left untouched, and if it's preloaded with -EFAULT by user-space,
2345 * the error will hopefully be detected.
2346 * Also if copying fails, user-space will be unable to signal the fence
2347 * object so we wait for it immediately, and then unreference the
2348 * user-space reference.
2349 */
2350void
2351vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2352			    struct vmw_fpriv *vmw_fp,
2353			    int ret,
2354			    struct drm_vmw_fence_rep __user *user_fence_rep,
2355			    struct vmw_fence_obj *fence,
2356			    uint32_t fence_handle)
2357{
2358	struct drm_vmw_fence_rep fence_rep;
2359
2360	if (user_fence_rep == NULL)
2361		return;
2362
2363	memset(&fence_rep, 0, sizeof(fence_rep));
2364
2365	fence_rep.error = ret;
2366	if (ret == 0) {
2367		BUG_ON(fence == NULL);
2368
2369		fence_rep.handle = fence_handle;
2370		fence_rep.seqno = fence->seqno;
2371		vmw_update_seqno(dev_priv, &dev_priv->fifo);
2372		fence_rep.passed_seqno = dev_priv->last_read_seqno;
2373	}
2374
2375	/*
2376	 * copy_to_user errors will be detected by user space not
2377	 * seeing fence_rep::error filled in. Typically
2378	 * user-space would have pre-set that member to -EFAULT.
2379	 */
2380	ret = copy_to_user(user_fence_rep, &fence_rep,
2381			   sizeof(fence_rep));
2382
2383	/*
2384	 * User-space lost the fence object. We need to sync
2385	 * and unreference the handle.
2386	 */
2387	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
2388		ttm_ref_object_base_unref(vmw_fp->tfile,
2389					  fence_handle, TTM_REF_USAGE);
2390		DRM_ERROR("Fence copy error. Syncing.\n");
2391		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
2392					  false, false,
2393					  VMW_FENCE_WAIT_TIMEOUT);
2394	}
2395}
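/*
 * Illustrative user-space counterpart (a sketch only; the ioctl wrapper and
 * struct names follow the vmwgfx UAPI, but the exact argument setup is
 * application specific). It shows the -EFAULT preload convention described
 * in the kernel-doc above:
 *
 *	struct drm_vmw_fence_rep rep;
 *	struct drm_vmw_execbuf_arg arg = { ... };
 *
 *	rep.error = -EFAULT;	// visible if the kernel's copy_to_user() fails
 *	arg.fence_rep = (unsigned long) &rep;
 *	ret = drmCommandWriteRead(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (ret == 0 && rep.error == 0)
 *		;	// fence handle in rep.handle, seqno in rep.seqno
 */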
2396
2397int vmw_execbuf_process(struct drm_file *file_priv,
2398			struct vmw_private *dev_priv,
2399			void __user *user_commands,
2400			void *kernel_commands,
2401			uint32_t command_size,
2402			uint64_t throttle_us,
2403			struct drm_vmw_fence_rep __user *user_fence_rep,
2404			struct vmw_fence_obj **out_fence)
2405{
2406	struct vmw_sw_context *sw_context = &dev_priv->ctx;
2407	struct vmw_fence_obj *fence = NULL;
2408	struct vmw_resource *error_resource;
2409	struct list_head resource_list;
2410	struct ww_acquire_ctx ticket;
2411	uint32_t handle;
2412	void *cmd;
2413	int ret;
2414
2415	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
2416	if (unlikely(ret != 0))
2417		return -ERESTARTSYS;
2418
2419	if (kernel_commands == NULL) {
2420		sw_context->kernel = false;
2421
2422		ret = vmw_resize_cmd_bounce(sw_context, command_size);
2423		if (unlikely(ret != 0))
2424			goto out_unlock;
2425
2426
2427		ret = copy_from_user(sw_context->cmd_bounce,
2428				     user_commands, command_size);
2429
2430		if (unlikely(ret != 0)) {
2431			ret = -EFAULT;
2432			DRM_ERROR("Failed copying commands.\n");
2433			goto out_unlock;
2434		}
2435		kernel_commands = sw_context->cmd_bounce;
2436	} else
2437		sw_context->kernel = true;
2438
2439	sw_context->fp = vmw_fpriv(file_priv);
2440	sw_context->cur_reloc = 0;
2441	sw_context->cur_val_buf = 0;
2442	sw_context->fence_flags = 0;
2443	INIT_LIST_HEAD(&sw_context->resource_list);
2444	sw_context->cur_query_bo = dev_priv->pinned_bo;
2445	sw_context->last_query_ctx = NULL;
2446	sw_context->needs_post_query_barrier = false;
2447	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
2448	INIT_LIST_HEAD(&sw_context->validate_nodes);
2449	INIT_LIST_HEAD(&sw_context->res_relocations);
2450	if (!sw_context->res_ht_initialized) {
2451		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
2452		if (unlikely(ret != 0))
2453			goto out_unlock;
2454		sw_context->res_ht_initialized = true;
2455	}
2456	INIT_LIST_HEAD(&sw_context->staged_shaders);
2457
2458	INIT_LIST_HEAD(&resource_list);
2459	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
2460				command_size);
2461	if (unlikely(ret != 0))
2462		goto out_err_nores;
2463
2464	ret = vmw_resources_reserve(sw_context);
2465	if (unlikely(ret != 0))
2466		goto out_err_nores;
2467
2468	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
2469	if (unlikely(ret != 0))
2470		goto out_err;
2471
2472	ret = vmw_validate_buffers(dev_priv, sw_context);
2473	if (unlikely(ret != 0))
2474		goto out_err;
2475
2476	ret = vmw_resources_validate(sw_context);
2477	if (unlikely(ret != 0))
2478		goto out_err;
2479
2480	if (throttle_us) {
2481		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
2482				   throttle_us);
2483
2484		if (unlikely(ret != 0))
2485			goto out_err;
2486	}
2487
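	/*
	 * The binding mutex is held across the actual FIFO submission. On
	 * devices with MOB support, contexts are rebound before the verified
	 * command stream is copied into the FIFO and committed.
	 */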
2488	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
2489	if (unlikely(ret != 0)) {
2490		ret = -ERESTARTSYS;
2491		goto out_err;
2492	}
2493
2494	if (dev_priv->has_mob) {
2495		ret = vmw_rebind_contexts(sw_context);
2496		if (unlikely(ret != 0))
2497			goto out_unlock_binding;
2498	}
2499
2500	cmd = vmw_fifo_reserve(dev_priv, command_size);
2501	if (unlikely(cmd == NULL)) {
2502		DRM_ERROR("Failed reserving fifo space for commands.\n");
2503		ret = -ENOMEM;
2504		goto out_unlock_binding;
2505	}
2506
2507	vmw_apply_relocations(sw_context);
2508	memcpy(cmd, kernel_commands, command_size);
2509
2510	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2511	vmw_resource_relocations_free(&sw_context->res_relocations);
2512
2513	vmw_fifo_commit(dev_priv, command_size);
2514
2515	vmw_query_bo_switch_commit(dev_priv, sw_context);
2516	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
2517					 &fence,
2518					 (user_fence_rep) ? &handle : NULL);
2519	/*
2520	 * This error is harmless, because if fence submission fails,
2521	 * vmw_fifo_send_fence will sync. The error will be propagated to
2522	 * user-space in @user_fence_rep.
2523	 */
2524
2525	if (ret != 0)
2526		DRM_ERROR("Fence submission error. Syncing.\n");
2527
2528	vmw_resource_list_unreserve(&sw_context->resource_list, false);
2529	mutex_unlock(&dev_priv->binding_mutex);
2530
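	/*
	 * Attach the fence to all validated buffer objects and drop their
	 * reservations; the buffers remain busy until the fence signals.
	 */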
2531	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2532				    (void *) fence);
2533
2534	if (unlikely(dev_priv->pinned_bo != NULL &&
2535		     !dev_priv->query_cid_valid))
2536		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
2537
2538	vmw_clear_validations(sw_context);
2539	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
2540				    user_fence_rep, fence, handle);
2541
2542	/* Don't unreference when handing fence out */
2543	if (unlikely(out_fence != NULL)) {
2544		*out_fence = fence;
2545		fence = NULL;
2546	} else if (likely(fence != NULL)) {
2547		vmw_fence_obj_unreference(&fence);
2548	}
2549
2550	list_splice_init(&sw_context->resource_list, &resource_list);
2551	vmw_compat_shaders_commit(sw_context->fp->shman,
2552				  &sw_context->staged_shaders);
2553	mutex_unlock(&dev_priv->cmdbuf_mutex);
2554
2555	/*
2556	 * Unreference resources outside of the cmdbuf_mutex to
2557	 * avoid deadlocks in resource destruction paths.
2558	 */
2559	vmw_resource_list_unreference(&resource_list);
2560
2561	return 0;
2562
2563out_unlock_binding:
2564	mutex_unlock(&dev_priv->binding_mutex);
2565out_err:
2566	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2567out_err_nores:
2568	vmw_resource_list_unreserve(&sw_context->resource_list, true);
2569	vmw_resource_relocations_free(&sw_context->res_relocations);
2570	vmw_free_relocations(sw_context);
2571	vmw_clear_validations(sw_context);
2572	if (unlikely(dev_priv->pinned_bo != NULL &&
2573		     !dev_priv->query_cid_valid))
2574		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2575out_unlock:
2576	list_splice_init(&sw_context->resource_list, &resource_list);
2577	error_resource = sw_context->error_resource;
2578	sw_context->error_resource = NULL;
2579	vmw_compat_shaders_revert(sw_context->fp->shman,
2580				  &sw_context->staged_shaders);
2581	mutex_unlock(&dev_priv->cmdbuf_mutex);
2582
2583	/*
2584	 * Unreference resources outside of the cmdbuf_mutex to
2585	 * avoid deadlocks in resource destruction paths.
2586	 */
2587	vmw_resource_list_unreference(&resource_list);
2588	if (unlikely(error_resource != NULL))
2589		vmw_resource_unreference(&error_resource);
2590
2591	return ret;
2592}
2593
2594/**
2595 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
2596 *
2597 * @dev_priv: The device private structure.
2598 *
2599 * This function is called to idle the fifo and unpin the query buffer
2600 * if the normal way to do this hits an error, which should typically be
2601 * extremely rare.
2602 */
2603static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2604{
2605	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
2606
2607	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
2608	vmw_bo_pin(dev_priv->pinned_bo, false);
2609	vmw_bo_pin(dev_priv->dummy_query_bo, false);
2610	dev_priv->dummy_query_bo_pinned = false;
2611}
2612
2613
2614/**
2615 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2616 * query bo.
2617 *
2618 * @dev_priv: The device private structure.
2619 * @fence: If non-NULL, should point to a struct vmw_fence_obj issued
2620 * _after_ a query barrier that flushes all queries touching the current
2621 * buffer pointed to by @dev_priv->pinned_bo.
2622 *
2623 * This function should be used to unpin the pinned query bo, or
2624 * as a query barrier when we need to make sure that all queries have
2625 * finished before the next fifo command. (For example on hardware
2626 * context destructions where the hardware may otherwise leak unfinished
2627 * queries).
2628 *
2629 * This function does not return any failure codes, but makes attempts
2630 * to unpin safely if an error occurs.
2631 *
2632 * The function will synchronize on the previous query barrier, and will
2633 * thus not finish until that barrier has executed.
2634 *
2635 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
2636 * before calling this function.
2637 */
2638void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2639				     struct vmw_fence_obj *fence)
2640{
2641	int ret = 0;
2642	struct list_head validate_list;
2643	struct ttm_validate_buffer pinned_val, query_val;
2644	struct vmw_fence_obj *lfence = NULL;
2645	struct ww_acquire_ctx ticket;
2646
2647	if (dev_priv->pinned_bo == NULL)
2648		goto out_unlock;
2649
2650	INIT_LIST_HEAD(&validate_list);
2651
2652	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
2653	list_add_tail(&pinned_val.head, &validate_list);
2654
2655	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
2656	list_add_tail(&query_val.head, &validate_list);
2657
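	/*
	 * The reservation must not be aborted by a signal: this function
	 * cannot return an error, so retry on -ERESTARTSYS until the
	 * buffers are reserved.
	 */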
2658	do {
2659		ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
2660	} while (ret == -ERESTARTSYS);
2661
2662	if (unlikely(ret != 0)) {
2663		vmw_execbuf_unpin_panic(dev_priv);
2664		goto out_no_reserve;
2665	}
2666
2667	if (dev_priv->query_cid_valid) {
2668		BUG_ON(fence != NULL);
2669		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
2670		if (unlikely(ret != 0)) {
2671			vmw_execbuf_unpin_panic(dev_priv);
2672			goto out_no_emit;
2673		}
2674		dev_priv->query_cid_valid = false;
2675	}
2676
2677	vmw_bo_pin(dev_priv->pinned_bo, false);
2678	vmw_bo_pin(dev_priv->dummy_query_bo, false);
2679	dev_priv->dummy_query_bo_pinned = false;
2680
2681	if (fence == NULL) {
2682		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2683						  NULL);
2684		fence = lfence;
2685	}
2686	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
2687	if (lfence != NULL)
2688		vmw_fence_obj_unreference(&lfence);
2689
2690	ttm_bo_unref(&query_val.bo);
2691	ttm_bo_unref(&pinned_val.bo);
2692	ttm_bo_unref(&dev_priv->pinned_bo);
2693
2694out_unlock:
2695	return;
2696
2697out_no_emit:
2698	ttm_eu_backoff_reservation(&ticket, &validate_list);
2699out_no_reserve:
2700	ttm_bo_unref(&query_val.bo);
2701	ttm_bo_unref(&pinned_val.bo);
2702	ttm_bo_unref(&dev_priv->pinned_bo);
2703}
2704
2705/**
2706 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2707 * query bo.
2708 *
2709 * @dev_priv: The device private structure.
2710 *
2711 * This function should be used to unpin the pinned query bo, or
2712 * as a query barrier when we need to make sure that all queries have
2713 * finished before the next fifo command. (For example on hardware
2714 * context destructions where the hardware may otherwise leak unfinished
2715 * queries).
2716 *
2717 * This function does not return any failure codes, but makes attempts
2718 * to unpin safely if an error occurs.
2719 *
2720 * The function will synchronize on the previous query barrier, and will
2721 * thus not finish until that barrier has executed.
2722 */
2723void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
2724{
2725	mutex_lock(&dev_priv->cmdbuf_mutex);
2726	if (dev_priv->query_cid_valid)
2727		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2728	mutex_unlock(&dev_priv->cmdbuf_mutex);
2729}
2730
2731
2732int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
2733		      struct drm_file *file_priv)
2734{
2735	struct vmw_private *dev_priv = vmw_priv(dev);
2736	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
2737	int ret;
2738
2739	/*
2740	 * This will allow us to extend the ioctl argument while
2741	 * maintaining backwards compatibility:
2742	 * We take different code paths depending on the value of
2743	 * arg->version.
2744	 */
2745
2746	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
2747		DRM_ERROR("Incorrect execbuf version.\n");
2748		DRM_ERROR("You're running outdated experimental "
2749			  "vmwgfx user-space drivers.");
2750		return -EINVAL;
2751	}
2752
2753	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
2754	if (unlikely(ret != 0))
2755		return ret;
2756
2757	ret = vmw_execbuf_process(file_priv, dev_priv,
2758				  (void __user *)(unsigned long)arg->commands,
2759				  NULL, arg->command_size, arg->throttle_us,
2760				  (void __user *)(unsigned long)arg->fence_rep,
2761				  NULL);
2762
2763	if (unlikely(ret != 0))
2764		goto out_unlock;
2765
2766	vmw_kms_cursor_post_execbuf(dev_priv);
2767
2768out_unlock:
2769	ttm_read_unlock(&dev_priv->reservation_sem);
2770	return ret;
2771}
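
/*
 * A minimal user-space sketch of driving this ioctl, assuming a DRM file
 * descriptor @fd and a prepared SVGA command buffer @cmds of @size bytes.
 * Only the argument fields dereferenced above (version, commands,
 * command_size, throttle_us and fence_rep) are filled in, and the libdrm
 * helper drmCommandWrite() is assumed for issuing the driver-private ioctl:
 *
 *	struct drm_vmw_execbuf_arg arg;
 *	struct drm_vmw_fence_rep fence_rep;
 *	int ret;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	memset(&fence_rep, 0, sizeof(fence_rep));
 *	// Preload the error member so a failed copy back from the kernel
 *	// can be detected; see vmw_execbuf_copy_fence_user().
 *	fence_rep.error = -EFAULT;
 *
 *	arg.version = DRM_VMW_EXECBUF_VERSION;
 *	arg.commands = (uintptr_t)cmds;
 *	arg.command_size = size;
 *	arg.throttle_us = 0;
 *	arg.fence_rep = (uintptr_t)&fence_rep;
 *
 *	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 */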