v4.6
 
   1/**************************************************************************
   2 *
   3 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27/*
   28 * This file implements the vmwgfx context binding manager.
   29 * The sole reason for having to use this code is that VMware guest-backed
   30 * contexts can be swapped out to their backing mobs by the device at any
   31 * time, and swapped in again at any time. At swapin time, the device
   32 * validates the context bindings to make sure they point to valid resources.
   33 * It's this outside-of-drawcall validation (which can happen at any time)
   34 * that makes this code necessary.
  35 *
   36 * We therefore need to kill any context bindings pointing to a resource
   37 * when the resource is swapped out. Furthermore, if the vmwgfx driver has
   38 * swapped out the context, we can't swap it in again to kill the bindings,
   39 * because of backing mob reservation lockdep violations. So, as part of
   40 * context swapout, we also kill all bindings of the context, so that they
   41 * are already killed if a resource to which a binding points needs to be
   42 * swapped out.
  43 *
   44 * Note that a resource can be pointed to by bindings from multiple contexts.
   45 * Therefore we can't easily protect this data with a per-context mutex
  46 * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex
  47 * to protect all binding manager data.
  48 *
  49 * Finally, any association between a context and a global resource
  50 * (surface, shader or even DX query) is conceptually a context binding that
  51 * needs to be tracked by this code.
  52 */
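/*
 * A rough sketch of the intended call flow, assuming a caller similar to the
 * vmwgfx execbuf path (variable names here are illustrative only; the real
 * call sites live outside this file):
 *
 *	vmw_binding_add(staged, &bi, shader_slot, slot);
 *	... submit the associated commands to the device ...
 *	vmw_binding_state_commit(persistent, staged);
 *
 * On resource eviction and context swapout, respectively:
 *
 *	vmw_binding_res_list_scrub(&res->binding_head);
 *	vmw_binding_state_scrub(persistent);
 *
 * and on context swapin, scrubbed bindings are re-emitted with:
 *
 *	vmw_binding_rebind_all(persistent);
 */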
  53
  54#include "vmwgfx_drv.h"
  55#include "vmwgfx_binding.h"
  56#include "device_include/svga3d_reg.h"
  57
  58#define VMW_BINDING_RT_BIT     0
  59#define VMW_BINDING_PS_BIT     1
  60#define VMW_BINDING_SO_BIT     2
  61#define VMW_BINDING_VB_BIT     3
  62#define VMW_BINDING_NUM_BITS   4
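/*
 * These bit numbers index the @dirty bitmap of struct vmw_ctx_binding_state
 * below. vmw_binding_emit_dirty() walks that bitmap and emits the matching
 * batched binding command for each bit that is set.
 */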
  63
  64#define VMW_BINDING_PS_SR_BIT  0
  65
  66/**
  67 * struct vmw_ctx_binding_state - per context binding state
  68 *
  69 * @dev_priv: Pointer to device private structure.
  70 * @list: linked list of individual active bindings.
  71 * @render_targets: Render target bindings.
  72 * @texture_units: Texture units bindings.
  73 * @ds_view: Depth-stencil view binding.
  74 * @so_targets: StreamOutput target bindings.
  75 * @vertex_buffers: Vertex buffer bindings.
  76 * @index_buffer: Index buffer binding.
  77 * @per_shader: Per shader-type bindings.
  78 * @dirty: Bitmap tracking per binding-type changes that have not yet
  79 * been emitted to the device.
  80 * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
  81 * have not yet been emitted to the device.
  82 * @bind_cmd_buffer: Scratch space used to construct binding commands.
   83 * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer.
  84 * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the
  85 * device binding slot of the first command data entry in @bind_cmd_buffer.
  86 *
  87 * Note that this structure also provides storage space for the individual
  88 * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
  89 * for individual bindings.
  90 *
  91 */
  92struct vmw_ctx_binding_state {
  93	struct vmw_private *dev_priv;
  94	struct list_head list;
  95	struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
  96	struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
  97	struct vmw_ctx_bindinfo_view ds_view;
  98	struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS];
  99	struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
 100	struct vmw_ctx_bindinfo_ib index_buffer;
 101	struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10];
 102
 103	unsigned long dirty;
 104	DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
 105
 106	u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
 107	u32 bind_cmd_count;
 108	u32 bind_first_slot;
 109};
 110
 111static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
 112static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
 113					   bool rebind);
 114static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
 115static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
 116static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
 117static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
 118static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
 119static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
 120static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
 121				       bool rebind);
 122static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
 123static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
 124static void vmw_binding_build_asserts(void) __attribute__ ((unused));
 125
 126typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
 127
 128/**
 129 * struct vmw_binding_info - Per binding type information for the binding
 130 * manager
 131 *
 132 * @size: The size of the struct binding derived from a struct vmw_ctx_bindinfo.
 133 * @offsets: array[shader_slot] of offsets to the array[slot]
 134 * of struct bindings for the binding type.
 135 * @scrub_func: Pointer to the scrub function for this binding type.
 136 *
 137 * Holds static information to help optimize the binding manager and avoid
 138 * an excessive amount of switch statements.
 139 */
 140struct vmw_binding_info {
 141	size_t size;
 142	const size_t *offsets;
 143	vmw_scrub_func scrub_func;
 144};
 145
 146/*
 147 * A number of static variables that help determine the scrub func and the
 148 * location of the struct vmw_ctx_bindinfo slots for each binding type.
 149 */
 150static const size_t vmw_binding_shader_offsets[] = {
 151	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
 152	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
 153	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
 154};
 155static const size_t vmw_binding_rt_offsets[] = {
 156	offsetof(struct vmw_ctx_binding_state, render_targets),
 157};
 158static const size_t vmw_binding_tex_offsets[] = {
 159	offsetof(struct vmw_ctx_binding_state, texture_units),
 160};
 161static const size_t vmw_binding_cb_offsets[] = {
 162	offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
 163	offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
 164	offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
 165};
 166static const size_t vmw_binding_dx_ds_offsets[] = {
 167	offsetof(struct vmw_ctx_binding_state, ds_view),
 168};
 169static const size_t vmw_binding_sr_offsets[] = {
 170	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
 171	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
 172	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
 173};
 174static const size_t vmw_binding_so_offsets[] = {
 175	offsetof(struct vmw_ctx_binding_state, so_targets),
 176};
 177static const size_t vmw_binding_vb_offsets[] = {
 178	offsetof(struct vmw_ctx_binding_state, vertex_buffers),
 179};
 180static const size_t vmw_binding_ib_offsets[] = {
 181	offsetof(struct vmw_ctx_binding_state, index_buffer),
 182};
 183
 184static const struct vmw_binding_info vmw_binding_infos[] = {
 185	[vmw_ctx_binding_shader] = {
 186		.size = sizeof(struct vmw_ctx_bindinfo_shader),
 187		.offsets = vmw_binding_shader_offsets,
 188		.scrub_func = vmw_binding_scrub_shader},
 189	[vmw_ctx_binding_rt] = {
 190		.size = sizeof(struct vmw_ctx_bindinfo_view),
 191		.offsets = vmw_binding_rt_offsets,
 192		.scrub_func = vmw_binding_scrub_render_target},
 193	[vmw_ctx_binding_tex] = {
 194		.size = sizeof(struct vmw_ctx_bindinfo_tex),
 195		.offsets = vmw_binding_tex_offsets,
 196		.scrub_func = vmw_binding_scrub_texture},
 197	[vmw_ctx_binding_cb] = {
 198		.size = sizeof(struct vmw_ctx_bindinfo_cb),
 199		.offsets = vmw_binding_cb_offsets,
 200		.scrub_func = vmw_binding_scrub_cb},
 201	[vmw_ctx_binding_dx_shader] = {
 202		.size = sizeof(struct vmw_ctx_bindinfo_shader),
 203		.offsets = vmw_binding_shader_offsets,
 204		.scrub_func = vmw_binding_scrub_dx_shader},
 205	[vmw_ctx_binding_dx_rt] = {
 206		.size = sizeof(struct vmw_ctx_bindinfo_view),
 207		.offsets = vmw_binding_rt_offsets,
 208		.scrub_func = vmw_binding_scrub_dx_rt},
 209	[vmw_ctx_binding_sr] = {
 210		.size = sizeof(struct vmw_ctx_bindinfo_view),
 211		.offsets = vmw_binding_sr_offsets,
 212		.scrub_func = vmw_binding_scrub_sr},
 213	[vmw_ctx_binding_ds] = {
 214		.size = sizeof(struct vmw_ctx_bindinfo_view),
 215		.offsets = vmw_binding_dx_ds_offsets,
 216		.scrub_func = vmw_binding_scrub_dx_rt},
 217	[vmw_ctx_binding_so] = {
 218		.size = sizeof(struct vmw_ctx_bindinfo_so),
 219		.offsets = vmw_binding_so_offsets,
 220		.scrub_func = vmw_binding_scrub_so},
 221	[vmw_ctx_binding_vb] = {
 222		.size = sizeof(struct vmw_ctx_bindinfo_vb),
 223		.offsets = vmw_binding_vb_offsets,
 224		.scrub_func = vmw_binding_scrub_vb},
 225	[vmw_ctx_binding_ib] = {
 226		.size = sizeof(struct vmw_ctx_bindinfo_ib),
 227		.offsets = vmw_binding_ib_offsets,
 228		.scrub_func = vmw_binding_scrub_ib},
 229};
 230
 231/**
 232 * vmw_cbs_context - Return a pointer to the context resource of a
 233 * context binding state tracker.
 234 *
 235 * @cbs: The context binding state tracker.
 236 *
 237 * Provided there are any active bindings, this function will return an
 238 * unreferenced pointer to the context resource that owns the context
 239 * binding state tracker. If there are no active bindings, this function
 240 * will return NULL. Note that the caller must somehow ensure that a reference
 241 * is held on the context resource prior to calling this function.
 242 */
 243static const struct vmw_resource *
 244vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
 245{
 246	if (list_empty(&cbs->list))
 247		return NULL;
 248
 249	return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
 250				ctx_list)->ctx;
 251}
 252
 253/**
 254 * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
 255 *
 256 * @cbs: Pointer to a struct vmw_ctx_binding state which holds the slot.
 257 * @bt: The binding type.
 258 * @shader_slot: The shader slot of the binding. If none, then set to 0.
 259 * @slot: The slot of the binding.
 260 */
 261static struct vmw_ctx_bindinfo *
 262vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
 263		enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
 264{
 265	const struct vmw_binding_info *b = &vmw_binding_infos[bt];
 266	size_t offset = b->offsets[shader_slot] + b->size*slot;
 267
 268	return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
 269}
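/*
 * Worked example of the arithmetic above, using this file's tables: for
 * bt == vmw_ctx_binding_cb, shader_slot == 1 and slot == 3, @offset becomes
 *
 *	offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers) +
 *		sizeof(struct vmw_ctx_bindinfo_cb) * 3
 *
 * so the returned pointer aliases &cbs->per_shader[1].const_buffers[3]
 * (the cast assumes the embedded struct vmw_ctx_bindinfo sits at the start
 * of each derived bindinfo struct).
 */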
 270
 271/**
 272 * vmw_binding_drop: Stop tracking a context binding
 273 *
 274 * @bi: Pointer to binding tracker storage.
 275 *
 276 * Stops tracking a context binding, and re-initializes its storage.
 277 * Typically used when the context binding is replaced with a binding to
 278 * another (or the same, for that matter) resource.
 279 */
 280static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
 281{
 282	list_del(&bi->ctx_list);
 283	if (!list_empty(&bi->res_list))
 284		list_del(&bi->res_list);
 285	bi->ctx = NULL;
 286}
 287
 288/**
 289 * vmw_binding_add: Start tracking a context binding
 290 *
 291 * @cbs: Pointer to the context binding state tracker.
  292 * @bi: Information about the binding to track.
      * @shader_slot: The shader slot of the binding, if applicable, otherwise 0.
      * @slot: The slot of the binding.
 293 *
 294 * Starts tracking the binding in the context binding
 295 * state structure @cbs.
 296 */
 297void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
 298		    const struct vmw_ctx_bindinfo *bi,
 299		    u32 shader_slot, u32 slot)
 300{
 301	struct vmw_ctx_bindinfo *loc =
 302		vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
 303	const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];
 304
 305	if (loc->ctx != NULL)
 306		vmw_binding_drop(loc);
 307
 308	memcpy(loc, bi, b->size);
 309	loc->scrubbed = false;
 310	list_add(&loc->ctx_list, &cbs->list);
 311	INIT_LIST_HEAD(&loc->res_list);
 312}
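/*
 * Minimal usage sketch (hypothetical caller; the real call sites are in the
 * command parser). All fields of the derived bindinfo must be filled in,
 * since the memcpy() above copies the whole derived struct:
 *
 *	struct vmw_ctx_bindinfo_cb bcb = {
 *		.bi = { .ctx = ctx_res, .res = buf_res,
 *			.bt = vmw_ctx_binding_cb },
 *		.shader_slot = shader_slot,
 *		.offset = 0, .size = buf_size, .slot = slot,
 *	};
 *	vmw_binding_add(cbs, &bcb.bi, shader_slot, slot);
 */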
 313
 314/**
 315 * vmw_binding_transfer: Transfer a context binding tracking entry.
 316 *
  317 * @cbs: Pointer to the persistent context binding state tracker.
      * @from: Staged binding state tracker from which @bi originates.
  318 * @bi: Information about the binding to track.
 319 *
 320 */
 321static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
 322				 const struct vmw_ctx_binding_state *from,
 323				 const struct vmw_ctx_bindinfo *bi)
 324{
 325	size_t offset = (unsigned long)bi - (unsigned long)from;
 326	struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
 327		((unsigned long) cbs + offset);
 328
 329	if (loc->ctx != NULL) {
 330		WARN_ON(bi->scrubbed);
 331
 332		vmw_binding_drop(loc);
 333	}
 334
 335	if (bi->res != NULL) {
 336		memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
 337		list_add_tail(&loc->ctx_list, &cbs->list);
 338		list_add_tail(&loc->res_list, &loc->res->binding_head);
 339	}
 340}
 341
 342/**
 343 * vmw_binding_state_kill - Kill all bindings associated with a
 344 * struct vmw_ctx_binding state structure, and re-initialize the structure.
 345 *
 346 * @cbs: Pointer to the context binding state tracker.
 347 *
 348 * Emits commands to scrub all bindings associated with the
 349 * context binding state tracker. Then re-initializes the whole structure.
 350 */
 351void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
 352{
 353	struct vmw_ctx_bindinfo *entry, *next;
 354
 355	vmw_binding_state_scrub(cbs);
 356	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
 357		vmw_binding_drop(entry);
 358}
 359
 360/**
 361 * vmw_binding_state_scrub - Scrub all bindings associated with a
 362 * struct vmw_ctx_binding state structure.
 363 *
 364 * @cbs: Pointer to the context binding state tracker.
 365 *
 366 * Emits commands to scrub all bindings associated with the
 367 * context binding state tracker.
 368 */
 369void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
 370{
 371	struct vmw_ctx_bindinfo *entry;
 372
 373	list_for_each_entry(entry, &cbs->list, ctx_list) {
 374		if (!entry->scrubbed) {
 375			(void) vmw_binding_infos[entry->bt].scrub_func
 376				(entry, false);
 377			entry->scrubbed = true;
 378		}
 379	}
 380
 381	(void) vmw_binding_emit_dirty(cbs);
 382}
 383
 384/**
 385 * vmw_binding_res_list_kill - Kill all bindings on a
 386 * resource binding list
 387 *
 388 * @head: list head of resource binding list
 389 *
 390 * Kills all bindings associated with a specific resource. Typically
 391 * called before the resource is destroyed.
 392 */
 393void vmw_binding_res_list_kill(struct list_head *head)
 394{
 395	struct vmw_ctx_bindinfo *entry, *next;
 396
 397	vmw_binding_res_list_scrub(head);
 398	list_for_each_entry_safe(entry, next, head, res_list)
 399		vmw_binding_drop(entry);
 400}
 401
 402/**
 403 * vmw_binding_res_list_scrub - Scrub all bindings on a
 404 * resource binding list
 405 *
 406 * @head: list head of resource binding list
 407 *
 408 * Scrub all bindings associated with a specific resource. Typically
 409 * called before the resource is evicted.
 410 */
 411void vmw_binding_res_list_scrub(struct list_head *head)
 412{
 413	struct vmw_ctx_bindinfo *entry;
 414
 415	list_for_each_entry(entry, head, res_list) {
 416		if (!entry->scrubbed) {
 417			(void) vmw_binding_infos[entry->bt].scrub_func
 418				(entry, false);
 419			entry->scrubbed = true;
 420		}
 421	}
 422
 423	list_for_each_entry(entry, head, res_list) {
 424		struct vmw_ctx_binding_state *cbs =
 425			vmw_context_binding_state(entry->ctx);
 426
 427		(void) vmw_binding_emit_dirty(cbs);
 428	}
 429}
 430
 431
 432/**
 433 * vmw_binding_state_commit - Commit staged binding info
 434 *
  435 * @to: Pointer to the persistent context binding state tracker.
  436 * @from: Staged binding info built during execbuf.
  437 *
  438 * Transfers binding info from a temporary structure
  439 * (typically used by execbuf) to the persistent
  440 * structure in the context. This can be done once commands have been
  441 * submitted to hardware.
 443 */
 444void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
 445			      struct vmw_ctx_binding_state *from)
 446{
 447	struct vmw_ctx_bindinfo *entry, *next;
 448
 449	list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
 450		vmw_binding_transfer(to, from, entry);
 451		vmw_binding_drop(entry);
 452	}
 453}
 454
 455/**
 456 * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
 457 *
  458 * @cbs: Pointer to the context binding state tracker.
 459 *
 460 * Walks through the context binding list and rebinds all scrubbed
 461 * resources.
 462 */
 463int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
 464{
 465	struct vmw_ctx_bindinfo *entry;
 466	int ret;
 467
 468	list_for_each_entry(entry, &cbs->list, ctx_list) {
 469		if (likely(!entry->scrubbed))
 470			continue;
 471
 472		if ((entry->res == NULL || entry->res->id ==
 473			    SVGA3D_INVALID_ID))
 474			continue;
 475
 476		ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
 477		if (unlikely(ret != 0))
 478			return ret;
 479
 480		entry->scrubbed = false;
 481	}
 482
 483	return vmw_binding_emit_dirty(cbs);
 484}
 485
 486/**
 487 * vmw_binding_scrub_shader - scrub a shader binding from a context.
 488 *
 489 * @bi: single binding information.
 490 * @rebind: Whether to issue a bind instead of scrub command.
 491 */
 492static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
 493{
 494	struct vmw_ctx_bindinfo_shader *binding =
 495		container_of(bi, typeof(*binding), bi);
 496	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 497	struct {
 498		SVGA3dCmdHeader header;
 499		SVGA3dCmdSetShader body;
 500	} *cmd;
 501
 502	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 503	if (unlikely(cmd == NULL)) {
 504		DRM_ERROR("Failed reserving FIFO space for shader "
 505			  "unbinding.\n");
 506		return -ENOMEM;
 507	}
 508
 509	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
 510	cmd->header.size = sizeof(cmd->body);
 511	cmd->body.cid = bi->ctx->id;
 512	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
 513	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 514	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 515
 516	return 0;
 517}
 518
 519/**
 520 * vmw_binding_scrub_render_target - scrub a render target binding
 521 * from a context.
 522 *
 523 * @bi: single binding information.
 524 * @rebind: Whether to issue a bind instead of scrub command.
 525 */
 526static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
 527					   bool rebind)
 528{
 529	struct vmw_ctx_bindinfo_view *binding =
 530		container_of(bi, typeof(*binding), bi);
 531	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 532	struct {
 533		SVGA3dCmdHeader header;
 534		SVGA3dCmdSetRenderTarget body;
 535	} *cmd;
 536
 537	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 538	if (unlikely(cmd == NULL)) {
 539		DRM_ERROR("Failed reserving FIFO space for render target "
 540			  "unbinding.\n");
 541		return -ENOMEM;
 542	}
 543
 544	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
 545	cmd->header.size = sizeof(cmd->body);
 546	cmd->body.cid = bi->ctx->id;
 547	cmd->body.type = binding->slot;
 548	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 549	cmd->body.target.face = 0;
 550	cmd->body.target.mipmap = 0;
 551	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 552
 553	return 0;
 554}
 555
 556/**
 557 * vmw_binding_scrub_texture - scrub a texture binding from a context.
 558 *
 559 * @bi: single binding information.
 560 * @rebind: Whether to issue a bind instead of scrub command.
 561 *
 562 * TODO: Possibly complement this function with a function that takes
 563 * a list of texture bindings and combines them to a single command.
 564 */
 565static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
 566				     bool rebind)
 567{
 568	struct vmw_ctx_bindinfo_tex *binding =
 569		container_of(bi, typeof(*binding), bi);
 570	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 571	struct {
 572		SVGA3dCmdHeader header;
 573		struct {
 574			SVGA3dCmdSetTextureState c;
 575			SVGA3dTextureState s1;
 576		} body;
 577	} *cmd;
 578
 579	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 580	if (unlikely(cmd == NULL)) {
 581		DRM_ERROR("Failed reserving FIFO space for texture "
 582			  "unbinding.\n");
 583		return -ENOMEM;
 584	}
 585
 586	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
 587	cmd->header.size = sizeof(cmd->body);
 588	cmd->body.c.cid = bi->ctx->id;
 589	cmd->body.s1.stage = binding->texture_stage;
 590	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
 591	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 592	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 593
 594	return 0;
 595}
 596
 597/**
 598 * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context.
 599 *
 600 * @bi: single binding information.
 601 * @rebind: Whether to issue a bind instead of scrub command.
 602 */
 603static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
 604{
 605	struct vmw_ctx_bindinfo_shader *binding =
 606		container_of(bi, typeof(*binding), bi);
 607	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 608	struct {
 609		SVGA3dCmdHeader header;
 610		SVGA3dCmdDXSetShader body;
 611	} *cmd;
 612
 613	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
 614	if (unlikely(cmd == NULL)) {
 615		DRM_ERROR("Failed reserving FIFO space for DX shader "
 616			  "unbinding.\n");
 617		return -ENOMEM;
 618	}
 619	cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
 620	cmd->header.size = sizeof(cmd->body);
 621	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
 622	cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 623	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 624
 625	return 0;
 626}
 627
 628/**
 629 * vmw_binding_scrub_cb - scrub a constant buffer binding from a context.
 630 *
 631 * @bi: single binding information.
 632 * @rebind: Whether to issue a bind instead of scrub command.
 633 */
 634static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
 635{
 636	struct vmw_ctx_bindinfo_cb *binding =
 637		container_of(bi, typeof(*binding), bi);
 638	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 639	struct {
 640		SVGA3dCmdHeader header;
 641		SVGA3dCmdDXSetSingleConstantBuffer body;
 642	} *cmd;
 643
 644	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
 645	if (unlikely(cmd == NULL)) {
  646		DRM_ERROR("Failed reserving FIFO space for DX constant buffer "
  647			  "unbinding.\n");
 648		return -ENOMEM;
 649	}
 650
 651	cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
 652	cmd->header.size = sizeof(cmd->body);
 653	cmd->body.slot = binding->slot;
 654	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
 655	if (rebind) {
 656		cmd->body.offsetInBytes = binding->offset;
 657		cmd->body.sizeInBytes = binding->size;
 658		cmd->body.sid = bi->res->id;
 659	} else {
 660		cmd->body.offsetInBytes = 0;
 661		cmd->body.sizeInBytes = 0;
 662		cmd->body.sid = SVGA3D_INVALID_ID;
 663	}
 664	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 665
 666	return 0;
 667}
 668
 669/**
 670 * vmw_collect_view_ids - Build view id data for a view binding command
 671 * without checking which bindings actually need to be emitted
 672 *
 673 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 674 * @bi: Pointer to where the binding info array is stored in @cbs
 675 * @max_num: Maximum number of entries in the @bi array.
 676 *
 677 * Scans the @bi array for bindings and builds a buffer of view id data.
 678 * Stops at the first non-existing binding in the @bi array.
 679 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 680 * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
 681 * contains the command data.
 682 */
 683static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
 684				 const struct vmw_ctx_bindinfo *bi,
 685				 u32 max_num)
 686{
 687	const struct vmw_ctx_bindinfo_view *biv =
 688		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
 689	unsigned long i;
 690
 691	cbs->bind_cmd_count = 0;
 692	cbs->bind_first_slot = 0;
 693
 694	for (i = 0; i < max_num; ++i, ++biv) {
 695		if (!biv->bi.ctx)
 696			break;
 697
 698		cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
 699			((biv->bi.scrubbed) ?
 700			 SVGA3D_INVALID_ID : biv->bi.res->id);
 701	}
 702}
 703
 704/**
 705 * vmw_collect_dirty_view_ids - Build view id data for a view binding command
 706 *
 707 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 708 * @bi: Pointer to where the binding info array is stored in @cbs
 709 * @dirty: Bitmap indicating which bindings need to be emitted.
 710 * @max_num: Maximum number of entries in the @bi array.
 711 *
 712 * Scans the @bi array for bindings that need to be emitted and
 713 * builds a buffer of view id data.
 714 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 715 * emitted, @cbs->bind_first_slot indicates the index of the first emitted
 716 * binding, and @cbs->bind_cmd_buffer contains the command data.
 717 */
 718static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
 719				       const struct vmw_ctx_bindinfo *bi,
 720				       unsigned long *dirty,
 721				       u32 max_num)
 722{
 723	const struct vmw_ctx_bindinfo_view *biv =
 724		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
 725	unsigned long i, next_bit;
 726
 727	cbs->bind_cmd_count = 0;
 728	i = find_first_bit(dirty, max_num);
 729	next_bit = i;
 730	cbs->bind_first_slot = i;
 731
 732	biv += i;
 733	for (; i < max_num; ++i, ++biv) {
 734		cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
 735			((!biv->bi.ctx || biv->bi.scrubbed) ?
 736			 SVGA3D_INVALID_ID : biv->bi.res->id);
 737
 738		if (next_bit == i) {
 739			next_bit = find_next_bit(dirty, max_num, i + 1);
 740			if (next_bit >= max_num)
 741				break;
 742		}
 743	}
 744}
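/*
 * Worked example of the loop above: with bindings present in slots 2..5 and
 * dirty bits {2, 5} set, @i starts at 2, so bind_first_slot becomes 2 and
 * bind_cmd_buffer receives the view ids of slots 2, 3, 4 and 5. The clean
 * slots in between are simply re-emitted unchanged, which keeps the result
 * a single contiguous command.
 */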
 745
 746/**
  747 * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
  748 *
  749 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
      * @shader_slot: The shader slot for which to emit shader resource bindings.
 750 */
 751static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
 752			   int shader_slot)
 753{
 754	const struct vmw_ctx_bindinfo *loc =
 755		&cbs->per_shader[shader_slot].shader_res[0].bi;
 756	struct {
 757		SVGA3dCmdHeader header;
 758		SVGA3dCmdDXSetShaderResources body;
 759	} *cmd;
 760	size_t cmd_size, view_id_size;
 761	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
 762
 763	vmw_collect_dirty_view_ids(cbs, loc,
 764				   cbs->per_shader[shader_slot].dirty_sr,
 765				   SVGA3D_DX_MAX_SRVIEWS);
 766	if (cbs->bind_cmd_count == 0)
 767		return 0;
 768
 769	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
 770	cmd_size = sizeof(*cmd) + view_id_size;
 771	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
 772	if (unlikely(cmd == NULL)) {
 773		DRM_ERROR("Failed reserving FIFO space for DX shader"
 774			  " resource binding.\n");
 775		return -ENOMEM;
 776	}
 777
 778	cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
 779	cmd->header.size = sizeof(cmd->body) + view_id_size;
 780	cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
 781	cmd->body.startView = cbs->bind_first_slot;
 782
 783	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
 784
 785	vmw_fifo_commit(ctx->dev_priv, cmd_size);
 786	bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
 787		     cbs->bind_first_slot, cbs->bind_cmd_count);
 788
 789	return 0;
 790}
 791
 792/**
  793 * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
 794 *
 795 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 796 */
 797static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
 798{
 799	const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi;
 800	struct {
 801		SVGA3dCmdHeader header;
 802		SVGA3dCmdDXSetRenderTargets body;
 803	} *cmd;
 804	size_t cmd_size, view_id_size;
 805	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
 806
 807	vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
 808	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
 809	cmd_size = sizeof(*cmd) + view_id_size;
 810	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
 811	if (unlikely(cmd == NULL)) {
 812		DRM_ERROR("Failed reserving FIFO space for DX render-target"
 813			  " binding.\n");
 814		return -ENOMEM;
 815	}
 816
 817	cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
 818	cmd->header.size = sizeof(cmd->body) + view_id_size;
 819
 820	if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
 821		cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
 822	else
 823		cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;
 824
 825	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
 826
 827	vmw_fifo_commit(ctx->dev_priv, cmd_size);
 828
 829	return 0;
 830
 831}
 832
 833/**
 834 * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
 835 * without checking which bindings actually need to be emitted
 836 *
 837 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 838 * @bi: Pointer to where the binding info array is stored in @cbs
 839 * @max_num: Maximum number of entries in the @bi array.
 840 *
 841 * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
 842 * Stops at the first non-existing binding in the @bi array.
 843 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 844 * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
 845 * contains the command data.
 846 */
 847static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
 848				   const struct vmw_ctx_bindinfo *bi,
 849				   u32 max_num)
 850{
 851	const struct vmw_ctx_bindinfo_so *biso =
 852		container_of(bi, struct vmw_ctx_bindinfo_so, bi);
 853	unsigned long i;
 854	SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
 855
 856	cbs->bind_cmd_count = 0;
 857	cbs->bind_first_slot = 0;
 858
 859	for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
 860		    ++cbs->bind_cmd_count) {
 861		if (!biso->bi.ctx)
 862			break;
 863
 864		if (!biso->bi.scrubbed) {
 865			so_buffer->sid = biso->bi.res->id;
 866			so_buffer->offset = biso->offset;
 867			so_buffer->sizeInBytes = biso->size;
 868		} else {
 869			so_buffer->sid = SVGA3D_INVALID_ID;
 870			so_buffer->offset = 0;
 871			so_buffer->sizeInBytes = 0;
 872		}
 873	}
 874}
 875
 876/**
  877 * vmw_emit_set_so - Issue delayed streamout binding commands
 878 *
 879 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 880 */
 881static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
 882{
 883	const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
 884	struct {
 885		SVGA3dCmdHeader header;
 886		SVGA3dCmdDXSetSOTargets body;
 887	} *cmd;
 888	size_t cmd_size, so_target_size;
 889	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
 890
 891	vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
 892	if (cbs->bind_cmd_count == 0)
 893		return 0;
 894
 895	so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
 896	cmd_size = sizeof(*cmd) + so_target_size;
 897	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
 898	if (unlikely(cmd == NULL)) {
 899		DRM_ERROR("Failed reserving FIFO space for DX SO target"
 900			  " binding.\n");
 901		return -ENOMEM;
 902	}
 903
 904	cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
 905	cmd->header.size = sizeof(cmd->body) + so_target_size;
 906	memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);
 907
 908	vmw_fifo_commit(ctx->dev_priv, cmd_size);
 909
 910	return 0;
 911
 912}
 913
 914/**
 915 * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands
 916 *
 917 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 918 *
 919 */
 920static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
 921{
 922	struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
 923	u32 i;
  924	int ret = 0;
 925
 926	for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
 927		if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
 928			continue;
 929
 930		ret = vmw_emit_set_sr(cbs, i);
 931		if (ret)
 932			break;
 933
 934		__clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
 935	}
 936
  937	return ret;
 938}
 939
 940/**
 941 * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
 942 * SVGA3dCmdDXSetVertexBuffers command
 943 *
 944 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 945 * @bi: Pointer to where the binding info array is stored in @cbs
 946 * @dirty: Bitmap indicating which bindings need to be emitted.
 947 * @max_num: Maximum number of entries in the @bi array.
 948 *
 949 * Scans the @bi array for bindings that need to be emitted and
 950 * builds a buffer of SVGA3dVertexBuffer data.
 951 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 952 * emitted, @cbs->bind_first_slot indicates the index of the first emitted
 953 * binding, and @cbs->bind_cmd_buffer contains the command data.
 954 */
 955static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
 956				  const struct vmw_ctx_bindinfo *bi,
 957				  unsigned long *dirty,
 958				  u32 max_num)
 959{
 960	const struct vmw_ctx_bindinfo_vb *biv =
 961		container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
 962	unsigned long i, next_bit;
 963	SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer;
 964
 965	cbs->bind_cmd_count = 0;
 966	i = find_first_bit(dirty, max_num);
 967	next_bit = i;
 968	cbs->bind_first_slot = i;
 969
 970	biv += i;
 971	for (; i < max_num; ++i, ++biv, ++vbs) {
 972		if (!biv->bi.ctx || biv->bi.scrubbed) {
 973			vbs->sid = SVGA3D_INVALID_ID;
 974			vbs->stride = 0;
 975			vbs->offset = 0;
 976		} else {
 977			vbs->sid = biv->bi.res->id;
 978			vbs->stride = biv->stride;
 979			vbs->offset = biv->offset;
 980		}
 981		cbs->bind_cmd_count++;
 982		if (next_bit == i) {
 983			next_bit = find_next_bit(dirty, max_num, i + 1);
 984			if (next_bit >= max_num)
 985				break;
 986		}
 987	}
 988}
 989
 990/**
  991 * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
 992 *
 993 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 994 *
 995 */
 996static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
 997{
 998	const struct vmw_ctx_bindinfo *loc =
 999		&cbs->vertex_buffers[0].bi;
1000	struct {
1001		SVGA3dCmdHeader header;
1002		SVGA3dCmdDXSetVertexBuffers body;
1003	} *cmd;
1004	size_t cmd_size, set_vb_size;
1005	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
1006
1007	vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
1008			     SVGA3D_DX_MAX_VERTEXBUFFERS);
1009	if (cbs->bind_cmd_count == 0)
1010		return 0;
1011
1012	set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
1013	cmd_size = sizeof(*cmd) + set_vb_size;
1014	cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
1015	if (unlikely(cmd == NULL)) {
1016		DRM_ERROR("Failed reserving FIFO space for DX vertex buffer"
1017			  " binding.\n");
1018		return -ENOMEM;
1019	}
1020
1021	cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
1022	cmd->header.size = sizeof(cmd->body) + set_vb_size;
1023	cmd->body.startBuffer = cbs->bind_first_slot;
1024
1025	memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);
1026
1027	vmw_fifo_commit(ctx->dev_priv, cmd_size);
1028	bitmap_clear(cbs->dirty_vb,
1029		     cbs->bind_first_slot, cbs->bind_cmd_count);
1030
1031	return 0;
1032}
1033
1034/**
1035 * vmw_binding_emit_dirty - Issue delayed binding commands
1036 *
1037 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
1038 *
1039 * This function issues the delayed binding commands that arise from
1040 * previous scrub / unscrub calls. These binding commands are typically
1041 * commands that batch a number of bindings and therefore it makes sense
1042 * to delay them.
1043 */
1044static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
1045{
1046	int ret = 0;
1047	unsigned long hit = 0;
1048
1049	while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
1050	      < VMW_BINDING_NUM_BITS) {
1051
1052		switch (hit) {
1053		case VMW_BINDING_RT_BIT:
1054			ret = vmw_emit_set_rt(cbs);
1055			break;
1056		case VMW_BINDING_PS_BIT:
1057			ret = vmw_binding_emit_dirty_ps(cbs);
1058			break;
1059		case VMW_BINDING_SO_BIT:
1060			ret = vmw_emit_set_so(cbs);
1061			break;
1062		case VMW_BINDING_VB_BIT:
1063			ret = vmw_emit_set_vb(cbs);
1064			break;
1065		default:
1066			BUG();
1067		}
1068		if (ret)
1069			return ret;
1070
1071		__clear_bit(hit, &cbs->dirty);
1072		hit++;
1073	}
1074
1075	return 0;
1076}
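/*
 * Sketch of how the deferred path fits together, using only calls from this
 * file: scrubbing a vertex buffer binding via vmw_binding_scrub_vb() merely
 * sets the corresponding dirty_vb bit plus VMW_BINDING_VB_BIT; any number of
 * such scrubs may accumulate before a single vmw_binding_emit_dirty() call
 * batches them into one SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command via
 * vmw_emit_set_vb().
 */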
1077
1078/**
 1079 * vmw_binding_scrub_sr - Schedule a dx shader resource binding
1080 * scrub from a context
1081 *
1082 * @bi: single binding information.
1083 * @rebind: Whether to issue a bind instead of scrub command.
1084 */
1085static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
1086{
1087	struct vmw_ctx_bindinfo_view *biv =
1088		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
1089	struct vmw_ctx_binding_state *cbs =
1090		vmw_context_binding_state(bi->ctx);
1091
1092	__set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
1093	__set_bit(VMW_BINDING_PS_SR_BIT,
1094		  &cbs->per_shader[biv->shader_slot].dirty);
1095	__set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);
1096
1097	return 0;
1098}
1099
1100/**
1101 * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding
1102 * scrub from a context
1103 *
1104 * @bi: single binding information.
1105 * @rebind: Whether to issue a bind instead of scrub command.
1106 */
1107static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
1108{
1109	struct vmw_ctx_binding_state *cbs =
1110		vmw_context_binding_state(bi->ctx);
1111
1112	__set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);
1113
1114	return 0;
1115}
1116
1117/**
1118 * vmw_binding_scrub_so - Schedule a dx streamoutput buffer binding
1119 * scrub from a context
1120 *
1121 * @bi: single binding information.
1122 * @rebind: Whether to issue a bind instead of scrub command.
1123 */
1124static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
1125{
1126	struct vmw_ctx_binding_state *cbs =
1127		vmw_context_binding_state(bi->ctx);
1128
1129	__set_bit(VMW_BINDING_SO_BIT, &cbs->dirty);
1130
1131	return 0;
1132}
1133
1134/**
1135 * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding
1136 * scrub from a context
1137 *
1138 * @bi: single binding information.
1139 * @rebind: Whether to issue a bind instead of scrub command.
1140 */
1141static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
1142{
1143	struct vmw_ctx_bindinfo_vb *bivb =
1144		container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
1145	struct vmw_ctx_binding_state *cbs =
1146		vmw_context_binding_state(bi->ctx);
1147
1148	__set_bit(bivb->slot, cbs->dirty_vb);
1149	__set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);
1150
1151	return 0;
1152}
1153
1154/**
1155 * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context
1156 *
1157 * @bi: single binding information.
1158 * @rebind: Whether to issue a bind instead of scrub command.
1159 */
1160static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
1161{
1162	struct vmw_ctx_bindinfo_ib *binding =
1163		container_of(bi, typeof(*binding), bi);
1164	struct vmw_private *dev_priv = bi->ctx->dev_priv;
1165	struct {
1166		SVGA3dCmdHeader header;
1167		SVGA3dCmdDXSetIndexBuffer body;
1168	} *cmd;
1169
1170	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
1171	if (unlikely(cmd == NULL)) {
1172		DRM_ERROR("Failed reserving FIFO space for DX index buffer "
1173			  "binding.\n");
1174		return -ENOMEM;
1175	}
1176	cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
1177	cmd->header.size = sizeof(cmd->body);
1178	if (rebind) {
1179		cmd->body.sid = bi->res->id;
1180		cmd->body.format = binding->format;
1181		cmd->body.offset = binding->offset;
1182	} else {
1183		cmd->body.sid = SVGA3D_INVALID_ID;
1184		cmd->body.format = 0;
1185		cmd->body.offset = 0;
1186	}
1187
1188	vmw_fifo_commit(dev_priv, sizeof(*cmd));
1189
1190	return 0;
1191}
1192
1193/**
1194 * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
1195 * memory accounting.
1196 *
1197 * @dev_priv: Pointer to a device private structure.
1198 *
1199 * Returns a pointer to a newly allocated struct or an error pointer on error.
1200 */
1201struct vmw_ctx_binding_state *
1202vmw_binding_state_alloc(struct vmw_private *dev_priv)
1203{
1204	struct vmw_ctx_binding_state *cbs;
1205	int ret;
1206
1207	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
1208				   false, false);
1209	if (ret)
1210		return ERR_PTR(ret);
1211
1212	cbs = vzalloc(sizeof(*cbs));
1213	if (!cbs) {
1214		ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
1215		return ERR_PTR(-ENOMEM);
1216	}
1217
1218	cbs->dev_priv = dev_priv;
1219	INIT_LIST_HEAD(&cbs->list);
1220
1221	return cbs;
1222}
1223
1224/**
1225 * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
1226 * memory accounting info.
1227 *
1228 * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
1229 */
1230void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
1231{
1232	struct vmw_private *dev_priv = cbs->dev_priv;
1233
1234	vfree(cbs);
1235	ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
1236}
1237
1238/**
1239 * vmw_binding_state_list - Get the binding list of a
1240 * struct vmw_ctx_binding_state
1241 *
1242 * @cbs: Pointer to the struct vmw_ctx_binding_state
1243 *
1244 * Returns the binding list which can be used to traverse through the bindings
1245 * and access the resource information of all bindings.
1246 */
1247struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
1248{
1249	return &cbs->list;
1250}
1251
1252/**
 1253 * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
1254 *
1255 * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
1256 *
1257 * Drops all bindings registered in @cbs. No device binding actions are
1258 * performed.
1259 */
1260void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
1261{
1262	struct vmw_ctx_bindinfo *entry, *next;
1263
1264	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
1265		vmw_binding_drop(entry);
1266}
1267
1268/*
1269 * This function is unused at run-time, and only used to hold various build
1270 * asserts important for code optimization assumptions.
1271 */
1272static void vmw_binding_build_asserts(void)
1273{
1274	BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
1275	BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX);
1276	BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));
1277
1278	/*
1279	 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various
1280	 * view id arrays.
1281	 */
1282	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
1283	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
1284	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);
1285
1286	/*
1287	 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for
1288	 * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers
1289	 */
1290	BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
1291		     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
1292	BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
1293		     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
1294}
v5.9
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27/*
   28 * This file implements the vmwgfx context binding manager.
   29 * The sole reason for having to use this code is that VMware guest-backed
   30 * contexts can be swapped out to their backing mobs by the device at any
   31 * time, and swapped in again at any time. At swapin time, the device
   32 * validates the context bindings to make sure they point to valid resources.
   33 * It's this outside-of-drawcall validation (which can happen at any time)
   34 * that makes this code necessary.
  35 *
   36 * We therefore need to kill any context bindings pointing to a resource
   37 * when the resource is swapped out. Furthermore, if the vmwgfx driver has
   38 * swapped out the context, we can't swap it in again to kill the bindings,
   39 * because of backing mob reservation lockdep violations. So, as part of
   40 * context swapout, we also kill all bindings of the context, so that they
   41 * are already killed if a resource to which a binding points needs to be
   42 * swapped out.
  43 *
   44 * Note that a resource can be pointed to by bindings from multiple contexts.
   45 * Therefore we can't easily protect this data with a per-context mutex
  46 * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex
  47 * to protect all binding manager data.
  48 *
  49 * Finally, any association between a context and a global resource
  50 * (surface, shader or even DX query) is conceptually a context binding that
  51 * needs to be tracked by this code.
  52 */
  53
  54#include "vmwgfx_drv.h"
  55#include "vmwgfx_binding.h"
  56#include "device_include/svga3d_reg.h"
  57
  58#define VMW_BINDING_RT_BIT     0
  59#define VMW_BINDING_PS_BIT     1
  60#define VMW_BINDING_SO_T_BIT   2
  61#define VMW_BINDING_VB_BIT     3
  62#define VMW_BINDING_UAV_BIT    4
  63#define VMW_BINDING_CS_UAV_BIT 5
  64#define VMW_BINDING_NUM_BITS   6
  65
  66#define VMW_BINDING_PS_SR_BIT  0
  67
  68/**
  69 * struct vmw_ctx_binding_state - per context binding state
  70 *
  71 * @dev_priv: Pointer to device private structure.
  72 * @list: linked list of individual active bindings.
  73 * @render_targets: Render target bindings.
  74 * @texture_units: Texture units bindings.
  75 * @ds_view: Depth-stencil view binding.
  76 * @so_targets: StreamOutput target bindings.
  77 * @vertex_buffers: Vertex buffer bindings.
  78 * @index_buffer: Index buffer binding.
  79 * @per_shader: Per shader-type bindings.
  80 * @ua_views: UAV bindings.
  81 * @so_state: StreamOutput bindings.
  82 * @dirty: Bitmap tracking per binding-type changes that have not yet
  83 * been emitted to the device.
  84 * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that
  85 * have not yet been emitted to the device.
  86 * @bind_cmd_buffer: Scratch space used to construct binding commands.
   87 * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer.
  88 * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the
  89 * device binding slot of the first command data entry in @bind_cmd_buffer.
  90 *
  91 * Note that this structure also provides storage space for the individual
  92 * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
  93 * for individual bindings.
  94 *
  95 */
  96struct vmw_ctx_binding_state {
  97	struct vmw_private *dev_priv;
  98	struct list_head list;
  99	struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX];
 100	struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS];
 101	struct vmw_ctx_bindinfo_view ds_view;
 102	struct vmw_ctx_bindinfo_so_target so_targets[SVGA3D_DX_MAX_SOTARGETS];
 103	struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS];
 104	struct vmw_ctx_bindinfo_ib index_buffer;
 105	struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE];
 106	struct vmw_ctx_bindinfo_uav ua_views[VMW_MAX_UAV_BIND_TYPE];
 107	struct vmw_ctx_bindinfo_so so_state;
 108
 109	unsigned long dirty;
 110	DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS);
 111
 112	u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS];
 113	u32 bind_cmd_count;
 114	u32 bind_first_slot;
 115};
 116
 117static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
 118static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
 119					   bool rebind);
 120static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
 121static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind);
 122static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind);
 123static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind);
 124static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind);
 125static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs);
 126static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi,
 127				       bool rebind);
 128static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind);
 129static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind);
 130static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
 131static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind);
 132static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind);
 133
 134static void vmw_binding_build_asserts(void) __attribute__ ((unused));
 135
 136typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
 137
 138/**
 139 * struct vmw_binding_info - Per binding type information for the binding
 140 * manager
 141 *
 142 * @size: The size of the struct binding derived from a struct vmw_ctx_bindinfo.
 143 * @offsets: array[shader_slot] of offsets to the array[slot]
 144 * of struct bindings for the binding type.
 145 * @scrub_func: Pointer to the scrub function for this binding type.
 146 *
 147 * Holds static information to help optimize the binding manager and avoid
 148 * an excessive amount of switch statements.
 149 */
 150struct vmw_binding_info {
 151	size_t size;
 152	const size_t *offsets;
 153	vmw_scrub_func scrub_func;
 154};
 155
 156/*
 157 * A number of static variables that help determine the scrub func and the
 158 * location of the struct vmw_ctx_bindinfo slots for each binding type.
 159 */
 160static const size_t vmw_binding_shader_offsets[] = {
 161	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader),
 162	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader),
 163	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader),
 164	offsetof(struct vmw_ctx_binding_state, per_shader[3].shader),
 165	offsetof(struct vmw_ctx_binding_state, per_shader[4].shader),
 166	offsetof(struct vmw_ctx_binding_state, per_shader[5].shader),
 167};
 168static const size_t vmw_binding_rt_offsets[] = {
 169	offsetof(struct vmw_ctx_binding_state, render_targets),
 170};
 171static const size_t vmw_binding_tex_offsets[] = {
 172	offsetof(struct vmw_ctx_binding_state, texture_units),
 173};
 174static const size_t vmw_binding_cb_offsets[] = {
 175	offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers),
 176	offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers),
 177	offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers),
 178	offsetof(struct vmw_ctx_binding_state, per_shader[3].const_buffers),
 179	offsetof(struct vmw_ctx_binding_state, per_shader[4].const_buffers),
 180	offsetof(struct vmw_ctx_binding_state, per_shader[5].const_buffers),
 181};
 182static const size_t vmw_binding_dx_ds_offsets[] = {
 183	offsetof(struct vmw_ctx_binding_state, ds_view),
 184};
 185static const size_t vmw_binding_sr_offsets[] = {
 186	offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res),
 187	offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res),
 188	offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res),
 189	offsetof(struct vmw_ctx_binding_state, per_shader[3].shader_res),
 190	offsetof(struct vmw_ctx_binding_state, per_shader[4].shader_res),
 191	offsetof(struct vmw_ctx_binding_state, per_shader[5].shader_res),
 192};
 193static const size_t vmw_binding_so_target_offsets[] = {
 194	offsetof(struct vmw_ctx_binding_state, so_targets),
 195};
 196static const size_t vmw_binding_vb_offsets[] = {
 197	offsetof(struct vmw_ctx_binding_state, vertex_buffers),
 198};
 199static const size_t vmw_binding_ib_offsets[] = {
 200	offsetof(struct vmw_ctx_binding_state, index_buffer),
 201};
 202static const size_t vmw_binding_uav_offsets[] = {
 203	offsetof(struct vmw_ctx_binding_state, ua_views[0].views),
 204};
 205static const size_t vmw_binding_cs_uav_offsets[] = {
 206	offsetof(struct vmw_ctx_binding_state, ua_views[1].views),
 207};
 208static const size_t vmw_binding_so_offsets[] = {
 209	offsetof(struct vmw_ctx_binding_state, so_state),
 210};
 211
 212static const struct vmw_binding_info vmw_binding_infos[] = {
 213	[vmw_ctx_binding_shader] = {
 214		.size = sizeof(struct vmw_ctx_bindinfo_shader),
 215		.offsets = vmw_binding_shader_offsets,
 216		.scrub_func = vmw_binding_scrub_shader},
 217	[vmw_ctx_binding_rt] = {
 218		.size = sizeof(struct vmw_ctx_bindinfo_view),
 219		.offsets = vmw_binding_rt_offsets,
 220		.scrub_func = vmw_binding_scrub_render_target},
 221	[vmw_ctx_binding_tex] = {
 222		.size = sizeof(struct vmw_ctx_bindinfo_tex),
 223		.offsets = vmw_binding_tex_offsets,
 224		.scrub_func = vmw_binding_scrub_texture},
 225	[vmw_ctx_binding_cb] = {
 226		.size = sizeof(struct vmw_ctx_bindinfo_cb),
 227		.offsets = vmw_binding_cb_offsets,
 228		.scrub_func = vmw_binding_scrub_cb},
 229	[vmw_ctx_binding_dx_shader] = {
 230		.size = sizeof(struct vmw_ctx_bindinfo_shader),
 231		.offsets = vmw_binding_shader_offsets,
 232		.scrub_func = vmw_binding_scrub_dx_shader},
 233	[vmw_ctx_binding_dx_rt] = {
 234		.size = sizeof(struct vmw_ctx_bindinfo_view),
 235		.offsets = vmw_binding_rt_offsets,
 236		.scrub_func = vmw_binding_scrub_dx_rt},
 237	[vmw_ctx_binding_sr] = {
 238		.size = sizeof(struct vmw_ctx_bindinfo_view),
 239		.offsets = vmw_binding_sr_offsets,
 240		.scrub_func = vmw_binding_scrub_sr},
 241	[vmw_ctx_binding_ds] = {
 242		.size = sizeof(struct vmw_ctx_bindinfo_view),
 243		.offsets = vmw_binding_dx_ds_offsets,
 244		.scrub_func = vmw_binding_scrub_dx_rt},
 245	[vmw_ctx_binding_so_target] = {
 246		.size = sizeof(struct vmw_ctx_bindinfo_so_target),
 247		.offsets = vmw_binding_so_target_offsets,
 248		.scrub_func = vmw_binding_scrub_so_target},
 249	[vmw_ctx_binding_vb] = {
 250		.size = sizeof(struct vmw_ctx_bindinfo_vb),
 251		.offsets = vmw_binding_vb_offsets,
 252		.scrub_func = vmw_binding_scrub_vb},
 253	[vmw_ctx_binding_ib] = {
 254		.size = sizeof(struct vmw_ctx_bindinfo_ib),
 255		.offsets = vmw_binding_ib_offsets,
 256		.scrub_func = vmw_binding_scrub_ib},
 257	[vmw_ctx_binding_uav] = {
 258		.size = sizeof(struct vmw_ctx_bindinfo_view),
 259		.offsets = vmw_binding_uav_offsets,
 260		.scrub_func = vmw_binding_scrub_uav},
 261	[vmw_ctx_binding_cs_uav] = {
 262		.size = sizeof(struct vmw_ctx_bindinfo_view),
 263		.offsets = vmw_binding_cs_uav_offsets,
 264		.scrub_func = vmw_binding_scrub_cs_uav},
 265	[vmw_ctx_binding_so] = {
 266		.size = sizeof(struct vmw_ctx_bindinfo_so),
 267		.offsets = vmw_binding_so_offsets,
 268		.scrub_func = vmw_binding_scrub_so},
 269};
 270
 271/**
 272 * vmw_cbs_context - Return a pointer to the context resource of a
 273 * context binding state tracker.
 274 *
 275 * @cbs: The context binding state tracker.
 276 *
 277 * Provided there are any active bindings, this function will return an
 278 * unreferenced pointer to the context resource that owns the context
 279 * binding state tracker. If there are no active bindings, this function
 280 * will return NULL. Note that the caller must somehow ensure that a reference
 281 * is held on the context resource prior to calling this function.
 282 */
 283static const struct vmw_resource *
 284vmw_cbs_context(const struct vmw_ctx_binding_state *cbs)
 285{
 286	if (list_empty(&cbs->list))
 287		return NULL;
 288
 289	return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo,
 290				ctx_list)->ctx;
 291}
 292
 293/**
 294 * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location.
 295 *
 296 * @cbs: Pointer to a struct vmw_ctx_binding_state which holds the slot.
 297 * @bt: The binding type.
 298 * @shader_slot: The shader slot of the binding. If none, then set to 0.
 299 * @slot: The slot of the binding.
 300 */
 301static struct vmw_ctx_bindinfo *
 302vmw_binding_loc(struct vmw_ctx_binding_state *cbs,
 303		enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot)
 304{
 305	const struct vmw_binding_info *b = &vmw_binding_infos[bt];
 306	size_t offset = b->offsets[shader_slot] + b->size*slot;
 307
 308	return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset);
 309}
 310
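    /*
     * For example (illustrative only): with bt == vmw_ctx_binding_cb,
     * shader_slot == 1 and slot == 3, the offset computed above is
     *
     *	offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers) +
     *		3 * sizeof(struct vmw_ctx_bindinfo_cb);
     *
     * that is, the fourth constant-buffer slot of the second shader stage.
     */
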
 311/**
 312 * vmw_binding_drop: Stop tracking a context binding
 313 *
 314 * @bi: Pointer to binding tracker storage.
 315 *
 316 * Stops tracking a context binding, and re-initializes its storage.
 317 * Typically used when the context binding is replaced with a binding to
 318 * another (or the same, for that matter) resource.
 319 */
 320static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
 321{
 322	list_del(&bi->ctx_list);
 323	if (!list_empty(&bi->res_list))
 324		list_del(&bi->res_list);
 325	bi->ctx = NULL;
 326}
 327
 328/**
 329 * vmw_binding_add: Start tracking a context binding
 330 *
 331 * @cbs: Pointer to the context binding state tracker.
 332 * @bi: Information about the binding to track.
     * @shader_slot: The shader slot of the binding.
     * @slot: The slot of the binding.
 333 *
 334 * Starts tracking the binding in the context binding
 335 * state structure @cbs.
 336 */
 337void vmw_binding_add(struct vmw_ctx_binding_state *cbs,
 338		    const struct vmw_ctx_bindinfo *bi,
 339		    u32 shader_slot, u32 slot)
 340{
 341	struct vmw_ctx_bindinfo *loc =
 342		vmw_binding_loc(cbs, bi->bt, shader_slot, slot);
 343	const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt];
 344
 345	if (loc->ctx != NULL)
 346		vmw_binding_drop(loc);
 347
 348	memcpy(loc, bi, b->size);
 349	loc->scrubbed = false;
 350	list_add(&loc->ctx_list, &cbs->list);
 351	INIT_LIST_HEAD(&loc->res_list);
 352}
 353
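    /*
     * Usage sketch (not a verbatim call site; ctx_res, res and the cmd
     * fields are placeholders for caller state): tracking a validated
     * constant-buffer binding during command parsing might look like
     *
     *	struct vmw_ctx_bindinfo_cb bi;
     *
     *	bi.bi.ctx = ctx_res;
     *	bi.bi.res = res;
     *	bi.bi.bt = vmw_ctx_binding_cb;
     *	bi.shader_slot = shader_slot;
     *	bi.slot = cmd->body.slot;
     *	bi.offset = cmd->body.offsetInBytes;
     *	bi.size = cmd->body.sizeInBytes;
     *	vmw_binding_add(cbs, &bi.bi, shader_slot, bi.slot);
     */
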
 354/**
 355 * vmw_binding_add_uav_index - Add UAV index for tracking.
 356 * @cbs: Pointer to the context binding state tracker.
 357 * @slot: UAV type to which bind this index.
 358 * @index: The splice index to track.
 359 */
 360void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs, uint32 slot,
 361			       uint32 index)
 362{
 363	cbs->ua_views[slot].index = index;
 364}
 365
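    /*
     * The index stored here is not emitted to the device immediately;
     * it is picked up later as the uavSpliceIndex / startIndex of the
     * delayed commands built by vmw_emit_set_uav() and
     * vmw_emit_set_cs_uav() below.
     */
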
 366/**
 367 * vmw_binding_transfer: Transfer a context binding tracking entry.
 368 *
 369 * @cbs: Pointer to the persistent context binding state tracker.
     * @from: Staged binding state tracker from which @bi originates.
 370 * @bi: Information about the binding to track.
 371 *
     * Since @bi points into @from, its byte offset identifies the same
     * slot in @cbs, so the entry can be copied without re-deriving the
     * binding type, shader slot and slot index.
 372 */
 373static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs,
 374				 const struct vmw_ctx_binding_state *from,
 375				 const struct vmw_ctx_bindinfo *bi)
 376{
 377	size_t offset = (unsigned long)bi - (unsigned long)from;
 378	struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *)
 379		((unsigned long) cbs + offset);
 380
 381	if (loc->ctx != NULL) {
 382		WARN_ON(bi->scrubbed);
 383
 384		vmw_binding_drop(loc);
 385	}
 386
 387	if (bi->res != NULL) {
 388		memcpy(loc, bi, vmw_binding_infos[bi->bt].size);
 389		list_add_tail(&loc->ctx_list, &cbs->list);
 390		list_add_tail(&loc->res_list, &loc->res->binding_head);
 391	}
 392}
 393
 394/**
 395 * vmw_binding_state_kill - Kill all bindings associated with a
 396 * struct vmw_ctx_binding_state structure, and re-initialize the structure.
 397 *
 398 * @cbs: Pointer to the context binding state tracker.
 399 *
 400 * Emits commands to scrub all bindings associated with the
 401 * context binding state tracker. Then re-initializes the whole structure.
 402 */
 403void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs)
 404{
 405	struct vmw_ctx_bindinfo *entry, *next;
 406
 407	vmw_binding_state_scrub(cbs);
 408	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
 409		vmw_binding_drop(entry);
 410}
 411
 412/**
 413 * vmw_binding_state_scrub - Scrub all bindings associated with a
 414 * struct vmw_ctx_binding_state structure.
 415 *
 416 * @cbs: Pointer to the context binding state tracker.
 417 *
 418 * Emits commands to scrub all bindings associated with the
 419 * context binding state tracker.
 420 */
 421void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
 422{
 423	struct vmw_ctx_bindinfo *entry;
 424
 425	list_for_each_entry(entry, &cbs->list, ctx_list) {
 426		if (!entry->scrubbed) {
 427			(void) vmw_binding_infos[entry->bt].scrub_func
 428				(entry, false);
 429			entry->scrubbed = true;
 430		}
 431	}
 432
 433	(void) vmw_binding_emit_dirty(cbs);
 434}
 435
 436/**
 437 * vmw_binding_res_list_kill - Kill all bindings on a
 438 * resource binding list
 439 *
 440 * @head: list head of resource binding list
 441 *
 442 * Kills all bindings associated with a specific resource. Typically
 443 * called before the resource is destroyed.
 444 */
 445void vmw_binding_res_list_kill(struct list_head *head)
 446{
 447	struct vmw_ctx_bindinfo *entry, *next;
 448
 449	vmw_binding_res_list_scrub(head);
 450	list_for_each_entry_safe(entry, next, head, res_list)
 451		vmw_binding_drop(entry);
 452}
 453
 454/**
 455 * vmw_binding_res_list_scrub - Scrub all bindings on a
 456 * resource binding list
 457 *
 458 * @head: list head of resource binding list
 459 *
 460 * Scrub all bindings associated with a specific resource. Typically
 461 * called before the resource is evicted.
 462 */
 463void vmw_binding_res_list_scrub(struct list_head *head)
 464{
 465	struct vmw_ctx_bindinfo *entry;
 466
 467	list_for_each_entry(entry, head, res_list) {
 468		if (!entry->scrubbed) {
 469			(void) vmw_binding_infos[entry->bt].scrub_func
 470				(entry, false);
 471			entry->scrubbed = true;
 472		}
 473	}
 474
 475	list_for_each_entry(entry, head, res_list) {
 476		struct vmw_ctx_binding_state *cbs =
 477			vmw_context_binding_state(entry->ctx);
 478
 479		(void) vmw_binding_emit_dirty(cbs);
 480	}
 481}
 482
 483
 484/**
 485 * vmw_binding_state_commit - Commit staged binding info
 486 *
 487 * @to: Pointer to the persistent binding state tracker to commit to.
 488 * @from: Staged binding info built during execbuf.
 490 *
 491 * Transfers binding info from a temporary structure
 492 * (typically used by execbuf) to the persistent
 493 * structure in the context. This can be done once commands have been
 494 * submitted to hardware.
 495 */
 496void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
 497			      struct vmw_ctx_binding_state *from)
 498{
 499	struct vmw_ctx_bindinfo *entry, *next;
 500
 501	list_for_each_entry_safe(entry, next, &from->list, ctx_list) {
 502		vmw_binding_transfer(to, from, entry);
 503		vmw_binding_drop(entry);
 504	}
 505
 506	/* Also transfer uav splice indices */
 507	to->ua_views[0].index = from->ua_views[0].index;
 508	to->ua_views[1].index = from->ua_views[1].index;
 509}
 510
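    /*
     * Typical usage (sketch; ctx_res and staged are placeholders):
     * execbuf stages bindings in a temporary tracker and commits them to
     * the context's persistent tracker once the command stream has been
     * submitted:
     *
     *	vmw_binding_state_commit(vmw_context_binding_state(ctx_res),
     *				 staged);
     */
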
 511/**
 512 * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
 513 *
 514 * @cbs: Pointer to the context binding state tracker.
 515 *
 516 * Walks through the context binding list and rebinds all scrubbed
 517 * resources.
 518 */
 519int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs)
 520{
 521	struct vmw_ctx_bindinfo *entry;
 522	int ret;
 523
 524	list_for_each_entry(entry, &cbs->list, ctx_list) {
 525		if (likely(!entry->scrubbed))
 526			continue;
 527
 528		if ((entry->res == NULL || entry->res->id ==
 529			    SVGA3D_INVALID_ID))
 530			continue;
 531
 532		ret = vmw_binding_infos[entry->bt].scrub_func(entry, true);
 533		if (unlikely(ret != 0))
 534			return ret;
 535
 536		entry->scrubbed = false;
 537	}
 538
 539	return vmw_binding_emit_dirty(cbs);
 540}
 541
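    /*
     * Sketch of the intended call site, assuming the global binding_mutex
     * convention described at the top of this file:
     *
     *	mutex_lock(&dev_priv->binding_mutex);
     *	ret = vmw_binding_rebind_all(vmw_context_binding_state(ctx));
     *	mutex_unlock(&dev_priv->binding_mutex);
     */
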
 542/**
 543 * vmw_binding_scrub_shader - scrub a shader binding from a context.
 544 *
 545 * @bi: single binding information.
 546 * @rebind: Whether to issue a bind instead of scrub command.
 547 */
 548static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
 549{
 550	struct vmw_ctx_bindinfo_shader *binding =
 551		container_of(bi, typeof(*binding), bi);
 552	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 553	struct {
 554		SVGA3dCmdHeader header;
 555		SVGA3dCmdSetShader body;
 556	} *cmd;
 557
 558	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
 559	if (unlikely(cmd == NULL))
 560		return -ENOMEM;
 561
 562	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
 563	cmd->header.size = sizeof(cmd->body);
 564	cmd->body.cid = bi->ctx->id;
 565	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
 566	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 567	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 568
 569	return 0;
 570}
 571
 572/**
 573 * vmw_binding_scrub_render_target - scrub a render target binding
 574 * from a context.
 575 *
 576 * @bi: single binding information.
 577 * @rebind: Whether to issue a bind instead of scrub command.
 578 */
 579static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
 580					   bool rebind)
 581{
 582	struct vmw_ctx_bindinfo_view *binding =
 583		container_of(bi, typeof(*binding), bi);
 584	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 585	struct {
 586		SVGA3dCmdHeader header;
 587		SVGA3dCmdSetRenderTarget body;
 588	} *cmd;
 589
 590	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
 591	if (unlikely(cmd == NULL))
 592		return -ENOMEM;
 593
 594	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
 595	cmd->header.size = sizeof(cmd->body);
 596	cmd->body.cid = bi->ctx->id;
 597	cmd->body.type = binding->slot;
 598	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 599	cmd->body.target.face = 0;
 600	cmd->body.target.mipmap = 0;
 601	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 602
 603	return 0;
 604}
 605
 606/**
 607 * vmw_binding_scrub_texture - scrub a texture binding from a context.
 608 *
 609 * @bi: single binding information.
 610 * @rebind: Whether to issue a bind instead of scrub command.
 611 *
 612 * TODO: Possibly complement this function with a function that takes
 613 * a list of texture bindings and combines them to a single command.
 614 */
 615static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
 616				     bool rebind)
 617{
 618	struct vmw_ctx_bindinfo_tex *binding =
 619		container_of(bi, typeof(*binding), bi);
 620	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 621	struct {
 622		SVGA3dCmdHeader header;
 623		struct {
 624			SVGA3dCmdSetTextureState c;
 625			SVGA3dTextureState s1;
 626		} body;
 627	} *cmd;
 628
 629	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
 630	if (unlikely(cmd == NULL))
 631		return -ENOMEM;
 632
 633	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
 634	cmd->header.size = sizeof(cmd->body);
 635	cmd->body.c.cid = bi->ctx->id;
 636	cmd->body.s1.stage = binding->texture_stage;
 637	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
 638	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 639	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 640
 641	return 0;
 642}
 643
 644/**
 645 * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context.
 646 *
 647 * @bi: single binding information.
 648 * @rebind: Whether to issue a bind instead of scrub command.
 649 */
 650static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
 651{
 652	struct vmw_ctx_bindinfo_shader *binding =
 653		container_of(bi, typeof(*binding), bi);
 654	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 655	struct {
 656		SVGA3dCmdHeader header;
 657		SVGA3dCmdDXSetShader body;
 658	} *cmd;
 659
 660	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
 661	if (unlikely(cmd == NULL))
 662		return -ENOMEM;
 663
 664	cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
 665	cmd->header.size = sizeof(cmd->body);
 666	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
 667	cmd->body.shaderId = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 668	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 669
 670	return 0;
 671}
 672
 673/**
 674 * vmw_binding_scrub_cb - scrub a constant buffer binding from a context.
 675 *
 676 * @bi: single binding information.
 677 * @rebind: Whether to issue a bind instead of scrub command.
 678 */
 679static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
 680{
 681	struct vmw_ctx_bindinfo_cb *binding =
 682		container_of(bi, typeof(*binding), bi);
 683	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 684	struct {
 685		SVGA3dCmdHeader header;
 686		SVGA3dCmdDXSetSingleConstantBuffer body;
 687	} *cmd;
 688
 689	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
 690	if (unlikely(cmd == NULL))
 691		return -ENOMEM;
 692
 693	cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
 694	cmd->header.size = sizeof(cmd->body);
 695	cmd->body.slot = binding->slot;
 696	cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
 697	if (rebind) {
 698		cmd->body.offsetInBytes = binding->offset;
 699		cmd->body.sizeInBytes = binding->size;
 700		cmd->body.sid = bi->res->id;
 701	} else {
 702		cmd->body.offsetInBytes = 0;
 703		cmd->body.sizeInBytes = 0;
 704		cmd->body.sid = SVGA3D_INVALID_ID;
 705	}
 706	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 707
 708	return 0;
 709}
 710
 711/**
 712 * vmw_collect_view_ids - Build view id data for a view binding command
 713 * without checking which bindings actually need to be emitted
 714 *
 715 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 716 * @bi: Pointer to where the binding info array is stored in @cbs
 717 * @max_num: Maximum number of entries in the @bi array.
 718 *
 719 * Scans the @bi array for bindings and builds a buffer of view id data.
 720 * Stops at the first non-existing binding in the @bi array.
 721 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 722 * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
 723 * contains the command data.
 724 */
 725static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs,
 726				 const struct vmw_ctx_bindinfo *bi,
 727				 u32 max_num)
 728{
 729	const struct vmw_ctx_bindinfo_view *biv =
 730		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
 731	unsigned long i;
 732
 733	cbs->bind_cmd_count = 0;
 734	cbs->bind_first_slot = 0;
 735
 736	for (i = 0; i < max_num; ++i, ++biv) {
 737		if (!biv->bi.ctx)
 738			break;
 739
 740		cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
 741			((biv->bi.scrubbed) ?
 742			 SVGA3D_INVALID_ID : biv->bi.res->id);
 743	}
 744}
 745
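    /*
     * Worked example (illustrative only): with views bound in slots 0
     * and 1, slot 0 scrubbed and slot 2 empty, the loop above leaves
     * bind_cmd_buffer[] = { SVGA3D_INVALID_ID, <id of slot 1> },
     * bind_cmd_count == 2, and the scan stops at the empty slot 2.
     */
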
 746/**
 747 * vmw_collect_dirty_view_ids - Build view id data for a view binding command
 748 *
 749 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 750 * @bi: Pointer to where the binding info array is stored in @cbs
 751 * @dirty: Bitmap indicating which bindings need to be emitted.
 752 * @max_num: Maximum number of entries in the @bi array.
 753 *
 754 * Scans the @bi array for bindings that need to be emitted and
 755 * builds a buffer of view id data.
 756 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 757 * emitted, @cbs->bind_first_slot indicates the index of the first emitted
 758 * binding, and @cbs->bind_cmd_buffer contains the command data.
 759 */
 760static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
 761				       const struct vmw_ctx_bindinfo *bi,
 762				       unsigned long *dirty,
 763				       u32 max_num)
 764{
 765	const struct vmw_ctx_bindinfo_view *biv =
 766		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
 767	unsigned long i, next_bit;
 768
 769	cbs->bind_cmd_count = 0;
 770	i = find_first_bit(dirty, max_num);
 771	next_bit = i;
 772	cbs->bind_first_slot = i;
 773
 774	biv += i;
 775	for (; i < max_num; ++i, ++biv) {
 776		cbs->bind_cmd_buffer[cbs->bind_cmd_count++] =
 777			((!biv->bi.ctx || biv->bi.scrubbed) ?
 778			 SVGA3D_INVALID_ID : biv->bi.res->id);
 779
 780		if (next_bit == i) {
 781			next_bit = find_next_bit(dirty, max_num, i + 1);
 782			if (next_bit >= max_num)
 783				break;
 784		}
 785	}
 786}
 787
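    /*
     * Worked example (illustrative only): if only slots 2 and 5 are set
     * in @dirty, the loop above emits ids for slots 2..5 inclusive
     * (bind_first_slot == 2, bind_cmd_count == 4), re-emitting the
     * current id of the clean slots in between so that one contiguous
     * command covers the whole dirty range.
     */
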
 788/**
 789 * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
 790 *
 791 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
     * @shader_slot: The shader stage for which to emit dirty shader resources.
 792 */
 793static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
 794			   int shader_slot)
 795{
 796	const struct vmw_ctx_bindinfo *loc =
 797		&cbs->per_shader[shader_slot].shader_res[0].bi;
 798	struct {
 799		SVGA3dCmdHeader header;
 800		SVGA3dCmdDXSetShaderResources body;
 801	} *cmd;
 802	size_t cmd_size, view_id_size;
 803	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
 804
 805	vmw_collect_dirty_view_ids(cbs, loc,
 806				   cbs->per_shader[shader_slot].dirty_sr,
 807				   SVGA3D_DX_MAX_SRVIEWS);
 808	if (cbs->bind_cmd_count == 0)
 809		return 0;
 810
 811	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
 812	cmd_size = sizeof(*cmd) + view_id_size;
 813	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
 814	if (unlikely(cmd == NULL))
 815		return -ENOMEM;
 816
 817	cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
 818	cmd->header.size = sizeof(cmd->body) + view_id_size;
 819	cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN;
 820	cmd->body.startView = cbs->bind_first_slot;
 821
 822	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
 823
 824	vmw_fifo_commit(ctx->dev_priv, cmd_size);
 825	bitmap_clear(cbs->per_shader[shader_slot].dirty_sr,
 826		     cbs->bind_first_slot, cbs->bind_cmd_count);
 827
 828	return 0;
 829}
 830
 831/**
 832 * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
 833 *
 834 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 835 */
 836static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
 837{
 838	const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi;
 839	struct {
 840		SVGA3dCmdHeader header;
 841		SVGA3dCmdDXSetRenderTargets body;
 842	} *cmd;
 843	size_t cmd_size, view_id_size;
 844	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
 845
 846	vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
 847	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
 848	cmd_size = sizeof(*cmd) + view_id_size;
 849	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
 850	if (unlikely(cmd == NULL))
 851		return -ENOMEM;
 852
 853	cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
 854	cmd->header.size = sizeof(cmd->body) + view_id_size;
 855
 856	if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed)
 857		cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id;
 858	else
 859		cmd->body.depthStencilViewId = SVGA3D_INVALID_ID;
 860
 861	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
 862
 863	vmw_fifo_commit(ctx->dev_priv, cmd_size);
 864
 865	return 0;
 866
 867}
 868
 869/**
 870 * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command
 871 * without checking which bindings actually need to be emitted
 872 *
 873 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 874 * @bi: Pointer to where the binding info array is stored in @cbs
 875 * @max_num: Maximum number of entries in the @bi array.
 876 *
 877 * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data.
 878 * Stops at the first non-existing binding in the @bi array.
 879 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 880 * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer
 881 * contains the command data.
 882 */
 883static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs,
 884				   const struct vmw_ctx_bindinfo *bi,
 885				   u32 max_num)
 886{
 887	const struct vmw_ctx_bindinfo_so_target *biso =
 888		container_of(bi, struct vmw_ctx_bindinfo_so_target, bi);
 889	unsigned long i;
 890	SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer;
 891
 892	cbs->bind_cmd_count = 0;
 893	cbs->bind_first_slot = 0;
 894
 895	for (i = 0; i < max_num; ++i, ++biso, ++so_buffer,
 896		    ++cbs->bind_cmd_count) {
 897		if (!biso->bi.ctx)
 898			break;
 899
 900		if (!biso->bi.scrubbed) {
 901			so_buffer->sid = biso->bi.res->id;
 902			so_buffer->offset = biso->offset;
 903			so_buffer->sizeInBytes = biso->size;
 904		} else {
 905			so_buffer->sid = SVGA3D_INVALID_ID;
 906			so_buffer->offset = 0;
 907			so_buffer->sizeInBytes = 0;
 908		}
 909	}
 910}
 911
 912/**
 913 * vmw_emit_set_so_target - Issue delayed streamout binding commands
 914 *
 915 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 916 */
 917static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs)
 918{
 919	const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi;
 920	struct {
 921		SVGA3dCmdHeader header;
 922		SVGA3dCmdDXSetSOTargets body;
 923	} *cmd;
 924	size_t cmd_size, so_target_size;
 925	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
 926
 927	vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS);
 928	if (cbs->bind_cmd_count == 0)
 929		return 0;
 930
 931	so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
 932	cmd_size = sizeof(*cmd) + so_target_size;
 933	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
 934	if (unlikely(cmd == NULL))
 935		return -ENOMEM;
 936
 937	cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
 938	cmd->header.size = sizeof(cmd->body) + so_target_size;
 939	memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size);
 940
 941	vmw_fifo_commit(ctx->dev_priv, cmd_size);
 942
 943	return 0;
 945}
 946
 947/**
 948 * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands
 949 *
 950 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 951 *
 952 */
 953static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs)
 954{
 955	struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0];
 956	u32 i;
 957	int ret = 0;
 958
 959	for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) {
 960		if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty))
 961			continue;
 962
 963		ret = vmw_emit_set_sr(cbs, i);
 964		if (ret)
 965			break;
 966
 967		__clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty);
 968	}
 969
 970	return ret;
 971}
 972
 973/**
 974 * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a
 975 * SVGA3dCmdDXSetVertexBuffers command
 976 *
 977 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
 978 * @bi: Pointer to where the binding info array is stored in @cbs
 979 * @dirty: Bitmap indicating which bindings need to be emitted.
 980 * @max_num: Maximum number of entries in the @bi array.
 981 *
 982 * Scans the @bi array for bindings that need to be emitted and
 983 * builds a buffer of SVGA3dVertexBuffer data.
 984 * On output, @cbs->bind_cmd_count contains the number of bindings to be
 985 * emitted, @cbs->bind_first_slot indicates the index of the first emitted
 986 * binding, and @cbs->bind_cmd_buffer contains the command data.
 987 */
 988static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
 989				  const struct vmw_ctx_bindinfo *bi,
 990				  unsigned long *dirty,
 991				  u32 max_num)
 992{
 993	const struct vmw_ctx_bindinfo_vb *biv =
 994		container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
 995	unsigned long i, next_bit;
 996	SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer;
 997
 998	cbs->bind_cmd_count = 0;
 999	i = find_first_bit(dirty, max_num);
1000	next_bit = i;
1001	cbs->bind_first_slot = i;
1002
1003	biv += i;
1004	for (; i < max_num; ++i, ++biv, ++vbs) {
1005		if (!biv->bi.ctx || biv->bi.scrubbed) {
1006			vbs->sid = SVGA3D_INVALID_ID;
1007			vbs->stride = 0;
1008			vbs->offset = 0;
1009		} else {
1010			vbs->sid = biv->bi.res->id;
1011			vbs->stride = biv->stride;
1012			vbs->offset = biv->offset;
1013		}
1014		cbs->bind_cmd_count++;
1015		if (next_bit == i) {
1016			next_bit = find_next_bit(dirty, max_num, i + 1);
1017			if (next_bit >= max_num)
1018				break;
1019		}
1020	}
1021}
1022
1023/**
1024 * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
1025 *
1026 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
1027 *
1028 */
1029static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
1030{
1031	const struct vmw_ctx_bindinfo *loc =
1032		&cbs->vertex_buffers[0].bi;
1033	struct {
1034		SVGA3dCmdHeader header;
1035		SVGA3dCmdDXSetVertexBuffers body;
1036	} *cmd;
1037	size_t cmd_size, set_vb_size;
1038	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
1039
1040	vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb,
1041			     SVGA3D_DX_MAX_VERTEXBUFFERS);
1042	if (cbs->bind_cmd_count == 0)
1043		return 0;
1044
1045	set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
1046	cmd_size = sizeof(*cmd) + set_vb_size;
1047	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
1048	if (unlikely(cmd == NULL))
1049		return -ENOMEM;
1050
1051	cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
1052	cmd->header.size = sizeof(cmd->body) + set_vb_size;
1053	cmd->body.startBuffer = cbs->bind_first_slot;
1054
1055	memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size);
1056
1057	vmw_fifo_commit(ctx->dev_priv, cmd_size);
1058	bitmap_clear(cbs->dirty_vb,
1059		     cbs->bind_first_slot, cbs->bind_cmd_count);
1060
1061	return 0;
1062}
1063
1064static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs)
1065{
1066	const struct vmw_ctx_bindinfo *loc = &cbs->ua_views[0].views[0].bi;
1067	struct {
1068		SVGA3dCmdHeader header;
1069		SVGA3dCmdDXSetUAViews body;
1070	} *cmd;
1071	size_t cmd_size, view_id_size;
1072	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
1073
1074	vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
1075	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
1076	cmd_size = sizeof(*cmd) + view_id_size;
1077	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
1078	if (!cmd)
1079		return -ENOMEM;
1080
1081	cmd->header.id = SVGA_3D_CMD_DX_SET_UA_VIEWS;
1082	cmd->header.size = sizeof(cmd->body) + view_id_size;
1083
1084	/* Splice index is specified by user-space */
1085	cmd->body.uavSpliceIndex = cbs->ua_views[0].index;
1086
1087	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
1088
1089	vmw_fifo_commit(ctx->dev_priv, cmd_size);
1090
1091	return 0;
1092}
1093
1094static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs)
1095{
1096	const struct vmw_ctx_bindinfo *loc = &cbs->ua_views[1].views[0].bi;
1097	struct {
1098		SVGA3dCmdHeader header;
1099		SVGA3dCmdDXSetCSUAViews body;
1100	} *cmd;
1101	size_t cmd_size, view_id_size;
1102	const struct vmw_resource *ctx = vmw_cbs_context(cbs);
1103
1104	vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS);
1105	view_id_size = cbs->bind_cmd_count*sizeof(uint32);
1106	cmd_size = sizeof(*cmd) + view_id_size;
1107	cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
1108	if (!cmd)
1109		return -ENOMEM;
1110
1111	cmd->header.id = SVGA_3D_CMD_DX_SET_CS_UA_VIEWS;
1112	cmd->header.size = sizeof(cmd->body) + view_id_size;
1113
1114	/* Start index is specified by user-space */
1115	cmd->body.startIndex = cbs->ua_views[1].index;
1116
1117	memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size);
1118
1119	vmw_fifo_commit(ctx->dev_priv, cmd_size);
1120
1121	return 0;
1122}
1123
1124/**
1125 * vmw_binding_emit_dirty - Issue delayed binding commands
1126 *
1127 * @cbs: Pointer to the context's struct vmw_ctx_binding_state
1128 *
1129 * This function issues the delayed binding commands that arise from
1130 * previous scrub / unscrub calls. These binding commands are typically
1131 * commands that batch a number of bindings and therefore it makes sense
1132 * to delay them.
1133 */
1134static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs)
1135{
1136	int ret = 0;
1137	unsigned long hit = 0;
1138
1139	while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit))
1140	      < VMW_BINDING_NUM_BITS) {
1141
1142		switch (hit) {
1143		case VMW_BINDING_RT_BIT:
1144			ret = vmw_emit_set_rt(cbs);
1145			break;
1146		case VMW_BINDING_PS_BIT:
1147			ret = vmw_binding_emit_dirty_ps(cbs);
1148			break;
1149		case VMW_BINDING_SO_T_BIT:
1150			ret = vmw_emit_set_so_target(cbs);
1151			break;
1152		case VMW_BINDING_VB_BIT:
1153			ret = vmw_emit_set_vb(cbs);
1154			break;
1155		case VMW_BINDING_UAV_BIT:
1156			ret = vmw_emit_set_uav(cbs);
1157			break;
1158		case VMW_BINDING_CS_UAV_BIT:
1159			ret = vmw_emit_set_cs_uav(cbs);
1160			break;
1161		default:
1162			BUG();
1163		}
1164		if (ret)
1165			return ret;
1166
1167		__clear_bit(hit, &cbs->dirty);
1168		hit++;
1169	}
1170
1171	return 0;
1172}
1173
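    /*
     * Example of the deferred path (illustrative only): scrubbing a
     * vertex buffer binding via vmw_binding_scrub_vb() below merely sets
     * a slot bit in cbs->dirty_vb plus VMW_BINDING_VB_BIT in cbs->dirty;
     * the SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command itself is emitted
     * from here via vmw_emit_set_vb(), batching every slot dirtied since
     * the last flush.
     */
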
1174/**
1175 * vmw_binding_scrub_sr - Schedule a dx shader resource binding
1176 * scrub from a context
1177 *
1178 * @bi: single binding information.
1179 * @rebind: Whether to issue a bind instead of scrub command.
1180 */
1181static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind)
1182{
1183	struct vmw_ctx_bindinfo_view *biv =
1184		container_of(bi, struct vmw_ctx_bindinfo_view, bi);
1185	struct vmw_ctx_binding_state *cbs =
1186		vmw_context_binding_state(bi->ctx);
1187
1188	__set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr);
1189	__set_bit(VMW_BINDING_PS_SR_BIT,
1190		  &cbs->per_shader[biv->shader_slot].dirty);
1191	__set_bit(VMW_BINDING_PS_BIT, &cbs->dirty);
1192
1193	return 0;
1194}
1195
1196/**
1197 * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding
1198 * scrub from a context
1199 *
1200 * @bi: single binding information.
1201 * @rebind: Whether to issue a bind instead of scrub command.
1202 */
1203static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind)
1204{
1205	struct vmw_ctx_binding_state *cbs =
1206		vmw_context_binding_state(bi->ctx);
1207
1208	__set_bit(VMW_BINDING_RT_BIT, &cbs->dirty);
1209
1210	return 0;
1211}
1212
1213/**
1214 * vmw_binding_scrub_so_target - Schedule a dx streamoutput buffer binding
1215 * scrub from a context
1216 *
1217 * @bi: single binding information.
1218 * @rebind: Whether to issue a bind instead of scrub command.
1219 */
1220static int vmw_binding_scrub_so_target(struct vmw_ctx_bindinfo *bi, bool rebind)
1221{
1222	struct vmw_ctx_binding_state *cbs =
1223		vmw_context_binding_state(bi->ctx);
1224
1225	__set_bit(VMW_BINDING_SO_T_BIT, &cbs->dirty);
1226
1227	return 0;
1228}
1229
1230/**
1231 * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding
1232 * scrub from a context
1233 *
1234 * @bi: single binding information.
1235 * @rebind: Whether to issue a bind instead of scrub command.
1236 */
1237static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind)
1238{
1239	struct vmw_ctx_bindinfo_vb *bivb =
1240		container_of(bi, struct vmw_ctx_bindinfo_vb, bi);
1241	struct vmw_ctx_binding_state *cbs =
1242		vmw_context_binding_state(bi->ctx);
1243
1244	__set_bit(bivb->slot, cbs->dirty_vb);
1245	__set_bit(VMW_BINDING_VB_BIT, &cbs->dirty);
1246
1247	return 0;
1248}
1249
1250/**
1251 * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context
1252 *
1253 * @bi: single binding information.
1254 * @rebind: Whether to issue a bind instead of scrub command.
1255 */
1256static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
1257{
1258	struct vmw_ctx_bindinfo_ib *binding =
1259		container_of(bi, typeof(*binding), bi);
1260	struct vmw_private *dev_priv = bi->ctx->dev_priv;
1261	struct {
1262		SVGA3dCmdHeader header;
1263		SVGA3dCmdDXSetIndexBuffer body;
1264	} *cmd;
1265
1266	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
1267	if (unlikely(cmd == NULL))
1268		return -ENOMEM;
1269
1270	cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
1271	cmd->header.size = sizeof(cmd->body);
1272	if (rebind) {
1273		cmd->body.sid = bi->res->id;
1274		cmd->body.format = binding->format;
1275		cmd->body.offset = binding->offset;
1276	} else {
1277		cmd->body.sid = SVGA3D_INVALID_ID;
1278		cmd->body.format = 0;
1279		cmd->body.offset = 0;
1280	}
1281
1282	vmw_fifo_commit(dev_priv, sizeof(*cmd));
1283
1284	return 0;
1285}
1286
1287static int vmw_binding_scrub_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
1288{
1289	struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);
1290
1291	__set_bit(VMW_BINDING_UAV_BIT, &cbs->dirty);
1292	return 0;
1293}
1294
1295static int vmw_binding_scrub_cs_uav(struct vmw_ctx_bindinfo *bi, bool rebind)
1296{
1297	struct vmw_ctx_binding_state *cbs = vmw_context_binding_state(bi->ctx);
1298
1299	__set_bit(VMW_BINDING_CS_UAV_BIT, &cbs->dirty);
1300	return 0;
1301}
1302
1303/**
1304 * vmw_binding_scrub_so - Scrub a streamoutput binding from context.
1305 * @bi: Single binding information.
1306 * @rebind: Whether to issue a bind instead of scrub command.
1307 */
1308static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind)
1309{
1310	struct vmw_ctx_bindinfo_so *binding =
1311		container_of(bi, typeof(*binding), bi);
1312	struct vmw_private *dev_priv = bi->ctx->dev_priv;
1313	struct {
1314		SVGA3dCmdHeader header;
1315		SVGA3dCmdDXSetStreamOutput body;
1316	} *cmd;
1317
1318	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
1319	if (!cmd)
1320		return -ENOMEM;
1321
1322	cmd->header.id = SVGA_3D_CMD_DX_SET_STREAMOUTPUT;
1323	cmd->header.size = sizeof(cmd->body);
1324	cmd->body.soid = rebind ? bi->res->id : SVGA3D_INVALID_ID;
1325	vmw_fifo_commit(dev_priv, sizeof(*cmd));
1326
1327	return 0;
1328}
1329
1330/**
1331 * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with
1332 * memory accounting.
1333 *
1334 * @dev_priv: Pointer to a device private structure.
1335 *
1336 * Returns a pointer to a newly allocated struct or an error pointer on error.
1337 */
1338struct vmw_ctx_binding_state *
1339vmw_binding_state_alloc(struct vmw_private *dev_priv)
1340{
1341	struct vmw_ctx_binding_state *cbs;
1342	struct ttm_operation_ctx ctx = {
1343		.interruptible = false,
1344		.no_wait_gpu = false
1345	};
1346	int ret;
1347
1348	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs),
1349				&ctx);
1350	if (ret)
1351		return ERR_PTR(ret);
1352
1353	cbs = vzalloc(sizeof(*cbs));
1354	if (!cbs) {
1355		ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
1356		return ERR_PTR(-ENOMEM);
1357	}
1358
1359	cbs->dev_priv = dev_priv;
1360	INIT_LIST_HEAD(&cbs->list);
1361
1362	return cbs;
1363}
1364
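    /*
     * Callers are expected to check for error pointers, e.g. (sketch):
     *
     *	cbs = vmw_binding_state_alloc(dev_priv);
     *	if (IS_ERR(cbs))
     *		return PTR_ERR(cbs);
     */
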
1365/**
1366 * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its
1367 * memory accounting info.
1368 *
1369 * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed.
1370 */
1371void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs)
1372{
1373	struct vmw_private *dev_priv = cbs->dev_priv;
1374
1375	vfree(cbs);
1376	ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs));
1377}
1378
1379/**
1380 * vmw_binding_state_list - Get the binding list of a
1381 * struct vmw_ctx_binding_state
1382 *
1383 * @cbs: Pointer to the struct vmw_ctx_binding_state
1384 *
1385 * Returns the binding list which can be used to traverse through the bindings
1386 * and access the resource information of all bindings.
1387 */
1388struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
1389{
1390	return &cbs->list;
1391}
1392
1393/**
1394 * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
1395 *
1396 * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
1397 *
1398 * Drops all bindings registered in @cbs. No device binding actions are
1399 * performed.
1400 */
1401void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
1402{
1403	struct vmw_ctx_bindinfo *entry, *next;
1404
1405	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
1406		vmw_binding_drop(entry);
1407}
1408
1409/**
1410 * vmw_binding_dirtying - Return whether a binding type is dirtying its resource
1411 * @binding_type: The binding type
1412 *
1413 * Each time a resource is put on the validation list as the result of a
1414 * context binding referencing it, we need to determine whether that resource
1415 * will be dirtied (written to by the GPU) as a result of the corresponding
1416 * GPU operation. Currently rendertarget-, depth-stencil-, stream-output-target
1417 * and unordered access view bindings are capable of dirtying their resources.
1418 *
1419 * Return: Whether the binding type dirties the resource its binding points to.
1420 */
1421u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type)
1422{
1423	static u32 is_binding_dirtying[vmw_ctx_binding_max] = {
1424		[vmw_ctx_binding_rt] = VMW_RES_DIRTY_SET,
1425		[vmw_ctx_binding_dx_rt] = VMW_RES_DIRTY_SET,
1426		[vmw_ctx_binding_ds] = VMW_RES_DIRTY_SET,
1427		[vmw_ctx_binding_so_target] = VMW_RES_DIRTY_SET,
1428		[vmw_ctx_binding_uav] = VMW_RES_DIRTY_SET,
1429		[vmw_ctx_binding_cs_uav] = VMW_RES_DIRTY_SET,
1430	};
1431
1432	/* Review this function as new bindings are added. */
1433	BUILD_BUG_ON(vmw_ctx_binding_max != 14);
1434	return is_binding_dirtying[binding_type];
1435}
1436
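    /*
     * Usage sketch (validation_add_resource is a placeholder, not a
     * function defined in this file):
     *
     *	u32 dirty = vmw_binding_dirtying(bi->bt);
     *
     *	ret = validation_add_resource(sw_context, bi->res, dirty);
     */
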
1437/*
1438 * This function is unused at run-time, and only used to hold various build
1439 * asserts important for code optimization assumptions.
1440 */
1441static void vmw_binding_build_asserts(void)
1442{
1443	BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3);
1444	BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX);
1445	BUILD_BUG_ON(sizeof(uint32) != sizeof(u32));
1446
1447	/*
1448	 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various
1449	 * view id arrays.
1450	 */
1451	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX);
1452	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS);
1453	BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS);
1454
1455	/*
1456	 * struct vmw_ctx_binding_state::bind_cmd_buffer is used for
1457	 * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers
1458	 */
1459	BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) >
1460		     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
1461	BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) >
1462		     VMW_MAX_VIEW_BINDINGS*sizeof(u32));
1463}