v4.6
   1/**************************************************************************
   2 *
   3 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include "vmwgfx_drv.h"
  29#include "ttm/ttm_bo_api.h"
  30
  31/*
  32 * Size of inline command buffers. Try to make sure that a page size is a
  33 * multiple of the DMA pool allocation size.
  34 */
  35#define VMW_CMDBUF_INLINE_ALIGN 64
  36#define VMW_CMDBUF_INLINE_SIZE \
  37	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
  38
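/*
 * A minimal worked example of the arithmetic above, assuming
 * (hypothetically) that sizeof(SVGACBHeader) is 32 bytes: ALIGN(32, 64)
 * rounds the header up to 64, so VMW_CMDBUF_INLINE_SIZE becomes
 * 1024 - 64 = 960 bytes, and four 1024-byte header-plus-inline-space
 * chunks tile a 4096-byte page exactly, as the comment above intends.
 */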
  39/**
  40 * struct vmw_cmdbuf_context - Command buffer context queues
  41 *
  42 * @submitted: List of command buffers that have been submitted to the
  43 * manager but not yet submitted to hardware.
  44 * @hw_submitted: List of command buffers submitted to hardware.
  45 * @preempted: List of preempted command buffers.
  46 * @num_hw_submitted: Number of buffers currently being processed by hardware
  47 */
  48struct vmw_cmdbuf_context {
  49	struct list_head submitted;
  50	struct list_head hw_submitted;
  51	struct list_head preempted;
  52	unsigned num_hw_submitted;
  53};
  54
  55/**
  56 * struct vmw_cmdbuf_man - Command buffer manager
  57 *
  58 * @cur_mutex: Mutex protecting the command buffer used for incremental small
  59 * kernel command submissions, @cur.
  60 * @space_mutex: Mutex to protect against starvation when we allocate
  61 * main pool buffer space.
  62 * @work: A struct work_struct implementing command buffer error handling.
  63 * Immutable.
  64 * @dev_priv: Pointer to the device private struct. Immutable.
  65 * @ctx: Array of command buffer context queues. The queues and the context
  66 * data is protected by @lock.
  67 * @error: List of command buffers that have caused device errors.
  68 * Protected by @lock.
  69 * @mm: Range manager for the command buffer space. Manager allocations and
  70 * frees are protected by @lock.
  71 * @cmd_space: Buffer object for the command buffer space, unless we were
  72 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
  73 * @map_obj: Mapping state for @cmd_space. Immutable.
  74 * @map: Pointer to command buffer space. May be a mapped buffer object or
  75 * a contiguous coherent DMA memory allocation. Immutable.
  76 * @cur: Command buffer for small kernel command submissions. Protected by
  77 * the @cur_mutex.
  78 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
  79 * @default_size: Default size for the @cur command buffer. Immutable.
  80 * @max_hw_submitted: Max number of in-flight command buffers the device can
  81 * handle. Immutable.
  82 * @lock: Spinlock protecting command submission queues.
  83 * @headers: Pool of DMA memory for device command buffer headers.
  84 * Internal protection.
  85 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
  86 * space for inline data. Internal protection.
  87 * @tasklet: Tasklet struct for irq processing. Immutable.
  88 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
  89 * space.
  90 * @idle_queue: Wait queue for processes waiting for command buffer idle.
  91 * @irq_on: Whether the process function has requested irq to be turned on.
  92 * Protected by @lock.
  93 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
  94 * allocation. Immutable.
  95 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
  96 * Typically this is false only during bootstrap.
  97 * @handle: DMA address handle for the command buffer space if @using_mob is
  98 * false. Immutable.
  99 * @size: The size of the command buffer space. Immutable.
 100 */
 101struct vmw_cmdbuf_man {
 102	struct mutex cur_mutex;
 103	struct mutex space_mutex;
 104	struct work_struct work;
 105	struct vmw_private *dev_priv;
 106	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
 107	struct list_head error;
 108	struct drm_mm mm;
 109	struct ttm_buffer_object *cmd_space;
 110	struct ttm_bo_kmap_obj map_obj;
 111	u8 *map;
 112	struct vmw_cmdbuf_header *cur;
 113	size_t cur_pos;
 114	size_t default_size;
 115	unsigned max_hw_submitted;
 116	spinlock_t lock;
 117	struct dma_pool *headers;
 118	struct dma_pool *dheaders;
 119	struct tasklet_struct tasklet;
 120	wait_queue_head_t alloc_queue;
 121	wait_queue_head_t idle_queue;
 122	bool irq_on;
 123	bool using_mob;
 124	bool has_pool;
 125	dma_addr_t handle;
 126	size_t size;
 127};
 128
 129/**
 130 * struct vmw_cmdbuf_header - Command buffer metadata
 131 *
 132 * @man: The command buffer manager.
 133 * @cb_header: Device command buffer header, allocated from a DMA pool.
 134 * @cb_context: The device command buffer context.
 135 * @list: List head for attaching to the manager lists.
 136 * @node: The range manager node.
 137 * @handle: The DMA address of @cb_header. Handed to the device on command
 138 * buffer submission.
 139 * @cmd: Pointer to the command buffer space of this buffer.
 140 * @size: Size of the command buffer space of this buffer.
 141 * @reserved: Reserved space of this buffer.
 142 * @inline_space: Whether inline command buffer space is used.
 143 */
 144struct vmw_cmdbuf_header {
 145	struct vmw_cmdbuf_man *man;
 146	SVGACBHeader *cb_header;
 147	SVGACBContext cb_context;
 148	struct list_head list;
 149	struct drm_mm_node node;
 150	dma_addr_t handle;
 151	u8 *cmd;
 152	size_t size;
 153	size_t reserved;
 154	bool inline_space;
 155};
 156
 157/**
 158 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 159 * command buffer space.
 160 *
 161 * @cb_header: Device command buffer header.
 162 * @cmd: Inline command buffer space.
 163 */
 164struct vmw_cmdbuf_dheader {
 165	SVGACBHeader cb_header;
 166	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
 167};
 168
 169/**
 170 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 171 *
 172 * @page_size: Size of requested command buffer space in pages.
 173 * @node: Pointer to the range manager node.
 174 * @done: True if this allocation has succeeded.
 175 */
 176struct vmw_cmdbuf_alloc_info {
 177	size_t page_size;
 178	struct drm_mm_node *node;
 179	bool done;
 180};
 181
 182/* Loop over each context in the command buffer manager. */
 183#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
 184	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
 185	     ++(_i), ++(_ctx))
 186
 187static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
 188
 189
 190/**
 191 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 192 *
 193 * @man: The command buffer manager.
 194 * @interruptible: Whether to wait interruptible when locking.
 195 */
 196static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
 197{
 198	if (interruptible) {
 199		if (mutex_lock_interruptible(&man->cur_mutex))
 200			return -ERESTARTSYS;
 201	} else {
 202		mutex_lock(&man->cur_mutex);
 203	}
 204
 205	return 0;
 206}
 207
 208/**
 209 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 210 *
 211 * @man: The command buffer manager.
 212 */
 213static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
 214{
 215	mutex_unlock(&man->cur_mutex);
 216}
 217
 218/**
 219 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 220 * been used for the device context with inline command buffers.
 221 * Need not be called locked.
 222 *
 223 * @header: Pointer to the header to free.
 224 */
 225static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
 226{
 227	struct vmw_cmdbuf_dheader *dheader;
 228
 229	if (WARN_ON_ONCE(!header->inline_space))
 230		return;
 231
 232	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
 233			       cb_header);
 234	dma_pool_free(header->man->dheaders, dheader, header->handle);
 235	kfree(header);
 236}
 237
 238/**
 239 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 240 * associated structures.
 241 *
 242 * @header: Pointer to the header to free.
 243 *
 244 * For internal use. Must be called with man::lock held.
 245 */
 246static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 247{
 248	struct vmw_cmdbuf_man *man = header->man;
 249
 250	lockdep_assert_held_once(&man->lock);
 251
 252	if (header->inline_space) {
 253		vmw_cmdbuf_header_inline_free(header);
 254		return;
 255	}
 256
 257	drm_mm_remove_node(&header->node);
 258	wake_up_all(&man->alloc_queue);
 259	if (header->cb_header)
 260		dma_pool_free(man->headers, header->cb_header,
 261			      header->handle);
 262	kfree(header);
 263}
 264
 265/**
 266 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 267 * associated structures.
 268 *
 269 * @header: Pointer to the header to free.
 270 */
 271void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 272{
 273	struct vmw_cmdbuf_man *man = header->man;
 274
 275	/* Avoid locking if inline_space */
 276	if (header->inline_space) {
 277		vmw_cmdbuf_header_inline_free(header);
 278		return;
 279	}
 280	spin_lock_bh(&man->lock);
 281	__vmw_cmdbuf_header_free(header);
 282	spin_unlock_bh(&man->lock);
 283}
 284
 285
 286/**
 287 * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
 288 *
 289 * @header: The header of the buffer to submit.
 290 */
 291static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
 292{
 293	struct vmw_cmdbuf_man *man = header->man;
 294	u32 val;
 295
 296	if (sizeof(header->handle) > 4)
 297		val = (header->handle >> 32);
 298	else
 299		val = 0;
 300	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
 301
 302	val = (header->handle & 0xFFFFFFFFULL);
 303	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
 304	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
 305
 306	return header->cb_header->status;
 307}
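/*
 * The two register writes above split the 64-bit DMA address of the
 * command buffer header across SVGA_REG_COMMAND_HIGH and
 * SVGA_REG_COMMAND_LOW. A minimal sketch, assuming a hypothetical
 * handle of 0x0000000123456000 submitted on context 0:
 *
 *	high = 0x00000001;                     (handle >> 32)
 *	low  = 0x23456000 | SVGA_CB_CONTEXT_0; (context in the low bits)
 *
 * The low register is written last: that write is what hands the buffer
 * to the device, which is why the status can be read back right after.
 */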
 308
 309/**
 310 * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
 311 *
 312 * @ctx: The command buffer context to initialize
 313 */
 314static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
 315{
 316	INIT_LIST_HEAD(&ctx->hw_submitted);
 317	INIT_LIST_HEAD(&ctx->submitted);
 318	INIT_LIST_HEAD(&ctx->preempted);
 319	ctx->num_hw_submitted = 0;
 320}
 321
 322/**
 323 * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
 324 * context.
 325 *
 326 * @man: The command buffer manager.
 327 * @ctx: The command buffer context.
 328 *
 329 * Submits command buffers to hardware until there are no more command
 330 * buffers to submit or the hardware can't handle more command buffers.
 331 */
 332static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
 333				  struct vmw_cmdbuf_context *ctx)
 334{
 335	while (ctx->num_hw_submitted < man->max_hw_submitted &&
 336	      !list_empty(&ctx->submitted)) {
 337		struct vmw_cmdbuf_header *entry;
 338		SVGACBStatus status;
 339
 340		entry = list_first_entry(&ctx->submitted,
 341					 struct vmw_cmdbuf_header,
 342					 list);
 343
 344		status = vmw_cmdbuf_header_submit(entry);
 345
 346		/* This should never happen */
 347		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
 348			entry->cb_header->status = SVGA_CB_STATUS_NONE;
 349			break;
 350		}
 351
 352		list_del(&entry->list);
 353		list_add_tail(&entry->list, &ctx->hw_submitted);
 354		ctx->num_hw_submitted++;
 355	}
 356
 357}
 358
 359/**
 360 * vmw_cmdbuf_ctx_process: Process a command buffer context.
 361 *
 362 * @man: The command buffer manager.
 363 * @ctx: The command buffer context.
 364 *
 365 * Submit command buffers to hardware if possible, and process finished
 366 * buffers. Typically freeing them, but on preemption or error take
 367 * appropriate action. Wake up waiters if appropriate.
 368 */
 369static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
 370				   struct vmw_cmdbuf_context *ctx,
 371				   int *notempty)
 372{
 373	struct vmw_cmdbuf_header *entry, *next;
 374
 375	vmw_cmdbuf_ctx_submit(man, ctx);
 376
 377	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
 378		SVGACBStatus status = entry->cb_header->status;
 379
 380		if (status == SVGA_CB_STATUS_NONE)
 381			break;
 382
 383		list_del(&entry->list);
 384		wake_up_all(&man->idle_queue);
 385		ctx->num_hw_submitted--;
 386		switch (status) {
 387		case SVGA_CB_STATUS_COMPLETED:
 388			__vmw_cmdbuf_header_free(entry);
 389			break;
 390		case SVGA_CB_STATUS_COMMAND_ERROR:
 391		case SVGA_CB_STATUS_CB_HEADER_ERROR:
 392			list_add_tail(&entry->list, &man->error);
 393			schedule_work(&man->work);
 394			break;
 395		case SVGA_CB_STATUS_PREEMPTED:
 396			list_add(&entry->list, &ctx->preempted);
 397			break;
 398		default:
 399			WARN_ONCE(true, "Undefined command buffer status.\n");
 400			__vmw_cmdbuf_header_free(entry);
 401			break;
 402		}
 403	}
 404
 405	vmw_cmdbuf_ctx_submit(man, ctx);
 406	if (!list_empty(&ctx->submitted))
 407		(*notempty)++;
 408}
 409
 410/**
 411 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 412 * switch on and off irqs as appropriate.
 413 *
 414 * @man: The command buffer manager.
 415 *
 416 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 417 * command buffers left that are not submitted to hardware, make sure
 418 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 419 */
 420static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
 421{
 422	int notempty;
 423	struct vmw_cmdbuf_context *ctx;
 424	int i;
 425
 426retry:
 427	notempty = 0;
 428	for_each_cmdbuf_ctx(man, i, ctx)
 429		vmw_cmdbuf_ctx_process(man, ctx, &notempty);
 430
 431	if (man->irq_on && !notempty) {
 432		vmw_generic_waiter_remove(man->dev_priv,
 433					  SVGA_IRQFLAG_COMMAND_BUFFER,
 434					  &man->dev_priv->cmdbuf_waiters);
 435		man->irq_on = false;
 436	} else if (!man->irq_on && notempty) {
 437		vmw_generic_waiter_add(man->dev_priv,
 438				       SVGA_IRQFLAG_COMMAND_BUFFER,
 439				       &man->dev_priv->cmdbuf_waiters);
 440		man->irq_on = true;
 441
 442		/* Rerun in case we just missed an irq. */
 443		goto retry;
 444	}
 445}
 446
 447/**
 448 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 449 * command buffer context
 450 *
 451 * @man: The command buffer manager.
 452 * @header: The header of the buffer to submit.
 453 * @cb_context: The command buffer context to use.
 454 *
 455 * This function adds @header to the "submitted" queue of the command
 456 * buffer context identified by @cb_context. It then calls the command buffer
 457 * manager processing to potentially submit the buffer to hardware.
 458 * @man->lock needs to be held when calling this function.
 459 */
 460static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
 461			       struct vmw_cmdbuf_header *header,
 462			       SVGACBContext cb_context)
 463{
 464	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
 465		header->cb_header->dxContext = 0;
 466	header->cb_context = cb_context;
 467	list_add_tail(&header->list, &man->ctx[cb_context].submitted);
 468
 469	vmw_cmdbuf_man_process(man);
 470}
 471
 472/**
 473 * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
 474 * handler implemented as a tasklet.
 475 *
 476 * @data: Tasklet closure. A pointer to the command buffer manager cast to
 477 * an unsigned long.
 478 *
 479 * The bottom half (tasklet) of the interrupt handler simply calls into the
 480 * command buffer processor to free finished buffers and submit any
 481 * queued buffers to hardware.
 482 */
 483static void vmw_cmdbuf_man_tasklet(unsigned long data)
 484{
 485	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
 486
 487	spin_lock(&man->lock);
 488	vmw_cmdbuf_man_process(man);
 489	spin_unlock(&man->lock);
 490}
 491
 492/**
 493 * vmw_cmdbuf_work_func - The deferred work function that handles
 494 * command buffer errors.
 495 *
 496 * @work: The work func closure argument.
 497 *
 498 * Restarting the command buffer context after an error requires process
 499 * context, so it is deferred to this work function.
 500 */
 501static void vmw_cmdbuf_work_func(struct work_struct *work)
 502{
 503	struct vmw_cmdbuf_man *man =
 504		container_of(work, struct vmw_cmdbuf_man, work);
 505	struct vmw_cmdbuf_header *entry, *next;
 506	uint32_t dummy;
 507	bool restart = false;
 508
 509	spin_lock_bh(&man->lock);
 510	list_for_each_entry_safe(entry, next, &man->error, list) {
 511		restart = true;
 512		DRM_ERROR("Command buffer error.\n");
 513
 514		list_del(&entry->list);
 515		__vmw_cmdbuf_header_free(entry);
 516		wake_up_all(&man->idle_queue);
 517	}
 518	spin_unlock_bh(&man->lock);
 519
 520	if (restart && vmw_cmdbuf_startstop(man, true))
 521		DRM_ERROR("Failed restarting command buffer context 0.\n");
 522
 523	/* Send a new fence in case one was removed */
 524	vmw_fifo_send_fence(man->dev_priv, &dummy);
 525}
 526
 527/**
 528 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 529 *
 530 * @man: The command buffer manager.
 531 * @check_preempted: Check also the preempted queue for pending command buffers.
 532 *
 533 */
 534static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
 535				bool check_preempted)
 536{
 537	struct vmw_cmdbuf_context *ctx;
 538	bool idle = false;
 539	int i;
 540
 541	spin_lock_bh(&man->lock);
 542	vmw_cmdbuf_man_process(man);
 543	for_each_cmdbuf_ctx(man, i, ctx) {
 544		if (!list_empty(&ctx->submitted) ||
 545		    !list_empty(&ctx->hw_submitted) ||
 546		    (check_preempted && !list_empty(&ctx->preempted)))
 547			goto out_unlock;
 548	}
 549
 550	idle = list_empty(&man->error);
 551
 552out_unlock:
 553	spin_unlock_bh(&man->lock);
 554
 555	return idle;
 556}
 557
 558/**
 559 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 560 * command submissions
 561 *
 562 * @man: The command buffer manager.
 563 *
 564 * Flushes the current command buffer without allocating a new one. A new one
 565 * is automatically allocated when needed. Call with @man->cur_mutex held.
 566 */
 567static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
 568{
 569	struct vmw_cmdbuf_header *cur = man->cur;
 570
 571	WARN_ON(!mutex_is_locked(&man->cur_mutex));
 572
 573	if (!cur)
 574		return;
 575
 576	spin_lock_bh(&man->lock);
 577	if (man->cur_pos == 0) {
 578		__vmw_cmdbuf_header_free(cur);
 579		goto out_unlock;
 580	}
 581
 582	man->cur->cb_header->length = man->cur_pos;
 583	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
 584out_unlock:
 585	spin_unlock_bh(&man->lock);
 586	man->cur = NULL;
 587	man->cur_pos = 0;
 588}
 589
 590/**
 591 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 592 * command submissions
 593 *
 594 * @man: The command buffer manager.
 595 * @interruptible: Whether to sleep interruptible when sleeping.
 596 *
 597 * Flushes the current command buffer without allocating a new one. A new one
 598 * is automatically allocated when needed.
 599 */
 600int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
 601			 bool interruptible)
 602{
 603	int ret = vmw_cmdbuf_cur_lock(man, interruptible);
 604
 605	if (ret)
 606		return ret;
 607
 608	__vmw_cmdbuf_cur_flush(man);
 609	vmw_cmdbuf_cur_unlock(man);
 610
 611	return 0;
 612}
 613
 614/**
 615 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 616 *
 617 * @man: The command buffer manager.
 618 * @interruptible: Sleep interruptible while waiting.
 619 * @timeout: Time out after this many ticks.
 620 *
 621 * Wait until the command buffer manager has processed all command buffers,
 622 * or until a timeout occurs. If a timeout occurs, the function will return
 623 * -EBUSY.
 624 */
 625int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
 626		    unsigned long timeout)
 627{
 628	int ret;
 629
 630	ret = vmw_cmdbuf_cur_flush(man, interruptible);
 631	vmw_generic_waiter_add(man->dev_priv,
 632			       SVGA_IRQFLAG_COMMAND_BUFFER,
 633			       &man->dev_priv->cmdbuf_waiters);
 634
 635	if (interruptible) {
 636		ret = wait_event_interruptible_timeout
 637			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
 638			 timeout);
 639	} else {
 640		ret = wait_event_timeout
 641			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
 642			 timeout);
 643	}
 644	vmw_generic_waiter_remove(man->dev_priv,
 645				  SVGA_IRQFLAG_COMMAND_BUFFER,
 646				  &man->dev_priv->cmdbuf_waiters);
 647	if (ret == 0) {
 648		if (!vmw_cmdbuf_man_idle(man, true))
 649			ret = -EBUSY;
 650		else
 651			ret = 0;
 652	}
 653	if (ret > 0)
 654		ret = 0;
 655
 656	return ret;
 657}
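/*
 * A minimal caller-side sketch of waiting for idle, assuming process
 * context and an already initialized manager (the 10-second timeout is
 * an arbitrary example value, mirroring the teardown paths further down):
 *
 *	int ret = vmw_cmdbuf_idle(man, true, 10 * HZ);
 *
 *	if (ret == -EBUSY)
 *		DRM_ERROR("Command buffer manager failed to idle.\n");
 */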
 658
 659/**
 660 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 661 *
 662 * @man: The command buffer manager.
 663 * @info: Allocation info. Will hold the size on entry and allocated mm node
 664 * on successful return.
 665 *
 666 * Try to allocate buffer space from the main pool. Returns true and sets
 667 * @info->done on success; returns false if no space is currently available.
 668 */
 669static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
 670				 struct vmw_cmdbuf_alloc_info *info)
 671{
 672	int ret;
 673
 674	if (info->done)
 675		return true;
 676 
 677	memset(info->node, 0, sizeof(*info->node));
 678	spin_lock_bh(&man->lock);
 679	ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
 680					 0, 0,
 681					 DRM_MM_SEARCH_DEFAULT,
 682					 DRM_MM_CREATE_DEFAULT);
 683	if (ret) {
 684		vmw_cmdbuf_man_process(man);
 685		ret = drm_mm_insert_node_generic(&man->mm, info->node,
 686						 info->page_size, 0, 0,
 687						 DRM_MM_SEARCH_DEFAULT,
 688						 DRM_MM_CREATE_DEFAULT);
 689	}
 690
 691	spin_unlock_bh(&man->lock);
 692	info->done = !ret;
 693
 694	return info->done;
 695}
 696
 697/**
 698 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 699 *
 700 * @man: The command buffer manager.
 701 * @node: Pointer to pre-allocated range-manager node.
 702 * @size: The size of the allocation.
 703 * @interruptible: Whether to sleep interruptible while waiting for space.
 704 *
 705 * This function allocates buffer space from the main pool, and if there is
 706 * no space currently available, it turns on IRQ handling and sleeps
 707 * waiting for it to become available.
 708 */
 709static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
 710				  struct drm_mm_node *node,
 711				  size_t size,
 712				  bool interruptible)
 713{
 714	struct vmw_cmdbuf_alloc_info info;
 715
 716	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
 717	info.node = node;
 718	info.done = false;
 719
 720	/*
 721	 * To prevent starvation of large requests, only one allocating call
 722	 * at a time waiting for space.
 723	 */
 724	if (interruptible) {
 725		if (mutex_lock_interruptible(&man->space_mutex))
 726			return -ERESTARTSYS;
 727	} else {
 728		mutex_lock(&man->space_mutex);
 729	}
 730
 731	/* Try to allocate space without waiting. */
 732	if (vmw_cmdbuf_try_alloc(man, &info))
 733		goto out_unlock;
 734
 735	vmw_generic_waiter_add(man->dev_priv,
 736			       SVGA_IRQFLAG_COMMAND_BUFFER,
 737			       &man->dev_priv->cmdbuf_waiters);
 738
 739	if (interruptible) {
 740		int ret;
 741
 742		ret = wait_event_interruptible
 743			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
 744		if (ret) {
 745			vmw_generic_waiter_remove
 746				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
 747				 &man->dev_priv->cmdbuf_waiters);
 748			mutex_unlock(&man->space_mutex);
 749			return ret;
 750		}
 751	} else {
 752		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
 753	}
 754	vmw_generic_waiter_remove(man->dev_priv,
 755				  SVGA_IRQFLAG_COMMAND_BUFFER,
 756				  &man->dev_priv->cmdbuf_waiters);
 757
 758out_unlock:
 759	mutex_unlock(&man->space_mutex);
 760
 761	return 0;
 762}
 763
 764/**
 765 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 766 * space from the main pool.
 767 *
 768 * @man: The command buffer manager.
 769 * @header: Pointer to the header to set up.
 770 * @size: The requested size of the buffer space.
 771 * @interruptible: Whether to sleep interruptible while waiting for space.
 772 */
 773static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
 774				 struct vmw_cmdbuf_header *header,
 775				 size_t size,
 776				 bool interruptible)
 777{
 778	SVGACBHeader *cb_hdr;
 779	size_t offset;
 780	int ret;
 781
 782	if (!man->has_pool)
 783		return -ENOMEM;
 784
 785	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
 786
 787	if (ret)
 788		return ret;
 789
 790	header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
 791					   &header->handle);
 792	if (!header->cb_header) {
 793		ret = -ENOMEM;
 794		goto out_no_cb_header;
 795	}
 796
 797	header->size = header->node.size << PAGE_SHIFT;
 798	cb_hdr = header->cb_header;
 799	offset = header->node.start << PAGE_SHIFT;
 800	header->cmd = man->map + offset;
 801	memset(cb_hdr, 0, sizeof(*cb_hdr));
 802	if (man->using_mob) {
 803		cb_hdr->flags = SVGA_CB_FLAG_MOB;
 804		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
 805		cb_hdr->ptr.mob.mobOffset = offset;
 806	} else {
 807		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
 808	}
 809
 810	return 0;
 811
 812out_no_cb_header:
 813	spin_lock_bh(&man->lock);
 814	drm_mm_remove_node(&header->node);
 815	spin_unlock_bh(&man->lock);
 816
 817	return ret;
 818}
 819
 820/**
 821 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 822 * inline command buffer space.
 823 *
 824 * @man: The command buffer manager.
 825 * @header: Pointer to the header to set up.
 826 * @size: The requested size of the buffer space.
 827 */
 828static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
 829				   struct vmw_cmdbuf_header *header,
 830				   int size)
 831{
 832	struct vmw_cmdbuf_dheader *dheader;
 833	SVGACBHeader *cb_hdr;
 834
 835	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
 836		return -ENOMEM;
 837
 838	dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
 839				 &header->handle);
 840	if (!dheader)
 841		return -ENOMEM;
 842
 843	header->inline_space = true;
 844	header->size = VMW_CMDBUF_INLINE_SIZE;
 845	cb_hdr = &dheader->cb_header;
 846	header->cb_header = cb_hdr;
 847	header->cmd = dheader->cmd;
 848	memset(dheader, 0, sizeof(*dheader));
 849	cb_hdr->status = SVGA_CB_STATUS_NONE;
 850	cb_hdr->flags = SVGA_CB_FLAG_NONE;
 851	cb_hdr->ptr.pa = (u64)header->handle +
 852		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
 853
 854	return 0;
 855}
 856
 857/**
 858 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 859 * command buffer space.
 860 *
 861 * @man: The command buffer manager.
 862 * @size: The requested size of the buffer space.
 863 * @interruptible: Whether to sleep interruptible while waiting for space.
 864 * @p_header: points to a header pointer to populate on successful return.
 865 *
 866 * Returns a pointer to command buffer space if successful. Otherwise
 867 * returns an error pointer. The header pointer returned in @p_header should
 868 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 869 */
 870void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
 871		       size_t size, bool interruptible,
 872		       struct vmw_cmdbuf_header **p_header)
 873{
 874	struct vmw_cmdbuf_header *header;
 875	int ret = 0;
 876
 877	*p_header = NULL;
 878
 879	header = kzalloc(sizeof(*header), GFP_KERNEL);
 880	if (!header)
 881		return ERR_PTR(-ENOMEM);
 882
 883	if (size <= VMW_CMDBUF_INLINE_SIZE)
 884		ret = vmw_cmdbuf_space_inline(man, header, size);
 885	else
 886		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
 887
 888	if (ret) {
 889		kfree(header);
 890		return ERR_PTR(ret);
 891	}
 892
 893	header->man = man;
 894	INIT_LIST_HEAD(&header->list);
 895	header->cb_header->status = SVGA_CB_STATUS_NONE;
 896	*p_header = header;
 897
 898	return header->cmd;
 899}
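/*
 * A minimal usage sketch, assuming a hypothetical command size cmd_size
 * and a hypothetical fill_commands() helper; per the comment above, the
 * returned header is handed on to vmw_cmdbuf_reserve() and
 * vmw_cmdbuf_commit():
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *cmd = vmw_cmdbuf_alloc(man, cmd_size, true, &header);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	cmd = vmw_cmdbuf_reserve(man, cmd_size, SVGA3D_INVALID_ID,
 *				 true, header);
 *	fill_commands(cmd, cmd_size);
 *	vmw_cmdbuf_commit(man, cmd_size, header, false);
 */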
 900
 901/**
 902 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 903 * command buffer.
 904 *
 905 * @man: The command buffer manager.
 906 * @size: The requested size of the commands.
 907 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 908 * @interruptible: Whether to sleep interruptible while waiting for space.
 909 *
 910 * Returns a pointer to command buffer space if successful. Otherwise
 911 * returns an error pointer.
 912 */
 913static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
 914				    size_t size,
 915				    int ctx_id,
 916				    bool interruptible)
 917{
 918	struct vmw_cmdbuf_header *cur;
 919	void *ret;
 920
 921	if (vmw_cmdbuf_cur_lock(man, interruptible))
 922		return ERR_PTR(-ERESTARTSYS);
 923
 924	cur = man->cur;
 925	if (cur && (size + man->cur_pos > cur->size ||
 926		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
 927		     ctx_id != cur->cb_header->dxContext)))
 928		__vmw_cmdbuf_cur_flush(man);
 929
 930	if (!man->cur) {
 931		ret = vmw_cmdbuf_alloc(man,
 932				       max_t(size_t, size, man->default_size),
 933				       interruptible, &man->cur);
 934		if (IS_ERR(ret)) {
 935			vmw_cmdbuf_cur_unlock(man);
 936			return ret;
 937		}
 938
 939		cur = man->cur;
 940	}
 941
 942	if (ctx_id != SVGA3D_INVALID_ID) {
 943		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
 944		cur->cb_header->dxContext = ctx_id;
 945	}
 946
 947	cur->reserved = size;
 948
 949	return (void *) (man->cur->cmd + man->cur_pos);
 950}
 951
 952/**
 953 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 954 *
 955 * @man: The command buffer manager.
 956 * @size: The size of the commands actually written.
 957 * @flush: Whether to flush the command buffer immediately.
 958 */
 959static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
 960				  size_t size, bool flush)
 961{
 962	struct vmw_cmdbuf_header *cur = man->cur;
 963
 964	WARN_ON(!mutex_is_locked(&man->cur_mutex));
 965
 966	WARN_ON(size > cur->reserved);
 967	man->cur_pos += size;
 968	if (!size)
 969		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
 970	if (flush)
 971		__vmw_cmdbuf_cur_flush(man);
 972	vmw_cmdbuf_cur_unlock(man);
 973}
 974
 975/**
 976 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 977 *
 978 * @man: The command buffer manager.
 979 * @size: The requested size of the commands.
 980 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 981 * @interruptible: Whether to sleep interruptible while waiting for space.
 982 * @header: Header of the command buffer. NULL if the current command buffer
 983 * should be used.
 984 *
 985 * Returns a pointer to command buffer space if successful. Otherwise
 986 * returns an error pointer.
 987 */
 988void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
 989			 int ctx_id, bool interruptible,
 990			 struct vmw_cmdbuf_header *header)
 991{
 992	if (!header)
 993		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
 994
 995	if (size > header->size)
 996		return ERR_PTR(-EINVAL);
 997
 998	if (ctx_id != SVGA3D_INVALID_ID) {
 999		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
1000		header->cb_header->dxContext = ctx_id;
1001	}
1002
1003	header->reserved = size;
1004	return header->cmd;
1005}
1006
1007/**
1008 * vmw_cmdbuf_commit - Commit commands in a command buffer.
1009 *
1010 * @man: The command buffer manager.
1011 * @size: The size of the commands actually written.
1012 * @header: Header of the command buffer. NULL if the current command buffer
1013 * should be used.
1014 * @flush: Whether to flush the command buffer immediately.
1015 */
1016void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1017		       struct vmw_cmdbuf_header *header, bool flush)
1018{
1019	if (!header) {
1020		vmw_cmdbuf_commit_cur(man, size, flush);
1021		return;
1022	}
1023
1024	(void) vmw_cmdbuf_cur_lock(man, false);
1025	__vmw_cmdbuf_cur_flush(man);
1026	WARN_ON(size > header->reserved);
1027	man->cur = header;
1028	man->cur_pos = size;
1029	if (!size)
1030		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
1031	if (flush)
1032		__vmw_cmdbuf_cur_flush(man);
1033	vmw_cmdbuf_cur_unlock(man);
1034}
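/*
 * A minimal sketch of a small kernel submission through the current
 * command buffer (header == NULL), assuming a hypothetical size bytes of
 * commands; space is reserved from @cur and committed back:
 *
 *	void *cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID,
 *				       true, NULL);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	(write at most size bytes of commands at cmd)
 *	vmw_cmdbuf_commit(man, size, NULL, false);
 */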
1035
1036/**
1037 * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
1038 *
1039 * @man: The command buffer manager.
1040 */
1041void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
1042{
1043	if (!man)
1044		return;
1045
1046	tasklet_schedule(&man->tasklet);
1047}
1048
1049/**
1050 * vmw_cmdbuf_send_device_command - Send a command through the device context.
1051 *
1052 * @man: The command buffer manager.
1053 * @command: Pointer to the command to send.
1054 * @size: Size of the command.
1055 *
1056 * Synchronously sends a device context command.
1057 */
1058static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
1059					  const void *command,
1060					  size_t size)
1061{
1062	struct vmw_cmdbuf_header *header;
1063	int status;
1064	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
1065
1066	if (IS_ERR(cmd))
1067		return PTR_ERR(cmd);
1068
1069	memcpy(cmd, command, size);
1070	header->cb_header->length = size;
1071	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
1072	spin_lock_bh(&man->lock);
1073	status = vmw_cmdbuf_header_submit(header);
1074	spin_unlock_bh(&man->lock);
1075	vmw_cmdbuf_header_free(header);
1076
1077	if (status != SVGA_CB_STATUS_COMPLETED) {
1078		DRM_ERROR("Device context command failed with status %d\n",
1079			  status);
1080		return -EINVAL;
1081	}
1082
1083	return 0;
1084}
1085
1086/**
1087 * vmw_cmdbuf_startstop - Send a start / stop command through the device
1088 * context.
1089 *
1090 * @man: The command buffer manager.
1091 * @enable: Whether to enable or disable the context.
1092 *
1093 * Synchronously sends a device start / stop context command.
1094 */
1095static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
1096				bool enable)
1097{
1098	struct {
1099		uint32 id;
1100		SVGADCCmdStartStop body;
1101	} __packed cmd;
1102
1103	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
1104	cmd.body.enable = (enable) ? 1 : 0;
1105	cmd.body.context = SVGA_CB_CONTEXT_0;
1106
1107	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1108}
1109
1110/**
1111 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
1112 *
1113 * @man: The command buffer manager.
1114 * @size: The size of the main space pool.
1115 * @default_size: The default size of the command buffer for small kernel
1116 * submissions.
1117 *
1118 * Set the size and allocate the main command buffer space pool,
1119 * as well as the default size of the command buffer for
1120 * small kernel submissions. If successful, this enables large command
1121 * submissions. Note that this function requires that rudimentary command
1122 * submission is already available and that the MOB memory manager is alive.
1123 * Returns 0 on success. Negative error code on failure.
1124 */
1125int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
1126			     size_t size, size_t default_size)
1127{
1128	struct vmw_private *dev_priv = man->dev_priv;
1129	bool dummy;
1130	int ret;
1131
1132	if (man->has_pool)
1133		return -EINVAL;
1134
1135	/* First, try to allocate a huge chunk of DMA memory */
1136	size = PAGE_ALIGN(size);
1137	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
1138				      &man->handle, GFP_KERNEL);
1139	if (man->map) {
1140		man->using_mob = false;
1141	} else {
1142		/*
1143		 * DMA memory failed. If we can have command buffers in a
1144		 * MOB, try to use that instead. Note that this will
1145		 * actually call into the already enabled manager, when
1146		 * binding the MOB.
1147		 */
1148		if (!(dev_priv->capabilities & SVGA_CAP_DX))
1149			return -ENOMEM;
1150
1151		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
1152				    &vmw_mob_ne_placement, 0, false, NULL,
1153				    &man->cmd_space);
1154		if (ret)
1155			return ret;
1156
1157		man->using_mob = true;
1158		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
1159				  &man->map_obj);
1160		if (ret)
1161			goto out_no_map;
1162
1163		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
1164	}
1165
1166	man->size = size;
1167	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
1168
1169	man->has_pool = true;
1170
1171	/*
1172	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
1173	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
1174	 * needs to wait for space and we block on further command
1175	 * submissions to be able to free up space.
1176	 */
1177	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1178	DRM_INFO("Using command buffers with %s pool.\n",
1179		 (man->using_mob) ? "MOB" : "DMA");
1180
1181	return 0;
1182
1183out_no_map:
1184	if (man->using_mob)
1185		ttm_bo_unref(&man->cmd_space);
1186
1187	return ret;
1188}
1189
1190/**
1191 * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
1192 * inline command buffer submissions only.
1193 *
1194 * @dev_priv: Pointer to device private structure.
1195 *
 1196 * Returns a pointer to a command buffer manager on success or an error pointer
1197 * on failure. The command buffer manager will be enabled for submissions of
1198 * size VMW_CMDBUF_INLINE_SIZE only.
1199 */
1200struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
1201{
1202	struct vmw_cmdbuf_man *man;
1203	struct vmw_cmdbuf_context *ctx;
1204	int i;
1205	int ret;
1206
1207	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
1208		return ERR_PTR(-ENOSYS);
1209
1210	man = kzalloc(sizeof(*man), GFP_KERNEL);
1211	if (!man)
1212		return ERR_PTR(-ENOMEM);
1213
1214	man->headers = dma_pool_create("vmwgfx cmdbuf",
1215				       &dev_priv->dev->pdev->dev,
1216				       sizeof(SVGACBHeader),
1217				       64, PAGE_SIZE);
1218	if (!man->headers) {
1219		ret = -ENOMEM;
1220		goto out_no_pool;
1221	}
1222
1223	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
1224					&dev_priv->dev->pdev->dev,
1225					sizeof(struct vmw_cmdbuf_dheader),
1226					64, PAGE_SIZE);
1227	if (!man->dheaders) {
1228		ret = -ENOMEM;
1229		goto out_no_dpool;
1230	}
1231
1232	for_each_cmdbuf_ctx(man, i, ctx)
1233		vmw_cmdbuf_ctx_init(ctx);
1234
1235	INIT_LIST_HEAD(&man->error);
1236	spin_lock_init(&man->lock);
1237	mutex_init(&man->cur_mutex);
1238	mutex_init(&man->space_mutex);
1239	tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
1240		     (unsigned long) man);
1241	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1242	init_waitqueue_head(&man->alloc_queue);
1243	init_waitqueue_head(&man->idle_queue);
1244	man->dev_priv = dev_priv;
1245	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
1246	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
1247	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
1248			       &dev_priv->error_waiters);
1249	ret = vmw_cmdbuf_startstop(man, true);
1250	if (ret) {
1251		DRM_ERROR("Failed starting command buffer context 0.\n");
1252		vmw_cmdbuf_man_destroy(man);
1253		return ERR_PTR(ret);
1254	}
1255
1256	return man;
1257
1258out_no_dpool:
1259	dma_pool_destroy(man->headers);
1260out_no_pool:
1261	kfree(man);
1262
1263	return ERR_PTR(ret);
1264}
1265
1266/**
1267 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
1268 *
1269 * @man: Pointer to a command buffer manager.
1270 *
1271 * This function removes the main buffer space pool, and should be called
1272 * before MOB memory management is removed. When this function has been called,
1273 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
1274 * less are allowed, and the default size of the command buffer for small kernel
1275 * submissions is also set to this size.
1276 */
1277void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
1278{
1279	if (!man->has_pool)
1280		return;
1281
1282	man->has_pool = false;
1283	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1284	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
1285	if (man->using_mob) {
1286		(void) ttm_bo_kunmap(&man->map_obj);
1287		ttm_bo_unref(&man->cmd_space);
1288	} else {
1289		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
1290				  man->size, man->map, man->handle);
1291	}
1292}
1293
1294/**
1295 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
1296 *
1297 * @man: Pointer to a command buffer manager.
1298 *
1299 * This function idles and then destroys a command buffer manager.
1300 */
1301void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
1302{
1303	WARN_ON_ONCE(man->has_pool);
1304	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
1305	if (vmw_cmdbuf_startstop(man, false))
1306		DRM_ERROR("Failed stopping command buffer context 0.\n");
1307
1308	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
1309				  &man->dev_priv->error_waiters);
1310	tasklet_kill(&man->tasklet);
1311	(void) cancel_work_sync(&man->work);
1312	dma_pool_destroy(man->dheaders);
1313	dma_pool_destroy(man->headers);
1314	mutex_destroy(&man->cur_mutex);
1315	mutex_destroy(&man->space_mutex);
1316	kfree(man);
1317}
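/*
 * A minimal driver-side lifecycle sketch tying the functions above
 * together, assuming process context (the pool size is an arbitrary
 * example value): vmw_cmdbuf_remove_pool() must run before the MOB
 * memory manager goes away, and before vmw_cmdbuf_man_destroy():
 *
 *	man = vmw_cmdbuf_man_create(dev_priv);
 *	if (IS_ERR(man))
 *		return PTR_ERR(man);
 *	ret = vmw_cmdbuf_set_pool_size(man, 2 * 1024 * 1024,
 *				       VMW_CMDBUF_INLINE_SIZE);
 *	(submit command buffers ...)
 *	vmw_cmdbuf_remove_pool(man);
 *	vmw_cmdbuf_man_destroy(man);
 */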
v4.17
   1/**************************************************************************
   2 *
   3 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include <drm/ttm/ttm_bo_api.h>
  29
  30#include "vmwgfx_drv.h"
  31
  32/*
  33 * Size of inline command buffers. Try to make sure that a page size is a
  34 * multiple of the DMA pool allocation size.
  35 */
  36#define VMW_CMDBUF_INLINE_ALIGN 64
  37#define VMW_CMDBUF_INLINE_SIZE \
  38	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
  39
  40/**
  41 * struct vmw_cmdbuf_context - Command buffer context queues
  42 *
  43 * @submitted: List of command buffers that have been submitted to the
  44 * manager but not yet submitted to hardware.
  45 * @hw_submitted: List of command buffers submitted to hardware.
  46 * @preempted: List of preempted command buffers.
  47 * @num_hw_submitted: Number of buffers currently being processed by hardware
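 * @block_submission: Whether command submission from this context to hardware
 * is currently blocked.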
  48 */
  49struct vmw_cmdbuf_context {
  50	struct list_head submitted;
  51	struct list_head hw_submitted;
  52	struct list_head preempted;
  53	unsigned num_hw_submitted;
  54	bool block_submission;
  55};
  56
  57/**
  58 * struct vmw_cmdbuf_man - Command buffer manager
  59 *
  60 * @cur_mutex: Mutex protecting the command buffer used for incremental small
  61 * kernel command submissions, @cur.
  62 * @space_mutex: Mutex to protect against starvation when we allocate
  63 * main pool buffer space.
  64 * @error_mutex: Mutex to serialize the work queue error handling.
  65 * Note this is not needed if the same workqueue handler
  66 * can't race with itself...
  67 * @work: A struct work_struct implementing command buffer error handling.
  68 * Immutable.
  69 * @dev_priv: Pointer to the device private struct. Immutable.
  70 * @ctx: Array of command buffer context queues. The queues and the context
  71 * data is protected by @lock.
  72 * @error: List of command buffers that have caused device errors.
  73 * Protected by @lock.
  74 * @mm: Range manager for the command buffer space. Manager allocations and
  75 * frees are protected by @lock.
  76 * @cmd_space: Buffer object for the command buffer space, unless we were
  77 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
  78 * @map_obj: Mapping state for @cmd_space. Immutable.
  79 * @map: Pointer to command buffer space. May be a mapped buffer object or
  80 * a contiguous coherent DMA memory allocation. Immutable.
  81 * @cur: Command buffer for small kernel command submissions. Protected by
  82 * the @cur_mutex.
  83 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
  84 * @default_size: Default size for the @cur command buffer. Immutable.
  85 * @max_hw_submitted: Max number of in-flight command buffers the device can
  86 * handle. Immutable.
  87 * @lock: Spinlock protecting command submission queues.
  88 * @headers: Pool of DMA memory for device command buffer headers.
  89 * Internal protection.
  90 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
  91 * space for inline data. Internal protection.
  92 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
  93 * space.
  94 * @idle_queue: Wait queue for processes waiting for command buffer idle.
  95 * @irq_on: Whether the process function has requested irq to be turned on.
  96 * Protected by @lock.
  97 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
  98 * allocation. Immutable.
  99 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 100 * Typically this is false only during bootstrap.
 101 * @handle: DMA address handle for the command buffer space if @using_mob is
 102 * false. Immutable.
 103 * @size: The size of the command buffer space. Immutable.
 104 * @num_contexts: Number of contexts actually enabled.
 105 */
 106struct vmw_cmdbuf_man {
 107	struct mutex cur_mutex;
 108	struct mutex space_mutex;
 109	struct mutex error_mutex;
 110	struct work_struct work;
 111	struct vmw_private *dev_priv;
 112	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
 113	struct list_head error;
 114	struct drm_mm mm;
 115	struct ttm_buffer_object *cmd_space;
 116	struct ttm_bo_kmap_obj map_obj;
 117	u8 *map;
 118	struct vmw_cmdbuf_header *cur;
 119	size_t cur_pos;
 120	size_t default_size;
 121	unsigned max_hw_submitted;
 122	spinlock_t lock;
 123	struct dma_pool *headers;
 124	struct dma_pool *dheaders;
 125	wait_queue_head_t alloc_queue;
 126	wait_queue_head_t idle_queue;
 127	bool irq_on;
 128	bool using_mob;
 129	bool has_pool;
 130	dma_addr_t handle;
 131	size_t size;
 132	u32 num_contexts;
 133};
 134
 135/**
 136 * struct vmw_cmdbuf_header - Command buffer metadata
 137 *
 138 * @man: The command buffer manager.
 139 * @cb_header: Device command buffer header, allocated from a DMA pool.
 140 * @cb_context: The device command buffer context.
 141 * @list: List head for attaching to the manager lists.
 142 * @node: The range manager node.
 143 * @handle: The DMA address of @cb_header. Handed to the device on command
 144 * buffer submission.
 145 * @cmd: Pointer to the command buffer space of this buffer.
 146 * @size: Size of the command buffer space of this buffer.
 147 * @reserved: Reserved space of this buffer.
 148 * @inline_space: Whether inline command buffer space is used.
 149 */
 150struct vmw_cmdbuf_header {
 151	struct vmw_cmdbuf_man *man;
 152	SVGACBHeader *cb_header;
 153	SVGACBContext cb_context;
 154	struct list_head list;
 155	struct drm_mm_node node;
 156	dma_addr_t handle;
 157	u8 *cmd;
 158	size_t size;
 159	size_t reserved;
 160	bool inline_space;
 161};
 162
 163/**
 164 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 165 * command buffer space.
 166 *
 167 * @cb_header: Device command buffer header.
 168 * @cmd: Inline command buffer space.
 169 */
 170struct vmw_cmdbuf_dheader {
 171	SVGACBHeader cb_header;
 172	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
 173};
 174
 175/**
 176 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 177 *
 178 * @page_size: Size of requested command buffer space in pages.
 179 * @node: Pointer to the range manager node.
 180 * @done: True if this allocation has succeeded.
 181 */
 182struct vmw_cmdbuf_alloc_info {
 183	size_t page_size;
 184	struct drm_mm_node *node;
 185	bool done;
 186};
 187
 188/* Loop over each context in the command buffer manager. */
 189#define for_each_cmdbuf_ctx(_man, _i, _ctx)				\
 190	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
 191	     ++(_i), ++(_ctx))
 192
 193static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
 194				bool enable);
 195static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
 196
 197/**
 198 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 199 *
 200 * @man: The command buffer manager.
 201 * @interruptible: Whether to wait interruptible when locking.
 202 */
 203static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
 204{
 205	if (interruptible) {
 206		if (mutex_lock_interruptible(&man->cur_mutex))
 207			return -ERESTARTSYS;
 208	} else {
 209		mutex_lock(&man->cur_mutex);
 210	}
 211
 212	return 0;
 213}
 214
 215/**
 216 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 217 *
 218 * @man: The command buffer manager.
 219 */
 220static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
 221{
 222	mutex_unlock(&man->cur_mutex);
 223}
 224
 225/**
 226 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 227 * been used for the device context with inline command buffers.
 228 * Need not be called locked.
 229 *
 230 * @header: Pointer to the header to free.
 231 */
 232static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
 233{
 234	struct vmw_cmdbuf_dheader *dheader;
 235
 236	if (WARN_ON_ONCE(!header->inline_space))
 237		return;
 238
 239	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
 240			       cb_header);
 241	dma_pool_free(header->man->dheaders, dheader, header->handle);
 242	kfree(header);
 243}
 244
 245/**
 246 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 247 * associated structures.
 248 *
 249 * @header: Pointer to the header to free.
 250 *
 251 * For internal use. Must be called with man::lock held.
 252 */
 253static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 254{
 255	struct vmw_cmdbuf_man *man = header->man;
 256
 257	lockdep_assert_held_once(&man->lock);
 258
 259	if (header->inline_space) {
 260		vmw_cmdbuf_header_inline_free(header);
 261		return;
 262	}
 263
 264	drm_mm_remove_node(&header->node);
 265	wake_up_all(&man->alloc_queue);
 266	if (header->cb_header)
 267		dma_pool_free(man->headers, header->cb_header,
 268			      header->handle);
 269	kfree(header);
 270}
 271
 272/**
 273 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 274 * associated structures.
 275 *
 276 * @header: Pointer to the header to free.
 277 */
 278void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 279{
 280	struct vmw_cmdbuf_man *man = header->man;
 281
 282	/* Avoid locking if inline_space */
 283	if (header->inline_space) {
 284		vmw_cmdbuf_header_inline_free(header);
 285		return;
 286	}
 287	spin_lock(&man->lock);
 288	__vmw_cmdbuf_header_free(header);
 289	spin_unlock(&man->lock);
 290}
 291
 292
 293/**
 294 * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
 295 *
 296 * @header: The header of the buffer to submit.
 297 */
 298static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
 299{
 300	struct vmw_cmdbuf_man *man = header->man;
 301	u32 val;
 302
 303	val = upper_32_bits(header->handle);
 304	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
 305
 306	val = lower_32_bits(header->handle);
 307	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
 308	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
 309
 310	return header->cb_header->status;
 311}
 312
 313/**
 314 * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
 315 *
 316 * @ctx: The command buffer context to initialize
 317 */
 318static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
 319{
 320	INIT_LIST_HEAD(&ctx->hw_submitted);
 321	INIT_LIST_HEAD(&ctx->submitted);
 322	INIT_LIST_HEAD(&ctx->preempted);
 323	ctx->num_hw_submitted = 0;
 324}
 325
 326/**
 327 * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
 328 * context.
 329 *
 330 * @man: The command buffer manager.
 331 * @ctx: The command buffer context.
 332 *
 333 * Submits command buffers to hardware until there are no more command
 334 * buffers to submit or the hardware can't handle more command buffers.
 335 */
 336static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
 337				  struct vmw_cmdbuf_context *ctx)
 338{
 339	while (ctx->num_hw_submitted < man->max_hw_submitted &&
 340	       !list_empty(&ctx->submitted) &&
 341	       !ctx->block_submission) {
 342		struct vmw_cmdbuf_header *entry;
 343		SVGACBStatus status;
 344
 345		entry = list_first_entry(&ctx->submitted,
 346					 struct vmw_cmdbuf_header,
 347					 list);
 348
 349		status = vmw_cmdbuf_header_submit(entry);
 350
 351		/* This should never happen */
 352		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
 353			entry->cb_header->status = SVGA_CB_STATUS_NONE;
 354			break;
 355		}
 356
 357		list_del(&entry->list);
 358		list_add_tail(&entry->list, &ctx->hw_submitted);
 359		ctx->num_hw_submitted++;
 360	}
 361
 362}
 363
 364/**
 365 * vmw_cmdbuf_ctx_process: Process a command buffer context.
 366 *
 367 * @man: The command buffer manager.
 368 * @ctx: The command buffer context.
 369 *
 370 * Submit command buffers to hardware if possible, and process finished
 371 * buffers. Typically freeing them, but on preemption or error take
 372 * appropriate action. Wake up waiters if appropriate.
 373 */
 374static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
 375				   struct vmw_cmdbuf_context *ctx,
 376				   int *notempty)
 377{
 378	struct vmw_cmdbuf_header *entry, *next;
 379
 380	vmw_cmdbuf_ctx_submit(man, ctx);
 381
 382	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
 383		SVGACBStatus status = entry->cb_header->status;
 384
 385		if (status == SVGA_CB_STATUS_NONE)
 386			break;
 387
 388		list_del(&entry->list);
 389		wake_up_all(&man->idle_queue);
 390		ctx->num_hw_submitted--;
 391		switch (status) {
 392		case SVGA_CB_STATUS_COMPLETED:
 393			__vmw_cmdbuf_header_free(entry);
 394			break;
 395		case SVGA_CB_STATUS_COMMAND_ERROR:
 396			entry->cb_header->status = SVGA_CB_STATUS_NONE;
 397			list_add_tail(&entry->list, &man->error);
 398			schedule_work(&man->work);
 399			break;
 400		case SVGA_CB_STATUS_PREEMPTED:
 401			entry->cb_header->status = SVGA_CB_STATUS_NONE;
 402			list_add_tail(&entry->list, &ctx->preempted);
 403			break;
 404		case SVGA_CB_STATUS_CB_HEADER_ERROR:
 405			WARN_ONCE(true, "Command buffer header error.\n");
 406			__vmw_cmdbuf_header_free(entry);
 407			break;
 408		default:
 409			WARN_ONCE(true, "Undefined command buffer status.\n");
 410			__vmw_cmdbuf_header_free(entry);
 411			break;
 412		}
 413	}
 414
 415	vmw_cmdbuf_ctx_submit(man, ctx);
 416	if (!list_empty(&ctx->submitted))
 417		(*notempty)++;
 418}
 419
 420/**
 421 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 422 * switch on and off irqs as appropriate.
 423 *
 424 * @man: The command buffer manager.
 425 *
 426 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
  427 * command buffers left that are not submitted to hardware, make sure
 428 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 429 */
 430static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
 431{
 432	int notempty;
 433	struct vmw_cmdbuf_context *ctx;
 434	int i;
 435
 436retry:
 437	notempty = 0;
 438	for_each_cmdbuf_ctx(man, i, ctx)
 439		vmw_cmdbuf_ctx_process(man, ctx, &notempty);
 440
 441	if (man->irq_on && !notempty) {
 442		vmw_generic_waiter_remove(man->dev_priv,
 443					  SVGA_IRQFLAG_COMMAND_BUFFER,
 444					  &man->dev_priv->cmdbuf_waiters);
 445		man->irq_on = false;
 446	} else if (!man->irq_on && notempty) {
 447		vmw_generic_waiter_add(man->dev_priv,
 448				       SVGA_IRQFLAG_COMMAND_BUFFER,
 449				       &man->dev_priv->cmdbuf_waiters);
 450		man->irq_on = true;
 451
 452		/* Rerun in case we just missed an irq. */
 453		goto retry;
 454	}
 455}
 456
 457/**
 458 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 459 * command buffer context
 460 *
 461 * @man: The command buffer manager.
 462 * @header: The header of the buffer to submit.
 463 * @cb_context: The command buffer context to use.
 464 *
 465 * This function adds @header to the "submitted" queue of the command
 466 * buffer context identified by @cb_context. It then calls the command buffer
 467 * manager processing to potentially submit the buffer to hardware.
 468 * @man->lock needs to be held when calling this function.
 469 */
 470static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
 471			       struct vmw_cmdbuf_header *header,
 472			       SVGACBContext cb_context)
 473{
 474	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
 475		header->cb_header->dxContext = 0;
 476	header->cb_context = cb_context;
 477	list_add_tail(&header->list, &man->ctx[cb_context].submitted);
 478
 479	vmw_cmdbuf_man_process(man);
 480}
 481
 482/**
 483 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 484 * handler implemented as a threaded irq task.
 485 *
 486 * @man: Pointer to the command buffer manager.
 487 *
 488 * The bottom half of the interrupt handler simply calls into the
 489 * command buffer processor to free finished buffers and submit any
 490 * queued buffers to hardware.
 491 */
 492void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
 493{
 494	spin_lock(&man->lock);
 495	vmw_cmdbuf_man_process(man);
 496	spin_unlock(&man->lock);
 497}
 498
 499/**
 500 * vmw_cmdbuf_work_func - The deferred work function that handles
 501 * command buffer errors.
 502 *
 503 * @work: The work func closure argument.
 504 *
 505 * Restarting the command buffer context after an error requires process
 506 * context, so it is deferred to this work function.
 507 */
 508static void vmw_cmdbuf_work_func(struct work_struct *work)
 509{
 510	struct vmw_cmdbuf_man *man =
 511		container_of(work, struct vmw_cmdbuf_man, work);
 512	struct vmw_cmdbuf_header *entry, *next;
  513	uint32_t dummy;
     	int notempty_dummy = 0; /* vmw_cmdbuf_ctx_process() takes an int *. */
 514	bool restart[SVGA_CB_CONTEXT_MAX];
 515	bool send_fence = false;
 516	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
 517	int i;
 518	struct vmw_cmdbuf_context *ctx;
 519	bool global_block = false;
 520
 521	for_each_cmdbuf_ctx(man, i, ctx) {
 522		INIT_LIST_HEAD(&restart_head[i]);
 523		restart[i] = false;
 524	}
 525
 526	mutex_lock(&man->error_mutex);
 527	spin_lock(&man->lock);
 528	list_for_each_entry_safe(entry, next, &man->error, list) {
 529		SVGACBHeader *cb_hdr = entry->cb_header;
 530		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
 531			(entry->cmd + cb_hdr->errorOffset);
 532		u32 error_cmd_size, new_start_offset;
 533		const char *cmd_name;
 534
 535		list_del_init(&entry->list);
 536		restart[entry->cb_context] = true;
 537		global_block = true;
 538
 539		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
 540			DRM_ERROR("Unknown command causing device error.\n");
 541			DRM_ERROR("Command buffer offset is %lu\n",
 542				  (unsigned long) cb_hdr->errorOffset);
 543			__vmw_cmdbuf_header_free(entry);
 544			send_fence = true;
 545			continue;
 546		}
 547
 548		DRM_ERROR("Command \"%s\" causing device error.\n", cmd_name);
 549		DRM_ERROR("Command buffer offset is %lu\n",
 550			  (unsigned long) cb_hdr->errorOffset);
 551		DRM_ERROR("Command size is %lu\n",
 552			  (unsigned long) error_cmd_size);
 553
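		/*
		 * Skip past the failing command. If nothing remains in the
		 * buffer, free it; otherwise set the buffer up to be
		 * resubmitted starting at the next command.
		 */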
 554		new_start_offset = cb_hdr->errorOffset + error_cmd_size;
 555
 556		if (new_start_offset >= cb_hdr->length) {
 557			__vmw_cmdbuf_header_free(entry);
 558			send_fence = true;
 559			continue;
 560		}
 561
 562		if (man->using_mob)
 563			cb_hdr->ptr.mob.mobOffset += new_start_offset;
 564		else
 565			cb_hdr->ptr.pa += (u64) new_start_offset;
 566
 567		entry->cmd += new_start_offset;
 568		cb_hdr->length -= new_start_offset;
 569		cb_hdr->errorOffset = 0;
 570		cb_hdr->offset = 0;
 571
 572		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
 573	}
 574
 575	for_each_cmdbuf_ctx(man, i, ctx)
 576		man->ctx[i].block_submission = true;
 577
 578	spin_unlock(&man->lock);
 579
 580	/* Preempt all contexts */
 581	if (global_block && vmw_cmdbuf_preempt(man, 0))
 582		DRM_ERROR("Failed preempting command buffer contexts\n");
 583
 584	spin_lock(&man->lock);
 585	for_each_cmdbuf_ctx(man, i, ctx) {
 586		/* Move preempted command buffers to the preempted queue. */
  587		vmw_cmdbuf_ctx_process(man, ctx, &notempty_dummy);
 588
 589		/*
 590		 * Add the preempted queue after the command buffer
 591		 * that caused an error.
 592		 */
 593		list_splice_init(&ctx->preempted, restart_head[i].prev);
 594
 595		/*
  596		 * Finally, add the command buffers back at the head of the
  597		 * submitted queue, so that they are rerun first.
 598		 */
 599
 600		ctx->block_submission = false;
 601		list_splice_init(&restart_head[i], &ctx->submitted);
 602	}
 603
 604	vmw_cmdbuf_man_process(man);
 605	spin_unlock(&man->lock);
 606
 607	if (global_block && vmw_cmdbuf_startstop(man, 0, true))
 608		DRM_ERROR("Failed restarting command buffer contexts\n");
 609
 610	/* Send a new fence in case one was removed */
 611	if (send_fence) {
 612		vmw_fifo_send_fence(man->dev_priv, &dummy);
 613		wake_up_all(&man->idle_queue);
 614	}
 615
 616	mutex_unlock(&man->error_mutex);
 617}
 618
 619/**
  620 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 621 *
 622 * @man: The command buffer manager.
 623 * @check_preempted: Check also the preempted queue for pending command buffers.
  624 *
      * Returns true if the command buffer manager is idle, false otherwise.
 625 */
 626static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
 627				bool check_preempted)
 628{
 629	struct vmw_cmdbuf_context *ctx;
 630	bool idle = false;
 631	int i;
 632
 633	spin_lock(&man->lock);
 634	vmw_cmdbuf_man_process(man);
 635	for_each_cmdbuf_ctx(man, i, ctx) {
 636		if (!list_empty(&ctx->submitted) ||
 637		    !list_empty(&ctx->hw_submitted) ||
 638		    (check_preempted && !list_empty(&ctx->preempted)))
 639			goto out_unlock;
 640	}
 641
 642	idle = list_empty(&man->error);
 643
 644out_unlock:
 645	spin_unlock(&man->lock);
 646
 647	return idle;
 648}
 649
 650/**
 651 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 652 * command submissions
 653 *
 654 * @man: The command buffer manager.
 655 *
 656 * Flushes the current command buffer without allocating a new one. A new one
 657 * is automatically allocated when needed. Call with @man->cur_mutex held.
 658 */
 659static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
 660{
 661	struct vmw_cmdbuf_header *cur = man->cur;
 662
 663	WARN_ON(!mutex_is_locked(&man->cur_mutex));
 664
 665	if (!cur)
 666		return;
 667
 668	spin_lock(&man->lock);
 669	if (man->cur_pos == 0) {
 670		__vmw_cmdbuf_header_free(cur);
 671		goto out_unlock;
 672	}
 673
 674	man->cur->cb_header->length = man->cur_pos;
 675	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
 676out_unlock:
 677	spin_unlock(&man->lock);
 678	man->cur = NULL;
 679	man->cur_pos = 0;
 680}
 681
 682/**
 683 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 684 * command submissions
 685 *
 686 * @man: The command buffer manager.
  687 * @interruptible: Whether to sleep interruptible while waiting.
 688 *
 689 * Flushes the current command buffer without allocating a new one. A new one
 690 * is automatically allocated when needed.
 691 */
 692int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
 693			 bool interruptible)
 694{
 695	int ret = vmw_cmdbuf_cur_lock(man, interruptible);
 696
 697	if (ret)
 698		return ret;
 699
 700	__vmw_cmdbuf_cur_flush(man);
 701	vmw_cmdbuf_cur_unlock(man);
 702
 703	return 0;
 704}
 705
 706/**
 707 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 708 *
 709 * @man: The command buffer manager.
 710 * @interruptible: Sleep interruptible while waiting.
 711 * @timeout: Time out after this many ticks.
 712 *
 713 * Wait until the command buffer manager has processed all command buffers,
 714 * or until a timeout occurs. If a timeout occurs, the function will return
 715 * -EBUSY.
 716 */
 717int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
 718		    unsigned long timeout)
 719{
 720	int ret;
 721
 722	ret = vmw_cmdbuf_cur_flush(man, interruptible);
 723	vmw_generic_waiter_add(man->dev_priv,
 724			       SVGA_IRQFLAG_COMMAND_BUFFER,
 725			       &man->dev_priv->cmdbuf_waiters);
 726
 727	if (interruptible) {
 728		ret = wait_event_interruptible_timeout
 729			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
 730			 timeout);
 731	} else {
 732		ret = wait_event_timeout
 733			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
 734			 timeout);
 735	}
 736	vmw_generic_waiter_remove(man->dev_priv,
 737				  SVGA_IRQFLAG_COMMAND_BUFFER,
 738				  &man->dev_priv->cmdbuf_waiters);
 739	if (ret == 0) {
 740		if (!vmw_cmdbuf_man_idle(man, true))
 741			ret = -EBUSY;
 742		else
 743			ret = 0;
 744	}
 745	if (ret > 0)
 746		ret = 0;
 747
 748	return ret;
 749}
 750
 751/**
 752 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 753 *
 754 * @man: The command buffer manager.
 755 * @info: Allocation info. Will hold the size on entry and allocated mm node
 756 * on successful return.
 757 *
 758 * Try to allocate buffer space from the main pool. Returns true if succeeded.
 759 * If a fatal error was hit, the error code is returned in @info->ret.
 760 */
 761static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
 762				 struct vmw_cmdbuf_alloc_info *info)
 763{
 764	int ret;
 765
 766	if (info->done)
 767		return true;
 768 
 769	memset(info->node, 0, sizeof(*info->node));
 770	spin_lock(&man->lock);
 771	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 772	if (ret) {
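		/*
		 * Processing the contexts may retire finished command
		 * buffers and free up pool space, so retry the allocation
		 * once.
		 */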
 773		vmw_cmdbuf_man_process(man);
 774		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 775	}
 776
 777	spin_unlock(&man->lock);
 778	info->done = !ret;
 779
 780	return info->done;
 781}
 782
 783/**
 784 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 785 *
 786 * @man: The command buffer manager.
 787 * @node: Pointer to pre-allocated range-manager node.
 788 * @size: The size of the allocation.
 789 * @interruptible: Whether to sleep interruptible while waiting for space.
 790 *
 791 * This function allocates buffer space from the main pool, and if there is
  792 * no space currently available, it turns on IRQ handling and sleeps
  793 * waiting for space to become available.
 794 */
 795static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
 796				  struct drm_mm_node *node,
 797				  size_t size,
 798				  bool interruptible)
 799{
 800	struct vmw_cmdbuf_alloc_info info;
 801
 802	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
 803	info.node = node;
 804	info.done = false;
 805
 806	/*
 807	 * To prevent starvation of large requests, only one allocating call
 808	 * at a time waiting for space.
 809	 */
 810	if (interruptible) {
 811		if (mutex_lock_interruptible(&man->space_mutex))
 812			return -ERESTARTSYS;
 813	} else {
 814		mutex_lock(&man->space_mutex);
 815	}
 816
 817	/* Try to allocate space without waiting. */
 818	if (vmw_cmdbuf_try_alloc(man, &info))
 819		goto out_unlock;
 820
 821	vmw_generic_waiter_add(man->dev_priv,
 822			       SVGA_IRQFLAG_COMMAND_BUFFER,
 823			       &man->dev_priv->cmdbuf_waiters);
 824
 825	if (interruptible) {
 826		int ret;
 827
 828		ret = wait_event_interruptible
 829			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
 830		if (ret) {
 831			vmw_generic_waiter_remove
 832				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
 833				 &man->dev_priv->cmdbuf_waiters);
 834			mutex_unlock(&man->space_mutex);
 835			return ret;
 836		}
 837	} else {
 838		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
 839	}
 840	vmw_generic_waiter_remove(man->dev_priv,
 841				  SVGA_IRQFLAG_COMMAND_BUFFER,
 842				  &man->dev_priv->cmdbuf_waiters);
 843
 844out_unlock:
 845	mutex_unlock(&man->space_mutex);
 846
 847	return 0;
 848}
 849
 850/**
 851 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 852 * space from the main pool.
 853 *
 854 * @man: The command buffer manager.
 855 * @header: Pointer to the header to set up.
 856 * @size: The requested size of the buffer space.
 857 * @interruptible: Whether to sleep interruptible while waiting for space.
 858 */
 859static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
 860				 struct vmw_cmdbuf_header *header,
 861				 size_t size,
 862				 bool interruptible)
 863{
 864	SVGACBHeader *cb_hdr;
 865	size_t offset;
 866	int ret;
 867
 868	if (!man->has_pool)
 869		return -ENOMEM;
 870
  871	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
 872
 873	if (ret)
 874		return ret;
 875
 876	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
 877					    &header->handle);
 878	if (!header->cb_header) {
 879		ret = -ENOMEM;
 880		goto out_no_cb_header;
 881	}
 882
 883	header->size = header->node.size << PAGE_SHIFT;
 884	cb_hdr = header->cb_header;
 885	offset = header->node.start << PAGE_SHIFT;
 886	header->cmd = man->map + offset;
 887	if (man->using_mob) {
 888		cb_hdr->flags = SVGA_CB_FLAG_MOB;
 889		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
 890		cb_hdr->ptr.mob.mobOffset = offset;
 891	} else {
 892		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
 893	}
 894
 895	return 0;
 896
 897out_no_cb_header:
 898	spin_lock(&man->lock);
 899	drm_mm_remove_node(&header->node);
 900	spin_unlock(&man->lock);
 901
 902	return ret;
 903}
 904
 905/**
 906 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 907 * inline command buffer space.
 908 *
 909 * @man: The command buffer manager.
 910 * @header: Pointer to the header to set up.
 911 * @size: The requested size of the buffer space.
 912 */
 913static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
 914				   struct vmw_cmdbuf_header *header,
 915				   int size)
 916{
 917	struct vmw_cmdbuf_dheader *dheader;
 918	SVGACBHeader *cb_hdr;
 919
 920	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
 921		return -ENOMEM;
 922
 923	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
 924				  &header->handle);
 925	if (!dheader)
 926		return -ENOMEM;
 927
 928	header->inline_space = true;
 929	header->size = VMW_CMDBUF_INLINE_SIZE;
 930	cb_hdr = &dheader->cb_header;
 931	header->cb_header = cb_hdr;
 932	header->cmd = dheader->cmd;
 933	cb_hdr->status = SVGA_CB_STATUS_NONE;
 934	cb_hdr->flags = SVGA_CB_FLAG_NONE;
 935	cb_hdr->ptr.pa = (u64)header->handle +
 936		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
 937
 938	return 0;
 939}
 940
 941/**
 942 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 943 * command buffer space.
 944 *
 945 * @man: The command buffer manager.
 946 * @size: The requested size of the buffer space.
 947 * @interruptible: Whether to sleep interruptible while waiting for space.
 948 * @p_header: points to a header pointer to populate on successful return.
 949 *
 950 * Returns a pointer to command buffer space if successful. Otherwise
 951 * returns an error pointer. The header pointer returned in @p_header should
 952 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 953 */
 954void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
 955		       size_t size, bool interruptible,
 956		       struct vmw_cmdbuf_header **p_header)
 957{
 958	struct vmw_cmdbuf_header *header;
 959	int ret = 0;
 960
 961	*p_header = NULL;
 962
 963	header = kzalloc(sizeof(*header), GFP_KERNEL);
 964	if (!header)
 965		return ERR_PTR(-ENOMEM);
 966
 967	if (size <= VMW_CMDBUF_INLINE_SIZE)
 968		ret = vmw_cmdbuf_space_inline(man, header, size);
 969	else
 970		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
 971
 972	if (ret) {
 973		kfree(header);
 974		return ERR_PTR(ret);
 975	}
 976
 977	header->man = man;
 978	INIT_LIST_HEAD(&header->list);
 979	header->cb_header->status = SVGA_CB_STATUS_NONE;
 980	*p_header = header;
 981
 982	return header->cmd;
 983}
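
/*
 * A minimal usage sketch of the alloc/reserve/commit cycle; "my_size" and
 * "my_cmds" are hypothetical, and error handling is abbreviated:
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *cmd = vmw_cmdbuf_alloc(man, my_size, true, &header);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *
 *	cmd = vmw_cmdbuf_reserve(man, my_size, SVGA3D_INVALID_ID, true, header);
 *	memcpy(cmd, my_cmds, my_size);
 *	vmw_cmdbuf_commit(man, my_size, header, true);
 */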
 984
 985/**
 986 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 987 * command buffer.
 988 *
 989 * @man: The command buffer manager.
 990 * @size: The requested size of the commands.
  991 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 992 * @interruptible: Whether to sleep interruptible while waiting for space.
 993 *
 994 * Returns a pointer to command buffer space if successful. Otherwise
 995 * returns an error pointer.
 996 */
 997static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
 998				    size_t size,
 999				    int ctx_id,
1000				    bool interruptible)
1001{
1002	struct vmw_cmdbuf_header *cur;
1003	void *ret;
1004
1005	if (vmw_cmdbuf_cur_lock(man, interruptible))
1006		return ERR_PTR(-ERESTARTSYS);
1007
1008	cur = man->cur;
1009	if (cur && (size + man->cur_pos > cur->size ||
1010		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
1011		     ctx_id != cur->cb_header->dxContext)))
1012		__vmw_cmdbuf_cur_flush(man);
1013
1014	if (!man->cur) {
1015		ret = vmw_cmdbuf_alloc(man,
1016				       max_t(size_t, size, man->default_size),
1017				       interruptible, &man->cur);
1018		if (IS_ERR(ret)) {
1019			vmw_cmdbuf_cur_unlock(man);
1020			return ret;
1021		}
1022
1023		cur = man->cur;
1024	}
1025
1026	if (ctx_id != SVGA3D_INVALID_ID) {
1027		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
1028		cur->cb_header->dxContext = ctx_id;
1029	}
1030
1031	cur->reserved = size;
1032
1033	return (void *) (man->cur->cmd + man->cur_pos);
1034}
1035
1036/**
1037 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
1038 *
1039 * @man: The command buffer manager.
1040 * @size: The size of the commands actually written.
1041 * @flush: Whether to flush the command buffer immediately.
1042 */
1043static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
1044				  size_t size, bool flush)
1045{
1046	struct vmw_cmdbuf_header *cur = man->cur;
1047
1048	WARN_ON(!mutex_is_locked(&man->cur_mutex));
1049
1050	WARN_ON(size > cur->reserved);
1051	man->cur_pos += size;
1052	if (!size)
1053		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
1054	if (flush)
1055		__vmw_cmdbuf_cur_flush(man);
1056	vmw_cmdbuf_cur_unlock(man);
1057}
1058
1059/**
1060 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
1061 *
1062 * @man: The command buffer manager.
1063 * @size: The requested size of the commands.
 1064 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
1065 * @interruptible: Whether to sleep interruptible while waiting for space.
1066 * @header: Header of the command buffer. NULL if the current command buffer
1067 * should be used.
1068 *
1069 * Returns a pointer to command buffer space if successful. Otherwise
1070 * returns an error pointer.
1071 */
1072void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
1073			 int ctx_id, bool interruptible,
1074			 struct vmw_cmdbuf_header *header)
1075{
1076	if (!header)
1077		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
1078
1079	if (size > header->size)
1080		return ERR_PTR(-EINVAL);
1081
1082	if (ctx_id != SVGA3D_INVALID_ID) {
1083		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
1084		header->cb_header->dxContext = ctx_id;
1085	}
1086
1087	header->reserved = size;
1088	return header->cmd;
1089}
1090
1091/**
1092 * vmw_cmdbuf_commit - Commit commands in a command buffer.
1093 *
1094 * @man: The command buffer manager.
1095 * @size: The size of the commands actually written.
1096 * @header: Header of the command buffer. NULL if the current command buffer
1097 * should be used.
1098 * @flush: Whether to flush the command buffer immediately.
1099 */
1100void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1101		       struct vmw_cmdbuf_header *header, bool flush)
1102{
1103	if (!header) {
1104		vmw_cmdbuf_commit_cur(man, size, flush);
1105		return;
1106	}
1107
1108	(void) vmw_cmdbuf_cur_lock(man, false);
1109	__vmw_cmdbuf_cur_flush(man);
1110	WARN_ON(size > header->reserved);
1111	man->cur = header;
1112	man->cur_pos = size;
1113	if (!size)
1114		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
1115	if (flush)
1116		__vmw_cmdbuf_cur_flush(man);
1117	vmw_cmdbuf_cur_unlock(man);
1118}
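
/*
 * A minimal sketch of the "current buffer" path (@header == NULL), using a
 * hypothetical fixed-size command structure:
 *
 *	cmd = vmw_cmdbuf_reserve(man, sizeof(*cmd), SVGA3D_INVALID_ID,
 *				 false, NULL);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *
 *	... fill in *cmd ...
 *
 *	vmw_cmdbuf_commit(man, sizeof(*cmd), NULL, false);
 *
 * The commands are then submitted on the next flush of the current buffer.
 */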
1119
1120
1121/**
1122 * vmw_cmdbuf_send_device_command - Send a command through the device context.
1123 *
1124 * @man: The command buffer manager.
1125 * @command: Pointer to the command to send.
1126 * @size: Size of the command.
1127 *
1128 * Synchronously sends a device context command.
1129 */
1130static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
1131					  const void *command,
1132					  size_t size)
1133{
1134	struct vmw_cmdbuf_header *header;
1135	int status;
1136	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
1137
1138	if (IS_ERR(cmd))
1139		return PTR_ERR(cmd);
1140
1141	memcpy(cmd, command, size);
1142	header->cb_header->length = size;
1143	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
1144	spin_lock(&man->lock);
1145	status = vmw_cmdbuf_header_submit(header);
1146	spin_unlock(&man->lock);
1147	vmw_cmdbuf_header_free(header);
1148
1149	if (status != SVGA_CB_STATUS_COMPLETED) {
1150		DRM_ERROR("Device context command failed with status %d\n",
1151			  status);
1152		return -EINVAL;
1153	}
1154
1155	return 0;
1156}
1157
1158/**
1159 * vmw_cmdbuf_preempt - Send a preempt command through the device
1160 * context.
1161 *
 1162 * @man: The command buffer manager.
      * @context: Command buffer context number to preempt, relative to
      * SVGA_CB_CONTEXT_0.
1163 *
1164 * Synchronously sends a preempt command.
1165 */
1166static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
1167{
1168	struct {
1169		uint32 id;
1170		SVGADCCmdPreempt body;
1171	} __packed cmd;
1172
1173	cmd.id = SVGA_DC_CMD_PREEMPT;
1174	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
1175	cmd.body.ignoreIDZero = 0;
1176
1177	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1178}
1179
1180
1181/**
1182 * vmw_cmdbuf_startstop - Send a start / stop command through the device
1183 * context.
1184 *
 1185 * @man: The command buffer manager.
      * @context: Command buffer context number to start or stop, relative to
      * SVGA_CB_CONTEXT_0.
1186 * @enable: Whether to enable or disable the context.
1187 *
1188 * Synchronously sends a device start / stop context command.
1189 */
1190static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
1191				bool enable)
1192{
1193	struct {
1194		uint32 id;
1195		SVGADCCmdStartStop body;
1196	} __packed cmd;
1197
1198	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
1199	cmd.body.enable = (enable) ? 1 : 0;
1200	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
1201
1202	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1203}
1204
1205/**
1206 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
1207 *
1208 * @man: The command buffer manager.
1209 * @size: The size of the main space pool.
1210 * @default_size: The default size of the command buffer for small kernel
1211 * submissions.
1212 *
1213 * Set the size and allocate the main command buffer space pool,
1214 * as well as the default size of the command buffer for
1215 * small kernel submissions. If successful, this enables large command
1216 * submissions. Note that this function requires that rudimentary command
1217 * submission is already available and that the MOB memory manager is alive.
1218 * Returns 0 on success. Negative error code on failure.
1219 */
1220int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
1221			     size_t size, size_t default_size)
1222{
1223	struct vmw_private *dev_priv = man->dev_priv;
1224	bool dummy;
1225	int ret;
1226
1227	if (man->has_pool)
1228		return -EINVAL;
1229
1230	/* First, try to allocate a huge chunk of DMA memory */
1231	size = PAGE_ALIGN(size);
1232	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
1233				      &man->handle, GFP_KERNEL);
1234	if (man->map) {
1235		man->using_mob = false;
1236	} else {
1237		/*
1238		 * DMA memory failed. If we can have command buffers in a
1239		 * MOB, try to use that instead. Note that this will
1240		 * actually call into the already enabled manager, when
1241		 * binding the MOB.
1242		 */
1243		if (!(dev_priv->capabilities & SVGA_CAP_DX))
1244			return -ENOMEM;
1245
1246		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
1247				    &vmw_mob_ne_placement, 0, false,
1248				    &man->cmd_space);
1249		if (ret)
1250			return ret;
1251
1252		man->using_mob = true;
1253		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
1254				  &man->map_obj);
1255		if (ret)
1256			goto out_no_map;
1257
1258		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
1259	}
1260
1261	man->size = size;
1262	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
1263
1264	man->has_pool = true;
1265
1266	/*
1267	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
1268	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
1269	 * needs to wait for space and we block on further command
1270	 * submissions to be able to free up space.
1271	 */
1272	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1273	DRM_INFO("Using command buffers with %s pool.\n",
1274		 (man->using_mob) ? "MOB" : "DMA");
1275
1276	return 0;
1277
1278out_no_map:
1279	if (man->using_mob)
1280		ttm_bo_unref(&man->cmd_space);
1281
1282	return ret;
1283}
1284
1285/**
1286 * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
1287 * inline command buffer submissions only.
1288 *
1289 * @dev_priv: Pointer to device private structure.
1290 *
 1291 * Returns a pointer to a command buffer manager on success or error pointer
1292 * on failure. The command buffer manager will be enabled for submissions of
1293 * size VMW_CMDBUF_INLINE_SIZE only.
1294 */
1295struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
1296{
1297	struct vmw_cmdbuf_man *man;
1298	struct vmw_cmdbuf_context *ctx;
1299	unsigned int i;
1300	int ret;
1301
1302	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
1303		return ERR_PTR(-ENOSYS);
1304
1305	man = kzalloc(sizeof(*man), GFP_KERNEL);
1306	if (!man)
1307		return ERR_PTR(-ENOMEM);
1308
1309	man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
1310		2 : 1;
1311	man->headers = dma_pool_create("vmwgfx cmdbuf",
1312				       &dev_priv->dev->pdev->dev,
1313				       sizeof(SVGACBHeader),
1314				       64, PAGE_SIZE);
1315	if (!man->headers) {
1316		ret = -ENOMEM;
1317		goto out_no_pool;
1318	}
1319
1320	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
1321					&dev_priv->dev->pdev->dev,
1322					sizeof(struct vmw_cmdbuf_dheader),
1323					64, PAGE_SIZE);
1324	if (!man->dheaders) {
1325		ret = -ENOMEM;
1326		goto out_no_dpool;
1327	}
1328
1329	for_each_cmdbuf_ctx(man, i, ctx)
1330		vmw_cmdbuf_ctx_init(ctx);
1331
1332	INIT_LIST_HEAD(&man->error);
1333	spin_lock_init(&man->lock);
1334	mutex_init(&man->cur_mutex);
1335	mutex_init(&man->space_mutex);
1336	mutex_init(&man->error_mutex);
1337	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1338	init_waitqueue_head(&man->alloc_queue);
1339	init_waitqueue_head(&man->idle_queue);
1340	man->dev_priv = dev_priv;
1341	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
1342	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
1343	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
1344			       &dev_priv->error_waiters);
1345	ret = vmw_cmdbuf_startstop(man, 0, true);
1346	if (ret) {
1347		DRM_ERROR("Failed starting command buffer contexts\n");
1348		vmw_cmdbuf_man_destroy(man);
1349		return ERR_PTR(ret);
1350	}
1351
1352	return man;
1353
1354out_no_dpool:
1355	dma_pool_destroy(man->headers);
1356out_no_pool:
1357	kfree(man);
1358
1359	return ERR_PTR(ret);
1360}
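
/*
 * Typical manager lifecycle, sketched; the pool size passed here is
 * illustrative only:
 *
 *	struct vmw_cmdbuf_man *man = vmw_cmdbuf_man_create(dev_priv);
 *
 *	if (IS_ERR(man))
 *		return PTR_ERR(man);
 *	ret = vmw_cmdbuf_set_pool_size(man, 2*1024*1024,
 *				       VMW_CMDBUF_INLINE_SIZE);
 *	...
 *	vmw_cmdbuf_remove_pool(man);
 *	vmw_cmdbuf_man_destroy(man);
 */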
1361
1362/**
1363 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
1364 *
1365 * @man: Pointer to a command buffer manager.
1366 *
1367 * This function removes the main buffer space pool, and should be called
1368 * before MOB memory management is removed. When this function has been called,
1369 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
1370 * less are allowed, and the default size of the command buffer for small kernel
1371 * submissions is also set to this size.
1372 */
1373void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
1374{
1375	if (!man->has_pool)
1376		return;
1377
1378	man->has_pool = false;
1379	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1380	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
1381	if (man->using_mob) {
1382		(void) ttm_bo_kunmap(&man->map_obj);
1383		ttm_bo_unref(&man->cmd_space);
1384	} else {
1385		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
1386				  man->size, man->map, man->handle);
1387	}
1388}
1389
1390/**
1391 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
1392 *
1393 * @man: Pointer to a command buffer manager.
1394 *
1395 * This function idles and then destroys a command buffer manager.
1396 */
1397void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
1398{
1399	WARN_ON_ONCE(man->has_pool);
1400	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
1401
1402	if (vmw_cmdbuf_startstop(man, 0, false))
1403		DRM_ERROR("Failed stopping command buffer contexts.\n");
1404
1405	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
1406				  &man->dev_priv->error_waiters);
1407	(void) cancel_work_sync(&man->work);
1408	dma_pool_destroy(man->dheaders);
1409	dma_pool_destroy(man->headers);
1410	mutex_destroy(&man->cur_mutex);
1411	mutex_destroy(&man->space_mutex);
1412	mutex_destroy(&man->error_mutex);
1413	kfree(man);
1414}