   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2015-2023 VMware, Inc., Palo Alto, CA., USA
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include "vmwgfx_bo.h"
  29#include "vmwgfx_drv.h"
  30
  31#include <drm/ttm/ttm_bo.h>
  32
  33#include <linux/dmapool.h>
  34#include <linux/pci.h>
  35
  36/*
   37 * Size of inline command buffers. Try to make sure that the page size is
   38 * a multiple of the DMA pool allocation size.
  39 */
  40#define VMW_CMDBUF_INLINE_ALIGN 64
  41#define VMW_CMDBUF_INLINE_SIZE \
  42	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
  43
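/*
 * Worked example of the packing arithmetic above (a sketch; assumes
 * sizeof(SVGACBHeader) is at most 64 bytes and 4 KiB pages): ALIGN() rounds
 * the header up to VMW_CMDBUF_INLINE_ALIGN, so VMW_CMDBUF_INLINE_SIZE is
 * 1024 - 64 = 960 bytes and a struct vmw_cmdbuf_dheader (header plus inline
 * space) occupies exactly 1024 bytes. Four such allocations then pack into
 * each 4096-byte page of the "dheaders" DMA pool without straddling a page
 * boundary.
 */
static_assert(VMW_CMDBUF_INLINE_SIZE + VMW_CMDBUF_INLINE_ALIGN == 1024,
	      "inline command buffers should pack into 1024-byte slots");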
  44/**
  45 * struct vmw_cmdbuf_context - Command buffer context queues
  46 *
  47 * @submitted: List of command buffers that have been submitted to the
  48 * manager but not yet submitted to hardware.
  49 * @hw_submitted: List of command buffers submitted to hardware.
  50 * @preempted: List of preempted command buffers.
   51 * @num_hw_submitted: Number of buffers currently being processed by hardware.
   52 * @block_submission: Whether command submission to hardware is currently blocked.
  53 */
  54struct vmw_cmdbuf_context {
  55	struct list_head submitted;
  56	struct list_head hw_submitted;
  57	struct list_head preempted;
  58	unsigned num_hw_submitted;
  59	bool block_submission;
  60};
  61
  62/**
  63 * struct vmw_cmdbuf_man - Command buffer manager
  64 *
  65 * @cur_mutex: Mutex protecting the command buffer used for incremental small
  66 * kernel command submissions, @cur.
  67 * @space_mutex: Mutex to protect against starvation when we allocate
  68 * main pool buffer space.
  69 * @error_mutex: Mutex to serialize the work queue error handling.
  70 * Note this is not needed if the same workqueue handler
  71 * can't race with itself...
   72 * @work: A struct work_struct implementing command buffer error handling.
  73 * Immutable.
  74 * @dev_priv: Pointer to the device private struct. Immutable.
  75 * @ctx: Array of command buffer context queues. The queues and the context
   76 * data are protected by @lock.
  77 * @error: List of command buffers that have caused device errors.
  78 * Protected by @lock.
  79 * @mm: Range manager for the command buffer space. Manager allocations and
  80 * frees are protected by @lock.
  81 * @cmd_space: Buffer object for the command buffer space, unless we were
   82 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
  83 * @map: Pointer to command buffer space. May be a mapped buffer object or
   84 * a contiguous coherent DMA memory allocation. Immutable.
  85 * @cur: Command buffer for small kernel command submissions. Protected by
  86 * the @cur_mutex.
  87 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
  88 * @default_size: Default size for the @cur command buffer. Immutable.
  89 * @max_hw_submitted: Max number of in-flight command buffers the device can
  90 * handle. Immutable.
  91 * @lock: Spinlock protecting command submission queues.
  92 * @headers: Pool of DMA memory for device command buffer headers.
  93 * Internal protection.
  94 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
  95 * space for inline data. Internal protection.
  96 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
  97 * space.
  98 * @idle_queue: Wait queue for processes waiting for command buffer idle.
  99 * @irq_on: Whether the process function has requested irq to be turned on.
 100 * Protected by @lock.
  101 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 102 * allocation. Immutable.
 103 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 104 * Typically this is false only during bootstrap.
 105 * @handle: DMA address handle for the command buffer space if @using_mob is
 106 * false. Immutable.
 107 * @size: The size of the command buffer space. Immutable.
 108 * @num_contexts: Number of contexts actually enabled.
 109 */
 110struct vmw_cmdbuf_man {
 111	struct mutex cur_mutex;
 112	struct mutex space_mutex;
 113	struct mutex error_mutex;
 114	struct work_struct work;
 115	struct vmw_private *dev_priv;
 116	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
 117	struct list_head error;
 118	struct drm_mm mm;
 119	struct vmw_bo *cmd_space;
 120	u8 *map;
 121	struct vmw_cmdbuf_header *cur;
 122	size_t cur_pos;
 123	size_t default_size;
 124	unsigned max_hw_submitted;
 125	spinlock_t lock;
 126	struct dma_pool *headers;
 127	struct dma_pool *dheaders;
 128	wait_queue_head_t alloc_queue;
 129	wait_queue_head_t idle_queue;
 130	bool irq_on;
 131	bool using_mob;
 132	bool has_pool;
 133	dma_addr_t handle;
 134	size_t size;
 135	u32 num_contexts;
 136};
 137
 138/**
 139 * struct vmw_cmdbuf_header - Command buffer metadata
 140 *
 141 * @man: The command buffer manager.
 142 * @cb_header: Device command buffer header, allocated from a DMA pool.
 143 * @cb_context: The device command buffer context.
 144 * @list: List head for attaching to the manager lists.
 145 * @node: The range manager node.
 146 * @handle: The DMA address of @cb_header. Handed to the device on command
 147 * buffer submission.
 148 * @cmd: Pointer to the command buffer space of this buffer.
 149 * @size: Size of the command buffer space of this buffer.
 150 * @reserved: Reserved space of this buffer.
 151 * @inline_space: Whether inline command buffer space is used.
 152 */
 153struct vmw_cmdbuf_header {
 154	struct vmw_cmdbuf_man *man;
 155	SVGACBHeader *cb_header;
 156	SVGACBContext cb_context;
 157	struct list_head list;
 158	struct drm_mm_node node;
 159	dma_addr_t handle;
 160	u8 *cmd;
 161	size_t size;
 162	size_t reserved;
 163	bool inline_space;
 164};
 165
 166/**
 167 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 168 * command buffer space.
 169 *
 170 * @cb_header: Device command buffer header.
 171 * @cmd: Inline command buffer space.
 172 */
 173struct vmw_cmdbuf_dheader {
 174	SVGACBHeader cb_header;
 175	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
 176};
 177
 178/**
 179 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 180 *
 181 * @page_size: Size of requested command buffer space in pages.
 182 * @node: Pointer to the range manager node.
 183 * @done: True if this allocation has succeeded.
 184 */
 185struct vmw_cmdbuf_alloc_info {
 186	size_t page_size;
 187	struct drm_mm_node *node;
 188	bool done;
 189};
 190
 191/* Loop over each context in the command buffer manager. */
 192#define for_each_cmdbuf_ctx(_man, _i, _ctx)				\
 193	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
 194	     ++(_i), ++(_ctx))
 195
 196static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
 197				bool enable);
 198static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
 199
 200/**
 201 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 202 *
  203 * @man: The command buffer manager.
  204 * @interruptible: Whether to wait interruptibly when locking.
 205 */
 206static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
 207{
 208	if (interruptible) {
 209		if (mutex_lock_interruptible(&man->cur_mutex))
 210			return -ERESTARTSYS;
 211	} else {
 212		mutex_lock(&man->cur_mutex);
 213	}
 214
 215	return 0;
 216}
 217
 218/**
 219 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 220 *
  221 * @man: The command buffer manager.
 222 */
 223static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
 224{
 225	mutex_unlock(&man->cur_mutex);
 226}
 227
 228/**
 229 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 230 * been used for the device context with inline command buffers.
  231 * Need not be called with the manager lock held.
 232 *
 233 * @header: Pointer to the header to free.
 234 */
 235static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
 236{
 237	struct vmw_cmdbuf_dheader *dheader;
 238
 239	if (WARN_ON_ONCE(!header->inline_space))
 240		return;
 241
 242	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
 243			       cb_header);
 244	dma_pool_free(header->man->dheaders, dheader, header->handle);
 245	kfree(header);
 246}
 247
 248/**
  249 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 250 * associated structures.
 251 *
 252 * @header: Pointer to the header to free.
 253 *
 254 * For internal use. Must be called with man::lock held.
 255 */
 256static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 257{
 258	struct vmw_cmdbuf_man *man = header->man;
 259
 260	lockdep_assert_held_once(&man->lock);
 261
 262	if (header->inline_space) {
 263		vmw_cmdbuf_header_inline_free(header);
 264		return;
 265	}
 266
 267	drm_mm_remove_node(&header->node);
 268	wake_up_all(&man->alloc_queue);
 269	if (header->cb_header)
 270		dma_pool_free(man->headers, header->cb_header,
 271			      header->handle);
 272	kfree(header);
 273}
 274
 275/**
  276 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 277 * associated structures.
 278 *
 279 * @header: Pointer to the header to free.
 280 */
 281void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 282{
 283	struct vmw_cmdbuf_man *man = header->man;
 284
 285	/* Avoid locking if inline_space */
 286	if (header->inline_space) {
 287		vmw_cmdbuf_header_inline_free(header);
 288		return;
 289	}
 290	spin_lock(&man->lock);
 291	__vmw_cmdbuf_header_free(header);
 292	spin_unlock(&man->lock);
 293}
 294
 295
 296/**
  297 * vmw_cmdbuf_header_submit - Submit a command buffer to hardware.
 298 *
 299 * @header: The header of the buffer to submit.
 300 */
 301static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
 302{
 303	struct vmw_cmdbuf_man *man = header->man;
 304	u32 val;
 305
 306	val = upper_32_bits(header->handle);
 307	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
 308
 309	val = lower_32_bits(header->handle);
 310	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
 311	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
 312
 313	return header->cb_header->status;
 314}
 315
 316/**
  317 * vmw_cmdbuf_ctx_init - Initialize a command buffer context.
 318 *
 319 * @ctx: The command buffer context to initialize
 320 */
 321static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
 322{
 323	INIT_LIST_HEAD(&ctx->hw_submitted);
 324	INIT_LIST_HEAD(&ctx->submitted);
 325	INIT_LIST_HEAD(&ctx->preempted);
 326	ctx->num_hw_submitted = 0;
 327}
 328
 329/**
  330 * vmw_cmdbuf_ctx_submit - Submit command buffers from a command buffer
 331 * context.
 332 *
 333 * @man: The command buffer manager.
 334 * @ctx: The command buffer context.
 335 *
 336 * Submits command buffers to hardware until there are no more command
 337 * buffers to submit or the hardware can't handle more command buffers.
 338 */
 339static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
 340				  struct vmw_cmdbuf_context *ctx)
 341{
 342	while (ctx->num_hw_submitted < man->max_hw_submitted &&
 343	       !list_empty(&ctx->submitted) &&
 344	       !ctx->block_submission) {
 345		struct vmw_cmdbuf_header *entry;
 346		SVGACBStatus status;
 347
 348		entry = list_first_entry(&ctx->submitted,
 349					 struct vmw_cmdbuf_header,
 350					 list);
 351
 352		status = vmw_cmdbuf_header_submit(entry);
 353
 354		/* This should never happen */
 355		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
 356			entry->cb_header->status = SVGA_CB_STATUS_NONE;
 357			break;
 358		}
 359
 360		list_move_tail(&entry->list, &ctx->hw_submitted);
 361		ctx->num_hw_submitted++;
 362	}
 363
 364}
 365
 366/**
 367 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 368 *
 369 * @man: The command buffer manager.
 370 * @ctx: The command buffer context.
  371 * @notempty: Incremented for each context whose submitted list is non-empty.
 372 *
 373 * Submit command buffers to hardware if possible, and process finished
  374 * buffers, typically freeing them, but take appropriate action on
  375 * preemption or error. Wake up waiters if appropriate.
 376 */
 377static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
 378				   struct vmw_cmdbuf_context *ctx,
 379				   int *notempty)
 380{
 381	struct vmw_cmdbuf_header *entry, *next;
 382
 383	vmw_cmdbuf_ctx_submit(man, ctx);
 384
 385	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
 386		SVGACBStatus status = entry->cb_header->status;
 387
 388		if (status == SVGA_CB_STATUS_NONE)
 389			break;
 390
 391		list_del(&entry->list);
 392		wake_up_all(&man->idle_queue);
 393		ctx->num_hw_submitted--;
 394		switch (status) {
 395		case SVGA_CB_STATUS_COMPLETED:
 396			__vmw_cmdbuf_header_free(entry);
 397			break;
 398		case SVGA_CB_STATUS_COMMAND_ERROR:
 399			WARN_ONCE(true, "Command buffer error.\n");
 400			entry->cb_header->status = SVGA_CB_STATUS_NONE;
 401			list_add_tail(&entry->list, &man->error);
 402			schedule_work(&man->work);
 403			break;
 404		case SVGA_CB_STATUS_PREEMPTED:
 405			entry->cb_header->status = SVGA_CB_STATUS_NONE;
 406			list_add_tail(&entry->list, &ctx->preempted);
 407			break;
 408		case SVGA_CB_STATUS_CB_HEADER_ERROR:
 409			WARN_ONCE(true, "Command buffer header error.\n");
 410			__vmw_cmdbuf_header_free(entry);
 411			break;
 412		default:
 413			WARN_ONCE(true, "Undefined command buffer status.\n");
 414			__vmw_cmdbuf_header_free(entry);
 415			break;
 416		}
 417	}
 418
 419	vmw_cmdbuf_ctx_submit(man, ctx);
 420	if (!list_empty(&ctx->submitted))
 421		(*notempty)++;
 422}
 423
 424/**
 425 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 426 * switch on and off irqs as appropriate.
 427 *
 428 * @man: The command buffer manager.
 429 *
 430 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
  431 * command buffers left that are not submitted to hardware, make sure
 432 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 433 */
 434static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
 435{
 436	int notempty;
 437	struct vmw_cmdbuf_context *ctx;
 438	int i;
 439
 440retry:
 441	notempty = 0;
 442	for_each_cmdbuf_ctx(man, i, ctx)
 443		vmw_cmdbuf_ctx_process(man, ctx, &notempty);
 444
 445	if (man->irq_on && !notempty) {
 446		vmw_generic_waiter_remove(man->dev_priv,
 447					  SVGA_IRQFLAG_COMMAND_BUFFER,
 448					  &man->dev_priv->cmdbuf_waiters);
 449		man->irq_on = false;
 450	} else if (!man->irq_on && notempty) {
 451		vmw_generic_waiter_add(man->dev_priv,
 452				       SVGA_IRQFLAG_COMMAND_BUFFER,
 453				       &man->dev_priv->cmdbuf_waiters);
 454		man->irq_on = true;
 455
 456		/* Rerun in case we just missed an irq. */
 457		goto retry;
 458	}
 459}
 460
 461/**
 462 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 463 * command buffer context
 464 *
 465 * @man: The command buffer manager.
 466 * @header: The header of the buffer to submit.
 467 * @cb_context: The command buffer context to use.
 468 *
 469 * This function adds @header to the "submitted" queue of the command
 470 * buffer context identified by @cb_context. It then calls the command buffer
 471 * manager processing to potentially submit the buffer to hardware.
 472 * @man->lock needs to be held when calling this function.
 473 */
 474static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
 475			       struct vmw_cmdbuf_header *header,
 476			       SVGACBContext cb_context)
 477{
 478	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
 479		header->cb_header->dxContext = 0;
 480	header->cb_context = cb_context;
 481	list_add_tail(&header->list, &man->ctx[cb_context].submitted);
 482
 483	vmw_cmdbuf_man_process(man);
 484}
 485
 486/**
 487 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 488 * handler implemented as a threaded irq task.
 489 *
 490 * @man: Pointer to the command buffer manager.
 491 *
 492 * The bottom half of the interrupt handler simply calls into the
 493 * command buffer processor to free finished buffers and submit any
 494 * queued buffers to hardware.
 495 */
 496void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
 497{
 498	spin_lock(&man->lock);
 499	vmw_cmdbuf_man_process(man);
 500	spin_unlock(&man->lock);
 501}
 502
 503/**
 504 * vmw_cmdbuf_work_func - The deferred work function that handles
 505 * command buffer errors.
 506 *
 507 * @work: The work func closure argument.
 508 *
 509 * Restarting the command buffer context after an error requires process
 510 * context, so it is deferred to this work function.
 511 */
 512static void vmw_cmdbuf_work_func(struct work_struct *work)
 513{
 514	struct vmw_cmdbuf_man *man =
 515		container_of(work, struct vmw_cmdbuf_man, work);
 516	struct vmw_cmdbuf_header *entry, *next;
 517	uint32_t dummy = 0;
 518	bool send_fence = false;
 519	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
 520	int i;
 521	struct vmw_cmdbuf_context *ctx;
 522	bool global_block = false;
 523
 524	for_each_cmdbuf_ctx(man, i, ctx)
 525		INIT_LIST_HEAD(&restart_head[i]);
 526
 527	mutex_lock(&man->error_mutex);
 528	spin_lock(&man->lock);
 529	list_for_each_entry_safe(entry, next, &man->error, list) {
 530		SVGACBHeader *cb_hdr = entry->cb_header;
 531		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
 532			(entry->cmd + cb_hdr->errorOffset);
 533		u32 error_cmd_size, new_start_offset;
 534		const char *cmd_name;
 535
 536		list_del_init(&entry->list);
 537		global_block = true;
 538
 539		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
 540			VMW_DEBUG_USER("Unknown command causing device error.\n");
 541			VMW_DEBUG_USER("Command buffer offset is %lu\n",
 542				       (unsigned long) cb_hdr->errorOffset);
 543			__vmw_cmdbuf_header_free(entry);
 544			send_fence = true;
 545			continue;
 546		}
 547
 548		VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
 549			       cmd_name);
 550		VMW_DEBUG_USER("Command buffer offset is %lu\n",
 551			       (unsigned long) cb_hdr->errorOffset);
 552		VMW_DEBUG_USER("Command size is %lu\n",
 553			       (unsigned long) error_cmd_size);
 554
 555		new_start_offset = cb_hdr->errorOffset + error_cmd_size;
 556
 557		if (new_start_offset >= cb_hdr->length) {
 558			__vmw_cmdbuf_header_free(entry);
 559			send_fence = true;
 560			continue;
 561		}
 562
 563		if (man->using_mob)
 564			cb_hdr->ptr.mob.mobOffset += new_start_offset;
 565		else
 566			cb_hdr->ptr.pa += (u64) new_start_offset;
 567
 568		entry->cmd += new_start_offset;
 569		cb_hdr->length -= new_start_offset;
 570		cb_hdr->errorOffset = 0;
 571		cb_hdr->offset = 0;
 572
 573		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
 574	}
 575
 576	for_each_cmdbuf_ctx(man, i, ctx)
 577		man->ctx[i].block_submission = true;
 578
 579	spin_unlock(&man->lock);
 580
 581	/* Preempt all contexts */
 582	if (global_block && vmw_cmdbuf_preempt(man, 0))
 583		DRM_ERROR("Failed preempting command buffer contexts\n");
 584
 585	spin_lock(&man->lock);
 586	for_each_cmdbuf_ctx(man, i, ctx) {
 587		/* Move preempted command buffers to the preempted queue. */
 588		vmw_cmdbuf_ctx_process(man, ctx, &dummy);
 589
 590		/*
 591		 * Add the preempted queue after the command buffer
 592		 * that caused an error.
 593		 */
 594		list_splice_init(&ctx->preempted, restart_head[i].prev);
 595
 596		/*
  597		 * Finally, add all command buffers back at the head of the
  598		 * submitted queue, to rerun them.
 599		 */
 600
 601		ctx->block_submission = false;
 602		list_splice_init(&restart_head[i], &ctx->submitted);
 603	}
 604
 605	vmw_cmdbuf_man_process(man);
 606	spin_unlock(&man->lock);
 607
 608	if (global_block && vmw_cmdbuf_startstop(man, 0, true))
 609		DRM_ERROR("Failed restarting command buffer contexts\n");
 610
 611	/* Send a new fence in case one was removed */
 612	if (send_fence) {
 613		vmw_cmd_send_fence(man->dev_priv, &dummy);
 614		wake_up_all(&man->idle_queue);
 615	}
 616
 617	mutex_unlock(&man->error_mutex);
 618}
 619
 620/**
 621 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 622 *
 623 * @man: The command buffer manager.
 624 * @check_preempted: Check also the preempted queue for pending command buffers.
 625 *
 626 */
 627static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
 628				bool check_preempted)
 629{
 630	struct vmw_cmdbuf_context *ctx;
 631	bool idle = false;
 632	int i;
 633
 634	spin_lock(&man->lock);
 635	vmw_cmdbuf_man_process(man);
 636	for_each_cmdbuf_ctx(man, i, ctx) {
 637		if (!list_empty(&ctx->submitted) ||
 638		    !list_empty(&ctx->hw_submitted) ||
 639		    (check_preempted && !list_empty(&ctx->preempted)))
 640			goto out_unlock;
 641	}
 642
 643	idle = list_empty(&man->error);
 644
 645out_unlock:
 646	spin_unlock(&man->lock);
 647
 648	return idle;
 649}
 650
 651/**
 652 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 653 * command submissions
 654 *
 655 * @man: The command buffer manager.
 656 *
 657 * Flushes the current command buffer without allocating a new one. A new one
 658 * is automatically allocated when needed. Call with @man->cur_mutex held.
 659 */
 660static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
 661{
 662	struct vmw_cmdbuf_header *cur = man->cur;
 663
 664	lockdep_assert_held_once(&man->cur_mutex);
 665
 666	if (!cur)
 667		return;
 668
 669	spin_lock(&man->lock);
 670	if (man->cur_pos == 0) {
 671		__vmw_cmdbuf_header_free(cur);
 672		goto out_unlock;
 673	}
 674
 675	man->cur->cb_header->length = man->cur_pos;
 676	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
 677out_unlock:
 678	spin_unlock(&man->lock);
 679	man->cur = NULL;
 680	man->cur_pos = 0;
 681}
 682
 683/**
 684 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 685 * command submissions
 686 *
 687 * @man: The command buffer manager.
  688 * @interruptible: Whether to sleep interruptibly when waiting.
 689 *
 690 * Flushes the current command buffer without allocating a new one. A new one
 691 * is automatically allocated when needed.
 692 */
 693int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
 694			 bool interruptible)
 695{
 696	int ret = vmw_cmdbuf_cur_lock(man, interruptible);
 697
 698	if (ret)
 699		return ret;
 700
 701	__vmw_cmdbuf_cur_flush(man);
 702	vmw_cmdbuf_cur_unlock(man);
 703
 704	return 0;
 705}
 706
 707/**
 708 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 709 *
 710 * @man: The command buffer manager.
  711 * @interruptible: Sleep interruptibly while waiting.
  712 * @timeout: Time out after this many jiffies.
 713 *
 714 * Wait until the command buffer manager has processed all command buffers,
 715 * or until a timeout occurs. If a timeout occurs, the function will return
 716 * -EBUSY.
 717 */
 718int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
 719		    unsigned long timeout)
 720{
 721	int ret;
 722
 723	ret = vmw_cmdbuf_cur_flush(man, interruptible);
 724	vmw_generic_waiter_add(man->dev_priv,
 725			       SVGA_IRQFLAG_COMMAND_BUFFER,
 726			       &man->dev_priv->cmdbuf_waiters);
 727
 728	if (interruptible) {
 729		ret = wait_event_interruptible_timeout
 730			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
 731			 timeout);
 732	} else {
 733		ret = wait_event_timeout
 734			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
 735			 timeout);
 736	}
 737	vmw_generic_waiter_remove(man->dev_priv,
 738				  SVGA_IRQFLAG_COMMAND_BUFFER,
 739				  &man->dev_priv->cmdbuf_waiters);
 740	if (ret == 0) {
 741		if (!vmw_cmdbuf_man_idle(man, true))
 742			ret = -EBUSY;
 743		else
 744			ret = 0;
 745	}
 746	if (ret > 0)
 747		ret = 0;
 748
 749	return ret;
 750}
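/*
 * Usage sketch (illustrative; vmw_cmdbuf_quiesce_sketch is a hypothetical
 * helper): quiesce the manager with a bounded, non-interruptible wait,
 * mirroring the 10-second timeout used by vmw_cmdbuf_remove_pool() and
 * vmw_cmdbuf_man_destroy() below.
 */
static void vmw_cmdbuf_quiesce_sketch(struct vmw_cmdbuf_man *man)
{
	if (vmw_cmdbuf_idle(man, false, 10 * HZ))
		DRM_ERROR("Command buffer manager failed to become idle.\n");
}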
 751
 752/**
 753 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 754 *
 755 * @man: The command buffer manager.
 756 * @info: Allocation info. Will hold the size on entry and allocated mm node
 757 * on successful return.
 758 *
  759 * Try to allocate buffer space from the main pool. Returns true on success,
  760 * in which case @info->node holds the allocated node and @info->done is set.
 761 */
 762static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
 763				 struct vmw_cmdbuf_alloc_info *info)
 764{
 765	int ret;
 766
 767	if (info->done)
 768		return true;
 769
 770	memset(info->node, 0, sizeof(*info->node));
 771	spin_lock(&man->lock);
 772	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 773	if (ret) {
 774		vmw_cmdbuf_man_process(man);
 775		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 776	}
 777
 778	spin_unlock(&man->lock);
 779	info->done = !ret;
 780
 781	return info->done;
 782}
 783
 784/**
 785 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 786 *
 787 * @man: The command buffer manager.
 788 * @node: Pointer to pre-allocated range-manager node.
 789 * @size: The size of the allocation.
  790 * @interruptible: Whether to sleep interruptibly while waiting for space.
 791 *
 792 * This function allocates buffer space from the main pool, and if there is
  793 * no space available at the moment, it turns on IRQ handling and sleeps
  794 * waiting for space to become available.
 795 */
 796static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
 797				  struct drm_mm_node *node,
 798				  size_t size,
 799				  bool interruptible)
 800{
 801	struct vmw_cmdbuf_alloc_info info;
 802
 803	info.page_size = PFN_UP(size);
 804	info.node = node;
 805	info.done = false;
 806
 807	/*
 808	 * To prevent starvation of large requests, only one allocating call
  809	 * at a time is allowed to wait for space.
 810	 */
 811	if (interruptible) {
 812		if (mutex_lock_interruptible(&man->space_mutex))
 813			return -ERESTARTSYS;
 814	} else {
 815		mutex_lock(&man->space_mutex);
 816	}
 817
 818	/* Try to allocate space without waiting. */
 819	if (vmw_cmdbuf_try_alloc(man, &info))
 820		goto out_unlock;
 821
 822	vmw_generic_waiter_add(man->dev_priv,
 823			       SVGA_IRQFLAG_COMMAND_BUFFER,
 824			       &man->dev_priv->cmdbuf_waiters);
 825
 826	if (interruptible) {
 827		int ret;
 828
 829		ret = wait_event_interruptible
 830			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
 831		if (ret) {
 832			vmw_generic_waiter_remove
 833				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
 834				 &man->dev_priv->cmdbuf_waiters);
 835			mutex_unlock(&man->space_mutex);
 836			return ret;
 837		}
 838	} else {
 839		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
 840	}
 841	vmw_generic_waiter_remove(man->dev_priv,
 842				  SVGA_IRQFLAG_COMMAND_BUFFER,
 843				  &man->dev_priv->cmdbuf_waiters);
 844
 845out_unlock:
 846	mutex_unlock(&man->space_mutex);
 847
 848	return 0;
 849}
 850
 851/**
 852 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 853 * space from the main pool.
 854 *
 855 * @man: The command buffer manager.
 856 * @header: Pointer to the header to set up.
 857 * @size: The requested size of the buffer space.
  858 * @interruptible: Whether to sleep interruptibly while waiting for space.
 859 */
 860static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
 861				 struct vmw_cmdbuf_header *header,
 862				 size_t size,
 863				 bool interruptible)
 864{
 865	SVGACBHeader *cb_hdr;
 866	size_t offset;
 867	int ret;
 868
 869	if (!man->has_pool)
 870		return -ENOMEM;
 871
  872	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
 873
 874	if (ret)
 875		return ret;
 876
 877	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
 878					    &header->handle);
 879	if (!header->cb_header) {
 880		ret = -ENOMEM;
 881		goto out_no_cb_header;
 882	}
 883
 884	header->size = header->node.size << PAGE_SHIFT;
 885	cb_hdr = header->cb_header;
 886	offset = header->node.start << PAGE_SHIFT;
 887	header->cmd = man->map + offset;
 888	if (man->using_mob) {
 889		cb_hdr->flags = SVGA_CB_FLAG_MOB;
 890		cb_hdr->ptr.mob.mobid = man->cmd_space->tbo.resource->start;
 891		cb_hdr->ptr.mob.mobOffset = offset;
 892	} else {
 893		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
 894	}
 895
 896	return 0;
 897
 898out_no_cb_header:
 899	spin_lock(&man->lock);
 900	drm_mm_remove_node(&header->node);
 901	spin_unlock(&man->lock);
 902
 903	return ret;
 904}
 905
 906/**
 907 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 908 * inline command buffer space.
 909 *
 910 * @man: The command buffer manager.
 911 * @header: Pointer to the header to set up.
 912 * @size: The requested size of the buffer space.
 913 */
 914static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
 915				   struct vmw_cmdbuf_header *header,
 916				   int size)
 917{
 918	struct vmw_cmdbuf_dheader *dheader;
 919	SVGACBHeader *cb_hdr;
 920
 921	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
 922		return -ENOMEM;
 923
 924	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
 925				  &header->handle);
 926	if (!dheader)
 927		return -ENOMEM;
 928
 929	header->inline_space = true;
 930	header->size = VMW_CMDBUF_INLINE_SIZE;
 931	cb_hdr = &dheader->cb_header;
 932	header->cb_header = cb_hdr;
 933	header->cmd = dheader->cmd;
 934	cb_hdr->status = SVGA_CB_STATUS_NONE;
 935	cb_hdr->flags = SVGA_CB_FLAG_NONE;
 936	cb_hdr->ptr.pa = (u64)header->handle +
 937		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
 938
 939	return 0;
 940}
 941
 942/**
 943 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 944 * command buffer space.
 945 *
 946 * @man: The command buffer manager.
 947 * @size: The requested size of the buffer space.
  948 * @interruptible: Whether to sleep interruptibly while waiting for space.
  949 * @p_header: Points to a header pointer to populate on successful return.
 950 *
 951 * Returns a pointer to command buffer space if successful. Otherwise
 952 * returns an error pointer. The header pointer returned in @p_header should
 953 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 954 */
 955void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
 956		       size_t size, bool interruptible,
 957		       struct vmw_cmdbuf_header **p_header)
 958{
 959	struct vmw_cmdbuf_header *header;
 960	int ret = 0;
 961
 962	*p_header = NULL;
 963
 964	header = kzalloc(sizeof(*header), GFP_KERNEL);
 965	if (!header)
 966		return ERR_PTR(-ENOMEM);
 967
 968	if (size <= VMW_CMDBUF_INLINE_SIZE)
 969		ret = vmw_cmdbuf_space_inline(man, header, size);
 970	else
 971		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
 972
 973	if (ret) {
 974		kfree(header);
 975		return ERR_PTR(ret);
 976	}
 977
 978	header->man = man;
 979	INIT_LIST_HEAD(&header->list);
 980	header->cb_header->status = SVGA_CB_STATUS_NONE;
 981	*p_header = header;
 982
 983	return header->cmd;
 984}
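/*
 * Usage sketch (illustrative; vmw_cmdbuf_alloc_sketch is a hypothetical
 * caller): allocate a standalone header, fill its command buffer space via
 * vmw_cmdbuf_reserve()/vmw_cmdbuf_commit() below, and release it with
 * vmw_cmdbuf_header_free() if it is not consumed by a commit.
 */
static int vmw_cmdbuf_alloc_sketch(struct vmw_cmdbuf_man *man, size_t bytes)
{
	struct vmw_cmdbuf_header *header;
	void *cmd = vmw_cmdbuf_alloc(man, bytes, true, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	/* ... write commands via vmw_cmdbuf_reserve()/vmw_cmdbuf_commit() ... */

	vmw_cmdbuf_header_free(header);
	return 0;
}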
 985
 986/**
 987 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 988 * command buffer.
 989 *
 990 * @man: The command buffer manager.
 991 * @size: The requested size of the commands.
  992 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
  993 * @interruptible: Whether to sleep interruptibly while waiting for space.
 994 *
 995 * Returns a pointer to command buffer space if successful. Otherwise
 996 * returns an error pointer.
 997 */
 998static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
 999				    size_t size,
1000				    int ctx_id,
1001				    bool interruptible)
1002{
1003	struct vmw_cmdbuf_header *cur;
1004	void *ret;
1005
1006	if (vmw_cmdbuf_cur_lock(man, interruptible))
1007		return ERR_PTR(-ERESTARTSYS);
1008
1009	cur = man->cur;
1010	if (cur && (size + man->cur_pos > cur->size ||
1011		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
1012		     ctx_id != cur->cb_header->dxContext)))
1013		__vmw_cmdbuf_cur_flush(man);
1014
1015	if (!man->cur) {
1016		ret = vmw_cmdbuf_alloc(man,
1017				       max_t(size_t, size, man->default_size),
1018				       interruptible, &man->cur);
1019		if (IS_ERR(ret)) {
1020			vmw_cmdbuf_cur_unlock(man);
1021			return ret;
1022		}
1023
1024		cur = man->cur;
1025	}
1026
1027	if (ctx_id != SVGA3D_INVALID_ID) {
1028		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
1029		cur->cb_header->dxContext = ctx_id;
1030	}
1031
1032	cur->reserved = size;
1033
1034	return (void *) (man->cur->cmd + man->cur_pos);
1035}
1036
1037/**
1038 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
1039 *
1040 * @man: The command buffer manager.
1041 * @size: The size of the commands actually written.
1042 * @flush: Whether to flush the command buffer immediately.
1043 */
1044static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
1045				  size_t size, bool flush)
1046{
1047	struct vmw_cmdbuf_header *cur = man->cur;
1048
1049	lockdep_assert_held_once(&man->cur_mutex);
1050
1051	WARN_ON(size > cur->reserved);
1052	man->cur_pos += size;
1053	if (!size)
1054		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
1055	if (flush)
1056		__vmw_cmdbuf_cur_flush(man);
1057	vmw_cmdbuf_cur_unlock(man);
1058}
1059
1060/**
1061 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
1062 *
1063 * @man: The command buffer manager.
1064 * @size: The requested size of the commands.
 1065 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 1066 * @interruptible: Whether to sleep interruptibly while waiting for space.
1067 * @header: Header of the command buffer. NULL if the current command buffer
1068 * should be used.
1069 *
1070 * Returns a pointer to command buffer space if successful. Otherwise
1071 * returns an error pointer.
1072 */
1073void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
1074			 int ctx_id, bool interruptible,
1075			 struct vmw_cmdbuf_header *header)
1076{
1077	if (!header)
1078		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
1079
1080	if (size > header->size)
1081		return ERR_PTR(-EINVAL);
1082
1083	if (ctx_id != SVGA3D_INVALID_ID) {
1084		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
1085		header->cb_header->dxContext = ctx_id;
1086	}
1087
1088	header->reserved = size;
1089	return header->cmd;
1090}
1091
1092/**
1093 * vmw_cmdbuf_commit - Commit commands in a command buffer.
1094 *
1095 * @man: The command buffer manager.
1096 * @size: The size of the commands actually written.
1097 * @header: Header of the command buffer. NULL if the current command buffer
1098 * should be used.
1099 * @flush: Whether to flush the command buffer immediately.
1100 */
1101void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1102		       struct vmw_cmdbuf_header *header, bool flush)
1103{
1104	if (!header) {
1105		vmw_cmdbuf_commit_cur(man, size, flush);
1106		return;
1107	}
1108
1109	(void) vmw_cmdbuf_cur_lock(man, false);
1110	__vmw_cmdbuf_cur_flush(man);
1111	WARN_ON(size > header->reserved);
1112	man->cur = header;
1113	man->cur_pos = size;
1114	if (!size)
1115		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
1116	if (flush)
1117		__vmw_cmdbuf_cur_flush(man);
1118	vmw_cmdbuf_cur_unlock(man);
1119}
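/*
 * Usage sketch (illustrative; vmw_cmdbuf_emit_sketch and its parameters are
 * hypothetical): reserve space for one SVGA3dCmdHeader-prefixed command,
 * write it, and commit exactly the number of bytes written. Passing a NULL
 * @header would target the manager's current buffer instead.
 */
static int vmw_cmdbuf_emit_sketch(struct vmw_cmdbuf_man *man,
				  struct vmw_cmdbuf_header *header,
				  u32 cmd_id, const void *body, u32 body_size)
{
	SVGA3dCmdHeader *cmd;

	cmd = vmw_cmdbuf_reserve(man, sizeof(*cmd) + body_size,
				 SVGA3D_INVALID_ID, true, header);
	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	cmd->id = cmd_id;
	cmd->size = body_size;
	memcpy(cmd + 1, body, body_size);
	vmw_cmdbuf_commit(man, sizeof(*cmd) + body_size, header, true);

	return 0;
}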
1120
1121
1122/**
1123 * vmw_cmdbuf_send_device_command - Send a command through the device context.
1124 *
1125 * @man: The command buffer manager.
1126 * @command: Pointer to the command to send.
1127 * @size: Size of the command.
1128 *
1129 * Synchronously sends a device context command.
1130 */
1131static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
1132					  const void *command,
1133					  size_t size)
1134{
1135	struct vmw_cmdbuf_header *header;
1136	int status;
1137	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
1138
1139	if (IS_ERR(cmd))
1140		return PTR_ERR(cmd);
1141
1142	memcpy(cmd, command, size);
1143	header->cb_header->length = size;
1144	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
1145	spin_lock(&man->lock);
1146	status = vmw_cmdbuf_header_submit(header);
1147	spin_unlock(&man->lock);
1148	vmw_cmdbuf_header_free(header);
1149
1150	if (status != SVGA_CB_STATUS_COMPLETED) {
1151		DRM_ERROR("Device context command failed with status %d\n",
1152			  status);
1153		return -EINVAL;
1154	}
1155
1156	return 0;
1157}
1158
1159/**
1160 * vmw_cmdbuf_preempt - Send a preempt command through the device
1161 * context.
1162 *
1163 * @man: The command buffer manager.
1164 * @context: Device context to pass command through.
1165 *
1166 * Synchronously sends a preempt command.
1167 */
1168static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
1169{
1170	struct {
1171		uint32 id;
1172		SVGADCCmdPreempt body;
1173	} __packed cmd;
1174
1175	cmd.id = SVGA_DC_CMD_PREEMPT;
1176	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
1177	cmd.body.ignoreIDZero = 0;
1178
1179	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1180}
1181
1182
1183/**
1184 * vmw_cmdbuf_startstop - Send a start / stop command through the device
1185 * context.
1186 *
1187 * @man: The command buffer manager.
1188 * @context: Device context to start/stop.
1189 * @enable: Whether to enable or disable the context.
1190 *
1191 * Synchronously sends a device start / stop context command.
1192 */
1193static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
1194				bool enable)
1195{
1196	struct {
1197		uint32 id;
1198		SVGADCCmdStartStop body;
1199	} __packed cmd;
1200
1201	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
1202	cmd.body.enable = (enable) ? 1 : 0;
1203	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
1204
1205	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1206}
1207
1208/**
1209 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
1210 *
1211 * @man: The command buffer manager.
1212 * @size: The size of the main space pool.
1213 *
1214 * Set the size and allocate the main command buffer space pool.
1215 * If successful, this enables large command submissions.
1216 * Note that this function requires that rudimentary command
1217 * submission is already available and that the MOB memory manager is alive.
1218 * Returns 0 on success. Negative error code on failure.
1219 */
1220int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
1221{
1222	struct vmw_private *dev_priv = man->dev_priv;
1223	int ret;
1224
1225	if (man->has_pool)
1226		return -EINVAL;
1227
1228	/* First, try to allocate a huge chunk of DMA memory */
1229	size = PAGE_ALIGN(size);
1230	man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
1231				      &man->handle, GFP_KERNEL);
1232	if (man->map) {
1233		man->using_mob = false;
1234	} else {
1235		struct vmw_bo_params bo_params = {
1236			.domain = VMW_BO_DOMAIN_MOB,
1237			.busy_domain = VMW_BO_DOMAIN_MOB,
1238			.bo_type = ttm_bo_type_kernel,
1239			.size = size,
1240			.pin = true
1241		};
1242		/*
1243		 * DMA memory failed. If we can have command buffers in a
1244		 * MOB, try to use that instead. Note that this will
1245		 * actually call into the already enabled manager, when
1246		 * binding the MOB.
1247		 */
1248		if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
1249		    !dev_priv->has_mob)
1250			return -ENOMEM;
1251
1252		ret = vmw_bo_create(dev_priv, &bo_params, &man->cmd_space);
1253		if (ret)
1254			return ret;
1255
1256		man->map = vmw_bo_map_and_cache(man->cmd_space);
1257		man->using_mob = man->map;
1258	}
1259
1260	man->size = size;
1261	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
1262
1263	man->has_pool = true;
1264
1265	/*
1266	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
1267	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
1268	 * needs to wait for space and we block on further command
1269	 * submissions to be able to free up space.
1270	 */
1271	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1272	drm_info(&dev_priv->drm,
1273		 "Using command buffers with %s pool.\n",
1274		 (man->using_mob) ? "MOB" : "DMA");
1275
1276	return 0;
1277}
1278
1279/**
 1280 * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
1281 * inline command buffer submissions only.
1282 *
1283 * @dev_priv: Pointer to device private structure.
1284 *
 1285 * Returns a pointer to a command buffer manager on success or an error
 1286 * pointer on failure. The command buffer manager will be enabled for
 1287 * submissions of size VMW_CMDBUF_INLINE_SIZE only.
1288 */
1289struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
1290{
1291	struct vmw_cmdbuf_man *man;
1292	struct vmw_cmdbuf_context *ctx;
1293	unsigned int i;
1294	int ret;
1295
1296	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
1297		return ERR_PTR(-ENOSYS);
1298
1299	man = kzalloc(sizeof(*man), GFP_KERNEL);
1300	if (!man)
1301		return ERR_PTR(-ENOMEM);
1302
1303	man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
1304		2 : 1;
1305	man->headers = dma_pool_create("vmwgfx cmdbuf",
1306				       dev_priv->drm.dev,
1307				       sizeof(SVGACBHeader),
1308				       64, PAGE_SIZE);
1309	if (!man->headers) {
1310		ret = -ENOMEM;
1311		goto out_no_pool;
1312	}
1313
1314	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
1315					dev_priv->drm.dev,
1316					sizeof(struct vmw_cmdbuf_dheader),
1317					64, PAGE_SIZE);
1318	if (!man->dheaders) {
1319		ret = -ENOMEM;
1320		goto out_no_dpool;
1321	}
1322
1323	for_each_cmdbuf_ctx(man, i, ctx)
1324		vmw_cmdbuf_ctx_init(ctx);
1325
1326	INIT_LIST_HEAD(&man->error);
1327	spin_lock_init(&man->lock);
1328	mutex_init(&man->cur_mutex);
1329	mutex_init(&man->space_mutex);
1330	mutex_init(&man->error_mutex);
1331	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1332	init_waitqueue_head(&man->alloc_queue);
1333	init_waitqueue_head(&man->idle_queue);
1334	man->dev_priv = dev_priv;
1335	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
1336	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
1337	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
1338			       &dev_priv->error_waiters);
1339	ret = vmw_cmdbuf_startstop(man, 0, true);
1340	if (ret) {
1341		DRM_ERROR("Failed starting command buffer contexts\n");
1342		vmw_cmdbuf_man_destroy(man);
1343		return ERR_PTR(ret);
1344	}
1345
1346	return man;
1347
1348out_no_dpool:
1349	dma_pool_destroy(man->headers);
1350out_no_pool:
1351	kfree(man);
1352
1353	return ERR_PTR(ret);
1354}
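/*
 * Bring-up sketch (illustrative; vmw_cmdbuf_bringup_sketch is hypothetical):
 * the manager is created early, enabled for inline submissions only, and the
 * large pool is switched on later with vmw_cmdbuf_set_pool_size() once the
 * MOB memory manager is alive, per the ordering noted above.
 */
static int vmw_cmdbuf_bringup_sketch(struct vmw_private *dev_priv,
				     size_t pool_size)
{
	struct vmw_cmdbuf_man *man = vmw_cmdbuf_man_create(dev_priv);

	if (IS_ERR(man))
		return PTR_ERR(man);

	return vmw_cmdbuf_set_pool_size(man, pool_size);
}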
1355
1356/**
1357 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
1358 *
1359 * @man: Pointer to a command buffer manager.
1360 *
1361 * This function removes the main buffer space pool, and should be called
1362 * before MOB memory management is removed. When this function has been called,
1363 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
1364 * less are allowed, and the default size of the command buffer for small kernel
1365 * submissions is also set to this size.
1366 */
1367void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
1368{
1369	if (!man->has_pool)
1370		return;
1371
1372	man->has_pool = false;
1373	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1374	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
1375	if (man->using_mob)
1376		vmw_bo_unreference(&man->cmd_space);
1377	else
1378		dma_free_coherent(man->dev_priv->drm.dev,
1379				  man->size, man->map, man->handle);
1380}
1381
1382/**
1383 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
1384 *
1385 * @man: Pointer to a command buffer manager.
1386 *
1387 * This function idles and then destroys a command buffer manager.
1388 */
1389void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
1390{
1391	WARN_ON_ONCE(man->has_pool);
1392	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
1393
1394	if (vmw_cmdbuf_startstop(man, 0, false))
1395		DRM_ERROR("Failed stopping command buffer contexts.\n");
1396
1397	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
1398				  &man->dev_priv->error_waiters);
1399	(void) cancel_work_sync(&man->work);
1400	dma_pool_destroy(man->dheaders);
1401	dma_pool_destroy(man->headers);
1402	mutex_destroy(&man->cur_mutex);
1403	mutex_destroy(&man->space_mutex);
1404	mutex_destroy(&man->error_mutex);
1405	kfree(man);
1406}
v6.2
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
 
 
 
 
 
  28#include <linux/dmapool.h>
  29#include <linux/pci.h>
  30
  31#include <drm/ttm/ttm_bo_api.h>
  32
  33#include "vmwgfx_drv.h"
  34
  35/*
  36 * Size of inline command buffers. Try to make sure that a page size is a
  37 * multiple of the DMA pool allocation size.
  38 */
  39#define VMW_CMDBUF_INLINE_ALIGN 64
  40#define VMW_CMDBUF_INLINE_SIZE \
  41	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
  42
  43/**
  44 * struct vmw_cmdbuf_context - Command buffer context queues
  45 *
  46 * @submitted: List of command buffers that have been submitted to the
  47 * manager but not yet submitted to hardware.
  48 * @hw_submitted: List of command buffers submitted to hardware.
  49 * @preempted: List of preempted command buffers.
  50 * @num_hw_submitted: Number of buffers currently being processed by hardware
  51 * @block_submission: Identifies a block command submission.
  52 */
  53struct vmw_cmdbuf_context {
  54	struct list_head submitted;
  55	struct list_head hw_submitted;
  56	struct list_head preempted;
  57	unsigned num_hw_submitted;
  58	bool block_submission;
  59};
  60
  61/**
  62 * struct vmw_cmdbuf_man - Command buffer manager
  63 *
  64 * @cur_mutex: Mutex protecting the command buffer used for incremental small
  65 * kernel command submissions, @cur.
  66 * @space_mutex: Mutex to protect against starvation when we allocate
  67 * main pool buffer space.
  68 * @error_mutex: Mutex to serialize the work queue error handling.
  69 * Note this is not needed if the same workqueue handler
  70 * can't race with itself...
  71 * @work: A struct work_struct implementeing command buffer error handling.
  72 * Immutable.
  73 * @dev_priv: Pointer to the device private struct. Immutable.
  74 * @ctx: Array of command buffer context queues. The queues and the context
  75 * data is protected by @lock.
  76 * @error: List of command buffers that have caused device errors.
  77 * Protected by @lock.
  78 * @mm: Range manager for the command buffer space. Manager allocations and
  79 * frees are protected by @lock.
  80 * @cmd_space: Buffer object for the command buffer space, unless we were
  81 * able to make a contigous coherent DMA memory allocation, @handle. Immutable.
  82 * @map_obj: Mapping state for @cmd_space. Immutable.
  83 * @map: Pointer to command buffer space. May be a mapped buffer object or
  84 * a contigous coherent DMA memory allocation. Immutable.
  85 * @cur: Command buffer for small kernel command submissions. Protected by
  86 * the @cur_mutex.
  87 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
  88 * @default_size: Default size for the @cur command buffer. Immutable.
  89 * @max_hw_submitted: Max number of in-flight command buffers the device can
  90 * handle. Immutable.
  91 * @lock: Spinlock protecting command submission queues.
  92 * @headers: Pool of DMA memory for device command buffer headers.
  93 * Internal protection.
  94 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
  95 * space for inline data. Internal protection.
  96 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
  97 * space.
  98 * @idle_queue: Wait queue for processes waiting for command buffer idle.
  99 * @irq_on: Whether the process function has requested irq to be turned on.
 100 * Protected by @lock.
 101 * @using_mob: Whether the command buffer space is a MOB or a contigous DMA
 102 * allocation. Immutable.
 103 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 104 * Typically this is false only during bootstrap.
 105 * @handle: DMA address handle for the command buffer space if @using_mob is
 106 * false. Immutable.
 107 * @size: The size of the command buffer space. Immutable.
 108 * @num_contexts: Number of contexts actually enabled.
 109 */
 110struct vmw_cmdbuf_man {
 111	struct mutex cur_mutex;
 112	struct mutex space_mutex;
 113	struct mutex error_mutex;
 114	struct work_struct work;
 115	struct vmw_private *dev_priv;
 116	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
 117	struct list_head error;
 118	struct drm_mm mm;
 119	struct ttm_buffer_object *cmd_space;
 120	struct ttm_bo_kmap_obj map_obj;
 121	u8 *map;
 122	struct vmw_cmdbuf_header *cur;
 123	size_t cur_pos;
 124	size_t default_size;
 125	unsigned max_hw_submitted;
 126	spinlock_t lock;
 127	struct dma_pool *headers;
 128	struct dma_pool *dheaders;
 129	wait_queue_head_t alloc_queue;
 130	wait_queue_head_t idle_queue;
 131	bool irq_on;
 132	bool using_mob;
 133	bool has_pool;
 134	dma_addr_t handle;
 135	size_t size;
 136	u32 num_contexts;
 137};
 138
 139/**
 140 * struct vmw_cmdbuf_header - Command buffer metadata
 141 *
 142 * @man: The command buffer manager.
 143 * @cb_header: Device command buffer header, allocated from a DMA pool.
 144 * @cb_context: The device command buffer context.
 145 * @list: List head for attaching to the manager lists.
 146 * @node: The range manager node.
 147 * @handle: The DMA address of @cb_header. Handed to the device on command
 148 * buffer submission.
 149 * @cmd: Pointer to the command buffer space of this buffer.
 150 * @size: Size of the command buffer space of this buffer.
 151 * @reserved: Reserved space of this buffer.
 152 * @inline_space: Whether inline command buffer space is used.
 153 */
 154struct vmw_cmdbuf_header {
 155	struct vmw_cmdbuf_man *man;
 156	SVGACBHeader *cb_header;
 157	SVGACBContext cb_context;
 158	struct list_head list;
 159	struct drm_mm_node node;
 160	dma_addr_t handle;
 161	u8 *cmd;
 162	size_t size;
 163	size_t reserved;
 164	bool inline_space;
 165};
 166
 167/**
 168 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 169 * command buffer space.
 170 *
 171 * @cb_header: Device command buffer header.
 172 * @cmd: Inline command buffer space.
 173 */
 174struct vmw_cmdbuf_dheader {
 175	SVGACBHeader cb_header;
 176	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
 177};
 178
 179/**
 180 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 181 *
 182 * @page_size: Size of requested command buffer space in pages.
 183 * @node: Pointer to the range manager node.
 184 * @done: True if this allocation has succeeded.
 185 */
 186struct vmw_cmdbuf_alloc_info {
 187	size_t page_size;
 188	struct drm_mm_node *node;
 189	bool done;
 190};
 191
 192/* Loop over each context in the command buffer manager. */
 193#define for_each_cmdbuf_ctx(_man, _i, _ctx)				\
 194	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
 195	     ++(_i), ++(_ctx))
 196
 197static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
 198				bool enable);
 199static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
 200
 201/**
 202 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 203 *
 204 * @man: The range manager.
 205 * @interruptible: Whether to wait interruptible when locking.
 206 */
 207static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
 208{
 209	if (interruptible) {
 210		if (mutex_lock_interruptible(&man->cur_mutex))
 211			return -ERESTARTSYS;
 212	} else {
 213		mutex_lock(&man->cur_mutex);
 214	}
 215
 216	return 0;
 217}
 218
 219/**
 220 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 221 *
 222 * @man: The range manager.
 223 */
 224static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
 225{
 226	mutex_unlock(&man->cur_mutex);
 227}
 228
 229/**
 230 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 231 * been used for the device context with inline command buffers.
 232 * Need not be called locked.
 233 *
 234 * @header: Pointer to the header to free.
 235 */
 236static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
 237{
 238	struct vmw_cmdbuf_dheader *dheader;
 239
 240	if (WARN_ON_ONCE(!header->inline_space))
 241		return;
 242
 243	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
 244			       cb_header);
 245	dma_pool_free(header->man->dheaders, dheader, header->handle);
 246	kfree(header);
 247}
 248
 249/**
 250 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header  and its
 251 * associated structures.
 252 *
 253 * @header: Pointer to the header to free.
 254 *
 255 * For internal use. Must be called with man::lock held.
 256 */
 257static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 258{
 259	struct vmw_cmdbuf_man *man = header->man;
 260
 261	lockdep_assert_held_once(&man->lock);
 262
 263	if (header->inline_space) {
 264		vmw_cmdbuf_header_inline_free(header);
 265		return;
 266	}
 267
 268	drm_mm_remove_node(&header->node);
 269	wake_up_all(&man->alloc_queue);
 270	if (header->cb_header)
 271		dma_pool_free(man->headers, header->cb_header,
 272			      header->handle);
 273	kfree(header);
 274}
 275
 276/**
 277 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header  and its
 278 * associated structures.
 279 *
 280 * @header: Pointer to the header to free.
 281 */
 282void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 283{
 284	struct vmw_cmdbuf_man *man = header->man;
 285
 286	/* Avoid locking if inline_space */
 287	if (header->inline_space) {
 288		vmw_cmdbuf_header_inline_free(header);
 289		return;
 290	}
 291	spin_lock(&man->lock);
 292	__vmw_cmdbuf_header_free(header);
 293	spin_unlock(&man->lock);
 294}
 295
 296
 297/**
 298 * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
 299 *
 300 * @header: The header of the buffer to submit.
 301 */
 302static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
 303{
 304	struct vmw_cmdbuf_man *man = header->man;
 305	u32 val;
 306
 307	val = upper_32_bits(header->handle);
 308	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
 309
 310	val = lower_32_bits(header->handle);
 311	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
 312	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
 313
 314	return header->cb_header->status;
 315}
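
    /*
     * Submission handshake sketch (summarizing the writes above; register
     * semantics assumed from the SVGA command buffer interface): the
     * 64-bit header address is split across two registers, and the
     * context id is folded into the low bits of the address:
     *
     *	SVGA_REG_COMMAND_HIGH <- upper_32_bits(handle)
     *	SVGA_REG_COMMAND_LOW  <- lower_32_bits(handle) | context
     *
     * The 64-byte alignment of the header dma pools created in
     * vmw_cmdbuf_man_create() is presumably what leaves those low bits
     * free for the context id.
     */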
 316
 317/**
 318 * vmw_cmdbuf_ctx_init - Initialize a command buffer context.
 319 *
 320 * @ctx: The command buffer context to initialize
 321 */
 322static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
 323{
 324	INIT_LIST_HEAD(&ctx->hw_submitted);
 325	INIT_LIST_HEAD(&ctx->submitted);
 326	INIT_LIST_HEAD(&ctx->preempted);
 327	ctx->num_hw_submitted = 0;
 328}
 329
 330/**
 331 * vmw_cmdbuf_ctx_submit - Submit command buffers from a command buffer
 332 * context.
 333 *
 334 * @man: The command buffer manager.
 335 * @ctx: The command buffer context.
 336 *
 337 * Submits command buffers to hardware until there are no more command
 338 * buffers to submit or the hardware can't handle more command buffers.
 339 */
 340static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
 341				  struct vmw_cmdbuf_context *ctx)
 342{
 343	while (ctx->num_hw_submitted < man->max_hw_submitted &&
 344	       !list_empty(&ctx->submitted) &&
 345	       !ctx->block_submission) {
 346		struct vmw_cmdbuf_header *entry;
 347		SVGACBStatus status;
 348
 349		entry = list_first_entry(&ctx->submitted,
 350					 struct vmw_cmdbuf_header,
 351					 list);
 352
 353		status = vmw_cmdbuf_header_submit(entry);
 354
 355		/* This should never happen */
 356		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
 357			entry->cb_header->status = SVGA_CB_STATUS_NONE;
 358			break;
 359		}
 360
 361		list_move_tail(&entry->list, &ctx->hw_submitted);
 362		ctx->num_hw_submitted++;
 363	}
 365}
 366
 367/**
 368 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 369 *
 370 * @man: The command buffer manager.
 371 * @ctx: The command buffer context.
 372 * @notempty: Incremented when the context's submitted list remains non-empty.
 373 *
 374 * Submit command buffers to hardware if possible, and process finished
 375 * buffers: typically free them, but on preemption move them to the
 376 * preempted queue and on error to the error list. Wake up waiters.
 377 */
 378static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
 379				   struct vmw_cmdbuf_context *ctx,
 380				   int *notempty)
 381{
 382	struct vmw_cmdbuf_header *entry, *next;
 383
 384	vmw_cmdbuf_ctx_submit(man, ctx);
 385
 386	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
 387		SVGACBStatus status = entry->cb_header->status;
 388
 389		if (status == SVGA_CB_STATUS_NONE)
 390			break;
 391
 392		list_del(&entry->list);
 393		wake_up_all(&man->idle_queue);
 394		ctx->num_hw_submitted--;
 395		switch (status) {
 396		case SVGA_CB_STATUS_COMPLETED:
 397			__vmw_cmdbuf_header_free(entry);
 398			break;
 399		case SVGA_CB_STATUS_COMMAND_ERROR:
 400			WARN_ONCE(true, "Command buffer error.\n");
 401			entry->cb_header->status = SVGA_CB_STATUS_NONE;
 402			list_add_tail(&entry->list, &man->error);
 403			schedule_work(&man->work);
 404			break;
 405		case SVGA_CB_STATUS_PREEMPTED:
 406			entry->cb_header->status = SVGA_CB_STATUS_NONE;
 407			list_add_tail(&entry->list, &ctx->preempted);
 408			break;
 409		case SVGA_CB_STATUS_CB_HEADER_ERROR:
 410			WARN_ONCE(true, "Command buffer header error.\n");
 411			__vmw_cmdbuf_header_free(entry);
 412			break;
 413		default:
 414			WARN_ONCE(true, "Undefined command buffer status.\n");
 415			__vmw_cmdbuf_header_free(entry);
 416			break;
 417		}
 418	}
 419
 420	vmw_cmdbuf_ctx_submit(man, ctx);
 421	if (!list_empty(&ctx->submitted))
 422		(*notempty)++;
 423}
 424
 425/**
 426 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 427 * switch irq handling on or off as appropriate.
 428 *
 429 * @man: The command buffer manager.
 430 *
 431 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 432 * command buffers left that are not submitted to hardware, make sure
 433 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 434 */
 435static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
 436{
 437	int notempty;
 438	struct vmw_cmdbuf_context *ctx;
 439	int i;
 440
 441retry:
 442	notempty = 0;
 443	for_each_cmdbuf_ctx(man, i, ctx)
 444		vmw_cmdbuf_ctx_process(man, ctx, &notempty);
 445
 446	if (man->irq_on && !notempty) {
 447		vmw_generic_waiter_remove(man->dev_priv,
 448					  SVGA_IRQFLAG_COMMAND_BUFFER,
 449					  &man->dev_priv->cmdbuf_waiters);
 450		man->irq_on = false;
 451	} else if (!man->irq_on && notempty) {
 452		vmw_generic_waiter_add(man->dev_priv,
 453				       SVGA_IRQFLAG_COMMAND_BUFFER,
 454				       &man->dev_priv->cmdbuf_waiters);
 455		man->irq_on = true;
 456
 457		/* Rerun in case we just missed an irq. */
 458		goto retry;
 459	}
 460}
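
    /*
     * Note on the retry above: the waiter is added only after pending
     * buffers were found, so a completion interrupt may already have
     * fired in the window before irq handling was switched on. Rerunning
     * the processing loop once closes that window.
     */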
 461
 462/**
 463 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 464 * command buffer context
 465 *
 466 * @man: The command buffer manager.
 467 * @header: The header of the buffer to submit.
 468 * @cb_context: The command buffer context to use.
 469 *
 470 * This function adds @header to the "submitted" queue of the command
 471 * buffer context identified by @cb_context. It then calls the command buffer
 472 * manager processing to potentially submit the buffer to hardware.
 473 * @man->lock needs to be held when calling this function.
 474 */
 475static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
 476			       struct vmw_cmdbuf_header *header,
 477			       SVGACBContext cb_context)
 478{
 479	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
 480		header->cb_header->dxContext = 0;
 481	header->cb_context = cb_context;
 482	list_add_tail(&header->list, &man->ctx[cb_context].submitted);
 483
 484	vmw_cmdbuf_man_process(man);
 485}
 486
 487/**
 488 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 489 * handler implemented as a threaded irq task.
 490 *
 491 * @man: Pointer to the command buffer manager.
 492 *
 493 * The bottom half of the interrupt handler simply calls into the
 494 * command buffer processor to free finished buffers and submit any
 495 * queued buffers to hardware.
 496 */
 497void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
 498{
 499	spin_lock(&man->lock);
 500	vmw_cmdbuf_man_process(man);
 501	spin_unlock(&man->lock);
 502}
 503
 504/**
 505 * vmw_cmdbuf_work_func - The deferred work function that handles
 506 * command buffer errors.
 507 *
 508 * @work: The work func closure argument.
 509 *
 510 * Restarting the command buffer context after an error requires process
 511 * context, so it is deferred to this work function.
 512 */
 513static void vmw_cmdbuf_work_func(struct work_struct *work)
 514{
 515	struct vmw_cmdbuf_man *man =
 516		container_of(work, struct vmw_cmdbuf_man, work);
 517	struct vmw_cmdbuf_header *entry, *next;
 518	uint32_t dummy = 0;
 519	bool send_fence = false;
 520	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
 521	int i;
 522	struct vmw_cmdbuf_context *ctx;
 523	bool global_block = false;
 524
 525	for_each_cmdbuf_ctx(man, i, ctx)
 526		INIT_LIST_HEAD(&restart_head[i]);
 527
 528	mutex_lock(&man->error_mutex);
 529	spin_lock(&man->lock);
 530	list_for_each_entry_safe(entry, next, &man->error, list) {
 531		SVGACBHeader *cb_hdr = entry->cb_header;
 532		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
 533			(entry->cmd + cb_hdr->errorOffset);
 534		u32 error_cmd_size, new_start_offset;
 535		const char *cmd_name;
 536
 537		list_del_init(&entry->list);
 538		global_block = true;
 539
 540		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
 541			VMW_DEBUG_USER("Unknown command causing device error.\n");
 542			VMW_DEBUG_USER("Command buffer offset is %lu\n",
 543				       (unsigned long) cb_hdr->errorOffset);
 544			__vmw_cmdbuf_header_free(entry);
 545			send_fence = true;
 546			continue;
 547		}
 548
 549		VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
 550			       cmd_name);
 551		VMW_DEBUG_USER("Command buffer offset is %lu\n",
 552			       (unsigned long) cb_hdr->errorOffset);
 553		VMW_DEBUG_USER("Command size is %lu\n",
 554			       (unsigned long) error_cmd_size);
 555
 556		new_start_offset = cb_hdr->errorOffset + error_cmd_size;
 557
 558		if (new_start_offset >= cb_hdr->length) {
 559			__vmw_cmdbuf_header_free(entry);
 560			send_fence = true;
 561			continue;
 562		}
 563
 564		if (man->using_mob)
 565			cb_hdr->ptr.mob.mobOffset += new_start_offset;
 566		else
 567			cb_hdr->ptr.pa += (u64) new_start_offset;
 568
 569		entry->cmd += new_start_offset;
 570		cb_hdr->length -= new_start_offset;
 571		cb_hdr->errorOffset = 0;
 572		cb_hdr->offset = 0;
 573
 574		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
 575	}
 576
 577	for_each_cmdbuf_ctx(man, i, ctx)
 578		man->ctx[i].block_submission = true;
 579
 580	spin_unlock(&man->lock);
 581
 582	/* Preempt all contexts */
 583	if (global_block && vmw_cmdbuf_preempt(man, 0))
 584		DRM_ERROR("Failed preempting command buffer contexts\n");
 585
 586	spin_lock(&man->lock);
 587	for_each_cmdbuf_ctx(man, i, ctx) {
 588		/* Move preempted command buffers to the preempted queue. */
 589		vmw_cmdbuf_ctx_process(man, ctx, &dummy);
 590
 591		/*
 592		 * Add the preempted queue after the command buffer
 593		 * that caused an error.
 594		 */
 595		list_splice_init(&ctx->preempted, restart_head[i].prev);
 596
 597		/*
 598		 * Finally, splice all remaining command buffers onto the head
 599		 * of the submitted queue so they are rerun first.
 600		 */
 601
 602		ctx->block_submission = false;
 603		list_splice_init(&restart_head[i], &ctx->submitted);
 604	}
 605
 606	vmw_cmdbuf_man_process(man);
 607	spin_unlock(&man->lock);
 608
 609	if (global_block && vmw_cmdbuf_startstop(man, 0, true))
 610		DRM_ERROR("Failed restarting command buffer contexts\n");
 611
 612	/* Send a new fence in case one was removed */
 613	if (send_fence) {
 614		vmw_cmd_send_fence(man->dev_priv, &dummy);
 615		wake_up_all(&man->idle_queue);
 616	}
 617
 618	mutex_unlock(&man->error_mutex);
 619}
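
    /*
     * Worked example for the restart arithmetic above (hypothetical
     * numbers, DMA pool case): if a 4096-byte command buffer fails with
     * errorOffset == 512 and the failing command is 64 bytes long, the
     * buffer is requeued with
     *
     *	new_start_offset     = 512 + 64;	(576)
     *	cb_hdr->ptr.pa      += 576;		(skip the bad command)
     *	entry->cmd          += 576;
     *	cb_hdr->length       = 4096 - 576;	(3520 bytes left to run)
     *	cb_hdr->errorOffset  = cb_hdr->offset = 0;
     *
     * so execution resumes at the first command after the failing one.
     */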
 620
 621/**
 622 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 623 *
 624 * @man: The command buffer manager.
 625 * @check_preempted: Also check the preempted queue for pending command buffers.
 626 * Returns true when all queues are empty and no errors are pending.
 627 */
 628static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
 629				bool check_preempted)
 630{
 631	struct vmw_cmdbuf_context *ctx;
 632	bool idle = false;
 633	int i;
 634
 635	spin_lock(&man->lock);
 636	vmw_cmdbuf_man_process(man);
 637	for_each_cmdbuf_ctx(man, i, ctx) {
 638		if (!list_empty(&ctx->submitted) ||
 639		    !list_empty(&ctx->hw_submitted) ||
 640		    (check_preempted && !list_empty(&ctx->preempted)))
 641			goto out_unlock;
 642	}
 643
 644	idle = list_empty(&man->error);
 645
 646out_unlock:
 647	spin_unlock(&man->lock);
 648
 649	return idle;
 650}
 651
 652/**
 653 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 654 * command submissions
 655 *
 656 * @man: The command buffer manager.
 657 *
 658 * Flushes the current command buffer without allocating a new one. A new one
 659 * is automatically allocated when needed. Call with @man->cur_mutex held.
 660 */
 661static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
 662{
 663	struct vmw_cmdbuf_header *cur = man->cur;
 664
 665	lockdep_assert_held_once(&man->cur_mutex);
 666
 667	if (!cur)
 668		return;
 669
 670	spin_lock(&man->lock);
 671	if (man->cur_pos == 0) {
 672		__vmw_cmdbuf_header_free(cur);
 673		goto out_unlock;
 674	}
 675
 676	man->cur->cb_header->length = man->cur_pos;
 677	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
 678out_unlock:
 679	spin_unlock(&man->lock);
 680	man->cur = NULL;
 681	man->cur_pos = 0;
 682}
 683
 684/**
 685 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 686 * command submissions
 687 *
 688 * @man: The command buffer manager.
 689 * @interruptible: Whether to sleep interruptibly when waiting.
 690 *
 691 * Flushes the current command buffer without allocating a new one. A new one
 692 * is automatically allocated when needed.
 693 */
 694int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
 695			 bool interruptible)
 696{
 697	int ret = vmw_cmdbuf_cur_lock(man, interruptible);
 698
 699	if (ret)
 700		return ret;
 701
 702	__vmw_cmdbuf_cur_flush(man);
 703	vmw_cmdbuf_cur_unlock(man);
 704
 705	return 0;
 706}
 707
 708/**
 709 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 710 *
 711 * @man: The command buffer manager.
 712 * @interruptible: Sleep interruptibly while waiting.
 713 * @timeout: Time out after this many jiffies.
 714 *
 715 * Wait until the command buffer manager has processed all command buffers,
 716 * or until a timeout occurs. If a timeout occurs, the function will return
 717 * -EBUSY.
 718 */
 719int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
 720		    unsigned long timeout)
 721{
 722	int ret;
 723
 724	ret = vmw_cmdbuf_cur_flush(man, interruptible);
 725	vmw_generic_waiter_add(man->dev_priv,
 726			       SVGA_IRQFLAG_COMMAND_BUFFER,
 727			       &man->dev_priv->cmdbuf_waiters);
 728
 729	if (interruptible) {
 730		ret = wait_event_interruptible_timeout
 731			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
 732			 timeout);
 733	} else {
 734		ret = wait_event_timeout
 735			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
 736			 timeout);
 737	}
 738	vmw_generic_waiter_remove(man->dev_priv,
 739				  SVGA_IRQFLAG_COMMAND_BUFFER,
 740				  &man->dev_priv->cmdbuf_waiters);
 741	if (ret == 0) {
 742		if (!vmw_cmdbuf_man_idle(man, true))
 743			ret = -EBUSY;
 744		else
 745			ret = 0;
 746	}
 747	if (ret > 0)
 748		ret = 0;
 749
 750	return ret;
 751}
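
    /*
     * Typical call (illustrative): the teardown paths below wait
     * best-effort and ignore the result; a caller that must know whether
     * the device actually went idle would check for -EBUSY instead:
     *
     *	if (vmw_cmdbuf_idle(man, false, 10 * HZ) == -EBUSY)
     *		DRM_ERROR("Command buffer manager failed to idle.\n");
     */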
 752
 753/**
 754 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 755 *
 756 * @man: The command buffer manager.
 757 * @info: Allocation info. Will hold the size on entry and allocated mm node
 758 * on successful return.
 759 *
 760 * Try to allocate buffer space from the main pool. Returns true on
 761 * success, with the allocated node stored in @info->node.
 762 */
 763static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
 764				 struct vmw_cmdbuf_alloc_info *info)
 765{
 766	int ret;
 767
 768	if (info->done)
 769		return true;
 770
 771	memset(info->node, 0, sizeof(*info->node));
 772	spin_lock(&man->lock);
 773	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 774	if (ret) {
 775		vmw_cmdbuf_man_process(man);
 776		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 777	}
 778
 779	spin_unlock(&man->lock);
 780	info->done = !ret;
 781
 782	return info->done;
 783}
 784
 785/**
 786 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 787 *
 788 * @man: The command buffer manager.
 789 * @node: Pointer to pre-allocated range-manager node.
 790 * @size: The size of the allocation.
 791 * @interruptible: Whether to sleep interruptible while waiting for space.
 792 *
 793 * This function allocates buffer space from the main pool. If no space is
 794 * currently available, it turns on IRQ handling and sleeps waiting for it to
 795 * become available.
 796 */
 797static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
 798				  struct drm_mm_node *node,
 799				  size_t size,
 800				  bool interruptible)
 801{
 802	struct vmw_cmdbuf_alloc_info info;
 803
 804	info.page_size = PFN_UP(size);
 805	info.node = node;
 806	info.done = false;
 807
 808	/*
 809	 * To prevent starvation of large requests, only one allocating call
 810	 * at a time waiting for space.
 811	 */
 812	if (interruptible) {
 813		if (mutex_lock_interruptible(&man->space_mutex))
 814			return -ERESTARTSYS;
 815	} else {
 816		mutex_lock(&man->space_mutex);
 817	}
 818
 819	/* Try to allocate space without waiting. */
 820	if (vmw_cmdbuf_try_alloc(man, &info))
 821		goto out_unlock;
 822
 823	vmw_generic_waiter_add(man->dev_priv,
 824			       SVGA_IRQFLAG_COMMAND_BUFFER,
 825			       &man->dev_priv->cmdbuf_waiters);
 826
 827	if (interruptible) {
 828		int ret;
 829
 830		ret = wait_event_interruptible
 831			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
 832		if (ret) {
 833			vmw_generic_waiter_remove
 834				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
 835				 &man->dev_priv->cmdbuf_waiters);
 836			mutex_unlock(&man->space_mutex);
 837			return ret;
 838		}
 839	} else {
 840		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
 841	}
 842	vmw_generic_waiter_remove(man->dev_priv,
 843				  SVGA_IRQFLAG_COMMAND_BUFFER,
 844				  &man->dev_priv->cmdbuf_waiters);
 845
 846out_unlock:
 847	mutex_unlock(&man->space_mutex);
 848
 849	return 0;
 850}
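
    /*
     * Note on the wait pattern above: vmw_cmdbuf_try_alloc() doubles as
     * the wait_event() condition, so each wake-up on @alloc_queue
     * (signalled from __vmw_cmdbuf_header_free()) retries the drm_mm
     * allocation until it succeeds, while @space_mutex admits only one
     * waiter at a time so large requests cannot be starved by a stream
     * of smaller ones.
     */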
 851
 852/**
 853 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 854 * space from the main pool.
 855 *
 856 * @man: The command buffer manager.
 857 * @header: Pointer to the header to set up.
 858 * @size: The requested size of the buffer space.
 859 * @interruptible: Whether to sleep interruptible while waiting for space.
 860 */
 861static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
 862				 struct vmw_cmdbuf_header *header,
 863				 size_t size,
 864				 bool interruptible)
 865{
 866	SVGACBHeader *cb_hdr;
 867	size_t offset;
 868	int ret;
 869
 870	if (!man->has_pool)
 871		return -ENOMEM;
 872
 873	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
 874
 875	if (ret)
 876		return ret;
 877
 878	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
 879					    &header->handle);
 880	if (!header->cb_header) {
 881		ret = -ENOMEM;
 882		goto out_no_cb_header;
 883	}
 884
 885	header->size = header->node.size << PAGE_SHIFT;
 886	cb_hdr = header->cb_header;
 887	offset = header->node.start << PAGE_SHIFT;
 888	header->cmd = man->map + offset;
 889	if (man->using_mob) {
 890		cb_hdr->flags = SVGA_CB_FLAG_MOB;
 891		cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start;
 892		cb_hdr->ptr.mob.mobOffset = offset;
 893	} else {
 894		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
 895	}
 896
 897	return 0;
 898
 899out_no_cb_header:
 900	spin_lock(&man->lock);
 901	drm_mm_remove_node(&header->node);
 902	spin_unlock(&man->lock);
 903
 904	return ret;
 905}
 906
 907/**
 908 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 909 * inline command buffer space.
 910 *
 911 * @man: The command buffer manager.
 912 * @header: Pointer to the header to set up.
 913 * @size: The requested size of the buffer space.
 914 */
 915static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
 916				   struct vmw_cmdbuf_header *header,
 917				   int size)
 918{
 919	struct vmw_cmdbuf_dheader *dheader;
 920	SVGACBHeader *cb_hdr;
 921
 922	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
 923		return -ENOMEM;
 924
 925	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
 926				  &header->handle);
 927	if (!dheader)
 928		return -ENOMEM;
 929
 930	header->inline_space = true;
 931	header->size = VMW_CMDBUF_INLINE_SIZE;
 932	cb_hdr = &dheader->cb_header;
 933	header->cb_header = cb_hdr;
 934	header->cmd = dheader->cmd;
 935	cb_hdr->status = SVGA_CB_STATUS_NONE;
 936	cb_hdr->flags = SVGA_CB_FLAG_NONE;
 937	cb_hdr->ptr.pa = (u64)header->handle +
 938		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
 939
 940	return 0;
 941}
 942
 943/**
 944 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 945 * command buffer space.
 946 *
 947 * @man: The command buffer manager.
 948 * @size: The requested size of the buffer space.
 949 * @interruptible: Whether to sleep interruptible while waiting for space.
 950 * @p_header: Location in which to store the header pointer on success.
 951 *
 952 * Returns a pointer to command buffer space if successful. Otherwise
 953 * returns an error pointer. The header pointer returned in @p_header should
 954 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 955 */
 956void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
 957		       size_t size, bool interruptible,
 958		       struct vmw_cmdbuf_header **p_header)
 959{
 960	struct vmw_cmdbuf_header *header;
 961	int ret = 0;
 962
 963	*p_header = NULL;
 964
 965	header = kzalloc(sizeof(*header), GFP_KERNEL);
 966	if (!header)
 967		return ERR_PTR(-ENOMEM);
 968
 969	if (size <= VMW_CMDBUF_INLINE_SIZE)
 970		ret = vmw_cmdbuf_space_inline(man, header, size);
 971	else
 972		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
 973
 974	if (ret) {
 975		kfree(header);
 976		return ERR_PTR(ret);
 977	}
 978
 979	header->man = man;
 980	INIT_LIST_HEAD(&header->list);
 981	header->cb_header->status = SVGA_CB_STATUS_NONE;
 982	*p_header = header;
 983
 984	return header->cmd;
 985}
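
    /*
     * Usage sketch (illustrative only, error handling trimmed; @commands
     * and @size stand for caller-supplied data. Compare
     * vmw_cmdbuf_send_device_command() below for a complete caller):
     *
     *	struct vmw_cmdbuf_header *header;
     *	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
     *
     *	if (IS_ERR(cmd))
     *		return PTR_ERR(cmd);
     *	memcpy(cmd, commands, size);
     *	header->cb_header->length = size;
     */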
 986
 987/**
 988 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 989 * command buffer.
 990 *
 991 * @man: The command buffer manager.
 992 * @size: The requested size of the commands.
 993 * @ctx_id: The context id, if any. Otherwise set to SVGA3D_INVALID_ID.
 994 * @interruptible: Whether to sleep interruptible while waiting for space.
 995 *
 996 * Returns a pointer to command buffer space if successful. Otherwise
 997 * returns an error pointer.
 998 */
 999static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
1000				    size_t size,
1001				    int ctx_id,
1002				    bool interruptible)
1003{
1004	struct vmw_cmdbuf_header *cur;
1005	void *ret;
1006
1007	if (vmw_cmdbuf_cur_lock(man, interruptible))
1008		return ERR_PTR(-ERESTARTSYS);
1009
1010	cur = man->cur;
1011	if (cur && (size + man->cur_pos > cur->size ||
1012		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
1013		     ctx_id != cur->cb_header->dxContext)))
1014		__vmw_cmdbuf_cur_flush(man);
1015
1016	if (!man->cur) {
1017		ret = vmw_cmdbuf_alloc(man,
1018				       max_t(size_t, size, man->default_size),
1019				       interruptible, &man->cur);
1020		if (IS_ERR(ret)) {
1021			vmw_cmdbuf_cur_unlock(man);
1022			return ret;
1023		}
1024
1025		cur = man->cur;
1026	}
1027
1028	if (ctx_id != SVGA3D_INVALID_ID) {
1029		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
1030		cur->cb_header->dxContext = ctx_id;
1031	}
1032
1033	cur->reserved = size;
1034
1035	return (void *) (man->cur->cmd + man->cur_pos);
1036}
1037
1038/**
1039 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
1040 *
1041 * @man: The command buffer manager.
1042 * @size: The size of the commands actually written.
1043 * @flush: Whether to flush the command buffer immediately.
1044 */
1045static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
1046				  size_t size, bool flush)
1047{
1048	struct vmw_cmdbuf_header *cur = man->cur;
1049
1050	lockdep_assert_held_once(&man->cur_mutex);
1051
1052	WARN_ON(size > cur->reserved);
1053	man->cur_pos += size;
1054	if (!size)
1055		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
1056	if (flush)
1057		__vmw_cmdbuf_cur_flush(man);
1058	vmw_cmdbuf_cur_unlock(man);
1059}
1060
1061/**
1062 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
1063 *
1064 * @man: The command buffer manager.
1065 * @size: The requested size of the commands.
1066 * @ctx_id: The context id, if any. Otherwise set to SVGA3D_INVALID_ID.
1067 * @interruptible: Whether to sleep interruptible while waiting for space.
1068 * @header: Header of the command buffer. NULL if the current command buffer
1069 * should be used.
1070 *
1071 * Returns a pointer to command buffer space if successful. Otherwise
1072 * returns an error pointer.
1073 */
1074void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
1075			 int ctx_id, bool interruptible,
1076			 struct vmw_cmdbuf_header *header)
1077{
1078	if (!header)
1079		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
1080
1081	if (size > header->size)
1082		return ERR_PTR(-EINVAL);
1083
1084	if (ctx_id != SVGA3D_INVALID_ID) {
1085		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
1086		header->cb_header->dxContext = ctx_id;
1087	}
1088
1089	header->reserved = size;
1090	return header->cmd;
1091}
1092
1093/**
1094 * vmw_cmdbuf_commit - Commit commands in a command buffer.
1095 *
1096 * @man: The command buffer manager.
1097 * @size: The size of the commands actually written.
1098 * @header: Header of the command buffer. NULL if the current command buffer
1099 * should be used.
1100 * @flush: Whether to flush the command buffer immediately.
1101 */
1102void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1103		       struct vmw_cmdbuf_header *header, bool flush)
1104{
1105	if (!header) {
1106		vmw_cmdbuf_commit_cur(man, size, flush);
1107		return;
1108	}
1109
1110	(void) vmw_cmdbuf_cur_lock(man, false);
1111	__vmw_cmdbuf_cur_flush(man);
1112	WARN_ON(size > header->reserved);
1113	man->cur = header;
1114	man->cur_pos = size;
1115	if (!size)
1116		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
1117	if (flush)
1118		__vmw_cmdbuf_cur_flush(man);
1119	vmw_cmdbuf_cur_unlock(man);
1120}
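
    /*
     * Reserve/commit sketch (illustrative; @size and @ctx_id are assumed
     * to come from the caller). Passing a NULL header uses the manager's
     * current buffer for small incremental kernel submissions:
     *
     *	void *cmd = vmw_cmdbuf_reserve(man, size, ctx_id, true, NULL);
     *
     *	if (IS_ERR(cmd))
     *		return PTR_ERR(cmd);
     *	... write at most @size bytes of commands to cmd ...
     *	vmw_cmdbuf_commit(man, size, NULL, false);
     */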
1121
1123/**
1124 * vmw_cmdbuf_send_device_command - Send a command through the device context.
1125 *
1126 * @man: The command buffer manager.
1127 * @command: Pointer to the command to send.
1128 * @size: Size of the command.
1129 *
1130 * Synchronously sends a device context command.
1131 */
1132static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
1133					  const void *command,
1134					  size_t size)
1135{
1136	struct vmw_cmdbuf_header *header;
1137	int status;
1138	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
1139
1140	if (IS_ERR(cmd))
1141		return PTR_ERR(cmd);
1142
1143	memcpy(cmd, command, size);
1144	header->cb_header->length = size;
1145	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
1146	spin_lock(&man->lock);
1147	status = vmw_cmdbuf_header_submit(header);
1148	spin_unlock(&man->lock);
1149	vmw_cmdbuf_header_free(header);
1150
1151	if (status != SVGA_CB_STATUS_COMPLETED) {
1152		DRM_ERROR("Device context command failed with status %d\n",
1153			  status);
1154		return -EINVAL;
1155	}
1156
1157	return 0;
1158}
1159
1160/**
1161 * vmw_cmdbuf_preempt - Send a preempt command through the device
1162 * context.
1163 *
1164 * @man: The command buffer manager.
1165 * @context: Device context to pass command through.
1166 *
1167 * Synchronously sends a preempt command.
1168 */
1169static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
1170{
1171	struct {
1172		uint32 id;
1173		SVGADCCmdPreempt body;
1174	} __packed cmd;
1175
1176	cmd.id = SVGA_DC_CMD_PREEMPT;
1177	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
1178	cmd.body.ignoreIDZero = 0;
1179
1180	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1181}
1182
1184/**
1185 * vmw_cmdbuf_startstop - Send a start / stop command through the device
1186 * context.
1187 *
1188 * @man: The command buffer manager.
1189 * @context: Device context to start/stop.
1190 * @enable: Whether to enable or disable the context.
1191 *
1192 * Synchronously sends a device start / stop context command.
1193 */
1194static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
1195				bool enable)
1196{
1197	struct {
1198		uint32 id;
1199		SVGADCCmdStartStop body;
1200	} __packed cmd;
1201
1202	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
1203	cmd.body.enable = (enable) ? 1 : 0;
1204	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
1205
1206	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1207}
1208
1209/**
1210 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
1211 *
1212 * @man: The command buffer manager.
1213 * @size: The size of the main space pool.
1214 *
1215 * Set the size and allocate the main command buffer space pool.
1216 * If successful, this enables large command submissions.
1217 * Note that this function requires that rudimentary command
1218 * submission is already available and that the MOB memory manager is alive.
1219 * Returns 0 on success. Negative error code on failure.
1220 */
1221int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
1222{
1223	struct vmw_private *dev_priv = man->dev_priv;
1224	bool dummy;
1225	int ret;
1226
1227	if (man->has_pool)
1228		return -EINVAL;
1229
1230	/* First, try to allocate a huge chunk of DMA memory */
1231	size = PAGE_ALIGN(size);
1232	man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
1233				      &man->handle, GFP_KERNEL);
1234	if (man->map) {
1235		man->using_mob = false;
1236	} else {
1237		/*
1238		 * DMA memory failed. If we can have command buffers in a
1239		 * MOB, try to use that instead. Note that this will
1240		 * actually call into the already enabled manager, when
1241		 * binding the MOB.
1242		 */
1243		if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
1244		    !dev_priv->has_mob)
1245			return -ENOMEM;
1246
1247		ret = vmw_bo_create_kernel(dev_priv, size,
1248					   &vmw_mob_placement,
1249					   &man->cmd_space);
1250		if (ret)
1251			return ret;
1252
1253		man->using_mob = true;
1254		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
1255				  &man->map_obj);
1256		if (ret)
1257			goto out_no_map;
1258
1259		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
1260	}
1261
1262	man->size = size;
1263	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
1264
1265	man->has_pool = true;
1266
1267	/*
1268	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
1269	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
1270	 * needs to wait for space and we block on further command
1271	 * submissions to be able to free up space.
1272	 */
1273	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1274	drm_info(&dev_priv->drm,
1275		 "Using command buffers with %s pool.\n",
1276		 (man->using_mob) ? "MOB" : "DMA");
1277
1278	return 0;
1279
1280out_no_map:
1281	if (man->using_mob) {
1282		ttm_bo_put(man->cmd_space);
1283		man->cmd_space = NULL;
1284	}
1285
1286	return ret;
1287}
1288
1289/**
1290 * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
1291 * inline command buffer submissions only.
1292 *
1293 * @dev_priv: Pointer to device private structure.
1294 *
1295 * Returns a pointer to a command buffer manager on success, or an error
1296 * pointer on failure. The command buffer manager will be enabled for
1297 * submissions of size VMW_CMDBUF_INLINE_SIZE only.
1298 */
1299struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
1300{
1301	struct vmw_cmdbuf_man *man;
1302	struct vmw_cmdbuf_context *ctx;
1303	unsigned int i;
1304	int ret;
1305
1306	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
1307		return ERR_PTR(-ENOSYS);
1308
1309	man = kzalloc(sizeof(*man), GFP_KERNEL);
1310	if (!man)
1311		return ERR_PTR(-ENOMEM);
1312
1313	man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
1314		2 : 1;
1315	man->headers = dma_pool_create("vmwgfx cmdbuf",
1316				       dev_priv->drm.dev,
1317				       sizeof(SVGACBHeader),
1318				       64, PAGE_SIZE);
1319	if (!man->headers) {
1320		ret = -ENOMEM;
1321		goto out_no_pool;
1322	}
1323
1324	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
1325					dev_priv->drm.dev,
1326					sizeof(struct vmw_cmdbuf_dheader),
1327					64, PAGE_SIZE);
1328	if (!man->dheaders) {
1329		ret = -ENOMEM;
1330		goto out_no_dpool;
1331	}
1332
1333	for_each_cmdbuf_ctx(man, i, ctx)
1334		vmw_cmdbuf_ctx_init(ctx);
1335
1336	INIT_LIST_HEAD(&man->error);
1337	spin_lock_init(&man->lock);
1338	mutex_init(&man->cur_mutex);
1339	mutex_init(&man->space_mutex);
1340	mutex_init(&man->error_mutex);
1341	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1342	init_waitqueue_head(&man->alloc_queue);
1343	init_waitqueue_head(&man->idle_queue);
1344	man->dev_priv = dev_priv;
1345	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
1346	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
1347	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
1348			       &dev_priv->error_waiters);
1349	ret = vmw_cmdbuf_startstop(man, 0, true);
1350	if (ret) {
1351		DRM_ERROR("Failed starting command buffer contexts\n");
1352		vmw_cmdbuf_man_destroy(man);
1353		return ERR_PTR(ret);
1354	}
1355
1356	return man;
1357
1358out_no_dpool:
1359	dma_pool_destroy(man->headers);
1360out_no_pool:
1361	kfree(man);
1362
1363	return ERR_PTR(ret);
1364}
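
    /*
     * Lifecycle sketch (illustrative; @pool_size is a caller-chosen
     * value). Inline submissions work right after creation; the main
     * pool enables large ones once MOB memory management is up:
     *
     *	struct vmw_cmdbuf_man *man = vmw_cmdbuf_man_create(dev_priv);
     *
     *	if (IS_ERR(man))
     *		return PTR_ERR(man);
     *	ret = vmw_cmdbuf_set_pool_size(man, pool_size);
     *	...
     *	vmw_cmdbuf_remove_pool(man);	(before MOB teardown)
     *	vmw_cmdbuf_man_destroy(man);
     */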
1365
1366/**
1367 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
1368 *
1369 * @man: Pointer to a command buffer manager.
1370 *
1371 * This function removes the main buffer space pool, and should be called
1372 * before MOB memory management is removed. When this function has been called,
1373 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
1374 * less are allowed, and the default size of the command buffer for small kernel
1375 * submissions is also set to this size.
1376 */
1377void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
1378{
1379	if (!man->has_pool)
1380		return;
1381
1382	man->has_pool = false;
1383	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1384	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
1385	if (man->using_mob) {
1386		(void) ttm_bo_kunmap(&man->map_obj);
1387		ttm_bo_put(man->cmd_space);
1388		man->cmd_space = NULL;
1389	} else {
1390		dma_free_coherent(man->dev_priv->drm.dev,
1391				  man->size, man->map, man->handle);
1392	}
1393}
1394
1395/**
1396 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
1397 *
1398 * @man: Pointer to a command buffer manager.
1399 *
1400 * This function idles and then destroys a command buffer manager.
1401 */
1402void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
1403{
1404	WARN_ON_ONCE(man->has_pool);
1405	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
1406
1407	if (vmw_cmdbuf_startstop(man, 0, false))
1408		DRM_ERROR("Failed stopping command buffer contexts.\n");
1409
1410	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
1411				  &man->dev_priv->error_waiters);
1412	(void) cancel_work_sync(&man->work);
1413	dma_pool_destroy(man->dheaders);
1414	dma_pool_destroy(man->headers);
1415	mutex_destroy(&man->cur_mutex);
1416	mutex_destroy(&man->space_mutex);
1417	mutex_destroy(&man->error_mutex);
1418	kfree(man);
1419}