v4.6
 
   1/**************************************************************************
   2 *
   3 * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA
   4 * All Rights Reserved.
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include "vmwgfx_drv.h"
  29#include "ttm/ttm_bo_api.h"
  30
  31/*
  32 * Size of inline command buffers. Try to make sure that a page size is a
  33 * multiple of the DMA pool allocation size.
  34 */
  35#define VMW_CMDBUF_INLINE_ALIGN 64
  36#define VMW_CMDBUF_INLINE_SIZE \
  37	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
  38
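/*
 * Editor's illustrative sketch (not part of the original file): with a
 * 4096-byte page, four 1024-byte DMA pool blocks pack a page exactly, so
 * no inline command buffer straddles a page boundary. Assuming
 * sizeof(SVGACBHeader) <= 64, the inline payload is 1024 - 64 = 960 bytes.
 */
#if 0
static inline void vmw_cmdbuf_inline_size_check(void)
{
	BUILD_BUG_ON(PAGE_SIZE % 1024 != 0);
	BUILD_BUG_ON(ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN) +
		     VMW_CMDBUF_INLINE_SIZE != 1024);
}
#endif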
  39/**
  40 * struct vmw_cmdbuf_context - Command buffer context queues
  41 *
  42 * @submitted: List of command buffers that have been submitted to the
  43 * manager but not yet submitted to hardware.
  44 * @hw_submitted: List of command buffers submitted to hardware.
  45 * @preempted: List of preempted command buffers.
  46 * @num_hw_submitted: Number of buffers currently being processed by hardware
  47 */
  48struct vmw_cmdbuf_context {
  49	struct list_head submitted;
  50	struct list_head hw_submitted;
  51	struct list_head preempted;
  52	unsigned num_hw_submitted;
  53};
  54
  55/**
  56 * struct vmw_cmdbuf_man - Command buffer manager
  57 *
  58 * @cur_mutex: Mutex protecting the command buffer used for incremental small
  59 * kernel command submissions, @cur.
  60 * @space_mutex: Mutex to protect against starvation when we allocate
  61 * main pool buffer space.
  62 * @work: A struct work_struct implementing command buffer error handling.
  63 * Immutable.
  64 * @dev_priv: Pointer to the device private struct. Immutable.
  65 * @ctx: Array of command buffer context queues. The queues and the context
  66 * data are protected by @lock.
  67 * @error: List of command buffers that have caused device errors.
  68 * Protected by @lock.
  69 * @mm: Range manager for the command buffer space. Manager allocations and
  70 * frees are protected by @lock.
  71 * @cmd_space: Buffer object for the command buffer space, unless we were
  72 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
  73 * @map_obj: Mapping state for @cmd_space. Immutable.
  74 * @map: Pointer to command buffer space. May be a mapped buffer object or
  75 * a contiguous coherent DMA memory allocation. Immutable.
  76 * @cur: Command buffer for small kernel command submissions. Protected by
  77 * the @cur_mutex.
  78 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
  79 * @default_size: Default size for the @cur command buffer. Immutable.
  80 * @max_hw_submitted: Max number of in-flight command buffers the device can
  81 * handle. Immutable.
  82 * @lock: Spinlock protecting command submission queues.
  83 * @headers: Pool of DMA memory for device command buffer headers.
  84 * Internal protection.
  85 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
  86 * space for inline data. Internal protection.
  87 * @tasklet: Tasklet struct for irq processing. Immutable.
  88 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
  89 * space.
  90 * @idle_queue: Wait queue for processes waiting for command buffer idle.
  91 * @irq_on: Whether the process function has requested irq to be turned on.
  92 * Protected by @lock.
  93 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
  94 * allocation. Immutable.
  95 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
  96 * Typically this is false only during bootstrap.
  97 * @handle: DMA address handle for the command buffer space if @using_mob is
  98 * false. Immutable.
  99 * @size: The size of the command buffer space. Immutable.
 100 */
 101struct vmw_cmdbuf_man {
 102	struct mutex cur_mutex;
 103	struct mutex space_mutex;
 104	struct work_struct work;
 105	struct vmw_private *dev_priv;
 106	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
 107	struct list_head error;
 108	struct drm_mm mm;
 109	struct ttm_buffer_object *cmd_space;
 110	struct ttm_bo_kmap_obj map_obj;
 111	u8 *map;
 112	struct vmw_cmdbuf_header *cur;
 113	size_t cur_pos;
 114	size_t default_size;
 115	unsigned max_hw_submitted;
 116	spinlock_t lock;
 117	struct dma_pool *headers;
 118	struct dma_pool *dheaders;
 119	struct tasklet_struct tasklet;
 120	wait_queue_head_t alloc_queue;
 121	wait_queue_head_t idle_queue;
 122	bool irq_on;
 123	bool using_mob;
 124	bool has_pool;
 125	dma_addr_t handle;
 126	size_t size;
 127};
 128
 129/**
 130 * struct vmw_cmdbuf_header - Command buffer metadata
 131 *
 132 * @man: The command buffer manager.
 133 * @cb_header: Device command buffer header, allocated from a DMA pool.
 134 * @cb_context: The device command buffer context.
 135 * @list: List head for attaching to the manager lists.
 136 * @node: The range manager node.
 137 * @handle: The DMA address of @cb_header. Handed to the device on command
 138 * buffer submission.
 139 * @cmd: Pointer to the command buffer space of this buffer.
 140 * @size: Size of the command buffer space of this buffer.
 141 * @reserved: Reserved space of this buffer.
 142 * @inline_space: Whether inline command buffer space is used.
 143 */
 144struct vmw_cmdbuf_header {
 145	struct vmw_cmdbuf_man *man;
 146	SVGACBHeader *cb_header;
 147	SVGACBContext cb_context;
 148	struct list_head list;
 149	struct drm_mm_node node;
 150	dma_addr_t handle;
 151	u8 *cmd;
 152	size_t size;
 153	size_t reserved;
 154	bool inline_space;
 155};
 156
 157/**
 158 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 159 * command buffer space.
 160 *
 161 * @cb_header: Device command buffer header.
 162 * @cmd: Inline command buffer space.
 163 */
 164struct vmw_cmdbuf_dheader {
 165	SVGACBHeader cb_header;
 166	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
 167};
 168
 169/**
 170 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 171 *
 172 * @page_size: Size of requested command buffer space in pages.
 173 * @node: Pointer to the range manager node.
 174 * @done: True if this allocation has succeeded.
 175 */
 176struct vmw_cmdbuf_alloc_info {
 177	size_t page_size;
 178	struct drm_mm_node *node;
 179	bool done;
 180};
 181
 182/* Loop over each context in the command buffer manager. */
 183#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
 184	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
 185	     ++(_i), ++(_ctx))
 186
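/*
 * Editor's usage sketch: walking every context of a manager, as the init
 * and idle paths below do. Assumes a valid struct vmw_cmdbuf_man *man.
 */
#if 0
struct vmw_cmdbuf_context *ctx;
int i;

for_each_cmdbuf_ctx(man, i, ctx)
	vmw_cmdbuf_ctx_init(ctx);
#endif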
 187static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, bool enable);
 188
 189
 190/**
 191 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 192 *
 193 * @man: The command buffer manager.
 194 * @interruptible: Whether to wait interruptible when locking.
 195 */
 196static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
 197{
 198	if (interruptible) {
 199		if (mutex_lock_interruptible(&man->cur_mutex))
 200			return -ERESTARTSYS;
 201	} else {
 202		mutex_lock(&man->cur_mutex);
 203	}
 204
 205	return 0;
 206}
 207
 208/**
 209 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 210 *
 211 * @man: The command buffer manager.
 212 */
 213static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
 214{
 215	mutex_unlock(&man->cur_mutex);
 216}
 217
 218/**
 219 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 220 * been used for the device context with inline command buffers.
 221 * Need not be called locked.
 222 *
 223 * @header: Pointer to the header to free.
 224 */
 225static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
 226{
 227	struct vmw_cmdbuf_dheader *dheader;
 228
 229	if (WARN_ON_ONCE(!header->inline_space))
 230		return;
 231
 232	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
 233			       cb_header);
 234	dma_pool_free(header->man->dheaders, dheader, header->handle);
 235	kfree(header);
 236}
 237
 238/**
 239 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 240 * associated structures.
 241 *
 242 * @header: Pointer to the header to free.
 243 *
 244 * For internal use. Must be called with man::lock held.
 245 */
 246static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 247{
 248	struct vmw_cmdbuf_man *man = header->man;
 249
 250	lockdep_assert_held_once(&man->lock);
 251
 252	if (header->inline_space) {
 253		vmw_cmdbuf_header_inline_free(header);
 254		return;
 255	}
 256
 257	drm_mm_remove_node(&header->node);
 258	wake_up_all(&man->alloc_queue);
 259	if (header->cb_header)
 260		dma_pool_free(man->headers, header->cb_header,
 261			      header->handle);
 262	kfree(header);
 263}
 264
 265/**
 266 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 267 * associated structures.
 268 *
 269 * @header: Pointer to the header to free.
 270 */
 271void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 272{
 273	struct vmw_cmdbuf_man *man = header->man;
 274
 275	/* Avoid locking if inline_space */
 276	if (header->inline_space) {
 277		vmw_cmdbuf_header_inline_free(header);
 278		return;
 279	}
 280	spin_lock_bh(&man->lock);
 281	__vmw_cmdbuf_header_free(header);
 282	spin_unlock_bh(&man->lock);
 283}
 284
 285
 286/**
 287 * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
 288 *
 289 * @header: The header of the buffer to submit.
 290 */
 291static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
 292{
 293	struct vmw_cmdbuf_man *man = header->man;
 294	u32 val;
 295
 296	if (sizeof(header->handle) > 4)
 297		val = (header->handle >> 32);
 298	else
 299		val = 0;
 300	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
 301
 302	val = (header->handle & 0xFFFFFFFFULL);
 303	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
 304	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
 305
 306	return header->cb_header->status;
 307}
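/*
 * Editor's illustrative sketch: how the 64-bit DMA handle is split across
 * the two 32-bit doorbell registers above. The header allocation is
 * 64-byte aligned, so the low bits of the handle are zero and can carry
 * the context id selected by SVGA_CB_CONTEXT_MASK.
 */
#if 0
static void example_split_handle(dma_addr_t handle, SVGACBContext ctx,
				 u32 *high, u32 *low)
{
	*high = (sizeof(handle) > 4) ? (handle >> 32) : 0;
	*low = (handle & 0xFFFFFFFFULL) | (ctx & SVGA_CB_CONTEXT_MASK);
}
#endif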
 308
 309/**
 310 * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
 311 *
 312 * @ctx: The command buffer context to initialize
 313 */
 314static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
 315{
 316	INIT_LIST_HEAD(&ctx->hw_submitted);
 317	INIT_LIST_HEAD(&ctx->submitted);
 318	INIT_LIST_HEAD(&ctx->preempted);
 319	ctx->num_hw_submitted = 0;
 320}
 321
 322/**
 323 * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
 324 * context.
 325 *
 326 * @man: The command buffer manager.
 327 * @ctx: The command buffer context.
 328 *
 329 * Submits command buffers to hardware until there are no more command
 330 * buffers to submit or the hardware can't handle more command buffers.
 331 */
 332static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
 333				  struct vmw_cmdbuf_context *ctx)
 334{
 335	while (ctx->num_hw_submitted < man->max_hw_submitted &&
 336	      !list_empty(&ctx->submitted)) {
 337		struct vmw_cmdbuf_header *entry;
 338		SVGACBStatus status;
 339
 340		entry = list_first_entry(&ctx->submitted,
 341					 struct vmw_cmdbuf_header,
 342					 list);
 343
 344		status = vmw_cmdbuf_header_submit(entry);
 345
 346		/* This should never happen */
 347		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
 348			entry->cb_header->status = SVGA_CB_STATUS_NONE;
 349			break;
 350		}
 351
 352		list_del(&entry->list);
 353		list_add_tail(&entry->list, &ctx->hw_submitted);
 354		ctx->num_hw_submitted++;
 355	}
 356
 357}
 358
 359/**
 360 * vmw_cmdbuf_ctx_process: Process a command buffer context.
 361 *
 362 * @man: The command buffer manager.
 363 * @ctx: The command buffer context.
 364 *
 365 * Submit command buffers to hardware if possible, and process finished
 366 * buffers. Typically freeing them, but on preemption or error take
 367 * appropriate action. Wake up waiters if appropriate.
 368 */
 369static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
 370				   struct vmw_cmdbuf_context *ctx,
 371				   int *notempty)
 372{
 373	struct vmw_cmdbuf_header *entry, *next;
 374
 375	vmw_cmdbuf_ctx_submit(man, ctx);
 376
 377	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
 378		SVGACBStatus status = entry->cb_header->status;
 379
 380		if (status == SVGA_CB_STATUS_NONE)
 381			break;
 382
 383		list_del(&entry->list);
 384		wake_up_all(&man->idle_queue);
 385		ctx->num_hw_submitted--;
 386		switch (status) {
 387		case SVGA_CB_STATUS_COMPLETED:
 388			__vmw_cmdbuf_header_free(entry);
 389			break;
 390		case SVGA_CB_STATUS_COMMAND_ERROR:
 391		case SVGA_CB_STATUS_CB_HEADER_ERROR:
 392			list_add_tail(&entry->list, &man->error);
 393			schedule_work(&man->work);
 394			break;
 395		case SVGA_CB_STATUS_PREEMPTED:
 396			list_add(&entry->list, &ctx->preempted);
 397			break;
 398		default:
 399			WARN_ONCE(true, "Undefined command buffer status.\n");
 400			__vmw_cmdbuf_header_free(entry);
 401			break;
 402		}
 403	}
 404
 405	vmw_cmdbuf_ctx_submit(man, ctx);
 406	if (!list_empty(&ctx->submitted))
 407		(*notempty)++;
 408}
 409
 410/**
 411 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 412 * switch on and off irqs as appropriate.
 413 *
 414 * @man: The command buffer manager.
 415 *
 416 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 417 * command buffers left that are not submitted to hardware, make sure
 418 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 419 */
 420static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
 421{
 422	int notempty;
 423	struct vmw_cmdbuf_context *ctx;
 424	int i;
 425
 426retry:
 427	notempty = 0;
 428	for_each_cmdbuf_ctx(man, i, ctx)
 429		vmw_cmdbuf_ctx_process(man, ctx, &notempty);
 430
 431	if (man->irq_on && !notempty) {
 432		vmw_generic_waiter_remove(man->dev_priv,
 433					  SVGA_IRQFLAG_COMMAND_BUFFER,
 434					  &man->dev_priv->cmdbuf_waiters);
 435		man->irq_on = false;
 436	} else if (!man->irq_on && notempty) {
 437		vmw_generic_waiter_add(man->dev_priv,
 438				       SVGA_IRQFLAG_COMMAND_BUFFER,
 439				       &man->dev_priv->cmdbuf_waiters);
 440		man->irq_on = true;
 441
 442		/* Rerun in case we just missed an irq. */
 443		goto retry;
 444	}
 445}
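/*
 * Editor's note on the retry above: after vmw_generic_waiter_add() turns
 * the interrupt source on, the processing loop is run once more so that
 * an irq which fired between the last poll and the enable is not lost.
 */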
 446
 447/**
 448 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 449 * command buffer context
 450 *
 451 * @man: The command buffer manager.
 452 * @header: The header of the buffer to submit.
 453 * @cb_context: The command buffer context to use.
 454 *
 455 * This function adds @header to the "submitted" queue of the command
 456 * buffer context identified by @cb_context. It then calls the command buffer
 457 * manager processing to potentially submit the buffer to hardware.
 458 * @man->lock needs to be held when calling this function.
 459 */
 460static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
 461			       struct vmw_cmdbuf_header *header,
 462			       SVGACBContext cb_context)
 463{
 464	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
 465		header->cb_header->dxContext = 0;
 466	header->cb_context = cb_context;
 467	list_add_tail(&header->list, &man->ctx[cb_context].submitted);
 468
 469	vmw_cmdbuf_man_process(man);
 470}
 471
 472/**
 473 * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
 474 * handler implemented as a tasklet.
 475 *
 476 * @data: Tasklet closure. A pointer to the command buffer manager cast to
 477 * an unsigned long.
 478 *
 479 * The bottom half (tasklet) of the interrupt handler simply calls into the
 480 * command buffer processor to free finished buffers and submit any
 481 * queued buffers to hardware.
 482 */
 483static void vmw_cmdbuf_man_tasklet(unsigned long data)
 484{
 485	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
 486
 487	spin_lock(&man->lock);
 488	vmw_cmdbuf_man_process(man);
 489	spin_unlock(&man->lock);
 490}
 491
 492/**
 493 * vmw_cmdbuf_work_func - The deferred work function that handles
 494 * command buffer errors.
 495 *
 496 * @work: The work func closure argument.
 497 *
 498 * Restarting the command buffer context after an error requires process
 499 * context, so it is deferred to this work function.
 500 */
 501static void vmw_cmdbuf_work_func(struct work_struct *work)
 502{
 503	struct vmw_cmdbuf_man *man =
 504		container_of(work, struct vmw_cmdbuf_man, work);
 505	struct vmw_cmdbuf_header *entry, *next;
 506	uint32_t dummy;
 507	bool restart = false;
 508
 509	spin_lock_bh(&man->lock);
 510	list_for_each_entry_safe(entry, next, &man->error, list) {
 511		restart = true;
 512		DRM_ERROR("Command buffer error.\n");
 513
 514		list_del(&entry->list);
 515		__vmw_cmdbuf_header_free(entry);
 516		wake_up_all(&man->idle_queue);
 517	}
 518	spin_unlock_bh(&man->lock);
 519
 520	if (restart && vmw_cmdbuf_startstop(man, true))
 521		DRM_ERROR("Failed restarting command buffer context 0.\n");
 522
 523	/* Send a new fence in case one was removed */
 524	vmw_fifo_send_fence(man->dev_priv, &dummy);
 525}
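/*
 * Editor's note: the irq path only moves failed buffers onto man->error
 * and calls schedule_work(); the parts that may sleep (restarting the
 * context, sending a fence) run here in process context instead.
 */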
 526
 527/**
 528 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 529 *
 530 * @man: The command buffer manager.
 531 * @check_preempted: Check also the preempted queue for pending command buffers.
 532 *
 533 */
 534static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
 535				bool check_preempted)
 536{
 537	struct vmw_cmdbuf_context *ctx;
 538	bool idle = false;
 539	int i;
 540
 541	spin_lock_bh(&man->lock);
 542	vmw_cmdbuf_man_process(man);
 543	for_each_cmdbuf_ctx(man, i, ctx) {
 544		if (!list_empty(&ctx->submitted) ||
 545		    !list_empty(&ctx->hw_submitted) ||
 546		    (check_preempted && !list_empty(&ctx->preempted)))
 547			goto out_unlock;
 548	}
 549
 550	idle = list_empty(&man->error);
 551
 552out_unlock:
 553	spin_unlock_bh(&man->lock);
 554
 555	return idle;
 556}
 557
 558/**
 559 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 560 * command submissions
 561 *
 562 * @man: The command buffer manager.
 563 *
 564 * Flushes the current command buffer without allocating a new one. A new one
 565 * is automatically allocated when needed. Call with @man->cur_mutex held.
 566 */
 567static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
 568{
 569	struct vmw_cmdbuf_header *cur = man->cur;
 570
 571	WARN_ON(!mutex_is_locked(&man->cur_mutex));
 572
 573	if (!cur)
 574		return;
 575
 576	spin_lock_bh(&man->lock);
 577	if (man->cur_pos == 0) {
 578		__vmw_cmdbuf_header_free(cur);
 579		goto out_unlock;
 580	}
 581
 582	man->cur->cb_header->length = man->cur_pos;
 583	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
 584out_unlock:
 585	spin_unlock_bh(&man->lock);
 586	man->cur = NULL;
 587	man->cur_pos = 0;
 588}
 589
 590/**
 591 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 592 * command submissions
 593 *
 594 * @man: The command buffer manager.
 595 * @interruptible: Whether to sleep interruptible when sleeping.
 596 *
 597 * Flushes the current command buffer without allocating a new one. A new one
 598 * is automatically allocated when needed.
 599 */
 600int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
 601			 bool interruptible)
 602{
 603	int ret = vmw_cmdbuf_cur_lock(man, interruptible);
 604
 605	if (ret)
 606		return ret;
 607
 608	__vmw_cmdbuf_cur_flush(man);
 609	vmw_cmdbuf_cur_unlock(man);
 610
 611	return 0;
 612}
 613
 614/**
 615 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 616 *
 617 * @man: The command buffer manager.
 618 * @interruptible: Sleep interruptible while waiting.
 619 * @timeout: Time out after this many ticks.
 620 *
 621 * Wait until the command buffer manager has processed all command buffers,
 622 * or until a timeout occurs. If a timeout occurs, the function will return
 623 * -EBUSY.
 624 */
 625int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
 626		    unsigned long timeout)
 627{
 628	int ret;
 629
 630	ret = vmw_cmdbuf_cur_flush(man, interruptible);
 631	vmw_generic_waiter_add(man->dev_priv,
 632			       SVGA_IRQFLAG_COMMAND_BUFFER,
 633			       &man->dev_priv->cmdbuf_waiters);
 634
 635	if (interruptible) {
 636		ret = wait_event_interruptible_timeout
 637			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
 638			 timeout);
 639	} else {
 640		ret = wait_event_timeout
 641			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
 642			 timeout);
 643	}
 644	vmw_generic_waiter_remove(man->dev_priv,
 645				  SVGA_IRQFLAG_COMMAND_BUFFER,
 646				  &man->dev_priv->cmdbuf_waiters);
 647	if (ret == 0) {
 648		if (!vmw_cmdbuf_man_idle(man, true))
 649			ret = -EBUSY;
 650		else
 651			ret = 0;
 652	}
 653	if (ret > 0)
 654		ret = 0;
 655
 656	return ret;
 657}
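/*
 * Editor's note: wait_event_timeout() returns 0 on timeout and the
 * remaining ticks (> 0) on success, so the tail above maps "timed out
 * but actually idle" to 0 and "timed out and still busy" to -EBUSY.
 */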
 658
 659/**
 660 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 661 *
 662 * @man: The command buffer manager.
 663 * @info: Allocation info. Will hold the size on entry and allocated mm node
 664 * on successful return.
 665 *
 666 * Try to allocate buffer space from the main pool. Returns true if succeeded.
 667 * On failure it returns false, leaving the caller to wait and retry.
 668 */
 669static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
 670				 struct vmw_cmdbuf_alloc_info *info)
 671{
 672	int ret;
 673
 674	if (info->done)
 675		return true;
 676
 677	memset(info->node, 0, sizeof(*info->node));
 678	spin_lock_bh(&man->lock);
 679	ret = drm_mm_insert_node_generic(&man->mm, info->node, info->page_size,
 680					 0, 0,
 681					 DRM_MM_SEARCH_DEFAULT,
 682					 DRM_MM_CREATE_DEFAULT);
 683	if (ret) {
 684		vmw_cmdbuf_man_process(man);
 685		ret = drm_mm_insert_node_generic(&man->mm, info->node,
 686						 info->page_size, 0, 0,
 687						 DRM_MM_SEARCH_DEFAULT,
 688						 DRM_MM_CREATE_DEFAULT);
 689	}
 690
 691	spin_unlock_bh(&man->lock);
 692	info->done = !ret;
 693
 694	return info->done;
 695}
 696
 697/**
 698 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 699 *
 700 * @man: The command buffer manager.
 701 * @node: Pointer to pre-allocated range-manager node.
 702 * @size: The size of the allocation.
 703 * @interruptible: Whether to sleep interruptible while waiting for space.
 704 *
 705 * This function allocates buffer space from the main pool, and if there is
 706 * no space available ATM, it turns on IRQ handling and sleeps waiting for it to
 707 * become available.
 708 */
 709static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
 710				  struct drm_mm_node *node,
 711				  size_t size,
 712				  bool interruptible)
 713{
 714	struct vmw_cmdbuf_alloc_info info;
 715
 716	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
 717	info.node = node;
 718	info.done = false;
 719
 720	/*
 721	 * To prevent starvation of large requests, only one allocating call
 722	 * at a time waiting for space.
 723	 */
 724	if (interruptible) {
 725		if (mutex_lock_interruptible(&man->space_mutex))
 726			return -ERESTARTSYS;
 727	} else {
 728		mutex_lock(&man->space_mutex);
 729	}
 730
 731	/* Try to allocate space without waiting. */
 732	if (vmw_cmdbuf_try_alloc(man, &info))
 733		goto out_unlock;
 734
 735	vmw_generic_waiter_add(man->dev_priv,
 736			       SVGA_IRQFLAG_COMMAND_BUFFER,
 737			       &man->dev_priv->cmdbuf_waiters);
 738
 739	if (interruptible) {
 740		int ret;
 741
 742		ret = wait_event_interruptible
 743			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
 744		if (ret) {
 745			vmw_generic_waiter_remove
 746				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
 747				 &man->dev_priv->cmdbuf_waiters);
 748			mutex_unlock(&man->space_mutex);
 749			return ret;
 750		}
 751	} else {
 752		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
 753	}
 754	vmw_generic_waiter_remove(man->dev_priv,
 755				  SVGA_IRQFLAG_COMMAND_BUFFER,
 756				  &man->dev_priv->cmdbuf_waiters);
 757
 758out_unlock:
 759	mutex_unlock(&man->space_mutex);
 760
 761	return 0;
 762}
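/*
 * Editor's note: holding space_mutex across the wait means at most one
 * task sleeps in wait_event() for pool space, so a large request cannot
 * be starved by a stream of small ones winning each freed range first.
 */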
 763
 764/**
 765 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 766 * space from the main pool.
 767 *
 768 * @man: The command buffer manager.
 769 * @header: Pointer to the header to set up.
 770 * @size: The requested size of the buffer space.
 771 * @interruptible: Whether to sleep interruptible while waiting for space.
 772 */
 773static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
 774				 struct vmw_cmdbuf_header *header,
 775				 size_t size,
 776				 bool interruptible)
 777{
 778	SVGACBHeader *cb_hdr;
 779	size_t offset;
 780	int ret;
 781
 782	if (!man->has_pool)
 783		return -ENOMEM;
 784
 785	ret = vmw_cmdbuf_alloc_space(man, &header->node,  size, interruptible);
 786
 787	if (ret)
 788		return ret;
 789
 790	header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL,
 791					   &header->handle);
 792	if (!header->cb_header) {
 793		ret = -ENOMEM;
 794		goto out_no_cb_header;
 795	}
 796
 797	header->size = header->node.size << PAGE_SHIFT;
 798	cb_hdr = header->cb_header;
 799	offset = header->node.start << PAGE_SHIFT;
 800	header->cmd = man->map + offset;
 801	memset(cb_hdr, 0, sizeof(*cb_hdr));
 802	if (man->using_mob) {
 803		cb_hdr->flags = SVGA_CB_FLAG_MOB;
 804		cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
 805		cb_hdr->ptr.mob.mobOffset = offset;
 806	} else {
 807		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
 808	}
 809
 810	return 0;
 811
 812out_no_cb_header:
 813	spin_lock_bh(&man->lock);
 814	drm_mm_remove_node(&header->node);
 815	spin_unlock_bh(&man->lock);
 816
 817	return ret;
 818}
 819
 820/**
 821 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 822 * inline command buffer space.
 823 *
 824 * @man: The command buffer manager.
 825 * @header: Pointer to the header to set up.
 826 * @size: The requested size of the buffer space.
 827 */
 828static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
 829				   struct vmw_cmdbuf_header *header,
 830				   int size)
 831{
 832	struct vmw_cmdbuf_dheader *dheader;
 833	SVGACBHeader *cb_hdr;
 834
 835	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
 836		return -ENOMEM;
 837
 838	dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL,
 839				 &header->handle);
 840	if (!dheader)
 841		return -ENOMEM;
 842
 843	header->inline_space = true;
 844	header->size = VMW_CMDBUF_INLINE_SIZE;
 845	cb_hdr = &dheader->cb_header;
 846	header->cb_header = cb_hdr;
 847	header->cmd = dheader->cmd;
 848	memset(dheader, 0, sizeof(*dheader));
 849	cb_hdr->status = SVGA_CB_STATUS_NONE;
 850	cb_hdr->flags = SVGA_CB_FLAG_NONE;
 851	cb_hdr->ptr.pa = (u64)header->handle +
 852		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
 853
 854	return 0;
 855}
 856
 857/**
 858 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 859 * command buffer space.
 860 *
 861 * @man: The command buffer manager.
 862 * @size: The requested size of the buffer space.
 863 * @interruptible: Whether to sleep interruptible while waiting for space.
 864 * @p_header: points to a header pointer to populate on successful return.
 865 *
 866 * Returns a pointer to command buffer space if successful. Otherwise
 867 * returns an error pointer. The header pointer returned in @p_header should
 868 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 869 */
 870void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
 871		       size_t size, bool interruptible,
 872		       struct vmw_cmdbuf_header **p_header)
 873{
 874	struct vmw_cmdbuf_header *header;
 875	int ret = 0;
 876
 877	*p_header = NULL;
 878
 879	header = kzalloc(sizeof(*header), GFP_KERNEL);
 880	if (!header)
 881		return ERR_PTR(-ENOMEM);
 882
 883	if (size <= VMW_CMDBUF_INLINE_SIZE)
 884		ret = vmw_cmdbuf_space_inline(man, header, size);
 885	else
 886		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
 887
 888	if (ret) {
 889		kfree(header);
 890		return ERR_PTR(ret);
 891	}
 892
 893	header->man = man;
 894	INIT_LIST_HEAD(&header->list);
 895	header->cb_header->status = SVGA_CB_STATUS_NONE;
 896	*p_header = header;
 897
 898	return header->cmd;
 899}
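/*
 * Editor's usage sketch (hypothetical size, assumed to run in a function
 * returning int with a valid struct vmw_cmdbuf_man *man): allocate a
 * dedicated buffer, reserve and fill it, then commit it to the device.
 */
#if 0
struct vmw_cmdbuf_header *header;
void *buf = vmw_cmdbuf_alloc(man, 64, true, &header);
u32 *cmd;

if (IS_ERR(buf))
	return PTR_ERR(buf);
cmd = vmw_cmdbuf_reserve(man, 64, SVGA3D_INVALID_ID, true, header);
/* ... write at most 64 bytes of commands at cmd ... */
vmw_cmdbuf_commit(man, 64, header, true);
#endif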
 900
 901/**
 902 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 903 * command buffer.
 904 *
 905 * @man: The command buffer manager.
 906 * @size: The requested size of the commands.
 907 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 908 * @interruptible: Whether to sleep interruptible while waiting for space.
 909 *
 910 * Returns a pointer to command buffer space if successful. Otherwise
 911 * returns an error pointer.
 912 */
 913static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
 914				    size_t size,
 915				    int ctx_id,
 916				    bool interruptible)
 917{
 918	struct vmw_cmdbuf_header *cur;
 919	void *ret;
 920
 921	if (vmw_cmdbuf_cur_lock(man, interruptible))
 922		return ERR_PTR(-ERESTARTSYS);
 923
 924	cur = man->cur;
 925	if (cur && (size + man->cur_pos > cur->size ||
 926		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
 927		     ctx_id != cur->cb_header->dxContext)))
 928		__vmw_cmdbuf_cur_flush(man);
 929
 930	if (!man->cur) {
 931		ret = vmw_cmdbuf_alloc(man,
 932				       max_t(size_t, size, man->default_size),
 933				       interruptible, &man->cur);
 934		if (IS_ERR(ret)) {
 935			vmw_cmdbuf_cur_unlock(man);
 936			return ret;
 937		}
 938
 939		cur = man->cur;
 940	}
 941
 942	if (ctx_id != SVGA3D_INVALID_ID) {
 943		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
 944		cur->cb_header->dxContext = ctx_id;
 945	}
 946
 947	cur->reserved = size;
 948
 949	return (void *) (man->cur->cmd + man->cur_pos);
 950}
 951
 952/**
 953 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 954 *
 955 * @man: The command buffer manager.
 956 * @size: The size of the commands actually written.
 957 * @flush: Whether to flush the command buffer immediately.
 958 */
 959static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
 960				  size_t size, bool flush)
 961{
 962	struct vmw_cmdbuf_header *cur = man->cur;
 963
 964	WARN_ON(!mutex_is_locked(&man->cur_mutex));
 965
 966	WARN_ON(size > cur->reserved);
 967	man->cur_pos += size;
 968	if (!size)
 969		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
 970	if (flush)
 971		__vmw_cmdbuf_cur_flush(man);
 972	vmw_cmdbuf_cur_unlock(man);
 973}
 974
 975/**
 976 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 977 *
 978 * @man: The command buffer manager.
 979 * @size: The requested size of the commands.
 980 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 981 * @interruptible: Whether to sleep interruptible while waiting for space.
 982 * @header: Header of the command buffer. NULL if the current command buffer
 983 * should be used.
 984 *
 985 * Returns a pointer to command buffer space if successful. Otherwise
 986 * returns an error pointer.
 987 */
 988void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
 989			 int ctx_id, bool interruptible,
 990			 struct vmw_cmdbuf_header *header)
 991{
 992	if (!header)
 993		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
 994
 995	if (size > header->size)
 996		return ERR_PTR(-EINVAL);
 997
 998	if (ctx_id != SVGA3D_INVALID_ID) {
 999		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
1000		header->cb_header->dxContext = ctx_id;
1001	}
1002
1003	header->reserved = size;
1004	return header->cmd;
1005}
1006
1007/**
1008 * vmw_cmdbuf_commit - Commit commands in a command buffer.
1009 *
1010 * @man: The command buffer manager.
1011 * @size: The size of the commands actually written.
1012 * @header: Header of the command buffer. NULL if the current command buffer
1013 * should be used.
1014 * @flush: Whether to flush the command buffer immediately.
1015 */
1016void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1017		       struct vmw_cmdbuf_header *header, bool flush)
1018{
1019	if (!header) {
1020		vmw_cmdbuf_commit_cur(man, size, flush);
1021		return;
1022	}
1023
1024	(void) vmw_cmdbuf_cur_lock(man, false);
1025	__vmw_cmdbuf_cur_flush(man);
1026	WARN_ON(size > header->reserved);
1027	man->cur = header;
1028	man->cur_pos = size;
1029	if (!size)
1030		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
1031	if (flush)
1032		__vmw_cmdbuf_cur_flush(man);
1033	vmw_cmdbuf_cur_unlock(man);
1034}
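/*
 * Editor's usage sketch: with header == NULL, reserve/commit batch small
 * kernel commands into the shared current buffer, which is flushed
 * explicitly or when it runs out of space. 'bytes' is hypothetical.
 */
#if 0
void *cmd = vmw_cmdbuf_reserve(man, bytes, SVGA3D_INVALID_ID, true, NULL);

if (IS_ERR(cmd))
	return PTR_ERR(cmd);
/* ... write at most 'bytes' of commands ... */
vmw_cmdbuf_commit(man, bytes, NULL, false);	/* flushed later */
#endif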
1035
1036/**
1037 * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
1038 *
1039 * @man: The command buffer manager.
1040 */
1041void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
1042{
1043	if (!man)
1044		return;
1045
1046	tasklet_schedule(&man->tasklet);
1047}
1048
1049/**
1050 * vmw_cmdbuf_send_device_command - Send a command through the device context.
1051 *
1052 * @man: The command buffer manager.
1053 * @command: Pointer to the command to send.
1054 * @size: Size of the command.
1055 *
1056 * Synchronously sends a device context command.
1057 */
1058static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
1059					  const void *command,
1060					  size_t size)
1061{
1062	struct vmw_cmdbuf_header *header;
1063	int status;
1064	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
1065
1066	if (IS_ERR(cmd))
1067		return PTR_ERR(cmd);
1068
1069	memcpy(cmd, command, size);
1070	header->cb_header->length = size;
1071	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
1072	spin_lock_bh(&man->lock);
1073	status = vmw_cmdbuf_header_submit(header);
1074	spin_unlock_bh(&man->lock);
1075	vmw_cmdbuf_header_free(header);
1076
1077	if (status != SVGA_CB_STATUS_COMPLETED) {
1078		DRM_ERROR("Device context command failed with status %d\n",
1079			  status);
1080		return -EINVAL;
1081	}
1082
1083	return 0;
1084}
1085
1086/**
1087 * vmw_cmdbuf_startstop - Send a start / stop command through the device
1088 * context.
1089 *
1090 * @man: The command buffer manager.
1091 * @enable: Whether to enable or disable the context.
1092 *
1093 * Synchronously sends a device start / stop context command.
1094 */
1095static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man,
1096				bool enable)
1097{
1098	struct {
1099		uint32 id;
1100		SVGADCCmdStartStop body;
1101	} __packed cmd;
1102
1103	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
1104	cmd.body.enable = (enable) ? 1 : 0;
1105	cmd.body.context = SVGA_CB_CONTEXT_0;
1106
1107	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1108}
1109
1110/**
1111 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
1112 *
1113 * @man: The command buffer manager.
1114 * @size: The size of the main space pool.
1115 * @default_size: The default size of the command buffer for small kernel
1116 * submissions.
1117 *
1118 * Set the size and allocate the main command buffer space pool,
1119 * as well as the default size of the command buffer for
1120 * small kernel submissions. If successful, this enables large command
1121 * submissions. Note that this function requires that rudimentary command
1122 * submission is already available and that the MOB memory manager is alive.
1123 * Returns 0 on success. Negative error code on failure.
1124 */
1125int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
1126			     size_t size, size_t default_size)
1127{
1128	struct vmw_private *dev_priv = man->dev_priv;
1129	bool dummy;
1130	int ret;
1131
1132	if (man->has_pool)
1133		return -EINVAL;
1134
1135	/* First, try to allocate a huge chunk of DMA memory */
1136	size = PAGE_ALIGN(size);
1137	man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size,
1138				      &man->handle, GFP_KERNEL);
1139	if (man->map) {
1140		man->using_mob = false;
1141	} else {
1142		/*
1143		 * DMA memory failed. If we can have command buffers in a
1144		 * MOB, try to use that instead. Note that this will
1145		 * actually call into the already enabled manager, when
1146		 * binding the MOB.
1147		 */
1148		if (!(dev_priv->capabilities & SVGA_CAP_DX))
1149			return -ENOMEM;
1150
1151		ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
1152				    &vmw_mob_ne_placement, 0, false, NULL,
1153				    &man->cmd_space);
1154		if (ret)
1155			return ret;
1156
1157		man->using_mob = true;
1158		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
1159				  &man->map_obj);
1160		if (ret)
1161			goto out_no_map;
1162
1163		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
1164	}
1165
1166	man->size = size;
1167	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
1168
1169	man->has_pool = true;
1170
1171	/*
1172	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
1173	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
1174	 * needs to wait for space and we block on further command
1175	 * submissions to be able to free up space.
1176	 */
1177	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1178	DRM_INFO("Using command buffers with %s pool.\n",
1179		 (man->using_mob) ? "MOB" : "DMA");
1180
1181	return 0;
1182
1183out_no_map:
1184	if (man->using_mob)
1185		ttm_bo_unref(&man->cmd_space);
1186
1187	return ret;
1188}
1189
1190/**
1191 * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
1192 * inline command buffer submissions only.
1193 *
1194 * @dev_priv: Pointer to device private structure.
1195 *
1196 * Returns a pointer to a command buffer manager on success or an error pointer
1197 * on failure. The command buffer manager will be enabled for submissions of
1198 * size VMW_CMDBUF_INLINE_SIZE only.
1199 */
1200struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
1201{
1202	struct vmw_cmdbuf_man *man;
1203	struct vmw_cmdbuf_context *ctx;
1204	int i;
1205	int ret;
1206
1207	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
1208		return ERR_PTR(-ENOSYS);
1209
1210	man = kzalloc(sizeof(*man), GFP_KERNEL);
1211	if (!man)
1212		return ERR_PTR(-ENOMEM);
1213
1214	man->headers = dma_pool_create("vmwgfx cmdbuf",
1215				       &dev_priv->dev->pdev->dev,
1216				       sizeof(SVGACBHeader),
1217				       64, PAGE_SIZE);
1218	if (!man->headers) {
1219		ret = -ENOMEM;
1220		goto out_no_pool;
1221	}
1222
1223	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
1224					&dev_priv->dev->pdev->dev,
1225					sizeof(struct vmw_cmdbuf_dheader),
1226					64, PAGE_SIZE);
1227	if (!man->dheaders) {
1228		ret = -ENOMEM;
1229		goto out_no_dpool;
1230	}
1231
1232	for_each_cmdbuf_ctx(man, i, ctx)
1233		vmw_cmdbuf_ctx_init(ctx);
1234
1235	INIT_LIST_HEAD(&man->error);
1236	spin_lock_init(&man->lock);
1237	mutex_init(&man->cur_mutex);
1238	mutex_init(&man->space_mutex);
1239	tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
1240		     (unsigned long) man);
1241	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1242	init_waitqueue_head(&man->alloc_queue);
1243	init_waitqueue_head(&man->idle_queue);
1244	man->dev_priv = dev_priv;
1245	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
1246	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
1247	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
1248			       &dev_priv->error_waiters);
1249	ret = vmw_cmdbuf_startstop(man, true);
1250	if (ret) {
1251		DRM_ERROR("Failed starting command buffer context 0.\n");
1252		vmw_cmdbuf_man_destroy(man);
1253		return ERR_PTR(ret);
1254	}
1255
1256	return man;
1257
1258out_no_dpool:
1259	dma_pool_destroy(man->headers);
1260out_no_pool:
1261	kfree(man);
1262
1263	return ERR_PTR(ret);
1264}
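/*
 * Editor's lifecycle sketch (error handling elided, pool_size
 * hypothetical): typical bring-up and tear-down order for the manager.
 */
#if 0
struct vmw_cmdbuf_man *man = vmw_cmdbuf_man_create(dev_priv);

if (!IS_ERR(man)) {
	vmw_cmdbuf_set_pool_size(man, pool_size, VMW_CMDBUF_INLINE_SIZE);
	/* ... submissions via vmw_cmdbuf_reserve()/vmw_cmdbuf_commit() ... */
	vmw_cmdbuf_remove_pool(man);	/* before MOB teardown */
	vmw_cmdbuf_man_destroy(man);
}
#endif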
1265
1266/**
1267 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
1268 *
1269 * @man: Pointer to a command buffer manager.
1270 *
1271 * This function removes the main buffer space pool, and should be called
1272 * before MOB memory management is removed. When this function has been called,
1273 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
1274 * less are allowed, and the default size of the command buffer for small kernel
1275 * submissions is also set to this size.
1276 */
1277void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
1278{
1279	if (!man->has_pool)
1280		return;
1281
1282	man->has_pool = false;
1283	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1284	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
1285	if (man->using_mob) {
1286		(void) ttm_bo_kunmap(&man->map_obj);
1287		ttm_bo_unref(&man->cmd_space);
1288	} else {
1289		dma_free_coherent(&man->dev_priv->dev->pdev->dev,
1290				  man->size, man->map, man->handle);
1291	}
1292}
1293
1294/**
1295 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
1296 *
1297 * @man: Pointer to a command buffer manager.
1298 *
1299 * This function idles and then destroys a command buffer manager.
1300 */
1301void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
1302{
1303	WARN_ON_ONCE(man->has_pool);
1304	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
1305	if (vmw_cmdbuf_startstop(man, false))
1306		DRM_ERROR("Failed stopping command buffer context 0.\n");
1307
1308	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
1309				  &man->dev_priv->error_waiters);
1310	tasklet_kill(&man->tasklet);
1311	(void) cancel_work_sync(&man->work);
1312	dma_pool_destroy(man->dheaders);
1313	dma_pool_destroy(man->headers);
1314	mutex_destroy(&man->cur_mutex);
1315	mutex_destroy(&man->space_mutex);
1316	kfree(man);
1317}
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a
   7 * copy of this software and associated documentation files (the
   8 * "Software"), to deal in the Software without restriction, including
   9 * without limitation the rights to use, copy, modify, merge, publish,
  10 * distribute, sub license, and/or sell copies of the Software, and to
  11 * permit persons to whom the Software is furnished to do so, subject to
  12 * the following conditions:
  13 *
  14 * The above copyright notice and this permission notice (including the
  15 * next paragraph) shall be included in all copies or substantial portions
  16 * of the Software.
  17 *
  18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25 *
  26 **************************************************************************/
  27
  28#include <linux/dmapool.h>
  29#include <linux/pci.h>
  30
  31#include <drm/ttm/ttm_bo_api.h>
  32
  33#include "vmwgfx_drv.h"
  34
  35/*
  36 * Size of inline command buffers. Try to make sure that a page size is a
  37 * multiple of the DMA pool allocation size.
  38 */
  39#define VMW_CMDBUF_INLINE_ALIGN 64
  40#define VMW_CMDBUF_INLINE_SIZE \
  41	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
  42
  43/**
  44 * struct vmw_cmdbuf_context - Command buffer context queues
  45 *
  46 * @submitted: List of command buffers that have been submitted to the
  47 * manager but not yet submitted to hardware.
  48 * @hw_submitted: List of command buffers submitted to hardware.
  49 * @preempted: List of preempted command buffers.
  50 * @num_hw_submitted: Number of buffers currently being processed by hardware
  51 * @block_submission: Identifies a block command submission.
  52 */
  53struct vmw_cmdbuf_context {
  54	struct list_head submitted;
  55	struct list_head hw_submitted;
  56	struct list_head preempted;
  57	unsigned num_hw_submitted;
  58	bool block_submission;
  59};
  60
  61/**
  62 * struct vmw_cmdbuf_man - Command buffer manager
  63 *
  64 * @cur_mutex: Mutex protecting the command buffer used for incremental small
  65 * kernel command submissions, @cur.
  66 * @space_mutex: Mutex to protect against starvation when we allocate
  67 * main pool buffer space.
  68 * @error_mutex: Mutex to serialize the work queue error handling.
  69 * Note this is not needed if the same workqueue handler
  70 * can't race with itself...
  71 * @work: A struct work_struct implementing command buffer error handling.
  72 * Immutable.
  73 * @dev_priv: Pointer to the device private struct. Immutable.
  74 * @ctx: Array of command buffer context queues. The queues and the context
  75 * data are protected by @lock.
  76 * @error: List of command buffers that have caused device errors.
  77 * Protected by @lock.
  78 * @mm: Range manager for the command buffer space. Manager allocations and
  79 * frees are protected by @lock.
  80 * @cmd_space: Buffer object for the command buffer space, unless we were
  81 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
  82 * @map_obj: Mapping state for @cmd_space. Immutable.
  83 * @map: Pointer to command buffer space. May be a mapped buffer object or
  84 * a contiguous coherent DMA memory allocation. Immutable.
  85 * @cur: Command buffer for small kernel command submissions. Protected by
  86 * the @cur_mutex.
  87 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
  88 * @default_size: Default size for the @cur command buffer. Immutable.
  89 * @max_hw_submitted: Max number of in-flight command buffers the device can
  90 * handle. Immutable.
  91 * @lock: Spinlock protecting command submission queues.
  92 * @headers: Pool of DMA memory for device command buffer headers.
  93 * Internal protection.
  94 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
  95 * space for inline data. Internal protection.
  96 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
  97 * space.
  98 * @idle_queue: Wait queue for processes waiting for command buffer idle.
  99 * @irq_on: Whether the process function has requested irq to be turned on.
 100 * Protected by @lock.
 101 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 102 * allocation. Immutable.
 103 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 104 * Typically this is false only during bootstrap.
 105 * @handle: DMA address handle for the command buffer space if @using_mob is
 106 * false. Immutable.
 107 * @size: The size of the command buffer space. Immutable.
 108 * @num_contexts: Number of contexts actually enabled.
 109 */
 110struct vmw_cmdbuf_man {
 111	struct mutex cur_mutex;
 112	struct mutex space_mutex;
 113	struct mutex error_mutex;
 114	struct work_struct work;
 115	struct vmw_private *dev_priv;
 116	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
 117	struct list_head error;
 118	struct drm_mm mm;
 119	struct ttm_buffer_object *cmd_space;
 120	struct ttm_bo_kmap_obj map_obj;
 121	u8 *map;
 122	struct vmw_cmdbuf_header *cur;
 123	size_t cur_pos;
 124	size_t default_size;
 125	unsigned max_hw_submitted;
 126	spinlock_t lock;
 127	struct dma_pool *headers;
 128	struct dma_pool *dheaders;
 129	wait_queue_head_t alloc_queue;
 130	wait_queue_head_t idle_queue;
 131	bool irq_on;
 132	bool using_mob;
 133	bool has_pool;
 134	dma_addr_t handle;
 135	size_t size;
 136	u32 num_contexts;
 137};
 138
 139/**
 140 * struct vmw_cmdbuf_header - Command buffer metadata
 141 *
 142 * @man: The command buffer manager.
 143 * @cb_header: Device command buffer header, allocated from a DMA pool.
 144 * @cb_context: The device command buffer context.
 145 * @list: List head for attaching to the manager lists.
 146 * @node: The range manager node.
 147 * @handle: The DMA address of @cb_header. Handed to the device on command
 148 * buffer submission.
 149 * @cmd: Pointer to the command buffer space of this buffer.
 150 * @size: Size of the command buffer space of this buffer.
 151 * @reserved: Reserved space of this buffer.
 152 * @inline_space: Whether inline command buffer space is used.
 153 */
 154struct vmw_cmdbuf_header {
 155	struct vmw_cmdbuf_man *man;
 156	SVGACBHeader *cb_header;
 157	SVGACBContext cb_context;
 158	struct list_head list;
 159	struct drm_mm_node node;
 160	dma_addr_t handle;
 161	u8 *cmd;
 162	size_t size;
 163	size_t reserved;
 164	bool inline_space;
 165};
 166
 167/**
 168 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 169 * command buffer space.
 170 *
 171 * @cb_header: Device command buffer header.
 172 * @cmd: Inline command buffer space.
 173 */
 174struct vmw_cmdbuf_dheader {
 175	SVGACBHeader cb_header;
 176	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
 177};
 178
 179/**
 180 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 181 *
 182 * @page_size: Size of requested command buffer space in pages.
 183 * @node: Pointer to the range manager node.
 184 * @done: True if this allocation has succeeded.
 185 */
 186struct vmw_cmdbuf_alloc_info {
 187	size_t page_size;
 188	struct drm_mm_node *node;
 189	bool done;
 190};
 191
 192/* Loop over each context in the command buffer manager. */
 193#define for_each_cmdbuf_ctx(_man, _i, _ctx)				\
 194	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
 195	     ++(_i), ++(_ctx))
 196
 197static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
 198				bool enable);
 199static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
 200
 201/**
 202 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 203 *
 204 * @man: The command buffer manager.
 205 * @interruptible: Whether to wait interruptible when locking.
 206 */
 207static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
 208{
 209	if (interruptible) {
 210		if (mutex_lock_interruptible(&man->cur_mutex))
 211			return -ERESTARTSYS;
 212	} else {
 213		mutex_lock(&man->cur_mutex);
 214	}
 215
 216	return 0;
 217}
 218
 219/**
 220 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 221 *
 222 * @man: The command buffer manager.
 223 */
 224static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
 225{
 226	mutex_unlock(&man->cur_mutex);
 227}
 228
 229/**
 230 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 231 * been used for the device context with inline command buffers.
 232 * Need not be called locked.
 233 *
 234 * @header: Pointer to the header to free.
 235 */
 236static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
 237{
 238	struct vmw_cmdbuf_dheader *dheader;
 239
 240	if (WARN_ON_ONCE(!header->inline_space))
 241		return;
 242
 243	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
 244			       cb_header);
 245	dma_pool_free(header->man->dheaders, dheader, header->handle);
 246	kfree(header);
 247}
 248
 249/**
 250 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 251 * associated structures.
 252 *
 253 * @header: Pointer to the header to free.
 254 *
 255 * For internal use. Must be called with man::lock held.
 256 */
 257static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 258{
 259	struct vmw_cmdbuf_man *man = header->man;
 260
 261	lockdep_assert_held_once(&man->lock);
 262
 263	if (header->inline_space) {
 264		vmw_cmdbuf_header_inline_free(header);
 265		return;
 266	}
 267
 268	drm_mm_remove_node(&header->node);
 269	wake_up_all(&man->alloc_queue);
 270	if (header->cb_header)
 271		dma_pool_free(man->headers, header->cb_header,
 272			      header->handle);
 273	kfree(header);
 274}
 275
 276/**
 277 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 278 * associated structures.
 279 *
 280 * @header: Pointer to the header to free.
 281 */
 282void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
 283{
 284	struct vmw_cmdbuf_man *man = header->man;
 285
 286	/* Avoid locking if inline_space */
 287	if (header->inline_space) {
 288		vmw_cmdbuf_header_inline_free(header);
 289		return;
 290	}
 291	spin_lock(&man->lock);
 292	__vmw_cmdbuf_header_free(header);
 293	spin_unlock(&man->lock);
 294}
 295
 296
 297/**
 298 * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
 299 *
 300 * @header: The header of the buffer to submit.
 301 */
 302static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
 303{
 304	struct vmw_cmdbuf_man *man = header->man;
 305	u32 val;
 306
 307	val = upper_32_bits(header->handle);
 308	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
 309
 310	val = lower_32_bits(header->handle);
 311	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
 312	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
 313
 314	return header->cb_header->status;
 315}
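/*
 * Editor's note: upper_32_bits()/lower_32_bits() are the standard kernel
 * helpers for splitting a 64-bit value; they replace the sizeof()-guarded
 * open-coded shift in the v4.6 variant of this function above and avoid
 * an undefined shift when dma_addr_t is only 32 bits wide.
 */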
 316
 317/**
 318 * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
 319 *
 320 * @ctx: The command buffer context to initialize
 321 */
 322static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
 323{
 324	INIT_LIST_HEAD(&ctx->hw_submitted);
 325	INIT_LIST_HEAD(&ctx->submitted);
 326	INIT_LIST_HEAD(&ctx->preempted);
 327	ctx->num_hw_submitted = 0;
 328}
 329
 330/**
 331 * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
 332 * context.
 333 *
 334 * @man: The command buffer manager.
 335 * @ctx: The command buffer context.
 336 *
 337 * Submits command buffers to hardware until there are no more command
 338 * buffers to submit or the hardware can't handle more command buffers.
 339 */
 340static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
 341				  struct vmw_cmdbuf_context *ctx)
 342{
 343	while (ctx->num_hw_submitted < man->max_hw_submitted &&
 344	       !list_empty(&ctx->submitted) &&
 345	       !ctx->block_submission) {
 346		struct vmw_cmdbuf_header *entry;
 347		SVGACBStatus status;
 348
 349		entry = list_first_entry(&ctx->submitted,
 350					 struct vmw_cmdbuf_header,
 351					 list);
 352
 353		status = vmw_cmdbuf_header_submit(entry);
 354
 355		/* This should never happen */
 356		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
 357			entry->cb_header->status = SVGA_CB_STATUS_NONE;
 358			break;
 359		}
 360
 361		list_del(&entry->list);
 362		list_add_tail(&entry->list, &ctx->hw_submitted);
 363		ctx->num_hw_submitted++;
 364	}
 365
 366}
 367
 368/**
 369 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 370 *
 371 * @man: The command buffer manager.
 372 * @ctx: The command buffer context.
 373 * @notempty: Pass back count of non-empty command submitted lists.
 374 *
 375 * Submit command buffers to hardware if possible, and process finished
 376 * buffers. Typically freeing them, but on preemption or error take
 377 * appropriate action. Wake up waiters if appropriate.
 378 */
 379static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
 380				   struct vmw_cmdbuf_context *ctx,
 381				   int *notempty)
 382{
 383	struct vmw_cmdbuf_header *entry, *next;
 384
 385	vmw_cmdbuf_ctx_submit(man, ctx);
 386
 387	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
 388		SVGACBStatus status = entry->cb_header->status;
 389
 390		if (status == SVGA_CB_STATUS_NONE)
 391			break;
 392
 393		list_del(&entry->list);
 394		wake_up_all(&man->idle_queue);
 395		ctx->num_hw_submitted--;
 396		switch (status) {
 397		case SVGA_CB_STATUS_COMPLETED:
 398			__vmw_cmdbuf_header_free(entry);
 399			break;
 400		case SVGA_CB_STATUS_COMMAND_ERROR:
 401			WARN_ONCE(true, "Command buffer error.\n");
 402			entry->cb_header->status = SVGA_CB_STATUS_NONE;
 403			list_add_tail(&entry->list, &man->error);
 404			schedule_work(&man->work);
 405			break;
 406		case SVGA_CB_STATUS_PREEMPTED:
 407			entry->cb_header->status = SVGA_CB_STATUS_NONE;
 408			list_add_tail(&entry->list, &ctx->preempted);
 409			break;
 410		case SVGA_CB_STATUS_CB_HEADER_ERROR:
 411			WARN_ONCE(true, "Command buffer header error.\n");
 412			__vmw_cmdbuf_header_free(entry);
 413			break;
 414		default:
 415			WARN_ONCE(true, "Undefined command buffer status.\n");
 416			__vmw_cmdbuf_header_free(entry);
 417			break;
 418		}
 419	}
 420
 421	vmw_cmdbuf_ctx_submit(man, ctx);
 422	if (!list_empty(&ctx->submitted))
 423		(*notempty)++;
 424}
 425
 426/**
 427 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 428 * switch on and off irqs as appropriate.
 429 *
 430 * @man: The command buffer manager.
 431 *
 432 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 433 * command buffers left that are not submitted to hardware, make sure
 434 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 435 */
 436static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
 437{
 438	int notempty;
 439	struct vmw_cmdbuf_context *ctx;
 440	int i;
 441
 442retry:
 443	notempty = 0;
 444	for_each_cmdbuf_ctx(man, i, ctx)
 445		vmw_cmdbuf_ctx_process(man, ctx, &notempty);
 446
 447	if (man->irq_on && !notempty) {
 448		vmw_generic_waiter_remove(man->dev_priv,
 449					  SVGA_IRQFLAG_COMMAND_BUFFER,
 450					  &man->dev_priv->cmdbuf_waiters);
 451		man->irq_on = false;
 452	} else if (!man->irq_on && notempty) {
 453		vmw_generic_waiter_add(man->dev_priv,
 454				       SVGA_IRQFLAG_COMMAND_BUFFER,
 455				       &man->dev_priv->cmdbuf_waiters);
 456		man->irq_on = true;
 457
 458		/* Rerun in case we just missed an irq. */
 459		goto retry;
 460	}
 461}
 462
 463/**
 464 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 465 * command buffer context
 466 *
 467 * @man: The command buffer manager.
 468 * @header: The header of the buffer to submit.
 469 * @cb_context: The command buffer context to use.
 470 *
 471 * This function adds @header to the "submitted" queue of the command
 472 * buffer context identified by @cb_context. It then calls the command buffer
 473 * manager processing to potentially submit the buffer to hardware.
 474 * @man->lock needs to be held when calling this function.
 475 */
 476static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
 477			       struct vmw_cmdbuf_header *header,
 478			       SVGACBContext cb_context)
 479{
 480	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
 481		header->cb_header->dxContext = 0;
 482	header->cb_context = cb_context;
 483	list_add_tail(&header->list, &man->ctx[cb_context].submitted);
 484
 485	vmw_cmdbuf_man_process(man);
 486}
 487
 488/**
 489 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 490 * handler implemented as a threaded irq task.
 491 *
 492 * @man: Pointer to the command buffer manager.
 493 *
 494 * The bottom half of the interrupt handler simply calls into the
 495 * command buffer processor to free finished buffers and submit any
 496 * queued buffers to hardware.
 497 */
 498void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
 499{
 500	spin_lock(&man->lock);
 501	vmw_cmdbuf_man_process(man);
 502	spin_unlock(&man->lock);
 503}
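
/*
 * Illustrative sketch, not part of this file: the driver's threaded irq
 * handler is expected to call vmw_cmdbuf_irqthread() when a command
 * buffer irq is pending. Assuming the manager is reachable as
 * dev_priv->cman, the bottom half reduces to:
 */
static void __maybe_unused
vmw_example_cmdbuf_bottom_half(struct vmw_private *dev_priv)
{
	/* Free finished buffers and push queued ones to hardware. */
	vmw_cmdbuf_irqthread(dev_priv->cman);
}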
 504
 505/**
 506 * vmw_cmdbuf_work_func - The deferred work function that handles
 507 * command buffer errors.
 508 *
 509 * @work: The work func closure argument.
 510 *
 511 * Restarting the command buffer context after an error requires process
 512 * context, so it is deferred to this work function.
 513 */
 514static void vmw_cmdbuf_work_func(struct work_struct *work)
 515{
 516	struct vmw_cmdbuf_man *man =
 517		container_of(work, struct vmw_cmdbuf_man, work);
 518	struct vmw_cmdbuf_header *entry, *next;
 519	uint32_t dummy = 0;
 520	bool send_fence = false;
 521	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
 522	int i;
 523	struct vmw_cmdbuf_context *ctx;
 524	bool global_block = false;
 525
 526	for_each_cmdbuf_ctx(man, i, ctx)
 527		INIT_LIST_HEAD(&restart_head[i]);
 528
 529	mutex_lock(&man->error_mutex);
 530	spin_lock(&man->lock);
 531	list_for_each_entry_safe(entry, next, &man->error, list) {
 532		SVGACBHeader *cb_hdr = entry->cb_header;
 533		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
 534			(entry->cmd + cb_hdr->errorOffset);
 535		u32 error_cmd_size, new_start_offset;
 536		const char *cmd_name;
 537
 538		list_del_init(&entry->list);
 539		global_block = true;
 540
 541		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
 542			VMW_DEBUG_USER("Unknown command causing device error.\n");
 543			VMW_DEBUG_USER("Command buffer offset is %lu\n",
 544				       (unsigned long) cb_hdr->errorOffset);
 545			__vmw_cmdbuf_header_free(entry);
 546			send_fence = true;
 547			continue;
 548		}
 549
 550		VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
 551			       cmd_name);
 552		VMW_DEBUG_USER("Command buffer offset is %lu\n",
 553			       (unsigned long) cb_hdr->errorOffset);
 554		VMW_DEBUG_USER("Command size is %lu\n",
 555			       (unsigned long) error_cmd_size);
 556
 557		new_start_offset = cb_hdr->errorOffset + error_cmd_size;
 558
 559		if (new_start_offset >= cb_hdr->length) {
 560			__vmw_cmdbuf_header_free(entry);
 561			send_fence = true;
 562			continue;
 563		}
 564
 565		if (man->using_mob)
 566			cb_hdr->ptr.mob.mobOffset += new_start_offset;
 567		else
 568			cb_hdr->ptr.pa += (u64) new_start_offset;
 569
 570		entry->cmd += new_start_offset;
 571		cb_hdr->length -= new_start_offset;
 572		cb_hdr->errorOffset = 0;
 573		cb_hdr->offset = 0;
 574
 575		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
 576	}
 577
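	/* Block submission on all contexts while the repair is in progress. */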
 578	for_each_cmdbuf_ctx(man, i, ctx)
 579		man->ctx[i].block_submission = true;
 580
 581	spin_unlock(&man->lock);
 582
 583	/* Preempt all contexts */
 584	if (global_block && vmw_cmdbuf_preempt(man, 0))
 585		DRM_ERROR("Failed preempting command buffer contexts\n");
 586
 587	spin_lock(&man->lock);
 588	for_each_cmdbuf_ctx(man, i, ctx) {
 589		/* Move preempted command buffers to the preempted queue. */
 590		vmw_cmdbuf_ctx_process(man, ctx, &dummy);
 591
 592		/*
 593		 * Add the preempted queue after the command buffer
 594		 * that caused an error.
 595		 */
 596		list_splice_init(&ctx->preempted, restart_head[i].prev);
 597
 598		/*
 599		 * Finally, add all command buffers at the head of the
 600		 * submitted queue so that they are rerun.
 601		 */
 602
 603		ctx->block_submission = false;
 604		list_splice_init(&restart_head[i], &ctx->submitted);
 605	}
 606
 607	vmw_cmdbuf_man_process(man);
 608	spin_unlock(&man->lock);
 609
 610	if (global_block && vmw_cmdbuf_startstop(man, 0, true))
 611		DRM_ERROR("Failed restarting command buffer contexts\n");
 612
 613	/* Send a new fence in case one was removed */
 614	if (send_fence) {
 615		vmw_cmd_send_fence(man->dev_priv, &dummy);
 616		wake_up_all(&man->idle_queue);
 617	}
 618
 619	mutex_unlock(&man->error_mutex);
 620}
 621
 622/**
 623 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 624 *
 625 * @man: The command buffer manager.
 626 * @check_preempted: Also check the preempted queue for pending command buffers.
 627 * Returns true if the command buffer manager is idle.
 628 */
 629static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
 630				bool check_preempted)
 631{
 632	struct vmw_cmdbuf_context *ctx;
 633	bool idle = false;
 634	int i;
 635
 636	spin_lock(&man->lock);
 637	vmw_cmdbuf_man_process(man);
 638	for_each_cmdbuf_ctx(man, i, ctx) {
 639		if (!list_empty(&ctx->submitted) ||
 640		    !list_empty(&ctx->hw_submitted) ||
 641		    (check_preempted && !list_empty(&ctx->preempted)))
 642			goto out_unlock;
 643	}
 644
 645	idle = list_empty(&man->error);
 646
 647out_unlock:
 648	spin_unlock(&man->lock);
 649
 650	return idle;
 651}
 652
 653/**
 654 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 655 * command submissions
 656 *
 657 * @man: The command buffer manager.
 658 *
 659 * Flushes the current command buffer without allocating a new one. A new one
 660 * is automatically allocated when needed. Call with @man->cur_mutex held.
 661 */
 662static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
 663{
 664	struct vmw_cmdbuf_header *cur = man->cur;
 665
 666	lockdep_assert_held_once(&man->cur_mutex);
 667
 668	if (!cur)
 669		return;
 670
 671	spin_lock(&man->lock);
 672	if (man->cur_pos == 0) {
 673		__vmw_cmdbuf_header_free(cur);
 674		goto out_unlock;
 675	}
 676
 677	man->cur->cb_header->length = man->cur_pos;
 678	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
 679out_unlock:
 680	spin_unlock(&man->lock);
 681	man->cur = NULL;
 682	man->cur_pos = 0;
 683}
 684
 685/**
 686 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 687 * command submissions
 688 *
 689 * @man: The command buffer manager.
 690 * @interruptible: Whether to sleep interruptibly while waiting.
 691 *
 692 * Flushes the current command buffer without allocating a new one. A new one
 693 * is automatically allocated when needed.
 694 */
 695int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
 696			 bool interruptible)
 697{
 698	int ret = vmw_cmdbuf_cur_lock(man, interruptible);
 699
 700	if (ret)
 701		return ret;
 702
 703	__vmw_cmdbuf_cur_flush(man);
 704	vmw_cmdbuf_cur_unlock(man);
 705
 706	return 0;
 707}
 708
 709/**
 710 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 711 *
 712 * @man: The command buffer manager.
 713 * @interruptible: Sleep interruptibly while waiting.
 714 * @timeout: Time out after this many ticks.
 715 *
 716 * Wait until the command buffer manager has processed all command buffers,
 717 * or until a timeout occurs. If a timeout occurs, the function will return
 718 * -EBUSY.
 719 */
 720int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
 721		    unsigned long timeout)
 722{
 723	int ret;
 724
 725	ret = vmw_cmdbuf_cur_flush(man, interruptible);
 726	vmw_generic_waiter_add(man->dev_priv,
 727			       SVGA_IRQFLAG_COMMAND_BUFFER,
 728			       &man->dev_priv->cmdbuf_waiters);
 729
 730	if (interruptible) {
 731		ret = wait_event_interruptible_timeout
 732			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
 733			 timeout);
 734	} else {
 735		ret = wait_event_timeout
 736			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
 737			 timeout);
 738	}
 739	vmw_generic_waiter_remove(man->dev_priv,
 740				  SVGA_IRQFLAG_COMMAND_BUFFER,
 741				  &man->dev_priv->cmdbuf_waiters);
 742	if (ret == 0) {
 743		if (!vmw_cmdbuf_man_idle(man, true))
 744			ret = -EBUSY;
 745		else
 746			ret = 0;
 747	}
 748	if (ret > 0)
 749		ret = 0;
 750
 751	return ret;
 752}
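
/*
 * Illustrative sketch, not part of the driver: a caller draining the
 * manager might wait like this. The 3-second timeout is an arbitrary
 * example value.
 */
static int __maybe_unused vmw_example_drain(struct vmw_cmdbuf_man *man)
{
	/* vmw_cmdbuf_idle() flushes the current buffer before waiting. */
	int ret = vmw_cmdbuf_idle(man, true, msecs_to_jiffies(3000));

	if (ret == -EBUSY)
		DRM_ERROR("Command buffer manager failed to idle.\n");

	return ret;
}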
 753
 754/**
 755 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 756 *
 757 * @man: The command buffer manager.
 758 * @info: Allocation info. Will hold the size on entry and allocated mm node
 759 * on successful return.
 760 *
 761 * Try to allocate buffer space from the main pool. Returns true and sets
 762 * @info->done if the allocation succeeded.
 763 */
 764static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
 765				 struct vmw_cmdbuf_alloc_info *info)
 766{
 767	int ret;
 768
 769	if (info->done)
 770		return true;
 771
 772	memset(info->node, 0, sizeof(*info->node));
 773	spin_lock(&man->lock);
 774	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 775	if (ret) {
 776		vmw_cmdbuf_man_process(man);
 777		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
 778	}
 779
 780	spin_unlock(&man->lock);
 781	info->done = !ret;
 782
 783	return info->done;
 784}
 785
 786/**
 787 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 788 *
 789 * @man: The command buffer manager.
 790 * @node: Pointer to pre-allocated range-manager node.
 791 * @size: The size of the allocation.
 792 * @interruptible: Whether to sleep interruptible while waiting for space.
 793 *
 794 * This function allocates buffer space from the main pool, and if there is
 795 * no space is currently available, it turns on IRQ handling and sleeps
 796 * waiting for space to become available.
 797 */
 798static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
 799				  struct drm_mm_node *node,
 800				  size_t size,
 801				  bool interruptible)
 802{
 803	struct vmw_cmdbuf_alloc_info info;
 804
 805	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
 806	info.node = node;
 807	info.done = false;
 808
 809	/*
 810	 * To prevent starvation of large requests, only one allocating call
 811	 * at a time may wait for space.
 812	 */
 813	if (interruptible) {
 814		if (mutex_lock_interruptible(&man->space_mutex))
 815			return -ERESTARTSYS;
 816	} else {
 817		mutex_lock(&man->space_mutex);
 818	}
 819
 820	/* Try to allocate space without waiting. */
 821	if (vmw_cmdbuf_try_alloc(man, &info))
 822		goto out_unlock;
 823
 824	vmw_generic_waiter_add(man->dev_priv,
 825			       SVGA_IRQFLAG_COMMAND_BUFFER,
 826			       &man->dev_priv->cmdbuf_waiters);
 827
 828	if (interruptible) {
 829		int ret;
 830
 831		ret = wait_event_interruptible
 832			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
 833		if (ret) {
 834			vmw_generic_waiter_remove
 835				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
 836				 &man->dev_priv->cmdbuf_waiters);
 837			mutex_unlock(&man->space_mutex);
 838			return ret;
 839		}
 840	} else {
 841		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
 842	}
 843	vmw_generic_waiter_remove(man->dev_priv,
 844				  SVGA_IRQFLAG_COMMAND_BUFFER,
 845				  &man->dev_priv->cmdbuf_waiters);
 846
 847out_unlock:
 848	mutex_unlock(&man->space_mutex);
 849
 850	return 0;
 851}
 852
 853/**
 854 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 855 * space from the main pool.
 856 *
 857 * @man: The command buffer manager.
 858 * @header: Pointer to the header to set up.
 859 * @size: The requested size of the buffer space.
 860 * @interruptible: Whether to sleep interruptible while waiting for space.
 861 */
 862static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
 863				 struct vmw_cmdbuf_header *header,
 864				 size_t size,
 865				 bool interruptible)
 866{
 867	SVGACBHeader *cb_hdr;
 868	size_t offset;
 869	int ret;
 870
 871	if (!man->has_pool)
 872		return -ENOMEM;
 873
 874	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);
 875
 876	if (ret)
 877		return ret;
 878
 879	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
 880					    &header->handle);
 881	if (!header->cb_header) {
 882		ret = -ENOMEM;
 883		goto out_no_cb_header;
 884	}
 885
 886	header->size = header->node.size << PAGE_SHIFT;
 887	cb_hdr = header->cb_header;
 888	offset = header->node.start << PAGE_SHIFT;
 889	header->cmd = man->map + offset;
 890	if (man->using_mob) {
 891		cb_hdr->flags = SVGA_CB_FLAG_MOB;
 892		cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start;
 893		cb_hdr->ptr.mob.mobOffset = offset;
 894	} else {
 895		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
 896	}
 897
 898	return 0;
 899
 900out_no_cb_header:
 901	spin_lock(&man->lock);
 902	drm_mm_remove_node(&header->node);
 903	spin_unlock(&man->lock);
 904
 905	return ret;
 906}
 907
 908/**
 909 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 910 * inline command buffer space.
 911 *
 912 * @man: The command buffer manager.
 913 * @header: Pointer to the header to set up.
 914 * @size: The requested size of the buffer space.
 915 */
 916static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
 917				   struct vmw_cmdbuf_header *header,
 918				   int size)
 919{
 920	struct vmw_cmdbuf_dheader *dheader;
 921	SVGACBHeader *cb_hdr;
 922
 923	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
 924		return -ENOMEM;
 925
 926	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
 927				  &header->handle);
 928	if (!dheader)
 929		return -ENOMEM;
 930
 931	header->inline_space = true;
 932	header->size = VMW_CMDBUF_INLINE_SIZE;
 933	cb_hdr = &dheader->cb_header;
 934	header->cb_header = cb_hdr;
 935	header->cmd = dheader->cmd;
 936	cb_hdr->status = SVGA_CB_STATUS_NONE;
 937	cb_hdr->flags = SVGA_CB_FLAG_NONE;
 938	cb_hdr->ptr.pa = (u64)header->handle +
 939		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);
 940
 941	return 0;
 942}
 943
 944/**
 945 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 946 * command buffer space.
 947 *
 948 * @man: The command buffer manager.
 949 * @size: The requested size of the buffer space.
 950 * @interruptible: Whether to sleep interruptible while waiting for space.
 951 * @p_header: Points to a header pointer to populate on successful return.
 952 *
 953 * Returns a pointer to command buffer space if successful. Otherwise
 954 * returns an error pointer. The header pointer returned in @p_header should
 955 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 956 */
 957void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
 958		       size_t size, bool interruptible,
 959		       struct vmw_cmdbuf_header **p_header)
 960{
 961	struct vmw_cmdbuf_header *header;
 962	int ret = 0;
 963
 964	*p_header = NULL;
 965
 966	header = kzalloc(sizeof(*header), GFP_KERNEL);
 967	if (!header)
 968		return ERR_PTR(-ENOMEM);
 969
 970	if (size <= VMW_CMDBUF_INLINE_SIZE)
 971		ret = vmw_cmdbuf_space_inline(man, header, size);
 972	else
 973		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);
 974
 975	if (ret) {
 976		kfree(header);
 977		return ERR_PTR(ret);
 978	}
 979
 980	header->man = man;
 981	INIT_LIST_HEAD(&header->list);
 982	header->cb_header->status = SVGA_CB_STATUS_NONE;
 983	*p_header = header;
 984
 985	return header->cmd;
 986}
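
/*
 * Illustrative sketch, not part of the driver: the dedicated-header path
 * pairs vmw_cmdbuf_alloc() with vmw_cmdbuf_reserve() and
 * vmw_cmdbuf_commit(), both declared in vmwgfx_drv.h. The command
 * payload and its size are hypothetical.
 */
static int __maybe_unused
vmw_example_submit_header(struct vmw_cmdbuf_man *man,
			  const void *commands, size_t size)
{
	struct vmw_cmdbuf_header *header;
	void *cmd = vmw_cmdbuf_alloc(man, size, true, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	/* Reserve the space just allocated; no DX context is set. */
	cmd = vmw_cmdbuf_reserve(man, size, SVGA3D_INVALID_ID, true, header);
	if (IS_ERR(cmd)) {
		vmw_cmdbuf_header_free(header);
		return PTR_ERR(cmd);
	}

	memcpy(cmd, commands, size);
	/* Ownership passes to the manager; the header is freed when done. */
	vmw_cmdbuf_commit(man, size, header, true);

	return 0;
}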
 987
 988/**
 989 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 990 * command buffer.
 991 *
 992 * @man: The command buffer manager.
 993 * @size: The requested size of the commands.
 994 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 995 * @interruptible: Whether to sleep interruptible while waiting for space.
 996 *
 997 * Returns a pointer to command buffer space if successful. Otherwise
 998 * returns an error pointer.
 999 */
1000static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
1001				    size_t size,
1002				    int ctx_id,
1003				    bool interruptible)
1004{
1005	struct vmw_cmdbuf_header *cur;
1006	void *ret;
1007
1008	if (vmw_cmdbuf_cur_lock(man, interruptible))
1009		return ERR_PTR(-ERESTARTSYS);
1010
1011	cur = man->cur;
1012	if (cur && (size + man->cur_pos > cur->size ||
1013		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
1014		     ctx_id != cur->cb_header->dxContext)))
1015		__vmw_cmdbuf_cur_flush(man);
1016
1017	if (!man->cur) {
1018		ret = vmw_cmdbuf_alloc(man,
1019				       max_t(size_t, size, man->default_size),
1020				       interruptible, &man->cur);
1021		if (IS_ERR(ret)) {
1022			vmw_cmdbuf_cur_unlock(man);
1023			return ret;
1024		}
1025
1026		cur = man->cur;
1027	}
1028
1029	if (ctx_id != SVGA3D_INVALID_ID) {
1030		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
1031		cur->cb_header->dxContext = ctx_id;
1032	}
1033
1034	cur->reserved = size;
1035
1036	return (void *) (man->cur->cmd + man->cur_pos);
1037}
1038
1039/**
1040 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
1041 *
1042 * @man: The command buffer manager.
1043 * @size: The size of the commands actually written.
1044 * @flush: Whether to flush the command buffer immediately.
1045 */
1046static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
1047				  size_t size, bool flush)
1048{
1049	struct vmw_cmdbuf_header *cur = man->cur;
1050
1051	lockdep_assert_held_once(&man->cur_mutex);
1052
1053	WARN_ON(size > cur->reserved);
1054	man->cur_pos += size;
1055	if (!size)
1056		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
1057	if (flush)
1058		__vmw_cmdbuf_cur_flush(man);
1059	vmw_cmdbuf_cur_unlock(man);
1060}
1061
1062/**
1063 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
1064 *
1065 * @man: The command buffer manager.
1066 * @size: The requested size of the commands.
1067 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
1068 * @interruptible: Whether to sleep interruptible while waiting for space.
1069 * @header: Header of the command buffer. NULL if the current command buffer
1070 * should be used.
1071 *
1072 * Returns a pointer to command buffer space if successful. Otherwise
1073 * returns an error pointer.
1074 */
1075void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
1076			 int ctx_id, bool interruptible,
1077			 struct vmw_cmdbuf_header *header)
1078{
1079	if (!header)
1080		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);
1081
1082	if (size > header->size)
1083		return ERR_PTR(-EINVAL);
1084
1085	if (ctx_id != SVGA3D_INVALID_ID) {
1086		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
1087		header->cb_header->dxContext = ctx_id;
1088	}
1089
1090	header->reserved = size;
1091	return header->cmd;
1092}
1093
1094/**
1095 * vmw_cmdbuf_commit - Commit commands in a command buffer.
1096 *
1097 * @man: The command buffer manager.
1098 * @size: The size of the commands actually written.
1099 * @header: Header of the command buffer. NULL if the current command buffer
1100 * should be used.
1101 * @flush: Whether to flush the command buffer immediately.
1102 */
1103void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
1104		       struct vmw_cmdbuf_header *header, bool flush)
1105{
1106	if (!header) {
1107		vmw_cmdbuf_commit_cur(man, size, flush);
1108		return;
1109	}
1110
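	/* A non-interruptible cur_lock cannot fail, so the result is ignored. */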
1111	(void) vmw_cmdbuf_cur_lock(man, false);
1112	__vmw_cmdbuf_cur_flush(man);
1113	WARN_ON(size > header->reserved);
1114	man->cur = header;
1115	man->cur_pos = size;
1116	if (!size)
1117		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
1118	if (flush)
1119		__vmw_cmdbuf_cur_flush(man);
1120	vmw_cmdbuf_cur_unlock(man);
1121}
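
/*
 * Illustrative sketch, not part of the driver: small kernel commands can
 * pass a NULL header to use the manager's current buffer instead of a
 * dedicated one. The one-word payload below is a hypothetical
 * placeholder.
 */
static int __maybe_unused vmw_example_submit_small(struct vmw_cmdbuf_man *man)
{
	u32 *cmd = vmw_cmdbuf_reserve(man, sizeof(u32), SVGA3D_INVALID_ID,
				      true, NULL);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	*cmd = 0;	/* Hypothetical one-word command. */
	/* No immediate flush; the buffer is flushed when full or explicitly. */
	vmw_cmdbuf_commit(man, sizeof(u32), NULL, false);

	return 0;
}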
1122
1124/**
1125 * vmw_cmdbuf_send_device_command - Send a command through the device context.
1126 *
1127 * @man: The command buffer manager.
1128 * @command: Pointer to the command to send.
1129 * @size: Size of the command.
1130 *
1131 * Synchronously sends a device context command.
1132 */
1133static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
1134					  const void *command,
1135					  size_t size)
1136{
1137	struct vmw_cmdbuf_header *header;
1138	int status;
1139	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);
1140
1141	if (IS_ERR(cmd))
1142		return PTR_ERR(cmd);
1143
1144	memcpy(cmd, command, size);
1145	header->cb_header->length = size;
1146	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
1147	spin_lock(&man->lock);
1148	status = vmw_cmdbuf_header_submit(header);
1149	spin_unlock(&man->lock);
1150	vmw_cmdbuf_header_free(header);
1151
1152	if (status != SVGA_CB_STATUS_COMPLETED) {
1153		DRM_ERROR("Device context command failed with status %d\n",
1154			  status);
1155		return -EINVAL;
1156	}
1157
1158	return 0;
1159}
1160
1161/**
1162 * vmw_cmdbuf_preempt - Send a preempt command through the device
1163 * context.
1164 *
1165 * @man: The command buffer manager.
1166 * @context: Device context to pass command through.
1167 *
1168 * Synchronously sends a preempt command.
1169 */
1170static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
1171{
1172	struct {
1173		uint32 id;
1174		SVGADCCmdPreempt body;
1175	} __packed cmd;
1176
1177	cmd.id = SVGA_DC_CMD_PREEMPT;
1178	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
1179	cmd.body.ignoreIDZero = 0;
1180
1181	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1182}
1183
1185/**
1186 * vmw_cmdbuf_startstop - Send a start / stop command through the device
1187 * context.
1188 *
1189 * @man: The command buffer manager.
1190 * @context: Device context to start/stop.
1191 * @enable: Whether to enable or disable the context.
1192 *
1193 * Synchronously sends a device start / stop context command.
1194 */
1195static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
1196				bool enable)
1197{
1198	struct {
1199		uint32 id;
1200		SVGADCCmdStartStop body;
1201	} __packed cmd;
1202
1203	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
1204	cmd.body.enable = (enable) ? 1 : 0;
1205	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
1206
1207	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
1208}
1209
1210/**
1211 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
1212 *
1213 * @man: The command buffer manager.
1214 * @size: The size of the main space pool.
1215 *
1216 * Set the size and allocate the main command buffer space pool.
1217 * If successful, this enables large command submissions.
1218 * Note that this function requires that rudimentary command
1219 * submission is already available and that the MOB memory manager is alive.
1220 * Returns 0 on success, negative error code on failure.
1221 */
1222int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
1223{
1224	struct vmw_private *dev_priv = man->dev_priv;
1225	bool dummy;
1226	int ret;
1227
1228	if (man->has_pool)
1229		return -EINVAL;
1230
1231	/* First, try to allocate a huge chunk of DMA memory */
1232	size = PAGE_ALIGN(size);
1233	man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
1234				      &man->handle, GFP_KERNEL);
1235	if (man->map) {
1236		man->using_mob = false;
1237	} else {
1238		/*
1239		 * DMA memory failed. If we can have command buffers in a
1240		 * MOB, try to use that instead. Note that this will
1241		 * actually call into the already enabled manager, when
1242		 * binding the MOB.
1243		 */
1244		if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
1245		    !dev_priv->has_mob)
1246			return -ENOMEM;
1247
1248		ret = vmw_bo_create_kernel(dev_priv, size,
1249					   &vmw_mob_placement,
1250					   &man->cmd_space);
1251		if (ret)
1252			return ret;
1253
1254		man->using_mob = true;
1255		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
1256				  &man->map_obj);
1257		if (ret)
1258			goto out_no_map;
1259
1260		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
1261	}
1262
1263	man->size = size;
1264	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);
1265
1266	man->has_pool = true;
1267
1268	/*
1269	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
1270	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
1271	 * needs to wait for space and we block on further command
1272	 * submissions to be able to free up space.
1273	 */
1274	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1275	DRM_INFO("Using command buffers with %s pool.\n",
1276		 (man->using_mob) ? "MOB" : "DMA");
1277
1278	return 0;
1279
1280out_no_map:
1281	if (man->using_mob) {
1282		ttm_bo_put(man->cmd_space);
1283		man->cmd_space = NULL;
1284	}
1285
1286	return ret;
1287}
1288
1289/**
1290 * vmw_cmdbuf_man_create - Create a command buffer manager and enable it for
1291 * inline command buffer submissions only.
1292 *
1293 * @dev_priv: Pointer to device private structure.
1294 *
1295 * Returns a pointer to a command buffer manager on success, or an error pointer
1296 * on failure. The command buffer manager will be enabled for submissions of
1297 * size VMW_CMDBUF_INLINE_SIZE only.
1298 */
1299struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
1300{
1301	struct vmw_cmdbuf_man *man;
1302	struct vmw_cmdbuf_context *ctx;
1303	unsigned int i;
1304	int ret;
1305
1306	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
1307		return ERR_PTR(-ENOSYS);
1308
1309	man = kzalloc(sizeof(*man), GFP_KERNEL);
1310	if (!man)
1311		return ERR_PTR(-ENOMEM);
1312
1313	man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
1314		2 : 1;
1315	man->headers = dma_pool_create("vmwgfx cmdbuf",
1316				       dev_priv->drm.dev,
1317				       sizeof(SVGACBHeader),
1318				       64, PAGE_SIZE);
1319	if (!man->headers) {
1320		ret = -ENOMEM;
1321		goto out_no_pool;
1322	}
1323
1324	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
1325					dev_priv->drm.dev,
1326					sizeof(struct vmw_cmdbuf_dheader),
1327					64, PAGE_SIZE);
1328	if (!man->dheaders) {
1329		ret = -ENOMEM;
1330		goto out_no_dpool;
1331	}
1332
1333	for_each_cmdbuf_ctx(man, i, ctx)
1334		vmw_cmdbuf_ctx_init(ctx);
1335
1336	INIT_LIST_HEAD(&man->error);
1337	spin_lock_init(&man->lock);
1338	mutex_init(&man->cur_mutex);
1339	mutex_init(&man->space_mutex);
1340	mutex_init(&man->error_mutex);
1341	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1342	init_waitqueue_head(&man->alloc_queue);
1343	init_waitqueue_head(&man->idle_queue);
1344	man->dev_priv = dev_priv;
1345	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
1346	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
1347	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
1348			       &dev_priv->error_waiters);
1349	ret = vmw_cmdbuf_startstop(man, 0, true);
1350	if (ret) {
1351		DRM_ERROR("Failed starting command buffer contexts\n");
1352		vmw_cmdbuf_man_destroy(man);
1353		return ERR_PTR(ret);
1354	}
1355
1356	return man;
1357
1358out_no_dpool:
1359	dma_pool_destroy(man->headers);
1360out_no_pool:
1361	kfree(man);
1362
1363	return ERR_PTR(ret);
1364}
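
/*
 * Illustrative sketch, not part of the driver: the expected lifecycle of
 * the manager, following the comments above and below. Error handling is
 * simplified and the 2 MiB pool size is an arbitrary example value.
 */
static int __maybe_unused vmw_example_lifecycle(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man = vmw_cmdbuf_man_create(dev_priv);
	int ret;

	if (IS_ERR(man))
		return PTR_ERR(man);

	/* Enable large submissions once MOB memory management is up. */
	ret = vmw_cmdbuf_set_pool_size(man, 2 * 1024 * 1024);
	if (ret) {
		vmw_cmdbuf_man_destroy(man);
		return ret;
	}

	/* ... submit command buffers ... */

	/* Drop the pool before MOB teardown, then destroy the manager. */
	vmw_cmdbuf_remove_pool(man);
	vmw_cmdbuf_man_destroy(man);

	return 0;
}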
1365
1366/**
1367 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
1368 *
1369 * @man: Pointer to a command buffer manager.
1370 *
1371 * This function removes the main buffer space pool, and should be called
1372 * before MOB memory management is removed. When this function has been called,
1373 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
1374 * less are allowed, and the default size of the command buffer for small kernel
1375 * submissions is also set to this size.
1376 */
1377void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
1378{
1379	if (!man->has_pool)
1380		return;
1381
1382	man->has_pool = false;
1383	man->default_size = VMW_CMDBUF_INLINE_SIZE;
1384	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
1385	if (man->using_mob) {
1386		(void) ttm_bo_kunmap(&man->map_obj);
1387		ttm_bo_put(man->cmd_space);
1388		man->cmd_space = NULL;
1389	} else {
1390		dma_free_coherent(man->dev_priv->drm.dev,
1391				  man->size, man->map, man->handle);
1392	}
1393}
1394
1395/**
1396 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
1397 *
1398 * @man: Pointer to a command buffer manager.
1399 *
1400 * This function idles and then destroys a command buffer manager.
1401 */
1402void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
1403{
1404	WARN_ON_ONCE(man->has_pool);
1405	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
1406
1407	if (vmw_cmdbuf_startstop(man, 0, false))
1408		DRM_ERROR("Failed stopping command buffer contexts.\n");
1409
1410	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
1411				  &man->dev_priv->error_waiters);
1412	(void) cancel_work_sync(&man->work);
1413	dma_pool_destroy(man->dheaders);
1414	dma_pool_destroy(man->headers);
1415	mutex_destroy(&man->cur_mutex);
1416	mutex_destroy(&man->space_mutex);
1417	mutex_destroy(&man->error_mutex);
1418	kfree(man);
1419}