// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2015-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"

#include <drm/ttm/ttm_bo.h>

#include <linux/dmapool.h>
#include <linux/pci.h>

/*
 * Size of inline command buffers. Try to make sure that a page size is a
 * multiple of the DMA pool allocation size.
 */
#define VMW_CMDBUF_INLINE_ALIGN 64
#define VMW_CMDBUF_INLINE_SIZE \
	(1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
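
/*
 * Worked example (illustrative): assuming sizeof(SVGACBHeader) is 64 bytes,
 * VMW_CMDBUF_INLINE_SIZE is 1024 - ALIGN(64, 64) = 960 bytes, so a struct
 * vmw_cmdbuf_dheader below occupies exactly 1 KiB and four of them tile a
 * 4 KiB page, which is what the comment above asks for.
 */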

/**
 * struct vmw_cmdbuf_context - Command buffer context queues
 *
 * @submitted: List of command buffers that have been submitted to the
 * manager but not yet submitted to hardware.
 * @hw_submitted: List of command buffers submitted to hardware.
 * @preempted: List of preempted command buffers.
 * @num_hw_submitted: Number of buffers currently being processed by hardware.
 * @block_submission: Whether to block further submissions on this context.
 */
struct vmw_cmdbuf_context {
	struct list_head submitted;
	struct list_head hw_submitted;
	struct list_head preempted;
	unsigned num_hw_submitted;
	bool block_submission;
};

/**
 * struct vmw_cmdbuf_man - Command buffer manager
 *
 * @cur_mutex: Mutex protecting the command buffer used for incremental small
 * kernel command submissions, @cur.
 * @space_mutex: Mutex to protect against starvation when we allocate
 * main pool buffer space.
 * @error_mutex: Mutex to serialize the work queue error handling.
 * Note this is not needed if the same workqueue handler
 * can't race with itself...
 * @work: A struct work_struct implementing command buffer error handling.
 * Immutable.
 * @dev_priv: Pointer to the device private struct. Immutable.
 * @ctx: Array of command buffer context queues. The queues and the context
 * data are protected by @lock.
 * @error: List of command buffers that have caused device errors.
 * Protected by @lock.
 * @mm: Range manager for the command buffer space. Manager allocations and
 * frees are protected by @lock.
 * @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
 * @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
 * @cur: Command buffer for small kernel command submissions. Protected by
 * the @cur_mutex.
 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
 * @default_size: Default size for the @cur command buffer. Immutable.
 * @max_hw_submitted: Max number of in-flight command buffers the device can
 * handle. Immutable.
 * @lock: Spinlock protecting command submission queues.
 * @headers: Pool of DMA memory for device command buffer headers.
 * Internal protection.
 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
 * space for inline data. Internal protection.
 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
 * space.
 * @idle_queue: Wait queue for processes waiting for command buffer idle.
 * @irq_on: Whether the process function has requested irq to be turned on.
 * Protected by @lock.
 * @using_mob: Whether the command buffer space is a MOB or a contiguous DMA
 * allocation. Immutable.
 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
 * Typically this is false only during bootstrap.
 * @handle: DMA address handle for the command buffer space if @using_mob is
 * false. Immutable.
 * @size: The size of the command buffer space. Immutable.
 * @num_contexts: Number of contexts actually enabled.
 */
struct vmw_cmdbuf_man {
	struct mutex cur_mutex;
	struct mutex space_mutex;
	struct mutex error_mutex;
	struct work_struct work;
	struct vmw_private *dev_priv;
	struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
	struct list_head error;
	struct drm_mm mm;
	struct vmw_bo *cmd_space;
	u8 *map;
	struct vmw_cmdbuf_header *cur;
	size_t cur_pos;
	size_t default_size;
	unsigned max_hw_submitted;
	spinlock_t lock;
	struct dma_pool *headers;
	struct dma_pool *dheaders;
	wait_queue_head_t alloc_queue;
	wait_queue_head_t idle_queue;
	bool irq_on;
	bool using_mob;
	bool has_pool;
	dma_addr_t handle;
	size_t size;
	u32 num_contexts;
};

/**
 * struct vmw_cmdbuf_header - Command buffer metadata
 *
 * @man: The command buffer manager.
 * @cb_header: Device command buffer header, allocated from a DMA pool.
 * @cb_context: The device command buffer context.
 * @list: List head for attaching to the manager lists.
 * @node: The range manager node.
 * @handle: The DMA address of @cb_header. Handed to the device on command
 * buffer submission.
 * @cmd: Pointer to the command buffer space of this buffer.
 * @size: Size of the command buffer space of this buffer.
 * @reserved: Reserved space of this buffer.
 * @inline_space: Whether inline command buffer space is used.
 */
struct vmw_cmdbuf_header {
	struct vmw_cmdbuf_man *man;
	SVGACBHeader *cb_header;
	SVGACBContext cb_context;
	struct list_head list;
	struct drm_mm_node node;
	dma_addr_t handle;
	u8 *cmd;
	size_t size;
	size_t reserved;
	bool inline_space;
};

/**
 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
 * command buffer space.
 *
 * @cb_header: Device command buffer header.
 * @cmd: Inline command buffer space.
 */
struct vmw_cmdbuf_dheader {
	SVGACBHeader cb_header;
	u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
};

/**
 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
 *
 * @page_size: Size of requested command buffer space in pages.
 * @node: Pointer to the range manager node.
 * @done: True if this allocation has succeeded.
 */
struct vmw_cmdbuf_alloc_info {
	size_t page_size;
	struct drm_mm_node *node;
	bool done;
};

/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
	for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
	     ++(_i), ++(_ctx))
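
/*
 * Illustrative sketch (not driver code): a typical traversal with the
 * macro above, as used by the init and teardown paths, looks like
 *
 *	struct vmw_cmdbuf_context *ctx;
 *	u32 i;
 *
 *	for_each_cmdbuf_ctx(man, i, ctx)
 *		vmw_cmdbuf_ctx_init(ctx);
 */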

static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
				bool enable);
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);

/**
 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to wait interruptibly when locking.
 */
static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
{
	if (interruptible) {
		if (mutex_lock_interruptible(&man->cur_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->cur_mutex);
	}

	return 0;
}

/**
 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
 *
 * @man: The command buffer manager.
 */
static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
{
	mutex_unlock(&man->cur_mutex);
}

/**
 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
 * been used for the device context with inline command buffers.
 * Need not be called locked.
 *
 * @header: Pointer to the header to free.
 */
static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_dheader *dheader;

	if (WARN_ON_ONCE(!header->inline_space))
		return;

	dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
			       cb_header);
	dma_pool_free(header->man->dheaders, dheader, header->handle);
	kfree(header);
}

/**
 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 *
 * For internal use. Must be called with man::lock held.
 */
static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	lockdep_assert_held_once(&man->lock);

	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}

	drm_mm_remove_node(&header->node);
	wake_up_all(&man->alloc_queue);
	if (header->cb_header)
		dma_pool_free(man->headers, header->cb_header,
			      header->handle);
	kfree(header);
}

/**
 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
 * associated structures.
 *
 * @header: Pointer to the header to free.
 */
void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;

	/* Avoid locking if inline_space */
	if (header->inline_space) {
		vmw_cmdbuf_header_inline_free(header);
		return;
	}
	spin_lock(&man->lock);
	__vmw_cmdbuf_header_free(header);
	spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
 *
 * @header: The header of the buffer to submit.
 */
static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
{
	struct vmw_cmdbuf_man *man = header->man;
	u32 val;

	val = upper_32_bits(header->handle);
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);

	val = lower_32_bits(header->handle);
	val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
	vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);

	return header->cb_header->status;
}

/**
 * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
 *
 * @ctx: The command buffer context to initialize.
 */
static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
{
	INIT_LIST_HEAD(&ctx->hw_submitted);
	INIT_LIST_HEAD(&ctx->submitted);
	INIT_LIST_HEAD(&ctx->preempted);
	ctx->num_hw_submitted = 0;
}

/**
 * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
 * context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 *
 * Submits command buffers to hardware until there are no more command
 * buffers to submit or the hardware can't handle more command buffers.
 */
static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
				  struct vmw_cmdbuf_context *ctx)
{
	while (ctx->num_hw_submitted < man->max_hw_submitted &&
	       !list_empty(&ctx->submitted) &&
	       !ctx->block_submission) {
		struct vmw_cmdbuf_header *entry;
		SVGACBStatus status;

		entry = list_first_entry(&ctx->submitted,
					 struct vmw_cmdbuf_header,
					 list);

		status = vmw_cmdbuf_header_submit(entry);

		/* This should never happen */
		if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			break;
		}

		list_move_tail(&entry->list, &ctx->hw_submitted);
		ctx->num_hw_submitted++;
	}
}

/**
 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Incremented if the context's submitted list is not empty on
 * return.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers. Typically freeing them, but on preemption or error take
 * appropriate action. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_context *ctx,
				   int *notempty)
{
	struct vmw_cmdbuf_header *entry, *next;

	vmw_cmdbuf_ctx_submit(man, ctx);

	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
		SVGACBStatus status = entry->cb_header->status;

		if (status == SVGA_CB_STATUS_NONE)
			break;

		list_del(&entry->list);
		wake_up_all(&man->idle_queue);
		ctx->num_hw_submitted--;
		switch (status) {
		case SVGA_CB_STATUS_COMPLETED:
			__vmw_cmdbuf_header_free(entry);
			break;
		case SVGA_CB_STATUS_COMMAND_ERROR:
			WARN_ONCE(true, "Command buffer error.\n");
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &man->error);
			schedule_work(&man->work);
			break;
		case SVGA_CB_STATUS_PREEMPTED:
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &ctx->preempted);
			break;
		case SVGA_CB_STATUS_CB_HEADER_ERROR:
			WARN_ONCE(true, "Command buffer header error.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		default:
			WARN_ONCE(true, "Undefined command buffer status.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		}
	}

	vmw_cmdbuf_ctx_submit(man, ctx);
	if (!list_empty(&ctx->submitted))
		(*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty;
	struct vmw_cmdbuf_context *ctx;
	int i;

retry:
	notempty = 0;
	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context.
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
			       struct vmw_cmdbuf_header *header,
			       SVGACBContext cb_context)
{
	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
		header->cb_header->dxContext = 0;
	header->cb_context = cb_context;
	list_add_tail(&header->list, &man->ctx[cb_context].submitted);

	vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 * handler implemented as a threaded irq task.
 *
 * @man: Pointer to the command buffer manager.
 *
 * The bottom half of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);
}

/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	uint32_t dummy = 0;
	bool send_fence = false;
	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
	int i;
	struct vmw_cmdbuf_context *ctx;
	bool global_block = false;

	for_each_cmdbuf_ctx(man, i, ctx)
		INIT_LIST_HEAD(&restart_head[i]);

	mutex_lock(&man->error_mutex);
	spin_lock(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		SVGACBHeader *cb_hdr = entry->cb_header;
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
			(entry->cmd + cb_hdr->errorOffset);
		u32 error_cmd_size, new_start_offset;
		const char *cmd_name;

		list_del_init(&entry->list);
		global_block = true;

		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
			VMW_DEBUG_USER("Unknown command causing device error.\n");
			VMW_DEBUG_USER("Command buffer offset is %lu\n",
				       (unsigned long) cb_hdr->errorOffset);
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
			       cmd_name);
		VMW_DEBUG_USER("Command buffer offset is %lu\n",
			       (unsigned long) cb_hdr->errorOffset);
		VMW_DEBUG_USER("Command size is %lu\n",
			       (unsigned long) error_cmd_size);

		new_start_offset = cb_hdr->errorOffset + error_cmd_size;

		if (new_start_offset >= cb_hdr->length) {
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		if (man->using_mob)
			cb_hdr->ptr.mob.mobOffset += new_start_offset;
		else
			cb_hdr->ptr.pa += (u64) new_start_offset;

		entry->cmd += new_start_offset;
		cb_hdr->length -= new_start_offset;
		cb_hdr->errorOffset = 0;
		cb_hdr->offset = 0;

		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		man->ctx[i].block_submission = true;

	spin_unlock(&man->lock);

	/* Preempt all contexts */
	if (global_block && vmw_cmdbuf_preempt(man, 0))
		DRM_ERROR("Failed preempting command buffer contexts\n");

	spin_lock(&man->lock);
	for_each_cmdbuf_ctx(man, i, ctx) {
		/* Move preempted command buffers to the preempted queue. */
		vmw_cmdbuf_ctx_process(man, ctx, &dummy);

		/*
		 * Add the preempted queue after the command buffer
		 * that caused an error.
		 */
		list_splice_init(&ctx->preempted, restart_head[i].prev);

		/*
		 * Finally add all command buffers first in the submitted
		 * queue, to rerun them.
		 */

		ctx->block_submission = false;
		list_splice_init(&restart_head[i], &ctx->submitted);
	}

	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);

	if (global_block && vmw_cmdbuf_startstop(man, 0, true))
		DRM_ERROR("Failed restarting command buffer contexts\n");

	/* Send a new fence in case one was removed */
	if (send_fence) {
		vmw_cmd_send_fence(man->dev_priv, &dummy);
		wake_up_all(&man->idle_queue);
	}

	mutex_unlock(&man->error_mutex);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Check also the preempted queue for pending command buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
				bool check_preempted)
{
	struct vmw_cmdbuf_context *ctx;
	bool idle = false;
	int i;

	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (!list_empty(&ctx->submitted) ||
		    !list_empty(&ctx->hw_submitted) ||
		    (check_preempted && !list_empty(&ctx->preempted)))
			goto out_unlock;
	}

	idle = list_empty(&man->error);

out_unlock:
	spin_unlock(&man->lock);

	return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions.
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	lockdep_assert_held_once(&man->cur_mutex);

	if (!cur)
		return;

	spin_lock(&man->lock);
	if (man->cur_pos == 0) {
		__vmw_cmdbuf_header_free(cur);
		goto out_unlock;
	}

	man->cur->cb_header->length = man->cur_pos;
	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
	spin_unlock(&man->lock);
	man->cur = NULL;
	man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions.
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptibly when waiting for the
 * cur_mutex.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
			 bool interruptible)
{
	int ret = vmw_cmdbuf_cur_lock(man, interruptible);

	if (ret)
		return ret;

	__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);

	return 0;
}

/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptibly while waiting.
 * @timeout: Time out after this many ticks (jiffies).
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
		    unsigned long timeout)
{
	int ret;

	ret = vmw_cmdbuf_cur_flush(man, interruptible);
	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		ret = wait_event_interruptible_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	} else {
		ret = wait_event_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);
	if (ret == 0) {
		if (!vmw_cmdbuf_man_idle(man, true))
			ret = -EBUSY;
		else
			ret = 0;
	}
	if (ret > 0)
		ret = 0;

	return ret;
}
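
/*
 * Illustrative usage (not driver code): callers typically wait for idle
 * with a bounded timeout and treat -EBUSY as "still busy", e.g.
 *
 *	if (vmw_cmdbuf_idle(man, false, 10 * HZ) == -EBUSY)
 *		DRM_ERROR("Command buffer manager failed to idle.\n");
 */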

/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Holds the requested size on entry, and the
 * allocated mm node on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true on success,
 * in which case @info->done is also set.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_alloc_info *info)
{
	int ret;

	if (info->done)
		return true;

	memset(info->node, 0, sizeof(*info->node));
	spin_lock(&man->lock);
	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	if (ret) {
		vmw_cmdbuf_man_process(man);
		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	}

	spin_unlock(&man->lock);
	info->done = !ret;

	return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if there is
 * no space available at the moment, it turns on IRQ handling and sleeps
 * waiting for it to become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
				  struct drm_mm_node *node,
				  size_t size,
				  bool interruptible)
{
	struct vmw_cmdbuf_alloc_info info;

	info.page_size = PFN_UP(size);
	info.node = node;
	info.done = false;

	/*
	 * To prevent starvation of large requests, only one allocating call
	 * at a time waiting for space.
	 */
	if (interruptible) {
		if (mutex_lock_interruptible(&man->space_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->space_mutex);
	}

	/* Try to allocate space without waiting. */
	if (vmw_cmdbuf_try_alloc(man, &info))
		goto out_unlock;

	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		int ret;

		ret = wait_event_interruptible
			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
		if (ret) {
			vmw_generic_waiter_remove
				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				 &man->dev_priv->cmdbuf_waiters);
			mutex_unlock(&man->space_mutex);
			return ret;
		}
	} else {
		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);

out_unlock:
	mutex_unlock(&man->space_mutex);

	return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_header *header,
				 size_t size,
				 bool interruptible)
{
	SVGACBHeader *cb_hdr;
	size_t offset;
	int ret;

	if (!man->has_pool)
		return -ENOMEM;

	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);

	if (ret)
		return ret;

	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
					    &header->handle);
	if (!header->cb_header) {
		ret = -ENOMEM;
		goto out_no_cb_header;
	}

	header->size = header->node.size << PAGE_SHIFT;
	cb_hdr = header->cb_header;
	offset = header->node.start << PAGE_SHIFT;
	header->cmd = man->map + offset;
	if (man->using_mob) {
		cb_hdr->flags = SVGA_CB_FLAG_MOB;
		cb_hdr->ptr.mob.mobid = man->cmd_space->tbo.resource->start;
		cb_hdr->ptr.mob.mobOffset = offset;
	} else {
		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
	}

	return 0;

out_no_cb_header:
	spin_lock(&man->lock);
	drm_mm_remove_node(&header->node);
	spin_unlock(&man->lock);

	return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_header *header,
				   int size)
{
	struct vmw_cmdbuf_dheader *dheader;
	SVGACBHeader *cb_hdr;

	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
		return -ENOMEM;

	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
				  &header->handle);
	if (!dheader)
		return -ENOMEM;

	header->inline_space = true;
	header->size = VMW_CMDBUF_INLINE_SIZE;
	cb_hdr = &dheader->cb_header;
	header->cb_header = cb_hdr;
	header->cmd = dheader->cmd;
	cb_hdr->status = SVGA_CB_STATUS_NONE;
	cb_hdr->flags = SVGA_CB_FLAG_NONE;
	cb_hdr->ptr.pa = (u64)header->handle +
		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

	return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @p_header: Points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		       size_t size, bool interruptible,
		       struct vmw_cmdbuf_header **p_header)
{
	struct vmw_cmdbuf_header *header;
	int ret = 0;

	*p_header = NULL;

	header = kzalloc(sizeof(*header), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);

	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size);
	else
		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

	if (ret) {
		kfree(header);
		return ERR_PTR(ret);
	}

	header->man = man;
	INIT_LIST_HEAD(&header->list);
	header->cb_header->status = SVGA_CB_STATUS_NONE;
	*p_header = header;

	return header->cmd;
}
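
/*
 * Illustrative sketch (not driver code; cmd_size and commands are
 * caller-provided stand-ins): a dedicated header is allocated, filled
 * and committed in one shot:
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *cmd = vmw_cmdbuf_alloc(man, cmd_size, true, &header);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *
 *	memcpy(cmd, commands, cmd_size);
 *	vmw_cmdbuf_commit(man, cmd_size, header, true);
 */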

/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
				    size_t size,
				    int ctx_id,
				    bool interruptible)
{
	struct vmw_cmdbuf_header *cur;
	void *ret;

	if (vmw_cmdbuf_cur_lock(man, interruptible))
		return ERR_PTR(-ERESTARTSYS);

	cur = man->cur;
	if (cur && (size + man->cur_pos > cur->size ||
		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		     ctx_id != cur->cb_header->dxContext)))
		__vmw_cmdbuf_cur_flush(man);

	if (!man->cur) {
		ret = vmw_cmdbuf_alloc(man,
				       max_t(size_t, size, man->default_size),
				       interruptible, &man->cur);
		if (IS_ERR(ret)) {
			vmw_cmdbuf_cur_unlock(man);
			return ret;
		}

		cur = man->cur;
	}

	if (ctx_id != SVGA3D_INVALID_ID) {
		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		cur->cb_header->dxContext = ctx_id;
	}

	cur->reserved = size;

	return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
				  size_t size, bool flush)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	lockdep_assert_held_once(&man->cur_mutex);

	WARN_ON(size > cur->reserved);
	man->cur_pos += size;
	if (!size)
		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptibly while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
			 int ctx_id, bool interruptible,
			 struct vmw_cmdbuf_header *header)
{
	if (!header)
		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

	if (size > header->size)
		return ERR_PTR(-EINVAL);

	if (ctx_id != SVGA3D_INVALID_ID) {
		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		header->cb_header->dxContext = ctx_id;
	}

	header->reserved = size;
	return header->cmd;
}

/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
		       struct vmw_cmdbuf_header *header, bool flush)
{
	if (!header) {
		vmw_cmdbuf_commit_cur(man, size, flush);
		return;
	}

	(void) vmw_cmdbuf_cur_lock(man, false);
	__vmw_cmdbuf_cur_flush(man);
	WARN_ON(size > header->reserved);
	man->cur = header;
	man->cur_pos = size;
	if (!size)
		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}
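
/*
 * Illustrative sketch (not driver code; cmd_size is a caller-provided
 * stand-in): reserving and committing on the manager-owned current
 * buffer (header == NULL) pair up as
 *
 *	void *cmd = vmw_cmdbuf_reserve(man, cmd_size, SVGA3D_INVALID_ID,
 *				       true, NULL);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *
 *	...write up to cmd_size bytes of commands to cmd...
 *	vmw_cmdbuf_commit(man, cmd_size, NULL, false);
 */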

/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
					  const void *command,
					  size_t size)
{
	struct vmw_cmdbuf_header *header;
	int status;
	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	memcpy(cmd, command, size);
	header->cb_header->length = size;
	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
	spin_lock(&man->lock);
	status = vmw_cmdbuf_header_submit(header);
	spin_unlock(&man->lock);
	vmw_cmdbuf_header_free(header);

	if (status != SVGA_CB_STATUS_COMPLETED) {
		DRM_ERROR("Device context command failed with status %d\n",
			  status);
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmdbuf_preempt - Send a preempt command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to pass command through.
 *
 * Synchronously sends a preempt command.
 */
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
{
	struct {
		uint32 id;
		SVGADCCmdPreempt body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_PREEMPT;
	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
	cmd.body.ignoreIDZero = 0;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to start/stop.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
				bool enable)
{
	struct {
		uint32 id;
		SVGADCCmdStartStop body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
	cmd.body.enable = (enable) ? 1 : 0;
	cmd.body.context = SVGA_CB_CONTEXT_0 + context;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 *
 * Set the size and allocate the main command buffer space pool.
 * If successful, this enables large command submissions.
 * Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
{
	struct vmw_private *dev_priv = man->dev_priv;
	int ret;

	if (man->has_pool)
		return -EINVAL;

	/* First, try to allocate a huge chunk of DMA memory */
	size = PAGE_ALIGN(size);
	man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
				      &man->handle, GFP_KERNEL);
	if (man->map) {
		man->using_mob = false;
	} else {
		struct vmw_bo_params bo_params = {
			.domain = VMW_BO_DOMAIN_MOB,
			.busy_domain = VMW_BO_DOMAIN_MOB,
			.bo_type = ttm_bo_type_kernel,
			.size = size,
			.pin = true
		};
		/*
		 * DMA memory failed. If we can have command buffers in a
		 * MOB, try to use that instead. Note that this will
		 * actually call into the already enabled manager, when
		 * binding the MOB.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
		    !dev_priv->has_mob)
			return -ENOMEM;

		ret = vmw_bo_create(dev_priv, &bo_params, &man->cmd_space);
		if (ret)
			return ret;

		man->map = vmw_bo_map_and_cache(man->cmd_space);
		man->using_mob = man->map != NULL;
	}

	man->size = size;
	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

	man->has_pool = true;

	/*
	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
	 * needs to wait for space and we block on further command
	 * submissions to be able to free up space.
	 */
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	drm_info(&dev_priv->drm,
		 "Using command buffers with %s pool.\n",
		 (man->using_mob) ? "MOB" : "DMA");

	return 0;
}

/**
 * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success or an error
 * pointer on failure. The command buffer manager will be enabled for
 * submissions of size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man;
	struct vmw_cmdbuf_context *ctx;
	unsigned int i;
	int ret;

	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
		return ERR_PTR(-ENOSYS);

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return ERR_PTR(-ENOMEM);

	man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
		2 : 1;
	man->headers = dma_pool_create("vmwgfx cmdbuf",
				       dev_priv->drm.dev,
				       sizeof(SVGACBHeader),
				       64, PAGE_SIZE);
	if (!man->headers) {
		ret = -ENOMEM;
		goto out_no_pool;
	}

	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
					dev_priv->drm.dev,
					sizeof(struct vmw_cmdbuf_dheader),
					64, PAGE_SIZE);
	if (!man->dheaders) {
		ret = -ENOMEM;
		goto out_no_dpool;
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_init(ctx);

	INIT_LIST_HEAD(&man->error);
	spin_lock_init(&man->lock);
	mutex_init(&man->cur_mutex);
	mutex_init(&man->space_mutex);
	mutex_init(&man->error_mutex);
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	init_waitqueue_head(&man->alloc_queue);
	init_waitqueue_head(&man->idle_queue);
	man->dev_priv = dev_priv;
	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
			       &dev_priv->error_waiters);
	ret = vmw_cmdbuf_startstop(man, 0, true);
	if (ret) {
		DRM_ERROR("Failed starting command buffer contexts\n");
		vmw_cmdbuf_man_destroy(man);
		return ERR_PTR(ret);
	}

	return man;

out_no_dpool:
	dma_pool_destroy(man->headers);
out_no_pool:
	kfree(man);

	return ERR_PTR(ret);
}
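
/*
 * Illustrative lifecycle sketch (not driver code; pool_size is a
 * caller-chosen value): the manager is created early, upgraded with a
 * main pool once command submission and MOB memory are up, and torn
 * down in reverse order:
 *
 *	man = vmw_cmdbuf_man_create(dev_priv);
 *	if (IS_ERR(man))
 *		return PTR_ERR(man);
 *	ret = vmw_cmdbuf_set_pool_size(man, pool_size);
 *	...
 *	vmw_cmdbuf_remove_pool(man);
 *	vmw_cmdbuf_man_destroy(man);
 */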

/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
	if (!man->has_pool)
		return;

	man->has_pool = false;
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (man->using_mob)
		vmw_bo_unreference(&man->cmd_space);
	else
		dma_free_coherent(man->dev_priv->drm.dev,
				  man->size, man->map, man->handle);
}

/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
	WARN_ON_ONCE(man->has_pool);
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);

	if (vmw_cmdbuf_startstop(man, 0, false))
		DRM_ERROR("Failed stopping command buffer contexts.\n");

	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
				  &man->dev_priv->error_waiters);
	(void) cancel_work_sync(&man->work);
	dma_pool_destroy(man->dheaders);
	dma_pool_destroy(man->headers);
	mutex_destroy(&man->cur_mutex);
	mutex_destroy(&man->space_mutex);
	mutex_destroy(&man->error_mutex);
	kfree(man);
}
1// SPDX-License-Identifier: GPL-2.0 OR MIT
2/**************************************************************************
3 *
4 * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include <linux/dmapool.h>
29#include <linux/pci.h>
30
31#include <drm/ttm/ttm_bo_api.h>
32
33#include "vmwgfx_drv.h"
34
35/*
36 * Size of inline command buffers. Try to make sure that a page size is a
37 * multiple of the DMA pool allocation size.
38 */
39#define VMW_CMDBUF_INLINE_ALIGN 64
40#define VMW_CMDBUF_INLINE_SIZE \
41 (1024 - ALIGN(sizeof(SVGACBHeader), VMW_CMDBUF_INLINE_ALIGN))
42
43/**
44 * struct vmw_cmdbuf_context - Command buffer context queues
45 *
46 * @submitted: List of command buffers that have been submitted to the
47 * manager but not yet submitted to hardware.
48 * @hw_submitted: List of command buffers submitted to hardware.
49 * @preempted: List of preempted command buffers.
50 * @num_hw_submitted: Number of buffers currently being processed by hardware
51 * @block_submission: Identifies a block command submission.
52 */
53struct vmw_cmdbuf_context {
54 struct list_head submitted;
55 struct list_head hw_submitted;
56 struct list_head preempted;
57 unsigned num_hw_submitted;
58 bool block_submission;
59};
60
61/**
62 * struct vmw_cmdbuf_man - Command buffer manager
63 *
64 * @cur_mutex: Mutex protecting the command buffer used for incremental small
65 * kernel command submissions, @cur.
66 * @space_mutex: Mutex to protect against starvation when we allocate
67 * main pool buffer space.
68 * @error_mutex: Mutex to serialize the work queue error handling.
69 * Note this is not needed if the same workqueue handler
70 * can't race with itself...
71 * @work: A struct work_struct implementeing command buffer error handling.
72 * Immutable.
73 * @dev_priv: Pointer to the device private struct. Immutable.
74 * @ctx: Array of command buffer context queues. The queues and the context
75 * data is protected by @lock.
76 * @error: List of command buffers that have caused device errors.
77 * Protected by @lock.
78 * @mm: Range manager for the command buffer space. Manager allocations and
79 * frees are protected by @lock.
80 * @cmd_space: Buffer object for the command buffer space, unless we were
81 * able to make a contigous coherent DMA memory allocation, @handle. Immutable.
82 * @map_obj: Mapping state for @cmd_space. Immutable.
83 * @map: Pointer to command buffer space. May be a mapped buffer object or
84 * a contigous coherent DMA memory allocation. Immutable.
85 * @cur: Command buffer for small kernel command submissions. Protected by
86 * the @cur_mutex.
87 * @cur_pos: Space already used in @cur. Protected by @cur_mutex.
88 * @default_size: Default size for the @cur command buffer. Immutable.
89 * @max_hw_submitted: Max number of in-flight command buffers the device can
90 * handle. Immutable.
91 * @lock: Spinlock protecting command submission queues.
92 * @headers: Pool of DMA memory for device command buffer headers.
93 * Internal protection.
94 * @dheaders: Pool of DMA memory for device command buffer headers with trailing
95 * space for inline data. Internal protection.
96 * @alloc_queue: Wait queue for processes waiting to allocate command buffer
97 * space.
98 * @idle_queue: Wait queue for processes waiting for command buffer idle.
99 * @irq_on: Whether the process function has requested irq to be turned on.
100 * Protected by @lock.
101 * @using_mob: Whether the command buffer space is a MOB or a contigous DMA
102 * allocation. Immutable.
103 * @has_pool: Has a large pool of DMA memory which allows larger allocations.
104 * Typically this is false only during bootstrap.
105 * @handle: DMA address handle for the command buffer space if @using_mob is
106 * false. Immutable.
107 * @size: The size of the command buffer space. Immutable.
108 * @num_contexts: Number of contexts actually enabled.
109 */
110struct vmw_cmdbuf_man {
111 struct mutex cur_mutex;
112 struct mutex space_mutex;
113 struct mutex error_mutex;
114 struct work_struct work;
115 struct vmw_private *dev_priv;
116 struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
117 struct list_head error;
118 struct drm_mm mm;
119 struct ttm_buffer_object *cmd_space;
120 struct ttm_bo_kmap_obj map_obj;
121 u8 *map;
122 struct vmw_cmdbuf_header *cur;
123 size_t cur_pos;
124 size_t default_size;
125 unsigned max_hw_submitted;
126 spinlock_t lock;
127 struct dma_pool *headers;
128 struct dma_pool *dheaders;
129 wait_queue_head_t alloc_queue;
130 wait_queue_head_t idle_queue;
131 bool irq_on;
132 bool using_mob;
133 bool has_pool;
134 dma_addr_t handle;
135 size_t size;
136 u32 num_contexts;
137};
138
139/**
140 * struct vmw_cmdbuf_header - Command buffer metadata
141 *
142 * @man: The command buffer manager.
143 * @cb_header: Device command buffer header, allocated from a DMA pool.
144 * @cb_context: The device command buffer context.
145 * @list: List head for attaching to the manager lists.
146 * @node: The range manager node.
147 * @handle: The DMA address of @cb_header. Handed to the device on command
148 * buffer submission.
149 * @cmd: Pointer to the command buffer space of this buffer.
150 * @size: Size of the command buffer space of this buffer.
151 * @reserved: Reserved space of this buffer.
152 * @inline_space: Whether inline command buffer space is used.
153 */
154struct vmw_cmdbuf_header {
155 struct vmw_cmdbuf_man *man;
156 SVGACBHeader *cb_header;
157 SVGACBContext cb_context;
158 struct list_head list;
159 struct drm_mm_node node;
160 dma_addr_t handle;
161 u8 *cmd;
162 size_t size;
163 size_t reserved;
164 bool inline_space;
165};
166
167/**
168 * struct vmw_cmdbuf_dheader - Device command buffer header with inline
169 * command buffer space.
170 *
171 * @cb_header: Device command buffer header.
172 * @cmd: Inline command buffer space.
173 */
174struct vmw_cmdbuf_dheader {
175 SVGACBHeader cb_header;
176 u8 cmd[VMW_CMDBUF_INLINE_SIZE] __aligned(VMW_CMDBUF_INLINE_ALIGN);
177};
178
179/**
180 * struct vmw_cmdbuf_alloc_info - Command buffer space allocation metadata
181 *
182 * @page_size: Size of requested command buffer space in pages.
183 * @node: Pointer to the range manager node.
184 * @done: True if this allocation has succeeded.
185 */
186struct vmw_cmdbuf_alloc_info {
187 size_t page_size;
188 struct drm_mm_node *node;
189 bool done;
190};
191
192/* Loop over each context in the command buffer manager. */
193#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
194 for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
195 ++(_i), ++(_ctx))
196
197static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
198 bool enable);
199static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context);
200
201/**
202 * vmw_cmdbuf_cur_lock - Helper to lock the cur_mutex.
203 *
204 * @man: The range manager.
205 * @interruptible: Whether to wait interruptible when locking.
206 */
207static int vmw_cmdbuf_cur_lock(struct vmw_cmdbuf_man *man, bool interruptible)
208{
209 if (interruptible) {
210 if (mutex_lock_interruptible(&man->cur_mutex))
211 return -ERESTARTSYS;
212 } else {
213 mutex_lock(&man->cur_mutex);
214 }
215
216 return 0;
217}
218
219/**
220 * vmw_cmdbuf_cur_unlock - Helper to unlock the cur_mutex.
221 *
222 * @man: The range manager.
223 */
224static void vmw_cmdbuf_cur_unlock(struct vmw_cmdbuf_man *man)
225{
226 mutex_unlock(&man->cur_mutex);
227}
228
229/**
230 * vmw_cmdbuf_header_inline_free - Free a struct vmw_cmdbuf_header that has
231 * been used for the device context with inline command buffers.
232 * Need not be called locked.
233 *
234 * @header: Pointer to the header to free.
235 */
236static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
237{
238 struct vmw_cmdbuf_dheader *dheader;
239
240 if (WARN_ON_ONCE(!header->inline_space))
241 return;
242
243 dheader = container_of(header->cb_header, struct vmw_cmdbuf_dheader,
244 cb_header);
245 dma_pool_free(header->man->dheaders, dheader, header->handle);
246 kfree(header);
247}
248
249/**
250 * __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
251 * associated structures.
252 *
253 * @header: Pointer to the header to free.
254 *
255 * For internal use. Must be called with man::lock held.
256 */
257static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
258{
259 struct vmw_cmdbuf_man *man = header->man;
260
261 lockdep_assert_held_once(&man->lock);
262
263 if (header->inline_space) {
264 vmw_cmdbuf_header_inline_free(header);
265 return;
266 }
267
268 drm_mm_remove_node(&header->node);
269 wake_up_all(&man->alloc_queue);
270 if (header->cb_header)
271 dma_pool_free(man->headers, header->cb_header,
272 header->handle);
273 kfree(header);
274}
275
276/**
277 * vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
278 * associated structures.
279 *
280 * @header: Pointer to the header to free.
281 */
282void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
283{
284 struct vmw_cmdbuf_man *man = header->man;
285
286 /* Avoid locking if inline_space */
287 if (header->inline_space) {
288 vmw_cmdbuf_header_inline_free(header);
289 return;
290 }
291 spin_lock(&man->lock);
292 __vmw_cmdbuf_header_free(header);
293 spin_unlock(&man->lock);
294}
295
296
297/**
298 * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
299 *
300 * @header: The header of the buffer to submit.
301 */
302static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
303{
304 struct vmw_cmdbuf_man *man = header->man;
305 u32 val;
306
307 val = upper_32_bits(header->handle);
308 vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
309
310 val = lower_32_bits(header->handle);
311 val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
312 vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
313
314 return header->cb_header->status;
315}
316
317/**
318 * vmw_cmdbuf_ctx_init: Initialize a command buffer context.
319 *
320 * @ctx: The command buffer context to initialize
321 */
322static void vmw_cmdbuf_ctx_init(struct vmw_cmdbuf_context *ctx)
323{
324 INIT_LIST_HEAD(&ctx->hw_submitted);
325 INIT_LIST_HEAD(&ctx->submitted);
326 INIT_LIST_HEAD(&ctx->preempted);
327 ctx->num_hw_submitted = 0;
328}
329
330/**
331 * vmw_cmdbuf_ctx_submit: Submit command buffers from a command buffer
332 * context.
333 *
334 * @man: The command buffer manager.
335 * @ctx: The command buffer context.
336 *
337 * Submits command buffers to hardware until there are no more command
338 * buffers to submit or the hardware can't handle more command buffers.
339 */
340static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
341 struct vmw_cmdbuf_context *ctx)
342{
343 while (ctx->num_hw_submitted < man->max_hw_submitted &&
344 !list_empty(&ctx->submitted) &&
345 !ctx->block_submission) {
346 struct vmw_cmdbuf_header *entry;
347 SVGACBStatus status;
348
349 entry = list_first_entry(&ctx->submitted,
350 struct vmw_cmdbuf_header,
351 list);
352
353 status = vmw_cmdbuf_header_submit(entry);
354
355 /* This should never happen */
356 if (WARN_ON_ONCE(status == SVGA_CB_STATUS_QUEUE_FULL)) {
357 entry->cb_header->status = SVGA_CB_STATUS_NONE;
358 break;
359 }
360
361 list_del(&entry->list);
362 list_add_tail(&entry->list, &ctx->hw_submitted);
363 ctx->num_hw_submitted++;
364 }
365
366}
367
/**
 * vmw_cmdbuf_ctx_process - Process a command buffer context.
 *
 * @man: The command buffer manager.
 * @ctx: The command buffer context.
 * @notempty: Pass back count of non-empty command submitted lists.
 *
 * Submit command buffers to hardware if possible, and process finished
 * buffers, typically by freeing them; on preemption or error, take the
 * appropriate action. Wake up waiters if appropriate.
 */
static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_context *ctx,
				   int *notempty)
{
	struct vmw_cmdbuf_header *entry, *next;

	vmw_cmdbuf_ctx_submit(man, ctx);

	list_for_each_entry_safe(entry, next, &ctx->hw_submitted, list) {
		SVGACBStatus status = entry->cb_header->status;

		if (status == SVGA_CB_STATUS_NONE)
			break;

		list_del(&entry->list);
		wake_up_all(&man->idle_queue);
		ctx->num_hw_submitted--;
		switch (status) {
		case SVGA_CB_STATUS_COMPLETED:
			__vmw_cmdbuf_header_free(entry);
			break;
		case SVGA_CB_STATUS_COMMAND_ERROR:
			WARN_ONCE(true, "Command buffer error.\n");
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &man->error);
			schedule_work(&man->work);
			break;
		case SVGA_CB_STATUS_PREEMPTED:
			entry->cb_header->status = SVGA_CB_STATUS_NONE;
			list_add_tail(&entry->list, &ctx->preempted);
			break;
		case SVGA_CB_STATUS_CB_HEADER_ERROR:
			WARN_ONCE(true, "Command buffer header error.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		default:
			WARN_ONCE(true, "Undefined command buffer status.\n");
			__vmw_cmdbuf_header_free(entry);
			break;
		}
	}

	vmw_cmdbuf_ctx_submit(man, ctx);
	if (!list_empty(&ctx->submitted))
		(*notempty)++;
}

/**
 * vmw_cmdbuf_man_process - Process all command buffer contexts and
 * switch on and off irqs as appropriate.
 *
 * @man: The command buffer manager.
 *
 * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
 * command buffers left that are not submitted to hardware, make sure
 * IRQ handling is turned on. Otherwise, make sure it's turned off.
 */
static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
{
	int notempty;
	struct vmw_cmdbuf_context *ctx;
	int i;

retry:
	notempty = 0;
	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

	if (man->irq_on && !notempty) {
		vmw_generic_waiter_remove(man->dev_priv,
					  SVGA_IRQFLAG_COMMAND_BUFFER,
					  &man->dev_priv->cmdbuf_waiters);
		man->irq_on = false;
	} else if (!man->irq_on && notempty) {
		vmw_generic_waiter_add(man->dev_priv,
				       SVGA_IRQFLAG_COMMAND_BUFFER,
				       &man->dev_priv->cmdbuf_waiters);
		man->irq_on = true;

		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}

/**
 * vmw_cmdbuf_ctx_add - Schedule a command buffer for submission on a
 * command buffer context
 *
 * @man: The command buffer manager.
 * @header: The header of the buffer to submit.
 * @cb_context: The command buffer context to use.
 *
 * This function adds @header to the "submitted" queue of the command
 * buffer context identified by @cb_context. It then calls the command buffer
 * manager processing to potentially submit the buffer to hardware.
 * @man->lock needs to be held when calling this function.
 */
static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
			       struct vmw_cmdbuf_header *header,
			       SVGACBContext cb_context)
{
	if (!(header->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT))
		header->cb_header->dxContext = 0;
	header->cb_context = cb_context;
	list_add_tail(&header->list, &man->ctx[cb_context].submitted);

	vmw_cmdbuf_man_process(man);
}

/**
 * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
 * handler implemented as a threaded irq task.
 *
 * @man: Pointer to the command buffer manager.
 *
 * The bottom half of the interrupt handler simply calls into the
 * command buffer processor to free finished buffers and submit any
 * queued buffers to hardware.
 */
void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);
}

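/*
 * Example (illustrative sketch only, not part of this file): the
 * driver's threaded interrupt handler is expected to forward command
 * buffer IRQs here, roughly along these lines, where "dev_priv" and
 * the status-mask handling follow the driver's irq code:
 *
 *	if (status & SVGA_IRQFLAG_COMMAND_BUFFER)
 *		vmw_cmdbuf_irqthread(dev_priv->cman);
 */
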
/**
 * vmw_cmdbuf_work_func - The deferred work function that handles
 * command buffer errors.
 *
 * @work: The work func closure argument.
 *
 * Restarting the command buffer context after an error requires process
 * context, so it is deferred to this work function.
 */
static void vmw_cmdbuf_work_func(struct work_struct *work)
{
	struct vmw_cmdbuf_man *man =
		container_of(work, struct vmw_cmdbuf_man, work);
	struct vmw_cmdbuf_header *entry, *next;
	uint32_t dummy = 0;
	bool send_fence = false;
	struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
	int i;
	struct vmw_cmdbuf_context *ctx;
	bool global_block = false;

	for_each_cmdbuf_ctx(man, i, ctx)
		INIT_LIST_HEAD(&restart_head[i]);

	mutex_lock(&man->error_mutex);
	spin_lock(&man->lock);
	list_for_each_entry_safe(entry, next, &man->error, list) {
		SVGACBHeader *cb_hdr = entry->cb_header;
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *)
			(entry->cmd + cb_hdr->errorOffset);
		u32 error_cmd_size, new_start_offset;
		const char *cmd_name;

		list_del_init(&entry->list);
		global_block = true;

		if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
			VMW_DEBUG_USER("Unknown command causing device error.\n");
			VMW_DEBUG_USER("Command buffer offset is %lu\n",
				       (unsigned long) cb_hdr->errorOffset);
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
			       cmd_name);
		VMW_DEBUG_USER("Command buffer offset is %lu\n",
			       (unsigned long) cb_hdr->errorOffset);
		VMW_DEBUG_USER("Command size is %lu\n",
			       (unsigned long) error_cmd_size);

		new_start_offset = cb_hdr->errorOffset + error_cmd_size;

		if (new_start_offset >= cb_hdr->length) {
			__vmw_cmdbuf_header_free(entry);
			send_fence = true;
			continue;
		}

		if (man->using_mob)
			cb_hdr->ptr.mob.mobOffset += new_start_offset;
		else
			cb_hdr->ptr.pa += (u64) new_start_offset;

		entry->cmd += new_start_offset;
		cb_hdr->length -= new_start_offset;
		cb_hdr->errorOffset = 0;
		cb_hdr->offset = 0;

		list_add_tail(&entry->list, &restart_head[entry->cb_context]);
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		man->ctx[i].block_submission = true;

	spin_unlock(&man->lock);

	/* Preempt all contexts */
	if (global_block && vmw_cmdbuf_preempt(man, 0))
		DRM_ERROR("Failed preempting command buffer contexts\n");

	spin_lock(&man->lock);
	for_each_cmdbuf_ctx(man, i, ctx) {
		/* Move preempted command buffers to the preempted queue. */
		vmw_cmdbuf_ctx_process(man, ctx, &dummy);

		/*
		 * Add the preempted queue after the command buffer
		 * that caused an error.
		 */
		list_splice_init(&ctx->preempted, restart_head[i].prev);

		/*
		 * Finally, add all command buffers to the head of the
		 * submitted queue, to rerun them.
		 */
		ctx->block_submission = false;
		list_splice_init(&restart_head[i], &ctx->submitted);
	}

	vmw_cmdbuf_man_process(man);
	spin_unlock(&man->lock);

	if (global_block && vmw_cmdbuf_startstop(man, 0, true))
		DRM_ERROR("Failed restarting command buffer contexts\n");

	/* Send a new fence in case one was removed */
	if (send_fence) {
		vmw_cmd_send_fence(man->dev_priv, &dummy);
		wake_up_all(&man->idle_queue);
	}

	mutex_unlock(&man->error_mutex);
}

/**
 * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
 *
 * @man: The command buffer manager.
 * @check_preempted: Also check the preempted queue for pending command
 * buffers.
 */
static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
				bool check_preempted)
{
	struct vmw_cmdbuf_context *ctx;
	bool idle = false;
	int i;

	spin_lock(&man->lock);
	vmw_cmdbuf_man_process(man);
	for_each_cmdbuf_ctx(man, i, ctx) {
		if (!list_empty(&ctx->submitted) ||
		    !list_empty(&ctx->hw_submitted) ||
		    (check_preempted && !list_empty(&ctx->preempted)))
			goto out_unlock;
	}

	idle = list_empty(&man->error);

out_unlock:
	spin_unlock(&man->lock);

	return idle;
}

/**
 * __vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed. Call with @man->cur_mutex held.
 */
static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	lockdep_assert_held_once(&man->cur_mutex);

	if (!cur)
		return;

	spin_lock(&man->lock);
	if (man->cur_pos == 0) {
		__vmw_cmdbuf_header_free(cur);
		goto out_unlock;
	}

	man->cur->cb_header->length = man->cur_pos;
	vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
	spin_unlock(&man->lock);
	man->cur = NULL;
	man->cur_pos = 0;
}

/**
 * vmw_cmdbuf_cur_flush - Flush the current command buffer for small kernel
 * command submissions
 *
 * @man: The command buffer manager.
 * @interruptible: Whether to sleep interruptible when sleeping.
 *
 * Flushes the current command buffer without allocating a new one. A new one
 * is automatically allocated when needed.
 */
int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
			 bool interruptible)
{
	int ret = vmw_cmdbuf_cur_lock(man, interruptible);

	if (ret)
		return ret;

	__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);

	return 0;
}

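/*
 * Example (illustrative, not part of the driver): kicking out any
 * accumulated small kernel commands, for instance before waiting on
 * a fence:
 *
 *	ret = vmw_cmdbuf_cur_flush(man, true);
 *	if (ret)
 *		return ret;
 */
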
/**
 * vmw_cmdbuf_idle - Wait for command buffer manager idle.
 *
 * @man: The command buffer manager.
 * @interruptible: Sleep interruptible while waiting.
 * @timeout: Time out after this many ticks.
 *
 * Wait until the command buffer manager has processed all command buffers,
 * or until a timeout occurs. If a timeout occurs, the function will return
 * -EBUSY.
 */
int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible,
		    unsigned long timeout)
{
	int ret;

	ret = vmw_cmdbuf_cur_flush(man, interruptible);
	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		ret = wait_event_interruptible_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	} else {
		ret = wait_event_timeout
			(man->idle_queue, vmw_cmdbuf_man_idle(man, true),
			 timeout);
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);
	if (ret == 0) {
		if (!vmw_cmdbuf_man_idle(man, true))
			ret = -EBUSY;
		else
			ret = 0;
	}
	if (ret > 0)
		ret = 0;

	return ret;
}

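/*
 * Example (illustrative): draining all queued command buffers with a
 * ten second timeout, e.g. ahead of a teardown step:
 *
 *	if (vmw_cmdbuf_idle(man, false, 10 * HZ))
 *		DRM_ERROR("Command buffer manager failed to idle.\n");
 */
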
/**
 * vmw_cmdbuf_try_alloc - Try to allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @info: Allocation info. Will hold the size on entry and allocated mm node
 * on successful return.
 *
 * Try to allocate buffer space from the main pool. Returns true if succeeded.
 * If a fatal error was hit, the error code is returned in @info->ret.
 */
static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_alloc_info *info)
{
	int ret;

	if (info->done)
		return true;

	memset(info->node, 0, sizeof(*info->node));
	spin_lock(&man->lock);
	ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	if (ret) {
		vmw_cmdbuf_man_process(man);
		ret = drm_mm_insert_node(&man->mm, info->node, info->page_size);
	}

	spin_unlock(&man->lock);
	info->done = !ret;

	return info->done;
}

/**
 * vmw_cmdbuf_alloc_space - Allocate buffer space from the main pool.
 *
 * @man: The command buffer manager.
 * @node: Pointer to pre-allocated range-manager node.
 * @size: The size of the allocation.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * This function allocates buffer space from the main pool, and if there is
 * no space available ATM, it turns on IRQ handling and sleeps waiting for it to
 * become available.
 */
static int vmw_cmdbuf_alloc_space(struct vmw_cmdbuf_man *man,
				  struct drm_mm_node *node,
				  size_t size,
				  bool interruptible)
{
	struct vmw_cmdbuf_alloc_info info;

	info.page_size = PAGE_ALIGN(size) >> PAGE_SHIFT;
	info.node = node;
	info.done = false;

	/*
	 * To prevent starvation of large requests, only allow one
	 * allocating call at a time to wait for space.
	 */
	if (interruptible) {
		if (mutex_lock_interruptible(&man->space_mutex))
			return -ERESTARTSYS;
	} else {
		mutex_lock(&man->space_mutex);
	}

	/* Try to allocate space without waiting. */
	if (vmw_cmdbuf_try_alloc(man, &info))
		goto out_unlock;

	vmw_generic_waiter_add(man->dev_priv,
			       SVGA_IRQFLAG_COMMAND_BUFFER,
			       &man->dev_priv->cmdbuf_waiters);

	if (interruptible) {
		int ret;

		ret = wait_event_interruptible
			(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
		if (ret) {
			vmw_generic_waiter_remove
				(man->dev_priv, SVGA_IRQFLAG_COMMAND_BUFFER,
				 &man->dev_priv->cmdbuf_waiters);
			mutex_unlock(&man->space_mutex);
			return ret;
		}
	} else {
		wait_event(man->alloc_queue, vmw_cmdbuf_try_alloc(man, &info));
	}
	vmw_generic_waiter_remove(man->dev_priv,
				  SVGA_IRQFLAG_COMMAND_BUFFER,
				  &man->dev_priv->cmdbuf_waiters);

out_unlock:
	mutex_unlock(&man->space_mutex);

	return 0;
}

/**
 * vmw_cmdbuf_space_pool - Set up a command buffer header with command buffer
 * space from the main pool.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 */
static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
				 struct vmw_cmdbuf_header *header,
				 size_t size,
				 bool interruptible)
{
	SVGACBHeader *cb_hdr;
	size_t offset;
	int ret;

	if (!man->has_pool)
		return -ENOMEM;

	ret = vmw_cmdbuf_alloc_space(man, &header->node, size, interruptible);

	if (ret)
		return ret;

	header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL,
					    &header->handle);
	if (!header->cb_header) {
		ret = -ENOMEM;
		goto out_no_cb_header;
	}

	header->size = header->node.size << PAGE_SHIFT;
	cb_hdr = header->cb_header;
	offset = header->node.start << PAGE_SHIFT;
	header->cmd = man->map + offset;
	if (man->using_mob) {
		cb_hdr->flags = SVGA_CB_FLAG_MOB;
		cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start;
		cb_hdr->ptr.mob.mobOffset = offset;
	} else {
		cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
	}

	return 0;

out_no_cb_header:
	spin_lock(&man->lock);
	drm_mm_remove_node(&header->node);
	spin_unlock(&man->lock);

	return ret;
}

/**
 * vmw_cmdbuf_space_inline - Set up a command buffer header with
 * inline command buffer space.
 *
 * @man: The command buffer manager.
 * @header: Pointer to the header to set up.
 * @size: The requested size of the buffer space.
 */
static int vmw_cmdbuf_space_inline(struct vmw_cmdbuf_man *man,
				   struct vmw_cmdbuf_header *header,
				   int size)
{
	struct vmw_cmdbuf_dheader *dheader;
	SVGACBHeader *cb_hdr;

	if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE))
		return -ENOMEM;

	dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL,
				  &header->handle);
	if (!dheader)
		return -ENOMEM;

	header->inline_space = true;
	header->size = VMW_CMDBUF_INLINE_SIZE;
	cb_hdr = &dheader->cb_header;
	header->cb_header = cb_hdr;
	header->cmd = dheader->cmd;
	cb_hdr->status = SVGA_CB_STATUS_NONE;
	cb_hdr->flags = SVGA_CB_FLAG_NONE;
	cb_hdr->ptr.pa = (u64)header->handle +
		(u64)offsetof(struct vmw_cmdbuf_dheader, cmd);

	return 0;
}

/**
 * vmw_cmdbuf_alloc - Allocate a command buffer header complete with
 * command buffer space.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the buffer space.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @p_header: Points to a header pointer to populate on successful return.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer. The header pointer returned in @p_header should
 * be used for upcoming calls to vmw_cmdbuf_reserve() and vmw_cmdbuf_commit().
 */
void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
		       size_t size, bool interruptible,
		       struct vmw_cmdbuf_header **p_header)
{
	struct vmw_cmdbuf_header *header;
	int ret = 0;

	*p_header = NULL;

	header = kzalloc(sizeof(*header), GFP_KERNEL);
	if (!header)
		return ERR_PTR(-ENOMEM);

	if (size <= VMW_CMDBUF_INLINE_SIZE)
		ret = vmw_cmdbuf_space_inline(man, header, size);
	else
		ret = vmw_cmdbuf_space_pool(man, header, size, interruptible);

	if (ret) {
		kfree(header);
		return ERR_PTR(ret);
	}

	header->man = man;
	INIT_LIST_HEAD(&header->list);
	header->cb_header->status = SVGA_CB_STATUS_NONE;
	*p_header = header;

	return header->cmd;
}

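/*
 * Example (illustrative sketch, not part of the driver): a caller
 * allocates a dedicated header, reserves space in it, copies in its
 * commands and commits them. "my_cmds" and "cmd_size" are
 * hypothetical:
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *cmd = vmw_cmdbuf_alloc(man, cmd_size, true, &header);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	cmd = vmw_cmdbuf_reserve(man, cmd_size, SVGA3D_INVALID_ID,
 *				 true, header);
 *	memcpy(cmd, my_cmds, cmd_size);
 *	vmw_cmdbuf_commit(man, cmd_size, header, true);
 */
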
/**
 * vmw_cmdbuf_reserve_cur - Reserve space for commands in the current
 * command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
static void *vmw_cmdbuf_reserve_cur(struct vmw_cmdbuf_man *man,
				    size_t size,
				    int ctx_id,
				    bool interruptible)
{
	struct vmw_cmdbuf_header *cur;
	void *ret;

	if (vmw_cmdbuf_cur_lock(man, interruptible))
		return ERR_PTR(-ERESTARTSYS);

	cur = man->cur;
	if (cur && (size + man->cur_pos > cur->size ||
		    ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) &&
		     ctx_id != cur->cb_header->dxContext)))
		__vmw_cmdbuf_cur_flush(man);

	if (!man->cur) {
		ret = vmw_cmdbuf_alloc(man,
				       max_t(size_t, size, man->default_size),
				       interruptible, &man->cur);
		if (IS_ERR(ret)) {
			vmw_cmdbuf_cur_unlock(man);
			return ret;
		}

		cur = man->cur;
	}

	if (ctx_id != SVGA3D_INVALID_ID) {
		cur->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		cur->cb_header->dxContext = ctx_id;
	}

	cur->reserved = size;

	return (void *) (man->cur->cmd + man->cur_pos);
}

/**
 * vmw_cmdbuf_commit_cur - Commit commands in the current command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @flush: Whether to flush the command buffer immediately.
 */
static void vmw_cmdbuf_commit_cur(struct vmw_cmdbuf_man *man,
				  size_t size, bool flush)
{
	struct vmw_cmdbuf_header *cur = man->cur;

	lockdep_assert_held_once(&man->cur_mutex);

	WARN_ON(size > cur->reserved);
	man->cur_pos += size;
	if (!size)
		cur->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}

/**
 * vmw_cmdbuf_reserve - Reserve space for commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The requested size of the commands.
 * @ctx_id: The context id if any. Otherwise set to SVGA3D_INVALID_ID.
 * @interruptible: Whether to sleep interruptible while waiting for space.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 *
 * Returns a pointer to command buffer space if successful. Otherwise
 * returns an error pointer.
 */
void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
			 int ctx_id, bool interruptible,
			 struct vmw_cmdbuf_header *header)
{
	if (!header)
		return vmw_cmdbuf_reserve_cur(man, size, ctx_id, interruptible);

	if (size > header->size)
		return ERR_PTR(-EINVAL);

	if (ctx_id != SVGA3D_INVALID_ID) {
		header->cb_header->flags |= SVGA_CB_FLAG_DX_CONTEXT;
		header->cb_header->dxContext = ctx_id;
	}

	header->reserved = size;
	return header->cmd;
}

/**
 * vmw_cmdbuf_commit - Commit commands in a command buffer.
 *
 * @man: The command buffer manager.
 * @size: The size of the commands actually written.
 * @header: Header of the command buffer. NULL if the current command buffer
 * should be used.
 * @flush: Whether to flush the command buffer immediately.
 */
void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
		       struct vmw_cmdbuf_header *header, bool flush)
{
	if (!header) {
		vmw_cmdbuf_commit_cur(man, size, flush);
		return;
	}

	(void) vmw_cmdbuf_cur_lock(man, false);
	__vmw_cmdbuf_cur_flush(man);
	WARN_ON(size > header->reserved);
	man->cur = header;
	man->cur_pos = size;
	if (!size)
		header->cb_header->flags &= ~SVGA_CB_FLAG_DX_CONTEXT;
	if (flush)
		__vmw_cmdbuf_cur_flush(man);
	vmw_cmdbuf_cur_unlock(man);
}

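/*
 * Example (illustrative, not part of the driver): reserving and
 * committing through the current command buffer (header == NULL).
 * "my_cmd" is a hypothetical command structure:
 *
 *	void *cmd = vmw_cmdbuf_reserve(man, sizeof(my_cmd),
 *				       SVGA3D_INVALID_ID, true, NULL);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	memcpy(cmd, &my_cmd, sizeof(my_cmd));
 *	vmw_cmdbuf_commit(man, sizeof(my_cmd), NULL, false);
 */
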
/**
 * vmw_cmdbuf_send_device_command - Send a command through the device context.
 *
 * @man: The command buffer manager.
 * @command: Pointer to the command to send.
 * @size: Size of the command.
 *
 * Synchronously sends a device context command.
 */
static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
					  const void *command,
					  size_t size)
{
	struct vmw_cmdbuf_header *header;
	int status;
	void *cmd = vmw_cmdbuf_alloc(man, size, false, &header);

	if (IS_ERR(cmd))
		return PTR_ERR(cmd);

	memcpy(cmd, command, size);
	header->cb_header->length = size;
	header->cb_context = SVGA_CB_CONTEXT_DEVICE;
	spin_lock(&man->lock);
	status = vmw_cmdbuf_header_submit(header);
	spin_unlock(&man->lock);
	vmw_cmdbuf_header_free(header);

	if (status != SVGA_CB_STATUS_COMPLETED) {
		DRM_ERROR("Device context command failed with status %d\n",
			  status);
		return -EINVAL;
	}

	return 0;
}

/**
 * vmw_cmdbuf_preempt - Send a preempt command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to pass command through.
 *
 * Synchronously sends a preempt command.
 */
static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
{
	struct {
		uint32 id;
		SVGADCCmdPreempt body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_PREEMPT;
	cmd.body.context = SVGA_CB_CONTEXT_0 + context;
	cmd.body.ignoreIDZero = 0;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_startstop - Send a start / stop command through the device
 * context.
 *
 * @man: The command buffer manager.
 * @context: Device context to start/stop.
 * @enable: Whether to enable or disable the context.
 *
 * Synchronously sends a device start / stop context command.
 */
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
				bool enable)
{
	struct {
		uint32 id;
		SVGADCCmdStartStop body;
	} __packed cmd;

	cmd.id = SVGA_DC_CMD_START_STOP_CONTEXT;
	cmd.body.enable = (enable) ? 1 : 0;
	cmd.body.context = SVGA_CB_CONTEXT_0 + context;

	return vmw_cmdbuf_send_device_command(man, &cmd, sizeof(cmd));
}

/**
 * vmw_cmdbuf_set_pool_size - Set command buffer manager sizes
 *
 * @man: The command buffer manager.
 * @size: The size of the main space pool.
 *
 * Set the size and allocate the main command buffer space pool.
 * If successful, this enables large command submissions.
 * Note that this function requires that rudimentary command
 * submission is already available and that the MOB memory manager is alive.
 * Returns 0 on success. Negative error code on failure.
 */
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
{
	struct vmw_private *dev_priv = man->dev_priv;
	bool dummy;
	int ret;

	if (man->has_pool)
		return -EINVAL;

	/* First, try to allocate a huge chunk of DMA memory */
	size = PAGE_ALIGN(size);
	man->map = dma_alloc_coherent(dev_priv->drm.dev, size,
				      &man->handle, GFP_KERNEL);
	if (man->map) {
		man->using_mob = false;
	} else {
		/*
		 * DMA memory failed. If we can have command buffers in a
		 * MOB, try to use that instead. Note that this will
		 * actually call into the already enabled manager, when
		 * binding the MOB.
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
		    !dev_priv->has_mob)
			return -ENOMEM;

		ret = vmw_bo_create_kernel(dev_priv, size,
					   &vmw_mob_placement,
					   &man->cmd_space);
		if (ret)
			return ret;

		man->using_mob = true;
		ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
				  &man->map_obj);
		if (ret)
			goto out_no_map;

		man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
	}

	man->size = size;
	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

	man->has_pool = true;

	/*
	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
	 * needs to wait for space and we block on further command
	 * submissions to be able to free up space.
	 */
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	DRM_INFO("Using command buffers with %s pool.\n",
		 (man->using_mob) ? "MOB" : "DMA");

	return 0;

out_no_map:
	if (man->using_mob) {
		ttm_bo_put(man->cmd_space);
		man->cmd_space = NULL;
	}

	return ret;
}

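/*
 * Example (illustrative, not part of the driver): enabling the large
 * pool once rudimentary command submission and MOB management are up.
 * The 2 MiB pool size is a hypothetical value for this sketch:
 *
 *	if (vmw_cmdbuf_set_pool_size(man, 2 * 1024 * 1024))
 *		DRM_INFO("Large command buffers are not available.\n");
 */
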
/**
 * vmw_cmdbuf_man_create: Create a command buffer manager and enable it for
 * inline command buffer submissions only.
 *
 * @dev_priv: Pointer to device private structure.
 *
 * Returns a pointer to a command buffer manager on success or an error
 * pointer on failure. The command buffer manager will be enabled for
 * submissions of size VMW_CMDBUF_INLINE_SIZE only.
 */
struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
{
	struct vmw_cmdbuf_man *man;
	struct vmw_cmdbuf_context *ctx;
	unsigned int i;
	int ret;

	if (!(dev_priv->capabilities & SVGA_CAP_COMMAND_BUFFERS))
		return ERR_PTR(-ENOSYS);

	man = kzalloc(sizeof(*man), GFP_KERNEL);
	if (!man)
		return ERR_PTR(-ENOMEM);

	man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
		2 : 1;
	man->headers = dma_pool_create("vmwgfx cmdbuf",
				       dev_priv->drm.dev,
				       sizeof(SVGACBHeader),
				       64, PAGE_SIZE);
	if (!man->headers) {
		ret = -ENOMEM;
		goto out_no_pool;
	}

	man->dheaders = dma_pool_create("vmwgfx inline cmdbuf",
					dev_priv->drm.dev,
					sizeof(struct vmw_cmdbuf_dheader),
					64, PAGE_SIZE);
	if (!man->dheaders) {
		ret = -ENOMEM;
		goto out_no_dpool;
	}

	for_each_cmdbuf_ctx(man, i, ctx)
		vmw_cmdbuf_ctx_init(ctx);

	INIT_LIST_HEAD(&man->error);
	spin_lock_init(&man->lock);
	mutex_init(&man->cur_mutex);
	mutex_init(&man->space_mutex);
	mutex_init(&man->error_mutex);
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	init_waitqueue_head(&man->alloc_queue);
	init_waitqueue_head(&man->idle_queue);
	man->dev_priv = dev_priv;
	man->max_hw_submitted = SVGA_CB_MAX_QUEUED_PER_CONTEXT - 1;
	INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
			       &dev_priv->error_waiters);
	ret = vmw_cmdbuf_startstop(man, 0, true);
	if (ret) {
		DRM_ERROR("Failed starting command buffer contexts\n");
		vmw_cmdbuf_man_destroy(man);
		return ERR_PTR(ret);
	}

	return man;

out_no_dpool:
	dma_pool_destroy(man->headers);
out_no_pool:
	kfree(man);

	return ERR_PTR(ret);
}

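/*
 * Example (illustrative sketch of device bring-up, not part of the
 * driver): -ENOSYS indicates the device lacks command buffer support,
 * in which case a caller may fall back to FIFO submission:
 *
 *	man = vmw_cmdbuf_man_create(dev_priv);
 *	if (IS_ERR(man)) {
 *		if (PTR_ERR(man) != -ENOSYS)
 *			return PTR_ERR(man);
 *		man = NULL;
 *	}
 */
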
/**
 * vmw_cmdbuf_remove_pool - Take down the main buffer space pool.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function removes the main buffer space pool, and should be called
 * before MOB memory management is removed. When this function has been called,
 * only small command buffer submissions of size VMW_CMDBUF_INLINE_SIZE or
 * less are allowed, and the default size of the command buffer for small kernel
 * submissions is also set to this size.
 */
void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
{
	if (!man->has_pool)
		return;

	man->has_pool = false;
	man->default_size = VMW_CMDBUF_INLINE_SIZE;
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);
	if (man->using_mob) {
		(void) ttm_bo_kunmap(&man->map_obj);
		ttm_bo_put(man->cmd_space);
		man->cmd_space = NULL;
	} else {
		dma_free_coherent(man->dev_priv->drm.dev,
				  man->size, man->map, man->handle);
	}
}

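/*
 * Example (illustrative teardown order): the pool must go away while
 * MOB memory management is still alive; the manager itself is
 * destroyed last:
 *
 *	vmw_cmdbuf_remove_pool(man);
 *	... tear down MOB memory management here ...
 *	vmw_cmdbuf_man_destroy(man);
 */
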
/**
 * vmw_cmdbuf_man_destroy - Take down a command buffer manager.
 *
 * @man: Pointer to a command buffer manager.
 *
 * This function idles and then destroys a command buffer manager.
 */
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
	WARN_ON_ONCE(man->has_pool);
	(void) vmw_cmdbuf_idle(man, false, 10*HZ);

	if (vmw_cmdbuf_startstop(man, 0, false))
		DRM_ERROR("Failed stopping command buffer contexts.\n");

	vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
				  &man->dev_priv->error_waiters);
	(void) cancel_work_sync(&man->work);
	dma_pool_destroy(man->dheaders);
	dma_pool_destroy(man->headers);
	mutex_destroy(&man->cur_mutex);
	mutex_destroy(&man->space_mutex);
	mutex_destroy(&man->error_mutex);
	kfree(man);
}