1/**************************************************************************
2 *
3 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "vmwgfx_reg.h"
30#include <drm/ttm/ttm_bo_api.h>
31#include <drm/ttm/ttm_placement.h>
32#include "vmwgfx_so.h"
33#include "vmwgfx_binding.h"
34
35#define VMW_RES_HT_ORDER 12
36
37/**
38 * struct vmw_resource_relocation - Relocation info for resources
39 *
40 * @head: List head for the software context's relocation list.
41 * @res: Non-ref-counted pointer to the resource.
 42 * @offset: Offset into the command buffer, in units of 4-byte entries, where
 43 * the id that needs fixup is located.
44 */
45struct vmw_resource_relocation {
46 struct list_head head;
47 const struct vmw_resource *res;
48 unsigned long offset;
49};
50
51/**
52 * struct vmw_resource_val_node - Validation info for resources
53 *
54 * @head: List head for the software context's resource list.
 55 * @hash: Hash entry for quick resource to val_node lookup.
56 * @res: Ref-counted pointer to the resource.
58 * @new_backup: Refcounted pointer to the new backup buffer.
59 * @staged_bindings: If @res is a context, tracks bindings set up during
60 * the command batch. Otherwise NULL.
 61 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
62 * @first_usage: Set to true the first time the resource is referenced in
63 * the command stream.
64 * @switching_backup: The command stream provides a new backup buffer for a
65 * resource.
 66 * @no_buffer_needed: Set when @switching_backup is true on the first buffer
 67 * reference, so resource reservation does not need to allocate a backup
 68 * buffer for the resource.
69 */
70struct vmw_resource_val_node {
71 struct list_head head;
72 struct drm_hash_item hash;
73 struct vmw_resource *res;
74 struct vmw_dma_buffer *new_backup;
75 struct vmw_ctx_binding_state *staged_bindings;
76 unsigned long new_backup_offset;
77 u32 first_usage : 1;
78 u32 switching_backup : 1;
79 u32 no_buffer_needed : 1;
80};
81
82/**
83 * struct vmw_cmd_entry - Describe a command for the verifier
84 *
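 * @func: Call-back for validating this command.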
85 * @user_allow: Whether allowed from the execbuf ioctl.
86 * @gb_disable: Whether disabled if guest-backed objects are available.
87 * @gb_enable: Whether enabled iff guest-backed objects are available.
88 */
89struct vmw_cmd_entry {
90 int (*func) (struct vmw_private *, struct vmw_sw_context *,
91 SVGA3dCmdHeader *);
92 bool user_allow;
93 bool gb_disable;
94 bool gb_enable;
95};
96
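/*
 * VMW_CMD_DEF - Initializer for one entry of the command verifier dispatch
 * table, indexed by the SVGA3D command id relative to SVGA_3D_CMD_BASE.
 */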
97#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
98 [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
99 (_gb_disable), (_gb_enable)}
100
101static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
102 struct vmw_sw_context *sw_context,
103 struct vmw_resource *ctx);
104static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
105 struct vmw_sw_context *sw_context,
106 SVGAMobId *id,
107 struct vmw_dma_buffer **vmw_bo_p);
108static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
109 struct vmw_dma_buffer *vbo,
110 bool validate_as_mob,
111 uint32_t *p_val_node);
112
113
114/**
115 * vmw_resources_unreserve - unreserve resources previously reserved for
116 * command submission.
117 *
118 * @sw_context: pointer to the software context
119 * @backoff: Whether command submission failed.
120 */
121static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
122 bool backoff)
123{
124 struct vmw_resource_val_node *val;
125 struct list_head *list = &sw_context->resource_list;
126
127 if (sw_context->dx_query_mob && !backoff)
128 vmw_context_bind_dx_query(sw_context->dx_query_ctx,
129 sw_context->dx_query_mob);
130
131 list_for_each_entry(val, list, head) {
132 struct vmw_resource *res = val->res;
133 bool switch_backup =
134 (backoff) ? false : val->switching_backup;
135
136 /*
137 * Transfer staged context bindings to the
138 * persistent context binding tracker.
139 */
140 if (unlikely(val->staged_bindings)) {
141 if (!backoff) {
142 vmw_binding_state_commit
143 (vmw_context_binding_state(val->res),
144 val->staged_bindings);
145 }
146
147 if (val->staged_bindings != sw_context->staged_bindings)
148 vmw_binding_state_free(val->staged_bindings);
149 else
150 sw_context->staged_bindings_inuse = false;
151 val->staged_bindings = NULL;
152 }
153 vmw_resource_unreserve(res, switch_backup, val->new_backup,
154 val->new_backup_offset);
155 vmw_dmabuf_unreference(&val->new_backup);
156 }
157}
158
159/**
160 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
161 * added to the validate list.
162 *
 163 * @dev_priv: Pointer to the device private.
 164 * @sw_context: The validation context.
165 * @node: The validation node holding this context.
166 */
167static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
168 struct vmw_sw_context *sw_context,
169 struct vmw_resource_val_node *node)
170{
171 int ret;
172
173 ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
174 if (unlikely(ret != 0))
175 goto out_err;
176
177 if (!sw_context->staged_bindings) {
178 sw_context->staged_bindings =
179 vmw_binding_state_alloc(dev_priv);
180 if (IS_ERR(sw_context->staged_bindings)) {
181 DRM_ERROR("Failed to allocate context binding "
182 "information.\n");
183 ret = PTR_ERR(sw_context->staged_bindings);
184 sw_context->staged_bindings = NULL;
185 goto out_err;
186 }
187 }
188
189 if (sw_context->staged_bindings_inuse) {
190 node->staged_bindings = vmw_binding_state_alloc(dev_priv);
191 if (IS_ERR(node->staged_bindings)) {
192 DRM_ERROR("Failed to allocate context binding "
193 "information.\n");
194 ret = PTR_ERR(node->staged_bindings);
195 node->staged_bindings = NULL;
196 goto out_err;
197 }
198 } else {
199 node->staged_bindings = sw_context->staged_bindings;
200 sw_context->staged_bindings_inuse = true;
201 }
202
203 return 0;
204out_err:
205 return ret;
206}
207
208/**
209 * vmw_resource_val_add - Add a resource to the software context's
210 * resource list if it's not already on it.
211 *
212 * @sw_context: Pointer to the software context.
213 * @res: Pointer to the resource.
 214 * @p_node: On successful return points to a valid pointer to a
215 * struct vmw_resource_val_node, if non-NULL on entry.
216 */
217static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
218 struct vmw_resource *res,
219 struct vmw_resource_val_node **p_node)
220{
221 struct vmw_private *dev_priv = res->dev_priv;
222 struct vmw_resource_val_node *node;
223 struct drm_hash_item *hash;
224 int ret;
225
226 if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
227 &hash) == 0)) {
228 node = container_of(hash, struct vmw_resource_val_node, hash);
229 node->first_usage = false;
230 if (unlikely(p_node != NULL))
231 *p_node = node;
232 return 0;
233 }
234
235 node = kzalloc(sizeof(*node), GFP_KERNEL);
236 if (unlikely(node == NULL)) {
237 DRM_ERROR("Failed to allocate a resource validation "
238 "entry.\n");
239 return -ENOMEM;
240 }
241
242 node->hash.key = (unsigned long) res;
243 ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
244 if (unlikely(ret != 0)) {
245 DRM_ERROR("Failed to initialize a resource validation "
246 "entry.\n");
247 kfree(node);
248 return ret;
249 }
250 node->res = vmw_resource_reference(res);
251 node->first_usage = true;
252 if (unlikely(p_node != NULL))
253 *p_node = node;
254
255 if (!dev_priv->has_mob) {
256 list_add_tail(&node->head, &sw_context->resource_list);
257 return 0;
258 }
259
260 switch (vmw_res_type(res)) {
261 case vmw_res_context:
262 case vmw_res_dx_context:
263 list_add(&node->head, &sw_context->ctx_resource_list);
264 ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
265 break;
266 case vmw_res_cotable:
267 list_add_tail(&node->head, &sw_context->ctx_resource_list);
268 break;
269 default:
270 list_add_tail(&node->head, &sw_context->resource_list);
271 break;
272 }
273
274 return ret;
275}
276
277/**
278 * vmw_view_res_val_add - Add a view and the surface it's pointing to
279 * to the validation list
280 *
281 * @sw_context: The software context holding the validation list.
282 * @view: Pointer to the view resource.
283 *
284 * Returns 0 if success, negative error code otherwise.
285 */
286static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
287 struct vmw_resource *view)
288{
289 int ret;
290
291 /*
292 * First add the resource the view is pointing to, otherwise
293 * it may be swapped out when the view is validated.
294 */
295 ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
296 if (ret)
297 return ret;
298
299 return vmw_resource_val_add(sw_context, view, NULL);
300}
301
302/**
303 * vmw_view_id_val_add - Look up a view and add it and the surface it's
304 * pointing to to the validation list.
305 *
306 * @sw_context: The software context holding the validation list.
307 * @view_type: The view type to look up.
308 * @id: view id of the view.
309 *
310 * The view is represented by a view id and the DX context it's created on,
311 * or scheduled for creation on. If there is no DX context set, the function
 312 * will return -EINVAL. Otherwise returns 0 on success or a negative error code.
313 */
314static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
315 enum vmw_view_type view_type, u32 id)
316{
317 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
318 struct vmw_resource *view;
319 int ret;
320
321 if (!ctx_node) {
322 DRM_ERROR("DX Context not set.\n");
323 return -EINVAL;
324 }
325
326 view = vmw_view_lookup(sw_context->man, view_type, id);
327 if (IS_ERR(view))
328 return PTR_ERR(view);
329
330 ret = vmw_view_res_val_add(sw_context, view);
331 vmw_resource_unreference(&view);
332
333 return ret;
334}
335
336/**
337 * vmw_resource_context_res_add - Put resources previously bound to a context on
338 * the validation list
339 *
340 * @dev_priv: Pointer to a device private structure
341 * @sw_context: Pointer to a software context used for this command submission
342 * @ctx: Pointer to the context resource
343 *
344 * This function puts all resources that were previously bound to @ctx on
 345 * the resource validation list. This is part of the context state reemission.
346 */
347static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
348 struct vmw_sw_context *sw_context,
349 struct vmw_resource *ctx)
350{
351 struct list_head *binding_list;
352 struct vmw_ctx_bindinfo *entry;
353 int ret = 0;
354 struct vmw_resource *res;
355 u32 i;
356
357 /* Add all cotables to the validation list. */
358 if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
359 for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
360 res = vmw_context_cotable(ctx, i);
361 if (IS_ERR(res))
362 continue;
363
364 ret = vmw_resource_val_add(sw_context, res, NULL);
365 vmw_resource_unreference(&res);
366 if (unlikely(ret != 0))
367 return ret;
368 }
369 }
370
371
372 /* Add all resources bound to the context to the validation list */
373 mutex_lock(&dev_priv->binding_mutex);
374 binding_list = vmw_context_binding_list(ctx);
375
376 list_for_each_entry(entry, binding_list, ctx_list) {
377 /* entry->res is not refcounted */
378 res = vmw_resource_reference_unless_doomed(entry->res);
379 if (unlikely(res == NULL))
380 continue;
381
382 if (vmw_res_type(entry->res) == vmw_res_view)
383 ret = vmw_view_res_val_add(sw_context, entry->res);
384 else
385 ret = vmw_resource_val_add(sw_context, entry->res,
386 NULL);
387 vmw_resource_unreference(&res);
388 if (unlikely(ret != 0))
389 break;
390 }
391
392 if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
393 struct vmw_dma_buffer *dx_query_mob;
394
395 dx_query_mob = vmw_context_get_dx_query_mob(ctx);
396 if (dx_query_mob)
397 ret = vmw_bo_to_validate_list(sw_context,
398 dx_query_mob,
399 true, NULL);
400 }
401
402 mutex_unlock(&dev_priv->binding_mutex);
403 return ret;
404}
405
406/**
407 * vmw_resource_relocation_add - Add a relocation to the relocation list
408 *
409 * @list: Pointer to head of relocation list.
410 * @res: The resource.
411 * @offset: Offset into the command buffer currently being parsed where the
412 * id that needs fixup is located. Granularity is 4 bytes.
413 */
414static int vmw_resource_relocation_add(struct list_head *list,
415 const struct vmw_resource *res,
416 unsigned long offset)
417{
418 struct vmw_resource_relocation *rel;
419
420 rel = kmalloc(sizeof(*rel), GFP_KERNEL);
421 if (unlikely(rel == NULL)) {
422 DRM_ERROR("Failed to allocate a resource relocation.\n");
423 return -ENOMEM;
424 }
425
426 rel->res = res;
427 rel->offset = offset;
428 list_add_tail(&rel->head, list);
429
430 return 0;
431}
432
433/**
434 * vmw_resource_relocations_free - Free all relocations on a list
435 *
436 * @list: Pointer to the head of the relocation list.
437 */
438static void vmw_resource_relocations_free(struct list_head *list)
439{
440 struct vmw_resource_relocation *rel, *n;
441
442 list_for_each_entry_safe(rel, n, list, head) {
443 list_del(&rel->head);
444 kfree(rel);
445 }
446}
447
448/**
449 * vmw_resource_relocations_apply - Apply all relocations on a list
450 *
 451 * @cb: Pointer to the start of the command buffer being patched. This need
452 * not be the same buffer as the one being parsed when the relocation
453 * list was built, but the contents must be the same modulo the
454 * resource ids.
455 * @list: Pointer to the head of the relocation list.
456 */
457static void vmw_resource_relocations_apply(uint32_t *cb,
458 struct list_head *list)
459{
460 struct vmw_resource_relocation *rel;
461
462 list_for_each_entry(rel, list, head) {
463 if (likely(rel->res != NULL))
464 cb[rel->offset] = rel->res->id;
465 else
466 cb[rel->offset] = SVGA_3D_CMD_NOP;
467 }
468}
469
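/*
 * Command verifier callback for commands that may not be submitted through
 * the execbuf ioctl. Returns -EINVAL unless the caller has CAP_SYS_ADMIN.
 */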
470static int vmw_cmd_invalid(struct vmw_private *dev_priv,
471 struct vmw_sw_context *sw_context,
472 SVGA3dCmdHeader *header)
473{
 474	return capable(CAP_SYS_ADMIN) ? 0 : -EINVAL;
475}
476
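/* Command verifier callback for commands that need no further checking. */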
477static int vmw_cmd_ok(struct vmw_private *dev_priv,
478 struct vmw_sw_context *sw_context,
479 SVGA3dCmdHeader *header)
480{
481 return 0;
482}
483
484/**
485 * vmw_bo_to_validate_list - add a bo to a validate list
486 *
487 * @sw_context: The software context used for this command submission batch.
 488 * @vbo: The buffer object to add.
489 * @validate_as_mob: Validate this buffer as a MOB.
 490 * @p_val_node: If non-NULL, will be updated with the validate node number
491 * on return.
492 *
493 * Returns -EINVAL if the limit of number of buffer objects per command
494 * submission is reached.
495 */
496static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
497 struct vmw_dma_buffer *vbo,
498 bool validate_as_mob,
499 uint32_t *p_val_node)
500{
501 uint32_t val_node;
502 struct vmw_validate_buffer *vval_buf;
503 struct ttm_validate_buffer *val_buf;
504 struct drm_hash_item *hash;
505 int ret;
506
507 if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
508 &hash) == 0)) {
509 vval_buf = container_of(hash, struct vmw_validate_buffer,
510 hash);
511 if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
512 DRM_ERROR("Inconsistent buffer usage.\n");
513 return -EINVAL;
514 }
515 val_buf = &vval_buf->base;
516 val_node = vval_buf - sw_context->val_bufs;
517 } else {
518 val_node = sw_context->cur_val_buf;
519 if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
520 DRM_ERROR("Max number of DMA buffers per submission "
521 "exceeded.\n");
522 return -EINVAL;
523 }
524 vval_buf = &sw_context->val_bufs[val_node];
525 vval_buf->hash.key = (unsigned long) vbo;
526 ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
527 if (unlikely(ret != 0)) {
528 DRM_ERROR("Failed to initialize a buffer validation "
529 "entry.\n");
530 return ret;
531 }
532 ++sw_context->cur_val_buf;
533 val_buf = &vval_buf->base;
534 val_buf->bo = ttm_bo_reference(&vbo->base);
535 val_buf->shared = false;
536 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
537 vval_buf->validate_as_mob = validate_as_mob;
538 }
539
540 if (p_val_node)
541 *p_val_node = val_node;
542
543 return 0;
544}
545
546/**
547 * vmw_resources_reserve - Reserve all resources on the sw_context's
548 * resource list.
549 *
550 * @sw_context: Pointer to the software context.
551 *
552 * Note that since vmware's command submission currently is protected by
553 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
554 * since only a single thread at once will attempt this.
555 */
556static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
557{
558 struct vmw_resource_val_node *val;
559 int ret = 0;
560
561 list_for_each_entry(val, &sw_context->resource_list, head) {
562 struct vmw_resource *res = val->res;
563
564 ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
565 if (unlikely(ret != 0))
566 return ret;
567
568 if (res->backup) {
569 struct vmw_dma_buffer *vbo = res->backup;
570
571 ret = vmw_bo_to_validate_list
572 (sw_context, vbo,
573 vmw_resource_needs_backup(res), NULL);
574
575 if (unlikely(ret != 0))
576 return ret;
577 }
578 }
579
580 if (sw_context->dx_query_mob) {
581 struct vmw_dma_buffer *expected_dx_query_mob;
582
583 expected_dx_query_mob =
584 vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
585 if (expected_dx_query_mob &&
586 expected_dx_query_mob != sw_context->dx_query_mob) {
587 ret = -EINVAL;
588 }
589 }
590
591 return ret;
592}
593
594/**
595 * vmw_resources_validate - Validate all resources on the sw_context's
596 * resource list.
597 *
598 * @sw_context: Pointer to the software context.
599 *
600 * Before this function is called, all resource backup buffers must have
601 * been validated.
602 */
603static int vmw_resources_validate(struct vmw_sw_context *sw_context)
604{
605 struct vmw_resource_val_node *val;
606 int ret;
607
608 list_for_each_entry(val, &sw_context->resource_list, head) {
609 struct vmw_resource *res = val->res;
610 struct vmw_dma_buffer *backup = res->backup;
611
612 ret = vmw_resource_validate(res);
613 if (unlikely(ret != 0)) {
614 if (ret != -ERESTARTSYS)
615 DRM_ERROR("Failed to validate resource.\n");
616 return ret;
617 }
618
619 /* Check if the resource switched backup buffer */
620 if (backup && res->backup && (backup != res->backup)) {
621 struct vmw_dma_buffer *vbo = res->backup;
622
623 ret = vmw_bo_to_validate_list
624 (sw_context, vbo,
625 vmw_resource_needs_backup(res), NULL);
626 if (ret) {
627 ttm_bo_unreserve(&vbo->base);
628 return ret;
629 }
630 }
631 }
632 return 0;
633}
634
635/**
636 * vmw_cmd_res_reloc_add - Add a resource to a software context's
637 * relocation- and validation lists.
638 *
639 * @dev_priv: Pointer to a struct vmw_private identifying the device.
640 * @sw_context: Pointer to the software context.
641 * @id_loc: Pointer to where the id that needs translation is located.
642 * @res: Valid pointer to a struct vmw_resource.
 643 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
644 * used for this resource is returned here.
645 */
646static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
647 struct vmw_sw_context *sw_context,
648 uint32_t *id_loc,
649 struct vmw_resource *res,
650 struct vmw_resource_val_node **p_val)
651{
652 int ret;
653 struct vmw_resource_val_node *node;
654
 655	if (p_val)
		*p_val = NULL;
656 ret = vmw_resource_relocation_add(&sw_context->res_relocations,
657 res,
658 id_loc - sw_context->buf_start);
659 if (unlikely(ret != 0))
660 return ret;
661
662 ret = vmw_resource_val_add(sw_context, res, &node);
663 if (unlikely(ret != 0))
664 return ret;
665
666 if (p_val)
667 *p_val = node;
668
669 return 0;
670}
671
672
673/**
674 * vmw_cmd_res_check - Check that a resource is present and if so, put it
675 * on the resource validate list unless it's already there.
676 *
677 * @dev_priv: Pointer to a device private structure.
678 * @sw_context: Pointer to the software context.
679 * @res_type: Resource type.
 680 * @converter: User-space visible type specific information.
681 * @id_loc: Pointer to the location in the command buffer currently being
682 * parsed from where the user-space resource id handle is located.
 683 * @p_val: Pointer to pointer to resource validation node. Populated
684 * on exit.
685 */
686static int
687vmw_cmd_res_check(struct vmw_private *dev_priv,
688 struct vmw_sw_context *sw_context,
689 enum vmw_res_type res_type,
690 const struct vmw_user_resource_conv *converter,
691 uint32_t *id_loc,
692 struct vmw_resource_val_node **p_val)
693{
694 struct vmw_res_cache_entry *rcache =
695 &sw_context->res_cache[res_type];
696 struct vmw_resource *res;
697 struct vmw_resource_val_node *node;
698 int ret;
699
700 if (*id_loc == SVGA3D_INVALID_ID) {
701 if (p_val)
702 *p_val = NULL;
703 if (res_type == vmw_res_context) {
704 DRM_ERROR("Illegal context invalid id.\n");
705 return -EINVAL;
706 }
707 return 0;
708 }
709
710 /*
711 * Fastpath in case of repeated commands referencing the same
712 * resource
713 */
714
715 if (likely(rcache->valid && *id_loc == rcache->handle)) {
716 const struct vmw_resource *res = rcache->res;
717
718 rcache->node->first_usage = false;
719 if (p_val)
720 *p_val = rcache->node;
721
722 return vmw_resource_relocation_add
723 (&sw_context->res_relocations, res,
724 id_loc - sw_context->buf_start);
725 }
726
727 ret = vmw_user_resource_lookup_handle(dev_priv,
728 sw_context->fp->tfile,
729 *id_loc,
730 converter,
731 &res);
732 if (unlikely(ret != 0)) {
733 DRM_ERROR("Could not find or use resource 0x%08x.\n",
734 (unsigned) *id_loc);
735 dump_stack();
736 return ret;
737 }
738
739 rcache->valid = true;
740 rcache->res = res;
741 rcache->handle = *id_loc;
742
743 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
744 res, &node);
745 if (unlikely(ret != 0))
746 goto out_no_reloc;
747
748 rcache->node = node;
749 if (p_val)
750 *p_val = node;
751 vmw_resource_unreference(&res);
752 return 0;
753
754out_no_reloc:
755 BUG_ON(sw_context->error_resource != NULL);
756 sw_context->error_resource = res;
757
758 return ret;
759}
760
761/**
 762 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
763 *
764 * @ctx_res: context the query belongs to
765 *
766 * This function assumes binding_mutex is held.
767 */
768static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
769{
770 struct vmw_private *dev_priv = ctx_res->dev_priv;
771 struct vmw_dma_buffer *dx_query_mob;
772 struct {
773 SVGA3dCmdHeader header;
774 SVGA3dCmdDXBindAllQuery body;
775 } *cmd;
776
777
778 dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
779
780 if (!dx_query_mob || dx_query_mob->dx_query_ctx)
781 return 0;
782
783 cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
784
785 if (cmd == NULL) {
786 DRM_ERROR("Failed to rebind queries.\n");
787 return -ENOMEM;
788 }
789
790 cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
791 cmd->header.size = sizeof(cmd->body);
792 cmd->body.cid = ctx_res->id;
793 cmd->body.mobid = dx_query_mob->base.mem.start;
794 vmw_fifo_commit(dev_priv, sizeof(*cmd));
795
796 vmw_context_bind_dx_query(ctx_res, dx_query_mob);
797
798 return 0;
799}
800
801/**
802 * vmw_rebind_contexts - Rebind all resources previously bound to
803 * referenced contexts.
804 *
805 * @sw_context: Pointer to the software context.
806 *
807 * Rebind context binding points that have been scrubbed because of eviction.
808 */
809static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
810{
811 struct vmw_resource_val_node *val;
812 int ret;
813
814 list_for_each_entry(val, &sw_context->resource_list, head) {
815 if (unlikely(!val->staged_bindings))
816 break;
817
818 ret = vmw_binding_rebind_all
819 (vmw_context_binding_state(val->res));
820 if (unlikely(ret != 0)) {
821 if (ret != -ERESTARTSYS)
822 DRM_ERROR("Failed to rebind context.\n");
823 return ret;
824 }
825
826 ret = vmw_rebind_all_dx_query(val->res);
827 if (ret != 0)
828 return ret;
829 }
830
831 return 0;
832}
833
834/**
835 * vmw_view_bindings_add - Add an array of view bindings to a context
836 * binding state tracker.
837 *
838 * @sw_context: The execbuf state used for this command.
839 * @view_type: View type for the bindings.
840 * @binding_type: Binding type for the bindings.
 841 * @shader_slot: The shader slot to use for the bindings.
842 * @view_ids: Array of view ids to be bound.
843 * @num_views: Number of view ids in @view_ids.
844 * @first_slot: The binding slot to be used for the first view id in @view_ids.
845 */
846static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
847 enum vmw_view_type view_type,
848 enum vmw_ctx_binding_type binding_type,
849 uint32 shader_slot,
850 uint32 view_ids[], u32 num_views,
851 u32 first_slot)
852{
853 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
854 struct vmw_cmdbuf_res_manager *man;
855 u32 i;
856 int ret;
857
858 if (!ctx_node) {
859 DRM_ERROR("DX Context not set.\n");
860 return -EINVAL;
861 }
862
863 man = sw_context->man;
864 for (i = 0; i < num_views; ++i) {
865 struct vmw_ctx_bindinfo_view binding;
866 struct vmw_resource *view = NULL;
867
868 if (view_ids[i] != SVGA3D_INVALID_ID) {
869 view = vmw_view_lookup(man, view_type, view_ids[i]);
870 if (IS_ERR(view)) {
871 DRM_ERROR("View not found.\n");
872 return PTR_ERR(view);
873 }
874
875 ret = vmw_view_res_val_add(sw_context, view);
876 if (ret) {
877 DRM_ERROR("Could not add view to "
878 "validation list.\n");
879 vmw_resource_unreference(&view);
880 return ret;
881 }
882 }
883 binding.bi.ctx = ctx_node->res;
884 binding.bi.res = view;
885 binding.bi.bt = binding_type;
886 binding.shader_slot = shader_slot;
887 binding.slot = first_slot + i;
888 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
889 shader_slot, binding.slot);
890 if (view)
891 vmw_resource_unreference(&view);
892 }
893
894 return 0;
895}
896
897/**
898 * vmw_cmd_cid_check - Check a command header for valid context information.
899 *
900 * @dev_priv: Pointer to a device private structure.
901 * @sw_context: Pointer to the software context.
902 * @header: A command header with an embedded user-space context handle.
903 *
904 * Convenience function: Call vmw_cmd_res_check with the user-space context
905 * handle embedded in @header.
906 */
907static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
908 struct vmw_sw_context *sw_context,
909 SVGA3dCmdHeader *header)
910{
911 struct vmw_cid_cmd {
912 SVGA3dCmdHeader header;
913 uint32_t cid;
914 } *cmd;
915
916 cmd = container_of(header, struct vmw_cid_cmd, header);
917 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
918 user_context_converter, &cmd->cid, NULL);
919}
920
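/**
 * vmw_cmd_set_render_target_check - Validate a set-render-target command
 * (SVGA3dCmdSetRenderTarget body).
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the render target type, puts the context and target surface on the
 * validation list, and, when MOBs are available, records the render-target
 * binding in the context's staged bindings.
 */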
921static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
922 struct vmw_sw_context *sw_context,
923 SVGA3dCmdHeader *header)
924{
925 struct vmw_sid_cmd {
926 SVGA3dCmdHeader header;
927 SVGA3dCmdSetRenderTarget body;
928 } *cmd;
929 struct vmw_resource_val_node *ctx_node;
930 struct vmw_resource_val_node *res_node;
931 int ret;
932
933 cmd = container_of(header, struct vmw_sid_cmd, header);
934
935 if (cmd->body.type >= SVGA3D_RT_MAX) {
936 DRM_ERROR("Illegal render target type %u.\n",
937 (unsigned) cmd->body.type);
938 return -EINVAL;
939 }
940
941 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
942 user_context_converter, &cmd->body.cid,
943 &ctx_node);
944 if (unlikely(ret != 0))
945 return ret;
946
947 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
948 user_surface_converter,
949 &cmd->body.target.sid, &res_node);
950 if (unlikely(ret != 0))
951 return ret;
952
953 if (dev_priv->has_mob) {
954 struct vmw_ctx_bindinfo_view binding;
955
956 binding.bi.ctx = ctx_node->res;
957 binding.bi.res = res_node ? res_node->res : NULL;
958 binding.bi.bt = vmw_ctx_binding_rt;
959 binding.slot = cmd->body.type;
960 vmw_binding_add(ctx_node->staged_bindings,
961 &binding.bi, 0, binding.slot);
962 }
963
964 return 0;
965}
966
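/**
 * vmw_cmd_surface_copy_check - Validate a surface-copy command
 * (SVGA3dCmdSurfaceCopy body) by checking both the source and the
 * destination surface.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */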
967static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
968 struct vmw_sw_context *sw_context,
969 SVGA3dCmdHeader *header)
970{
971 struct vmw_sid_cmd {
972 SVGA3dCmdHeader header;
973 SVGA3dCmdSurfaceCopy body;
974 } *cmd;
975 int ret;
976
977 cmd = container_of(header, struct vmw_sid_cmd, header);
978
979 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
980 user_surface_converter,
981 &cmd->body.src.sid, NULL);
982 if (ret)
983 return ret;
984
985 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
986 user_surface_converter,
987 &cmd->body.dest.sid, NULL);
988}
989
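/**
 * vmw_cmd_buffer_copy_check - Validate a DX buffer-copy command
 * (SVGA3dCmdDXBufferCopy body) by checking the source and destination
 * buffer surfaces.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */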
990static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
991 struct vmw_sw_context *sw_context,
992 SVGA3dCmdHeader *header)
993{
994 struct {
995 SVGA3dCmdHeader header;
996 SVGA3dCmdDXBufferCopy body;
997 } *cmd;
998 int ret;
999
1000 cmd = container_of(header, typeof(*cmd), header);
1001 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1002 user_surface_converter,
1003 &cmd->body.src, NULL);
1004 if (ret != 0)
1005 return ret;
1006
1007 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1008 user_surface_converter,
1009 &cmd->body.dest, NULL);
1010}
1011
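/**
 * vmw_cmd_pred_copy_check - Validate a DX predicated copy-region command
 * (SVGA3dCmdDXPredCopyRegion body) by checking the source and destination
 * surfaces.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */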
1012static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
1013 struct vmw_sw_context *sw_context,
1014 SVGA3dCmdHeader *header)
1015{
1016 struct {
1017 SVGA3dCmdHeader header;
1018 SVGA3dCmdDXPredCopyRegion body;
1019 } *cmd;
1020 int ret;
1021
1022 cmd = container_of(header, typeof(*cmd), header);
1023 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1024 user_surface_converter,
1025 &cmd->body.srcSid, NULL);
1026 if (ret != 0)
1027 return ret;
1028
1029 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1030 user_surface_converter,
1031 &cmd->body.dstSid, NULL);
1032}
1033
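/**
 * vmw_cmd_stretch_blt_check - Validate a surface stretch-blit command
 * (SVGA3dCmdSurfaceStretchBlt body) by checking the source and destination
 * surfaces.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */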
1034static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
1035 struct vmw_sw_context *sw_context,
1036 SVGA3dCmdHeader *header)
1037{
1038 struct vmw_sid_cmd {
1039 SVGA3dCmdHeader header;
1040 SVGA3dCmdSurfaceStretchBlt body;
1041 } *cmd;
1042 int ret;
1043
1044 cmd = container_of(header, struct vmw_sid_cmd, header);
1045 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1046 user_surface_converter,
1047 &cmd->body.src.sid, NULL);
1048 if (unlikely(ret != 0))
1049 return ret;
1050 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1051 user_surface_converter,
1052 &cmd->body.dest.sid, NULL);
1053}
1054
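/**
 * vmw_cmd_blt_surf_screen_check - Validate a blit-surface-to-screen command
 * (SVGA3dCmdBlitSurfaceToScreen body) by checking the source image surface.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */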
1055static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
1056 struct vmw_sw_context *sw_context,
1057 SVGA3dCmdHeader *header)
1058{
1059 struct vmw_sid_cmd {
1060 SVGA3dCmdHeader header;
1061 SVGA3dCmdBlitSurfaceToScreen body;
1062 } *cmd;
1063
1064 cmd = container_of(header, struct vmw_sid_cmd, header);
1065
1066 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1067 user_surface_converter,
1068 &cmd->body.srcImage.sid, NULL);
1069}
1070
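/**
 * vmw_cmd_present_check - Validate a present command (SVGA3dCmdPresent body)
 * by checking the presented surface.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */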
1071static int vmw_cmd_present_check(struct vmw_private *dev_priv,
1072 struct vmw_sw_context *sw_context,
1073 SVGA3dCmdHeader *header)
1074{
1075 struct vmw_sid_cmd {
1076 SVGA3dCmdHeader header;
1077 SVGA3dCmdPresent body;
1078 } *cmd;
1079
1080
1081 cmd = container_of(header, struct vmw_sid_cmd, header);
1082
1083 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1084 user_surface_converter, &cmd->body.sid,
1085 NULL);
1086}
1087
1088/**
1089 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1090 *
1091 * @dev_priv: The device private structure.
1092 * @new_query_bo: The new buffer holding query results.
1093 * @sw_context: The software context used for this command submission.
1094 *
1095 * This function checks whether @new_query_bo is suitable for holding
1096 * query results, and if another buffer currently is pinned for query
1097 * results. If so, the function prepares the state of @sw_context for
1098 * switching pinned buffers after successful submission of the current
1099 * command batch.
1100 */
1101static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1102 struct vmw_dma_buffer *new_query_bo,
1103 struct vmw_sw_context *sw_context)
1104{
1105 struct vmw_res_cache_entry *ctx_entry =
1106 &sw_context->res_cache[vmw_res_context];
1107 int ret;
1108
1109 BUG_ON(!ctx_entry->valid);
1110 sw_context->last_query_ctx = ctx_entry->res;
1111
1112 if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1113
1114 if (unlikely(new_query_bo->base.num_pages > 4)) {
1115 DRM_ERROR("Query buffer too large.\n");
1116 return -EINVAL;
1117 }
1118
1119 if (unlikely(sw_context->cur_query_bo != NULL)) {
1120 sw_context->needs_post_query_barrier = true;
1121 ret = vmw_bo_to_validate_list(sw_context,
1122 sw_context->cur_query_bo,
1123 dev_priv->has_mob, NULL);
1124 if (unlikely(ret != 0))
1125 return ret;
1126 }
1127 sw_context->cur_query_bo = new_query_bo;
1128
1129 ret = vmw_bo_to_validate_list(sw_context,
1130 dev_priv->dummy_query_bo,
1131 dev_priv->has_mob, NULL);
1132 if (unlikely(ret != 0))
1133 return ret;
1134
1135 }
1136
1137 return 0;
1138}
1139
1140
1141/**
1142 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1143 *
1144 * @dev_priv: The device private structure.
1145 * @sw_context: The software context used for this command submission batch.
1146 *
1147 * This function will check if we're switching query buffers, and will then,
1148 * issue a dummy occlusion query wait used as a query barrier. When the fence
1149 * object following that query wait has signaled, we are sure that all
1150 * preceding queries have finished, and the old query buffer can be unpinned.
1151 * However, since both the new query buffer and the old one are fenced with
 1152 * that fence, we can do an asynchronous unpin now, and be sure that the
1153 * old query buffer won't be moved until the fence has signaled.
1154 *
 1155 * As mentioned above, both the new and the old query buffers need to be fenced
1156 * using a sequence emitted *after* calling this function.
1157 */
1158static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
1159 struct vmw_sw_context *sw_context)
1160{
1161 /*
1162 * The validate list should still hold references to all
1163 * contexts here.
1164 */
1165
1166 if (sw_context->needs_post_query_barrier) {
1167 struct vmw_res_cache_entry *ctx_entry =
1168 &sw_context->res_cache[vmw_res_context];
1169 struct vmw_resource *ctx;
1170 int ret;
1171
1172 BUG_ON(!ctx_entry->valid);
1173 ctx = ctx_entry->res;
1174
1175 ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
1176
1177 if (unlikely(ret != 0))
1178 DRM_ERROR("Out of fifo space for dummy query.\n");
1179 }
1180
1181 if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
1182 if (dev_priv->pinned_bo) {
1183 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
1184 vmw_dmabuf_unreference(&dev_priv->pinned_bo);
1185 }
1186
1187 if (!sw_context->needs_post_query_barrier) {
1188 vmw_bo_pin_reserved(sw_context->cur_query_bo, true);
1189
1190 /*
1191 * We pin also the dummy_query_bo buffer so that we
1192 * don't need to validate it when emitting
1193 * dummy queries in context destroy paths.
1194 */
1195
1196 if (!dev_priv->dummy_query_bo_pinned) {
1197 vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
1198 true);
1199 dev_priv->dummy_query_bo_pinned = true;
1200 }
1201
1202 BUG_ON(sw_context->last_query_ctx == NULL);
1203 dev_priv->query_cid = sw_context->last_query_ctx->id;
1204 dev_priv->query_cid_valid = true;
1205 dev_priv->pinned_bo =
1206 vmw_dmabuf_reference(sw_context->cur_query_bo);
1207 }
1208 }
1209}
1210
1211/**
 1212 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
1213 * handle to a MOB id.
1214 *
1215 * @dev_priv: Pointer to a device private structure.
1216 * @sw_context: The software context used for this command batch validation.
1217 * @id: Pointer to the user-space handle to be translated.
1218 * @vmw_bo_p: Points to a location that, on successful return will carry
1219 * a reference-counted pointer to the DMA buffer identified by the
1220 * user-space handle in @id.
1221 *
1222 * This function saves information needed to translate a user-space buffer
1223 * handle to a MOB id. The translation does not take place immediately, but
1224 * during a call to vmw_apply_relocations(). This function builds a relocation
1225 * list and a list of buffers to validate. The former needs to be freed using
1226 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
1227 * needs to be freed using vmw_clear_validations.
1228 */
1229static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
1230 struct vmw_sw_context *sw_context,
1231 SVGAMobId *id,
1232 struct vmw_dma_buffer **vmw_bo_p)
1233{
1234 struct vmw_dma_buffer *vmw_bo = NULL;
1235 uint32_t handle = *id;
1236 struct vmw_relocation *reloc;
1237 int ret;
1238
1239 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
1240 NULL);
1241 if (unlikely(ret != 0)) {
1242 DRM_ERROR("Could not find or use MOB buffer.\n");
1243 ret = -EINVAL;
1244 goto out_no_reloc;
1245 }
1246
1247 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
1248 DRM_ERROR("Max number relocations per submission"
1249 " exceeded\n");
1250 ret = -EINVAL;
1251 goto out_no_reloc;
1252 }
1253
1254 reloc = &sw_context->relocs[sw_context->cur_reloc++];
1255 reloc->mob_loc = id;
1256 reloc->location = NULL;
1257
1258 ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
1259 if (unlikely(ret != 0))
1260 goto out_no_reloc;
1261
1262 *vmw_bo_p = vmw_bo;
1263 return 0;
1264
1265out_no_reloc:
1266 vmw_dmabuf_unreference(&vmw_bo);
1267 *vmw_bo_p = NULL;
1268 return ret;
1269}
1270
1271/**
 1272 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
1273 * handle to a valid SVGAGuestPtr
1274 *
1275 * @dev_priv: Pointer to a device private structure.
1276 * @sw_context: The software context used for this command batch validation.
1277 * @ptr: Pointer to the user-space handle to be translated.
1278 * @vmw_bo_p: Points to a location that, on successful return will carry
1279 * a reference-counted pointer to the DMA buffer identified by the
 1280 * user-space handle in @ptr.
1281 *
1282 * This function saves information needed to translate a user-space buffer
1283 * handle to a valid SVGAGuestPtr. The translation does not take place
1284 * immediately, but during a call to vmw_apply_relocations().
1285 * This function builds a relocation list and a list of buffers to validate.
1286 * The former needs to be freed using either vmw_apply_relocations() or
1287 * vmw_free_relocations(). The latter needs to be freed using
1288 * vmw_clear_validations.
1289 */
1290static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
1291 struct vmw_sw_context *sw_context,
1292 SVGAGuestPtr *ptr,
1293 struct vmw_dma_buffer **vmw_bo_p)
1294{
1295 struct vmw_dma_buffer *vmw_bo = NULL;
1296 uint32_t handle = ptr->gmrId;
1297 struct vmw_relocation *reloc;
1298 int ret;
1299
1300 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
1301 NULL);
1302 if (unlikely(ret != 0)) {
1303 DRM_ERROR("Could not find or use GMR region.\n");
1304 ret = -EINVAL;
1305 goto out_no_reloc;
1306 }
1307
1308 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
1309 DRM_ERROR("Max number relocations per submission"
1310 " exceeded\n");
1311 ret = -EINVAL;
1312 goto out_no_reloc;
1313 }
1314
1315 reloc = &sw_context->relocs[sw_context->cur_reloc++];
1316 reloc->location = ptr;
1317
1318 ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
1319 if (unlikely(ret != 0))
1320 goto out_no_reloc;
1321
1322 *vmw_bo_p = vmw_bo;
1323 return 0;
1324
1325out_no_reloc:
1326 vmw_dmabuf_unreference(&vmw_bo);
1327 *vmw_bo_p = NULL;
1328 return ret;
1329}
1330
1331
1332
1333/**
1334 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
1335 *
1336 * @dev_priv: Pointer to a device private struct.
1337 * @sw_context: The software context used for this command submission.
1338 * @header: Pointer to the command header in the command stream.
1339 *
1340 * This function adds the new query into the query COTABLE
1341 */
1342static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
1343 struct vmw_sw_context *sw_context,
1344 SVGA3dCmdHeader *header)
1345{
1346 struct vmw_dx_define_query_cmd {
1347 SVGA3dCmdHeader header;
1348 SVGA3dCmdDXDefineQuery q;
1349 } *cmd;
1350
1351 int ret;
1352 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
1353 struct vmw_resource *cotable_res;
1354
1355
1356 if (ctx_node == NULL) {
1357 DRM_ERROR("DX Context not set for query.\n");
1358 return -EINVAL;
1359 }
1360
1361 cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
1362
1363 if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
1364 cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
1365 return -EINVAL;
1366
1367 cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
1368 ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
1369 vmw_resource_unreference(&cotable_res);
1370
1371 return ret;
1372}
1373
1374
1375
1376/**
1377 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
1378 *
1379 * @dev_priv: Pointer to a device private struct.
1380 * @sw_context: The software context used for this command submission.
1381 * @header: Pointer to the command header in the command stream.
1382 *
1383 * The query bind operation will eventually associate the query ID
1384 * with its backing MOB. In this function, we take the user mode
1385 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
1386 * kernel mode equivalent.
1387 */
1388static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
1389 struct vmw_sw_context *sw_context,
1390 SVGA3dCmdHeader *header)
1391{
1392 struct vmw_dx_bind_query_cmd {
1393 SVGA3dCmdHeader header;
1394 SVGA3dCmdDXBindQuery q;
1395 } *cmd;
1396
1397 struct vmw_dma_buffer *vmw_bo;
1398 int ret;
1399
1400
1401 cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
1402
1403 /*
1404 * Look up the buffer pointed to by q.mobid, put it on the relocation
1405 * list so its kernel mode MOB ID can be filled in later
1406 */
1407 ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
1408 &vmw_bo);
1409
1410 if (ret != 0)
1411 return ret;
1412
1413 sw_context->dx_query_mob = vmw_bo;
1414 sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;
1415
1416 vmw_dmabuf_unreference(&vmw_bo);
1417
1418 return ret;
1419}
1420
1421
1422
1423/**
1424 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
1425 *
1426 * @dev_priv: Pointer to a device private struct.
1427 * @sw_context: The software context used for this command submission.
1428 * @header: Pointer to the command header in the command stream.
1429 */
1430static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
1431 struct vmw_sw_context *sw_context,
1432 SVGA3dCmdHeader *header)
1433{
1434 struct vmw_begin_gb_query_cmd {
1435 SVGA3dCmdHeader header;
1436 SVGA3dCmdBeginGBQuery q;
1437 } *cmd;
1438
1439 cmd = container_of(header, struct vmw_begin_gb_query_cmd,
1440 header);
1441
1442 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1443 user_context_converter, &cmd->q.cid,
1444 NULL);
1445}
1446
1447/**
1448 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
1449 *
1450 * @dev_priv: Pointer to a device private struct.
1451 * @sw_context: The software context used for this command submission.
1452 * @header: Pointer to the command header in the command stream.
1453 */
1454static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
1455 struct vmw_sw_context *sw_context,
1456 SVGA3dCmdHeader *header)
1457{
1458 struct vmw_begin_query_cmd {
1459 SVGA3dCmdHeader header;
1460 SVGA3dCmdBeginQuery q;
1461 } *cmd;
1462
1463 cmd = container_of(header, struct vmw_begin_query_cmd,
1464 header);
1465
1466 if (unlikely(dev_priv->has_mob)) {
1467 struct {
1468 SVGA3dCmdHeader header;
1469 SVGA3dCmdBeginGBQuery q;
1470 } gb_cmd;
1471
1472 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1473
1474 gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1475 gb_cmd.header.size = cmd->header.size;
1476 gb_cmd.q.cid = cmd->q.cid;
1477 gb_cmd.q.type = cmd->q.type;
1478
1479 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1480 return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1481 }
1482
1483 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1484 user_context_converter, &cmd->q.cid,
1485 NULL);
1486}
1487
1488/**
1489 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
1490 *
1491 * @dev_priv: Pointer to a device private struct.
1492 * @sw_context: The software context used for this command submission.
1493 * @header: Pointer to the command header in the command stream.
1494 */
1495static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1496 struct vmw_sw_context *sw_context,
1497 SVGA3dCmdHeader *header)
1498{
1499 struct vmw_dma_buffer *vmw_bo;
1500 struct vmw_query_cmd {
1501 SVGA3dCmdHeader header;
1502 SVGA3dCmdEndGBQuery q;
1503 } *cmd;
1504 int ret;
1505
1506 cmd = container_of(header, struct vmw_query_cmd, header);
1507 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1508 if (unlikely(ret != 0))
1509 return ret;
1510
1511 ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1512 &cmd->q.mobid,
1513 &vmw_bo);
1514 if (unlikely(ret != 0))
1515 return ret;
1516
1517 ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1518
1519 vmw_dmabuf_unreference(&vmw_bo);
1520 return ret;
1521}
1522
1523/**
1524 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
1525 *
1526 * @dev_priv: Pointer to a device private struct.
1527 * @sw_context: The software context used for this command submission.
1528 * @header: Pointer to the command header in the command stream.
1529 */
1530static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1531 struct vmw_sw_context *sw_context,
1532 SVGA3dCmdHeader *header)
1533{
1534 struct vmw_dma_buffer *vmw_bo;
1535 struct vmw_query_cmd {
1536 SVGA3dCmdHeader header;
1537 SVGA3dCmdEndQuery q;
1538 } *cmd;
1539 int ret;
1540
1541 cmd = container_of(header, struct vmw_query_cmd, header);
1542 if (dev_priv->has_mob) {
1543 struct {
1544 SVGA3dCmdHeader header;
1545 SVGA3dCmdEndGBQuery q;
1546 } gb_cmd;
1547
1548 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1549
1550 gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1551 gb_cmd.header.size = cmd->header.size;
1552 gb_cmd.q.cid = cmd->q.cid;
1553 gb_cmd.q.type = cmd->q.type;
1554 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1555 gb_cmd.q.offset = cmd->q.guestResult.offset;
1556
1557 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1558 return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1559 }
1560
1561 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1562 if (unlikely(ret != 0))
1563 return ret;
1564
1565 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1566 &cmd->q.guestResult,
1567 &vmw_bo);
1568 if (unlikely(ret != 0))
1569 return ret;
1570
1571 ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);
1572
1573 vmw_dmabuf_unreference(&vmw_bo);
1574 return ret;
1575}
1576
1577/**
1578 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
1579 *
1580 * @dev_priv: Pointer to a device private struct.
1581 * @sw_context: The software context used for this command submission.
1582 * @header: Pointer to the command header in the command stream.
1583 */
1584static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1585 struct vmw_sw_context *sw_context,
1586 SVGA3dCmdHeader *header)
1587{
1588 struct vmw_dma_buffer *vmw_bo;
1589 struct vmw_query_cmd {
1590 SVGA3dCmdHeader header;
1591 SVGA3dCmdWaitForGBQuery q;
1592 } *cmd;
1593 int ret;
1594
1595 cmd = container_of(header, struct vmw_query_cmd, header);
1596 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1597 if (unlikely(ret != 0))
1598 return ret;
1599
1600 ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1601 &cmd->q.mobid,
1602 &vmw_bo);
1603 if (unlikely(ret != 0))
1604 return ret;
1605
1606 vmw_dmabuf_unreference(&vmw_bo);
1607 return 0;
1608}
1609
1610/**
1611 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
1612 *
1613 * @dev_priv: Pointer to a device private struct.
1614 * @sw_context: The software context used for this command submission.
1615 * @header: Pointer to the command header in the command stream.
1616 */
1617static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1618 struct vmw_sw_context *sw_context,
1619 SVGA3dCmdHeader *header)
1620{
1621 struct vmw_dma_buffer *vmw_bo;
1622 struct vmw_query_cmd {
1623 SVGA3dCmdHeader header;
1624 SVGA3dCmdWaitForQuery q;
1625 } *cmd;
1626 int ret;
1627
1628 cmd = container_of(header, struct vmw_query_cmd, header);
1629 if (dev_priv->has_mob) {
1630 struct {
1631 SVGA3dCmdHeader header;
1632 SVGA3dCmdWaitForGBQuery q;
1633 } gb_cmd;
1634
1635 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1636
1637 gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1638 gb_cmd.header.size = cmd->header.size;
1639 gb_cmd.q.cid = cmd->q.cid;
1640 gb_cmd.q.type = cmd->q.type;
1641 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1642 gb_cmd.q.offset = cmd->q.guestResult.offset;
1643
1644 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1645 return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1646 }
1647
1648 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1649 if (unlikely(ret != 0))
1650 return ret;
1651
1652 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1653 &cmd->q.guestResult,
1654 &vmw_bo);
1655 if (unlikely(ret != 0))
1656 return ret;
1657
1658 vmw_dmabuf_unreference(&vmw_bo);
1659 return 0;
1660}
1661
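/**
 * vmw_cmd_dma - Validate a surface-DMA command (SVGA3dCmdSurfaceDMA body).
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Translates the guest pointer to the backing buffer, verifies the DMA
 * suffix and that the transfer stays within the buffer object, clamps the
 * maximum offset, validates the host surface and lets the kms code snoop
 * cursor surface contents.
 */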
1662static int vmw_cmd_dma(struct vmw_private *dev_priv,
1663 struct vmw_sw_context *sw_context,
1664 SVGA3dCmdHeader *header)
1665{
1666 struct vmw_dma_buffer *vmw_bo = NULL;
1667 struct vmw_surface *srf = NULL;
1668 struct vmw_dma_cmd {
1669 SVGA3dCmdHeader header;
1670 SVGA3dCmdSurfaceDMA dma;
1671 } *cmd;
1672 int ret;
1673 SVGA3dCmdSurfaceDMASuffix *suffix;
1674 uint32_t bo_size;
1675
1676 cmd = container_of(header, struct vmw_dma_cmd, header);
1677 suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1678 header->size - sizeof(*suffix));
1679
 1680	/* Make sure device and verifier stay in sync. */
1681 if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1682 DRM_ERROR("Invalid DMA suffix size.\n");
1683 return -EINVAL;
1684 }
1685
1686 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1687 &cmd->dma.guest.ptr,
1688 &vmw_bo);
1689 if (unlikely(ret != 0))
1690 return ret;
1691
1692 /* Make sure DMA doesn't cross BO boundaries. */
1693 bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1694 if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1695 DRM_ERROR("Invalid DMA offset.\n");
1696 return -EINVAL;
1697 }
1698
1699 bo_size -= cmd->dma.guest.ptr.offset;
1700 if (unlikely(suffix->maximumOffset > bo_size))
1701 suffix->maximumOffset = bo_size;
1702
1703 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1704 user_surface_converter, &cmd->dma.host.sid,
1705 NULL);
1706 if (unlikely(ret != 0)) {
1707 if (unlikely(ret != -ERESTARTSYS))
1708 DRM_ERROR("could not find surface for DMA.\n");
1709 goto out_no_surface;
1710 }
1711
1712 srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1713
1714 vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1715 header);
1716
1717out_no_surface:
1718 vmw_dmabuf_unreference(&vmw_bo);
1719 return ret;
1720}
1721
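/**
 * vmw_cmd_draw - Validate a draw-primitives command
 * (SVGA3dCmdDrawPrimitives body).
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the context, bounds-checks the declared number of vertex
 * declarations and index ranges against the command size, and validates
 * each referenced vertex- and index-buffer surface.
 */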
1722static int vmw_cmd_draw(struct vmw_private *dev_priv,
1723 struct vmw_sw_context *sw_context,
1724 SVGA3dCmdHeader *header)
1725{
1726 struct vmw_draw_cmd {
1727 SVGA3dCmdHeader header;
1728 SVGA3dCmdDrawPrimitives body;
1729 } *cmd;
1730 SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1731 (unsigned long)header + sizeof(*cmd));
1732 SVGA3dPrimitiveRange *range;
1733 uint32_t i;
1734 uint32_t maxnum;
1735 int ret;
1736
1737 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1738 if (unlikely(ret != 0))
1739 return ret;
1740
1741 cmd = container_of(header, struct vmw_draw_cmd, header);
1742 maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1743
1744 if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1745 DRM_ERROR("Illegal number of vertex declarations.\n");
1746 return -EINVAL;
1747 }
1748
1749 for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1750 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1751 user_surface_converter,
1752 &decl->array.surfaceId, NULL);
1753 if (unlikely(ret != 0))
1754 return ret;
1755 }
1756
1757 maxnum = (header->size - sizeof(cmd->body) -
1758 cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1759 if (unlikely(cmd->body.numRanges > maxnum)) {
1760 DRM_ERROR("Illegal number of index ranges.\n");
1761 return -EINVAL;
1762 }
1763
1764 range = (SVGA3dPrimitiveRange *) decl;
1765 for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1766 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1767 user_surface_converter,
1768 &range->indexArray.surfaceId, NULL);
1769 if (unlikely(ret != 0))
1770 return ret;
1771 }
1772 return 0;
1773}
1774
1775
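/**
 * vmw_cmd_tex_state - Validate a set-texture-state command
 * (SVGA3dCmdSetTextureState body).
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the context, and for each SVGA3D_TS_BIND_TEXTURE state validates
 * the texture surface and, when MOBs are available, records the texture
 * binding in the context's staged bindings.
 */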
1776static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1777 struct vmw_sw_context *sw_context,
1778 SVGA3dCmdHeader *header)
1779{
1780 struct vmw_tex_state_cmd {
1781 SVGA3dCmdHeader header;
1782 SVGA3dCmdSetTextureState state;
1783 } *cmd;
1784
1785 SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1786 ((unsigned long) header + header->size + sizeof(header));
1787 SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1788 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1789 struct vmw_resource_val_node *ctx_node;
1790 struct vmw_resource_val_node *res_node;
1791 int ret;
1792
1793 cmd = container_of(header, struct vmw_tex_state_cmd,
1794 header);
1795
1796 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1797 user_context_converter, &cmd->state.cid,
1798 &ctx_node);
1799 if (unlikely(ret != 0))
1800 return ret;
1801
1802 for (; cur_state < last_state; ++cur_state) {
1803 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1804 continue;
1805
1806 if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
1807 DRM_ERROR("Illegal texture/sampler unit %u.\n",
1808 (unsigned) cur_state->stage);
1809 return -EINVAL;
1810 }
1811
1812 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1813 user_surface_converter,
1814 &cur_state->value, &res_node);
1815 if (unlikely(ret != 0))
1816 return ret;
1817
1818 if (dev_priv->has_mob) {
1819 struct vmw_ctx_bindinfo_tex binding;
1820
1821 binding.bi.ctx = ctx_node->res;
1822 binding.bi.res = res_node ? res_node->res : NULL;
1823 binding.bi.bt = vmw_ctx_binding_tex;
1824 binding.texture_stage = cur_state->stage;
1825 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
1826 0, binding.texture_stage);
1827 }
1828 }
1829
1830 return 0;
1831}
1832
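/**
 * vmw_cmd_check_define_gmrfb - Validate a define-GMRFB fifo command by
 * translating the guest pointer to the framebuffer buffer object.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 */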
1833static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1834 struct vmw_sw_context *sw_context,
1835 void *buf)
1836{
1837 struct vmw_dma_buffer *vmw_bo;
1838 int ret;
1839
1840 struct {
1841 uint32_t header;
1842 SVGAFifoCmdDefineGMRFB body;
1843 } *cmd = buf;
1844
1845 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1846 &cmd->body.ptr,
1847 &vmw_bo);
1848 if (unlikely(ret != 0))
1849 return ret;
1850
1851 vmw_dmabuf_unreference(&vmw_bo);
1852
1853 return ret;
1854}
1855
1856
1857/**
1858 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1859 * switching
1860 *
1861 * @dev_priv: Pointer to a device private struct.
1862 * @sw_context: The software context being used for this batch.
1863 * @val_node: The validation node representing the resource.
1864 * @buf_id: Pointer to the user-space backup buffer handle in the command
1865 * stream.
1866 * @backup_offset: Offset of backup into MOB.
1867 *
1868 * This function prepares for registering a switch of backup buffers
1869 * in the resource metadata just prior to unreserving.
1871 */
1872static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1873 struct vmw_sw_context *sw_context,
1874 struct vmw_resource_val_node *val_node,
1875 uint32_t *buf_id,
1876 unsigned long backup_offset)
1877{
1878 struct vmw_dma_buffer *dma_buf;
1879 int ret;
1880
1881 ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1882 if (ret)
1883 return ret;
1884
1885 val_node->switching_backup = true;
1886 if (val_node->first_usage)
1887 val_node->no_buffer_needed = true;
1888
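	/* Replace any previously staged backup buffer for this resource. */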
1889 vmw_dmabuf_unreference(&val_node->new_backup);
1890 val_node->new_backup = dma_buf;
1891 val_node->new_backup_offset = backup_offset;
1892
1893 return 0;
1894}
1895
1897/**
1898 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1899 *
1900 * @dev_priv: Pointer to a device private struct.
1901 * @sw_context: The software context being used for this batch.
1902 * @res_type: The resource type.
1903 * @converter: Information about user-space binding for this resource type.
1904 * @res_id: Pointer to the user-space resource handle in the command stream.
1905 * @buf_id: Pointer to the user-space backup buffer handle in the command
1906 * stream.
1907 * @backup_offset: Offset of backup into MOB.
1908 *
1909 * This function prepares for registering a switch of backup buffers
1910 * in the resource metadata just prior to unreserving. It's basically a wrapper
1911 * around vmw_cmd_res_switch_backup with a different interface.
1912 */
1913static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1914 struct vmw_sw_context *sw_context,
1915 enum vmw_res_type res_type,
1916 const struct vmw_user_resource_conv
1917 *converter,
1918 uint32_t *res_id,
1919 uint32_t *buf_id,
1920 unsigned long backup_offset)
1921{
1922 struct vmw_resource_val_node *val_node;
1923 int ret;
1924
1925 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1926 converter, res_id, &val_node);
1927 if (ret)
1928 return ret;
1929
1930 return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
1931 buf_id, backup_offset);
1932}
1933
1934/**
1935 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1936 * command
1937 *
1938 * @dev_priv: Pointer to a device private struct.
1939 * @sw_context: The software context being used for this batch.
1940 * @header: Pointer to the command header in the command stream.
1941 */
1942static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1943 struct vmw_sw_context *sw_context,
1944 SVGA3dCmdHeader *header)
1945{
1946 struct vmw_bind_gb_surface_cmd {
1947 SVGA3dCmdHeader header;
1948 SVGA3dCmdBindGBSurface body;
1949 } *cmd;
1950
1951 cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1952
1953 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1954 user_surface_converter,
1955 &cmd->body.sid, &cmd->body.mobid,
1956 0);
1957}
1958
1959/**
1960 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1961 * command
1962 *
1963 * @dev_priv: Pointer to a device private struct.
1964 * @sw_context: The software context being used for this batch.
1965 * @header: Pointer to the command header in the command stream.
1966 */
1967static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1968 struct vmw_sw_context *sw_context,
1969 SVGA3dCmdHeader *header)
1970{
1971 struct vmw_gb_surface_cmd {
1972 SVGA3dCmdHeader header;
1973 SVGA3dCmdUpdateGBImage body;
1974 } *cmd;
1975
1976 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1977
1978 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1979 user_surface_converter,
1980 &cmd->body.image.sid, NULL);
1981}
1982
1983/**
1984 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1985 * command
1986 *
1987 * @dev_priv: Pointer to a device private struct.
1988 * @sw_context: The software context being used for this batch.
1989 * @header: Pointer to the command header in the command stream.
1990 */
1991static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1992 struct vmw_sw_context *sw_context,
1993 SVGA3dCmdHeader *header)
1994{
1995 struct vmw_gb_surface_cmd {
1996 SVGA3dCmdHeader header;
1997 SVGA3dCmdUpdateGBSurface body;
1998 } *cmd;
1999
2000 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2001
2002 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2003 user_surface_converter,
2004 &cmd->body.sid, NULL);
2005}
2006
2007/**
2008 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
2009 * command
2010 *
2011 * @dev_priv: Pointer to a device private struct.
2012 * @sw_context: The software context being used for this batch.
2013 * @header: Pointer to the command header in the command stream.
2014 */
2015static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
2016 struct vmw_sw_context *sw_context,
2017 SVGA3dCmdHeader *header)
2018{
2019 struct vmw_gb_surface_cmd {
2020 SVGA3dCmdHeader header;
2021 SVGA3dCmdReadbackGBImage body;
2022 } *cmd;
2023
2024 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2025
2026 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2027 user_surface_converter,
2028 &cmd->body.image.sid, NULL);
2029}
2030
2031/**
2032 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
2033 * command
2034 *
2035 * @dev_priv: Pointer to a device private struct.
2036 * @sw_context: The software context being used for this batch.
2037 * @header: Pointer to the command header in the command stream.
2038 */
2039static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
2040 struct vmw_sw_context *sw_context,
2041 SVGA3dCmdHeader *header)
2042{
2043 struct vmw_gb_surface_cmd {
2044 SVGA3dCmdHeader header;
2045 SVGA3dCmdReadbackGBSurface body;
2046 } *cmd;
2047
2048 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2049
2050 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2051 user_surface_converter,
2052 &cmd->body.sid, NULL);
2053}
2054
2055/**
2056 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
2057 * command
2058 *
2059 * @dev_priv: Pointer to a device private struct.
2060 * @sw_context: The software context being used for this batch.
2061 * @header: Pointer to the command header in the command stream.
2062 */
2063static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
2064 struct vmw_sw_context *sw_context,
2065 SVGA3dCmdHeader *header)
2066{
2067 struct vmw_gb_surface_cmd {
2068 SVGA3dCmdHeader header;
2069 SVGA3dCmdInvalidateGBImage body;
2070 } *cmd;
2071
2072 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2073
2074 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2075 user_surface_converter,
2076 &cmd->body.image.sid, NULL);
2077}
2078
2079/**
2080 * vmw_cmd_invalidate_gb_surface - Validate an
2081 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
2082 *
2083 * @dev_priv: Pointer to a device private struct.
2084 * @sw_context: The software context being used for this batch.
2085 * @header: Pointer to the command header in the command stream.
2086 */
2087static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
2088 struct vmw_sw_context *sw_context,
2089 SVGA3dCmdHeader *header)
2090{
2091 struct vmw_gb_surface_cmd {
2092 SVGA3dCmdHeader header;
2093 SVGA3dCmdInvalidateGBSurface body;
2094 } *cmd;
2095
2096 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2097
2098 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2099 user_surface_converter,
2100 &cmd->body.sid, NULL);
2101}
2102
2104/**
2105 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
2106 * command
2107 *
2108 * @dev_priv: Pointer to a device private struct.
2109 * @sw_context: The software context being used for this batch.
2110 * @header: Pointer to the command header in the command stream.
2111 */
2112static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2113 struct vmw_sw_context *sw_context,
2114 SVGA3dCmdHeader *header)
2115{
2116 struct vmw_shader_define_cmd {
2117 SVGA3dCmdHeader header;
2118 SVGA3dCmdDefineShader body;
2119 } *cmd;
2120 int ret;
2121 size_t size;
2122 struct vmw_resource_val_node *val;
2123
2124 cmd = container_of(header, struct vmw_shader_define_cmd,
2125 header);
2126
2127 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2128 user_context_converter, &cmd->body.cid,
2129 &val);
2130 if (unlikely(ret != 0))
2131 return ret;
2132
2133 if (unlikely(!dev_priv->has_mob))
2134 return 0;
2135
2136 size = cmd->header.size - sizeof(cmd->body);
2137 ret = vmw_compat_shader_add(dev_priv,
2138 vmw_context_res_man(val->res),
2139 cmd->body.shid, cmd + 1,
2140 cmd->body.type, size,
2141 &sw_context->staged_cmd_res);
2142 if (unlikely(ret != 0))
2143 return ret;
2144
2145 return vmw_resource_relocation_add(&sw_context->res_relocations,
2146 NULL, &cmd->header.id -
2147 sw_context->buf_start);
2150}
2151
2152/**
2153 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
2154 * command
2155 *
2156 * @dev_priv: Pointer to a device private struct.
2157 * @sw_context: The software context being used for this batch.
2158 * @header: Pointer to the command header in the command stream.
2159 */
2160static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2161 struct vmw_sw_context *sw_context,
2162 SVGA3dCmdHeader *header)
2163{
2164 struct vmw_shader_destroy_cmd {
2165 SVGA3dCmdHeader header;
2166 SVGA3dCmdDestroyShader body;
2167 } *cmd;
2168 int ret;
2169 struct vmw_resource_val_node *val;
2170
2171 cmd = container_of(header, struct vmw_shader_destroy_cmd,
2172 header);
2173
2174 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2175 user_context_converter, &cmd->body.cid,
2176 &val);
2177 if (unlikely(ret != 0))
2178 return ret;
2179
2180 if (unlikely(!dev_priv->has_mob))
2181 return 0;
2182
2183 ret = vmw_shader_remove(vmw_context_res_man(val->res),
2184 cmd->body.shid,
2185 cmd->body.type,
2186 &sw_context->staged_cmd_res);
2187 if (unlikely(ret != 0))
2188 return ret;
2189
2190 return vmw_resource_relocation_add(&sw_context->res_relocations,
2191 NULL, &cmd->header.id -
2192 sw_context->buf_start);
2195}
2196
2197/**
2198 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
2199 * command
2200 *
2201 * @dev_priv: Pointer to a device private struct.
2202 * @sw_context: The software context being used for this batch.
2203 * @header: Pointer to the command header in the command stream.
2204 */
2205static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
2206 struct vmw_sw_context *sw_context,
2207 SVGA3dCmdHeader *header)
2208{
2209 struct vmw_set_shader_cmd {
2210 SVGA3dCmdHeader header;
2211 SVGA3dCmdSetShader body;
2212 } *cmd;
2213 struct vmw_resource_val_node *ctx_node, *res_node = NULL;
2214 struct vmw_ctx_bindinfo_shader binding;
2215 struct vmw_resource *res = NULL;
2216 int ret;
2217
2218 cmd = container_of(header, struct vmw_set_shader_cmd,
2219 header);
2220
2221 if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2222 DRM_ERROR("Illegal shader type %u.\n",
2223 (unsigned) cmd->body.type);
2224 return -EINVAL;
2225 }
2226
2227 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2228 user_context_converter, &cmd->body.cid,
2229 &ctx_node);
2230 if (unlikely(ret != 0))
2231 return ret;
2232
2233 if (!dev_priv->has_mob)
2234 return 0;
2235
2236 if (cmd->body.shid != SVGA3D_INVALID_ID) {
2237 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2238 cmd->body.shid,
2239 cmd->body.type);
2240
2241 if (!IS_ERR(res)) {
2242 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
2243 &cmd->body.shid, res,
2244 &res_node);
2245 vmw_resource_unreference(&res);
2246 if (unlikely(ret != 0))
2247 return ret;
2248 }
2249 }
2250
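	/*
	 * If no compat (per-context) shader was set up above, fall back to
	 * looking up the handle as a user-space shader resource.
	 */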
2251 if (!res_node) {
2252 ret = vmw_cmd_res_check(dev_priv, sw_context,
2253 vmw_res_shader,
2254 user_shader_converter,
2255 &cmd->body.shid, &res_node);
2256 if (unlikely(ret != 0))
2257 return ret;
2258 }
2259
2260 binding.bi.ctx = ctx_node->res;
2261 binding.bi.res = res_node ? res_node->res : NULL;
2262 binding.bi.bt = vmw_ctx_binding_shader;
2263 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2264 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2265 binding.shader_slot, 0);
2266 return 0;
2267}
2268
2269/**
2270 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2271 * command
2272 *
2273 * @dev_priv: Pointer to a device private struct.
2274 * @sw_context: The software context being used for this batch.
2275 * @header: Pointer to the command header in the command stream.
2276 */
2277static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2278 struct vmw_sw_context *sw_context,
2279 SVGA3dCmdHeader *header)
2280{
2281 struct vmw_set_shader_const_cmd {
2282 SVGA3dCmdHeader header;
2283 SVGA3dCmdSetShaderConst body;
2284 } *cmd;
2285 int ret;
2286
2287 cmd = container_of(header, struct vmw_set_shader_const_cmd,
2288 header);
2289
2290 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2291 user_context_converter, &cmd->body.cid,
2292 NULL);
2293 if (unlikely(ret != 0))
2294 return ret;
2295
2296 if (dev_priv->has_mob)
2297 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2298
2299 return 0;
2300}
2301
2302/**
2303 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2304 * command
2305 *
2306 * @dev_priv: Pointer to a device private struct.
2307 * @sw_context: The software context being used for this batch.
2308 * @header: Pointer to the command header in the command stream.
2309 */
2310static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2311 struct vmw_sw_context *sw_context,
2312 SVGA3dCmdHeader *header)
2313{
2314 struct vmw_bind_gb_shader_cmd {
2315 SVGA3dCmdHeader header;
2316 SVGA3dCmdBindGBShader body;
2317 } *cmd;
2318
2319 cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2320 header);
2321
2322 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2323 user_shader_converter,
2324 &cmd->body.shid, &cmd->body.mobid,
2325 cmd->body.offsetInBytes);
2326}
2327
2328/**
2329 * vmw_cmd_dx_set_single_constant_buffer - Validate an
2330 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2331 *
2332 * @dev_priv: Pointer to a device private struct.
2333 * @sw_context: The software context being used for this batch.
2334 * @header: Pointer to the command header in the command stream.
2335 */
2336static int
2337vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2338 struct vmw_sw_context *sw_context,
2339 SVGA3dCmdHeader *header)
2340{
2341 struct {
2342 SVGA3dCmdHeader header;
2343 SVGA3dCmdDXSetSingleConstantBuffer body;
2344 } *cmd;
2345 struct vmw_resource_val_node *res_node = NULL;
2346 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2347 struct vmw_ctx_bindinfo_cb binding;
2348 int ret;
2349
2350 if (unlikely(ctx_node == NULL)) {
2351 DRM_ERROR("DX Context not set.\n");
2352 return -EINVAL;
2353 }
2354
2355 cmd = container_of(header, typeof(*cmd), header);
2356 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2357 user_surface_converter,
2358 &cmd->body.sid, &res_node);
2359 if (unlikely(ret != 0))
2360 return ret;
2361
2362 binding.bi.ctx = ctx_node->res;
2363 binding.bi.res = res_node ? res_node->res : NULL;
2364 binding.bi.bt = vmw_ctx_binding_cb;
2365 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2366 binding.offset = cmd->body.offsetInBytes;
2367 binding.size = cmd->body.sizeInBytes;
2368 binding.slot = cmd->body.slot;
2369
2370 if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2371 binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2372 DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2373 (unsigned) cmd->body.type,
2374 (unsigned) binding.slot);
2375 return -EINVAL;
2376 }
2377
2378 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2379 binding.shader_slot, binding.slot);
2380
2381 return 0;
2382}
2383
2384/**
2385 * vmw_cmd_dx_set_shader_res - Validate an
2386 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2387 *
2388 * @dev_priv: Pointer to a device private struct.
2389 * @sw_context: The software context being used for this batch.
2390 * @header: Pointer to the command header in the command stream.
2391 */
2392static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2393 struct vmw_sw_context *sw_context,
2394 SVGA3dCmdHeader *header)
2395{
2396 struct {
2397 SVGA3dCmdHeader header;
2398 SVGA3dCmdDXSetShaderResources body;
2399 } *cmd = container_of(header, typeof(*cmd), header);
2400 u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2401 sizeof(SVGA3dShaderResourceViewId);
2402
2403 if ((u64) cmd->body.startView + (u64) num_sr_view >
2404 (u64) SVGA3D_DX_MAX_SRVIEWS ||
2405 cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2406 DRM_ERROR("Invalid shader binding.\n");
2407 return -EINVAL;
2408 }
2409
2410 return vmw_view_bindings_add(sw_context, vmw_view_sr,
2411 vmw_ctx_binding_sr,
2412 cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2413 (void *) &cmd[1], num_sr_view,
2414 cmd->body.startView);
2415}
2416
2417/**
2418 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2419 * command
2420 *
2421 * @dev_priv: Pointer to a device private struct.
2422 * @sw_context: The software context being used for this batch.
2423 * @header: Pointer to the command header in the command stream.
2424 */
2425static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2426 struct vmw_sw_context *sw_context,
2427 SVGA3dCmdHeader *header)
2428{
2429 struct {
2430 SVGA3dCmdHeader header;
2431 SVGA3dCmdDXSetShader body;
2432 } *cmd;
2433 struct vmw_resource *res = NULL;
2434 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2435 struct vmw_ctx_bindinfo_shader binding;
2436 int ret = 0;
2437
2438 if (unlikely(ctx_node == NULL)) {
2439 DRM_ERROR("DX Context not set.\n");
2440 return -EINVAL;
2441 }
2442
2443 cmd = container_of(header, typeof(*cmd), header);
2444
2445 if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2446 DRM_ERROR("Illegal shader type %u.\n",
2447 (unsigned) cmd->body.type);
2448 return -EINVAL;
2449 }
2450
2451 if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2452 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2453 if (IS_ERR(res)) {
2454 DRM_ERROR("Could not find shader for binding.\n");
2455 return PTR_ERR(res);
2456 }
2457
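		/* Make sure the shader is put on the validation list. */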
2458 ret = vmw_resource_val_add(sw_context, res, NULL);
2459 if (ret)
2460 goto out_unref;
2461 }
2462
2463 binding.bi.ctx = ctx_node->res;
2464 binding.bi.res = res;
2465 binding.bi.bt = vmw_ctx_binding_dx_shader;
2466 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2467
2468 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2469 binding.shader_slot, 0);
2470out_unref:
2471 if (res)
2472 vmw_resource_unreference(&res);
2473
2474 return ret;
2475}
2476
2477/**
2478 * vmw_cmd_dx_set_vertex_buffers - Validate an
2479 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2480 *
2481 * @dev_priv: Pointer to a device private struct.
2482 * @sw_context: The software context being used for this batch.
2483 * @header: Pointer to the command header in the command stream.
2484 */
2485static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2486 struct vmw_sw_context *sw_context,
2487 SVGA3dCmdHeader *header)
2488{
2489 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2490 struct vmw_ctx_bindinfo_vb binding;
2491 struct vmw_resource_val_node *res_node;
2492 struct {
2493 SVGA3dCmdHeader header;
2494 SVGA3dCmdDXSetVertexBuffers body;
2495 SVGA3dVertexBuffer buf[];
2496 } *cmd;
2497 int i, ret, num;
2498
2499 if (unlikely(ctx_node == NULL)) {
2500 DRM_ERROR("DX Context not set.\n");
2501 return -EINVAL;
2502 }
2503
2504 cmd = container_of(header, typeof(*cmd), header);
2505 num = (cmd->header.size - sizeof(cmd->body)) /
2506 sizeof(SVGA3dVertexBuffer);
2507 if ((u64)num + (u64)cmd->body.startBuffer >
2508 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2509 DRM_ERROR("Invalid number of vertex buffers.\n");
2510 return -EINVAL;
2511 }
2512
2513 for (i = 0; i < num; i++) {
2514 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2515 user_surface_converter,
2516 &cmd->buf[i].sid, &res_node);
2517 if (unlikely(ret != 0))
2518 return ret;
2519
2520 binding.bi.ctx = ctx_node->res;
2521 binding.bi.bt = vmw_ctx_binding_vb;
2522 binding.bi.res = ((res_node) ? res_node->res : NULL);
2523 binding.offset = cmd->buf[i].offset;
2524 binding.stride = cmd->buf[i].stride;
2525 binding.slot = i + cmd->body.startBuffer;
2526
2527 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2528 0, binding.slot);
2529 }
2530
2531 return 0;
2532}
2533
2534/**
2535 * vmw_cmd_dx_set_index_buffer - Validate an
2536 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2537 *
2538 * @dev_priv: Pointer to a device private struct.
2539 * @sw_context: The software context being used for this batch.
2540 * @header: Pointer to the command header in the command stream.
2541 */
2542static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2543 struct vmw_sw_context *sw_context,
2544 SVGA3dCmdHeader *header)
2545{
2546 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2547 struct vmw_ctx_bindinfo_ib binding;
2548 struct vmw_resource_val_node *res_node;
2549 struct {
2550 SVGA3dCmdHeader header;
2551 SVGA3dCmdDXSetIndexBuffer body;
2552 } *cmd;
2553 int ret;
2554
2555 if (unlikely(ctx_node == NULL)) {
2556 DRM_ERROR("DX Context not set.\n");
2557 return -EINVAL;
2558 }
2559
2560 cmd = container_of(header, typeof(*cmd), header);
2561 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2562 user_surface_converter,
2563 &cmd->body.sid, &res_node);
2564 if (unlikely(ret != 0))
2565 return ret;
2566
2567 binding.bi.ctx = ctx_node->res;
2568 binding.bi.res = ((res_node) ? res_node->res : NULL);
2569 binding.bi.bt = vmw_ctx_binding_ib;
2570 binding.offset = cmd->body.offset;
2571 binding.format = cmd->body.format;
2572
2573 vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2574
2575 return 0;
2576}
2577
2578/**
2579 * vmw_cmd_dx_set_rendertargets - Validate an
2580 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2581 *
2582 * @dev_priv: Pointer to a device private struct.
2583 * @sw_context: The software context being used for this batch.
2584 * @header: Pointer to the command header in the command stream.
2585 */
2586static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2587 struct vmw_sw_context *sw_context,
2588 SVGA3dCmdHeader *header)
2589{
2590 struct {
2591 SVGA3dCmdHeader header;
2592 SVGA3dCmdDXSetRenderTargets body;
2593 } *cmd = container_of(header, typeof(*cmd), header);
2594 int ret;
2595 u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2596 sizeof(SVGA3dRenderTargetViewId);
2597
2598 if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2599 DRM_ERROR("Invalid DX Rendertarget binding.\n");
2600 return -EINVAL;
2601 }
2602
2603 ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2604 vmw_ctx_binding_ds, 0,
2605 &cmd->body.depthStencilViewId, 1, 0);
2606 if (ret)
2607 return ret;
2608
2609 return vmw_view_bindings_add(sw_context, vmw_view_rt,
2610 vmw_ctx_binding_dx_rt, 0,
2611 (void *)&cmd[1], num_rt_view, 0);
2612}
2613
2614/**
2615 * vmw_cmd_dx_clear_rendertarget_view - Validate an
2616 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2617 *
2618 * @dev_priv: Pointer to a device private struct.
2619 * @sw_context: The software context being used for this batch.
2620 * @header: Pointer to the command header in the command stream.
2621 */
2622static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2623 struct vmw_sw_context *sw_context,
2624 SVGA3dCmdHeader *header)
2625{
2626 struct {
2627 SVGA3dCmdHeader header;
2628 SVGA3dCmdDXClearRenderTargetView body;
2629 } *cmd = container_of(header, typeof(*cmd), header);
2630
2631 return vmw_view_id_val_add(sw_context, vmw_view_rt,
2632 cmd->body.renderTargetViewId);
2633}
2634
2635/**
2636 * vmw_cmd_dx_clear_depthstencil_view - Validate an
2637 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2638 *
2639 * @dev_priv: Pointer to a device private struct.
2640 * @sw_context: The software context being used for this batch.
2641 * @header: Pointer to the command header in the command stream.
2642 */
2643static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2644 struct vmw_sw_context *sw_context,
2645 SVGA3dCmdHeader *header)
2646{
2647 struct {
2648 SVGA3dCmdHeader header;
2649 SVGA3dCmdDXClearDepthStencilView body;
2650 } *cmd = container_of(header, typeof(*cmd), header);
2651
2652 return vmw_view_id_val_add(sw_context, vmw_view_ds,
2653 cmd->body.depthStencilViewId);
2654}
2655
2656static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2657 struct vmw_sw_context *sw_context,
2658 SVGA3dCmdHeader *header)
2659{
2660 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2661 struct vmw_resource_val_node *srf_node;
2662 struct vmw_resource *res;
2663 enum vmw_view_type view_type;
2664 int ret;
2665 /*
2666 * This is based on the fact that all affected define commands have
2667 * the same initial command body layout.
2668 */
2669 struct {
2670 SVGA3dCmdHeader header;
2671 uint32 defined_id;
2672 uint32 sid;
2673 } *cmd;
2674
2675 if (unlikely(ctx_node == NULL)) {
2676 DRM_ERROR("DX Context not set.\n");
2677 return -EINVAL;
2678 }
2679
2680 view_type = vmw_view_cmd_to_type(header->id);
2681 cmd = container_of(header, typeof(*cmd), header);
2682 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2683 user_surface_converter,
2684 &cmd->sid, &srf_node);
2685 if (unlikely(ret != 0))
2686 return ret;
2687
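	/*
	 * Notify the per-context cotable for this view type of the new view
	 * id before the view itself is added.
	 */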
2688 res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2689 ret = vmw_cotable_notify(res, cmd->defined_id);
2690 vmw_resource_unreference(&res);
2691 if (unlikely(ret != 0))
2692 return ret;
2693
2694 return vmw_view_add(sw_context->man,
2695 ctx_node->res,
2696 srf_node->res,
2697 view_type,
2698 cmd->defined_id,
2699 header,
2700 header->size + sizeof(*header),
2701 &sw_context->staged_cmd_res);
2702}
2703
2704/**
2705 * vmw_cmd_dx_set_so_targets - Validate an
2706 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2707 *
2708 * @dev_priv: Pointer to a device private struct.
2709 * @sw_context: The software context being used for this batch.
2710 * @header: Pointer to the command header in the command stream.
2711 */
2712static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2713 struct vmw_sw_context *sw_context,
2714 SVGA3dCmdHeader *header)
2715{
2716 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2717 struct vmw_ctx_bindinfo_so binding;
2718 struct vmw_resource_val_node *res_node;
2719 struct {
2720 SVGA3dCmdHeader header;
2721 SVGA3dCmdDXSetSOTargets body;
2722 SVGA3dSoTarget targets[];
2723 } *cmd;
2724 int i, ret, num;
2725
2726 if (unlikely(ctx_node == NULL)) {
2727 DRM_ERROR("DX Context not set.\n");
2728 return -EINVAL;
2729 }
2730
2731 cmd = container_of(header, typeof(*cmd), header);
2732 num = (cmd->header.size - sizeof(cmd->body)) /
2733 sizeof(SVGA3dSoTarget);
2734
2735 if (num > SVGA3D_DX_MAX_SOTARGETS) {
2736 DRM_ERROR("Invalid DX SO binding.\n");
2737 return -EINVAL;
2738 }
2739
2740 for (i = 0; i < num; i++) {
2741 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2742 user_surface_converter,
2743 &cmd->targets[i].sid, &res_node);
2744 if (unlikely(ret != 0))
2745 return ret;
2746
2747 binding.bi.ctx = ctx_node->res;
2748 binding.bi.res = ((res_node) ? res_node->res : NULL);
2749		binding.bi.bt = vmw_ctx_binding_so;
2750 binding.offset = cmd->targets[i].offset;
2751 binding.size = cmd->targets[i].sizeInBytes;
2752 binding.slot = i;
2753
2754 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2755 0, binding.slot);
2756 }
2757
2758 return 0;
2759}
2760
2761static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2762 struct vmw_sw_context *sw_context,
2763 SVGA3dCmdHeader *header)
2764{
2765 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2766 struct vmw_resource *res;
2767 /*
2768 * This is based on the fact that all affected define commands have
2769 * the same initial command body layout.
2770 */
2771 struct {
2772 SVGA3dCmdHeader header;
2773 uint32 defined_id;
2774 } *cmd;
2775 enum vmw_so_type so_type;
2776 int ret;
2777
2778 if (unlikely(ctx_node == NULL)) {
2779 DRM_ERROR("DX Context not set.\n");
2780 return -EINVAL;
2781 }
2782
2783 so_type = vmw_so_cmd_to_type(header->id);
2784 res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2785 cmd = container_of(header, typeof(*cmd), header);
2786 ret = vmw_cotable_notify(res, cmd->defined_id);
2787 vmw_resource_unreference(&res);
2788
2789 return ret;
2790}
2791
2792/**
2793 * vmw_cmd_dx_check_subresource - Validate an
2794 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2795 *
2796 * @dev_priv: Pointer to a device private struct.
2797 * @sw_context: The software context being used for this batch.
2798 * @header: Pointer to the command header in the command stream.
2799 */
2800static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2801 struct vmw_sw_context *sw_context,
2802 SVGA3dCmdHeader *header)
2803{
2804 struct {
2805 SVGA3dCmdHeader header;
2806 union {
2807 SVGA3dCmdDXReadbackSubResource r_body;
2808 SVGA3dCmdDXInvalidateSubResource i_body;
2809 SVGA3dCmdDXUpdateSubResource u_body;
2810 SVGA3dSurfaceId sid;
2811 };
2812 } *cmd;
2813
2814 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2815 offsetof(typeof(*cmd), sid));
2816 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2817 offsetof(typeof(*cmd), sid));
2818 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2819 offsetof(typeof(*cmd), sid));
2820
2821 cmd = container_of(header, typeof(*cmd), header);
2822
2823 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2824 user_surface_converter,
2825 &cmd->sid, NULL);
2826}
2827
2828static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2829 struct vmw_sw_context *sw_context,
2830 SVGA3dCmdHeader *header)
2831{
2832 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2833
2834 if (unlikely(ctx_node == NULL)) {
2835 DRM_ERROR("DX Context not set.\n");
2836 return -EINVAL;
2837 }
2838
2839 return 0;
2840}
2841
2842/**
2843 * vmw_cmd_dx_view_remove - validate a view remove command and
2844 * schedule the view resource for removal.
2845 *
2846 * @dev_priv: Pointer to a device private struct.
2847 * @sw_context: The software context being used for this batch.
2848 * @header: Pointer to the command header in the command stream.
2849 *
2850 * Check that the view exists, and if it was not created using this
2851 * command batch, make sure it's validated (present in the device) so that
2852 * the remove command will not confuse the device.
2853 */
2854static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2855 struct vmw_sw_context *sw_context,
2856 SVGA3dCmdHeader *header)
2857{
2858 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2859 struct {
2860 SVGA3dCmdHeader header;
2861 union vmw_view_destroy body;
2862 } *cmd = container_of(header, typeof(*cmd), header);
2863 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2864 struct vmw_resource *view;
2865 int ret;
2866
2867 if (!ctx_node) {
2868 DRM_ERROR("DX Context not set.\n");
2869 return -EINVAL;
2870 }
2871
2872 ret = vmw_view_remove(sw_context->man,
2873 cmd->body.view_id, view_type,
2874 &sw_context->staged_cmd_res,
2875 &view);
2876 if (ret || !view)
2877 return ret;
2878
2879 /*
2880 * Add view to the validate list iff it was not created using this
2881 * command batch.
2882 */
2883 return vmw_view_res_val_add(sw_context, view);
2884}
2885
2886/**
2887 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2888 * command
2889 *
2890 * @dev_priv: Pointer to a device private struct.
2891 * @sw_context: The software context being used for this batch.
2892 * @header: Pointer to the command header in the command stream.
2893 */
2894static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2895 struct vmw_sw_context *sw_context,
2896 SVGA3dCmdHeader *header)
2897{
2898 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2899 struct vmw_resource *res;
2900 struct {
2901 SVGA3dCmdHeader header;
2902 SVGA3dCmdDXDefineShader body;
2903 } *cmd = container_of(header, typeof(*cmd), header);
2904 int ret;
2905
2906 if (!ctx_node) {
2907 DRM_ERROR("DX Context not set.\n");
2908 return -EINVAL;
2909 }
2910
2911 res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2912 ret = vmw_cotable_notify(res, cmd->body.shaderId);
2913 vmw_resource_unreference(&res);
2914 if (ret)
2915 return ret;
2916
2917 return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2918 cmd->body.shaderId, cmd->body.type,
2919 &sw_context->staged_cmd_res);
2920}
2921
2922/**
2923 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2924 * command
2925 *
2926 * @dev_priv: Pointer to a device private struct.
2927 * @sw_context: The software context being used for this batch.
2928 * @header: Pointer to the command header in the command stream.
2929 */
2930static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2931 struct vmw_sw_context *sw_context,
2932 SVGA3dCmdHeader *header)
2933{
2934 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2935 struct {
2936 SVGA3dCmdHeader header;
2937 SVGA3dCmdDXDestroyShader body;
2938 } *cmd = container_of(header, typeof(*cmd), header);
2939 int ret;
2940
2941 if (!ctx_node) {
2942 DRM_ERROR("DX Context not set.\n");
2943 return -EINVAL;
2944 }
2945
2946 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2947 &sw_context->staged_cmd_res);
2948 if (ret)
2949 DRM_ERROR("Could not find shader to remove.\n");
2950
2951 return ret;
2952}
2953
2954/**
2955 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
2956 * command
2957 *
2958 * @dev_priv: Pointer to a device private struct.
2959 * @sw_context: The software context being used for this batch.
2960 * @header: Pointer to the command header in the command stream.
2961 */
2962static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2963 struct vmw_sw_context *sw_context,
2964 SVGA3dCmdHeader *header)
2965{
2966 struct vmw_resource_val_node *ctx_node;
2967 struct vmw_resource_val_node *res_node;
2968 struct vmw_resource *res;
2969 struct {
2970 SVGA3dCmdHeader header;
2971 SVGA3dCmdDXBindShader body;
2972 } *cmd = container_of(header, typeof(*cmd), header);
2973 int ret;
2974
2975 if (cmd->body.cid != SVGA3D_INVALID_ID) {
2976 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2977 user_context_converter,
2978 &cmd->body.cid, &ctx_node);
2979 if (ret)
2980 return ret;
2981 } else {
2982 ctx_node = sw_context->dx_ctx_node;
2983 if (!ctx_node) {
2984 DRM_ERROR("DX Context not set.\n");
2985 return -EINVAL;
2986 }
2987 }
2988
2989 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2990 cmd->body.shid, 0);
2991 if (IS_ERR(res)) {
2992 DRM_ERROR("Could not find shader to bind.\n");
2993 return PTR_ERR(res);
2994 }
2995
2996 ret = vmw_resource_val_add(sw_context, res, &res_node);
2997 if (ret) {
2998 DRM_ERROR("Error creating resource validation node.\n");
2999 goto out_unref;
3000 }
3001
3003 ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
3004 &cmd->body.mobid,
3005 cmd->body.offsetInBytes);
3006out_unref:
3007 vmw_resource_unreference(&res);
3008
3009 return ret;
3010}
3011
3012/**
3013 * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
3014 *
3015 * @dev_priv: Pointer to a device private struct.
3016 * @sw_context: The software context being used for this batch.
3017 * @header: Pointer to the command header in the command stream.
3018 */
3019static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
3020 struct vmw_sw_context *sw_context,
3021 SVGA3dCmdHeader *header)
3022{
3023 struct {
3024 SVGA3dCmdHeader header;
3025 SVGA3dCmdDXGenMips body;
3026 } *cmd = container_of(header, typeof(*cmd), header);
3027
3028 return vmw_view_id_val_add(sw_context, vmw_view_sr,
3029 cmd->body.shaderResourceViewId);
3030}
3031
3032static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3033 struct vmw_sw_context *sw_context,
3034 void *buf, uint32_t *size)
3035{
3036 uint32_t size_remaining = *size;
3037 uint32_t cmd_id;
3038
3039 cmd_id = ((uint32_t *)buf)[0];
3040 switch (cmd_id) {
3041 case SVGA_CMD_UPDATE:
3042 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3043 break;
3044 case SVGA_CMD_DEFINE_GMRFB:
3045 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3046 break;
3047 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3048 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3049 break;
3050 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3051		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3052 break;
3053 default:
3054 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
3055 return -EINVAL;
3056 }
3057
3058 if (*size > size_remaining) {
3059 DRM_ERROR("Invalid SVGA command (size mismatch):"
3060 " %u.\n", cmd_id);
3061 return -EINVAL;
3062 }
3063
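	/* These legacy SVGA commands are allowed from the kernel only. */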
3064 if (unlikely(!sw_context->kernel)) {
3065 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
3066 return -EPERM;
3067 }
3068
3069 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3070 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3071
3072 return 0;
3073}
3074
3075static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3076 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3077 false, false, false),
3078 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3079 false, false, false),
3080 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3081 true, false, false),
3082 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3083 true, false, false),
3084 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3085 true, false, false),
3086 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3087 false, false, false),
3088 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3089 false, false, false),
3090 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3091 true, false, false),
3092 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3093 true, false, false),
3094 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3095 true, false, false),
3096 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3097 &vmw_cmd_set_render_target_check, true, false, false),
3098 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3099 true, false, false),
3100 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3101 true, false, false),
3102 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3103 true, false, false),
3104 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3105 true, false, false),
3106 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3107 true, false, false),
3108 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3109 true, false, false),
3110 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3111 true, false, false),
3112 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3113 false, false, false),
3114 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3115 true, false, false),
3116 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3117 true, false, false),
3118 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3119 true, false, false),
3120 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3121 true, false, false),
3122 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3123 true, false, false),
3124 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3125 true, false, false),
3126 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3127 true, false, false),
3128 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3129 true, false, false),
3130 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3131 true, false, false),
3132 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3133 true, false, false),
3134 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3135 &vmw_cmd_blt_surf_screen_check, false, false, false),
3136 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3137 false, false, false),
3138 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3139 false, false, false),
3140 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3141 false, false, false),
3142 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3143 false, false, false),
3144 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3145 false, false, false),
3146 VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
3147 false, false, false),
3148 VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
3149 false, false, false),
3150 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
3151 false, false, false),
3152 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
3153 false, false, false),
3154 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
3155 false, false, false),
3156 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
3157 false, false, false),
3158 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
3159 false, false, false),
3160 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
3161 false, false, false),
3162 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3163 false, false, true),
3164 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3165 false, false, true),
3166 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3167 false, false, true),
3168 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3169 false, false, true),
3170 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3171 false, false, true),
3172 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3173 false, false, true),
3174 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3175 false, false, true),
3176 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3177 false, false, true),
3178 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3179 true, false, true),
3180 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3181 false, false, true),
3182 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3183 true, false, true),
3184 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3185 &vmw_cmd_update_gb_surface, true, false, true),
3186 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3187 &vmw_cmd_readback_gb_image, true, false, true),
3188 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3189 &vmw_cmd_readback_gb_surface, true, false, true),
3190 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3191 &vmw_cmd_invalidate_gb_image, true, false, true),
3192 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3193 &vmw_cmd_invalidate_gb_surface, true, false, true),
3194 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3195 false, false, true),
3196 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3197 false, false, true),
3198 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3199 false, false, true),
3200 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3201 false, false, true),
3202 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3203 false, false, true),
3204 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3205 false, false, true),
3206 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3207 true, false, true),
3208 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3209 false, false, true),
3210 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3211 false, false, false),
3212 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3213 true, false, true),
3214 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3215 true, false, true),
3216 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3217 true, false, true),
3218 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3219 true, false, true),
3220 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3221 false, false, true),
3222 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3223 false, false, true),
3224 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3225 false, false, true),
3226 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3227 false, false, true),
3228 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3229 false, false, true),
3230 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3231 false, false, true),
3232 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3233 false, false, true),
3234 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3235 false, false, true),
3236 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3237 false, false, true),
3238 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3239 false, false, true),
3240 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3241 true, false, true),
3242 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3243 false, false, true),
3244 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3245 false, false, true),
3246 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3247 false, false, true),
3248 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3249 false, false, true),
3250
3251 /*
3252 * DX commands
3253 */
3254 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3255 false, false, true),
3256 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3257 false, false, true),
3258 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3259 false, false, true),
3260 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3261 false, false, true),
3262 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3263 false, false, true),
3264 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3265 &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3266 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3267 &vmw_cmd_dx_set_shader_res, true, false, true),
3268 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3269 true, false, true),
3270 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3271 true, false, true),
3272 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3273 true, false, true),
3274 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3275 true, false, true),
3276 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3277 true, false, true),
3278 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3279 &vmw_cmd_dx_cid_check, true, false, true),
3280 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3281 true, false, true),
3282 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3283 &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3284 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3285 &vmw_cmd_dx_set_index_buffer, true, false, true),
3286 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3287 &vmw_cmd_dx_set_rendertargets, true, false, true),
3288 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3289 true, false, true),
3290 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3291 &vmw_cmd_dx_cid_check, true, false, true),
3292 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3293 &vmw_cmd_dx_cid_check, true, false, true),
3294 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3295 true, false, true),
3296 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3297 true, false, true),
3298 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3299 true, false, true),
3300 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3301 &vmw_cmd_dx_cid_check, true, false, true),
3302 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3303 true, false, true),
3304 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3305 true, false, true),
3306 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3307 true, false, true),
3308 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3309 true, false, true),
3310 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3311 true, false, true),
3312 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3313 true, false, true),
3314 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3315 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3316 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3317 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3318 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3319 true, false, true),
3320 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3321 true, false, true),
3322 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3323 &vmw_cmd_dx_check_subresource, true, false, true),
3324 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3325 &vmw_cmd_dx_check_subresource, true, false, true),
3326 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3327 &vmw_cmd_dx_check_subresource, true, false, true),
3328 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3329 &vmw_cmd_dx_view_define, true, false, true),
3330 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3331 &vmw_cmd_dx_view_remove, true, false, true),
3332 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3333 &vmw_cmd_dx_view_define, true, false, true),
3334 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3335 &vmw_cmd_dx_view_remove, true, false, true),
3336 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3337 &vmw_cmd_dx_view_define, true, false, true),
3338 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3339 &vmw_cmd_dx_view_remove, true, false, true),
3340 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3341 &vmw_cmd_dx_so_define, true, false, true),
3342 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3343 &vmw_cmd_dx_cid_check, true, false, true),
3344 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3345 &vmw_cmd_dx_so_define, true, false, true),
3346 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3347 &vmw_cmd_dx_cid_check, true, false, true),
3348 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3349 &vmw_cmd_dx_so_define, true, false, true),
3350 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3351 &vmw_cmd_dx_cid_check, true, false, true),
3352 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3353 &vmw_cmd_dx_so_define, true, false, true),
3354 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3355 &vmw_cmd_dx_cid_check, true, false, true),
3356 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3357 &vmw_cmd_dx_so_define, true, false, true),
3358 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3359 &vmw_cmd_dx_cid_check, true, false, true),
3360 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3361 &vmw_cmd_dx_define_shader, true, false, true),
3362 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3363 &vmw_cmd_dx_destroy_shader, true, false, true),
3364 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3365 &vmw_cmd_dx_bind_shader, true, false, true),
3366 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3367 &vmw_cmd_dx_so_define, true, false, true),
3368 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3369 &vmw_cmd_dx_cid_check, true, false, true),
3370 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3371 true, false, true),
3372 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3373 &vmw_cmd_dx_set_so_targets, true, false, true),
3374 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3375 &vmw_cmd_dx_cid_check, true, false, true),
3376 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3377 &vmw_cmd_dx_cid_check, true, false, true),
3378 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3379 &vmw_cmd_buffer_copy_check, true, false, true),
3380 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3381 &vmw_cmd_pred_copy_check, true, false, true),
3382};
3383
3384static int vmw_cmd_check(struct vmw_private *dev_priv,
3385 struct vmw_sw_context *sw_context,
3386 void *buf, uint32_t *size)
3387{
3388 uint32_t cmd_id;
3389 uint32_t size_remaining = *size;
3390 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3391 int ret;
3392 const struct vmw_cmd_entry *entry;
3393 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3394
3395 cmd_id = ((uint32_t *)buf)[0];
3396	/* Handle any non-3D commands. */
3397 if (unlikely(cmd_id < SVGA_CMD_MAX))
3398 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3399
3401 cmd_id = header->id;
3402 *size = header->size + sizeof(SVGA3dCmdHeader);
3403
3404 cmd_id -= SVGA_3D_CMD_BASE;
3405 if (unlikely(*size > size_remaining))
3406 goto out_invalid;
3407
3408 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3409 goto out_invalid;
3410
3411 entry = &vmw_cmd_entries[cmd_id];
3412 if (unlikely(!entry->func))
3413 goto out_invalid;
3414
3415 if (unlikely(!entry->user_allow && !sw_context->kernel))
3416 goto out_privileged;
3417
3418 if (unlikely(entry->gb_disable && gb))
3419 goto out_old;
3420
3421 if (unlikely(entry->gb_enable && !gb))
3422 goto out_new;
3423
3424 ret = entry->func(dev_priv, sw_context, header);
3425 if (unlikely(ret != 0))
3426 goto out_invalid;
3427
3428 return 0;
3429out_invalid:
3430 DRM_ERROR("Invalid SVGA3D command: %d\n",
3431 cmd_id + SVGA_3D_CMD_BASE);
3432 return -EINVAL;
3433out_privileged:
3434 DRM_ERROR("Privileged SVGA3D command: %d\n",
3435 cmd_id + SVGA_3D_CMD_BASE);
3436 return -EPERM;
3437out_old:
3438 DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3439 cmd_id + SVGA_3D_CMD_BASE);
3440 return -EINVAL;
3441out_new:
3442 DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3443 cmd_id + SVGA_3D_CMD_BASE);
3444 return -EINVAL;
3445}
3446
3447static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3448 struct vmw_sw_context *sw_context,
3449 void *buf,
3450 uint32_t size)
3451{
3452 int32_t cur_size = size;
3453 int ret;
3454
3455 sw_context->buf_start = buf;
3456
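	/* Verify the stream one command at a time until it is consumed. */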
3457 while (cur_size > 0) {
3458 size = cur_size;
3459 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3460 if (unlikely(ret != 0))
3461 return ret;
3462 buf = (void *)((unsigned long) buf + size);
3463 cur_size -= size;
3464 }
3465
3466 if (unlikely(cur_size != 0)) {
3467 DRM_ERROR("Command verifier out of sync.\n");
3468 return -EINVAL;
3469 }
3470
3471 return 0;
3472}
3473
3474static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3475{
3476 sw_context->cur_reloc = 0;
3477}
3478
3479static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3480{
3481 uint32_t i;
3482 struct vmw_relocation *reloc;
3483 struct ttm_validate_buffer *validate;
3484 struct ttm_buffer_object *bo;
3485
3486 for (i = 0; i < sw_context->cur_reloc; ++i) {
3487 reloc = &sw_context->relocs[i];
3488 validate = &sw_context->val_bufs[reloc->index].base;
3489 bo = validate->bo;
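		/* Patch the command stream with the buffer's final placement. */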
3490 switch (bo->mem.mem_type) {
3491 case TTM_PL_VRAM:
3492 reloc->location->offset += bo->offset;
3493 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3494 break;
3495 case VMW_PL_GMR:
3496 reloc->location->gmrId = bo->mem.start;
3497 break;
3498 case VMW_PL_MOB:
3499 *reloc->mob_loc = bo->mem.start;
3500 break;
3501 default:
3502 BUG();
3503 }
3504 }
3505 vmw_free_relocations(sw_context);
3506}
3507
3508/**
3509 * vmw_resource_list_unreference - Free up a resource list and unreference
3510 * all resources referenced by it.
3511 *
3512 * @list: The resource list.
3513 */
3514static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3515 struct list_head *list)
3516{
3517 struct vmw_resource_val_node *val, *val_next;
3518
3519 /*
3520 * Drop references to resources held during command submission.
3521 */
3522
3523 list_for_each_entry_safe(val, val_next, list, head) {
3524 list_del_init(&val->head);
3525 vmw_resource_unreference(&val->res);
3526
3527 if (val->staged_bindings) {
3528 if (val->staged_bindings != sw_context->staged_bindings)
3529 vmw_binding_state_free(val->staged_bindings);
3530 else
3531 sw_context->staged_bindings_inuse = false;
3532 val->staged_bindings = NULL;
3533 }
3534
3535 kfree(val);
3536 }
3537}
3538
3539static void vmw_clear_validations(struct vmw_sw_context *sw_context)
3540{
3541 struct vmw_validate_buffer *entry, *next;
3542 struct vmw_resource_val_node *val;
3543
3544 /*
3545 * Drop references to DMA buffers held during command submission.
3546 */
3547 list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
3548 base.head) {
3549 list_del(&entry->base.head);
3550 ttm_bo_unref(&entry->base.bo);
3551 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
3552 sw_context->cur_val_buf--;
3553 }
3554 BUG_ON(sw_context->cur_val_buf != 0);
3555
3556 list_for_each_entry(val, &sw_context->resource_list, head)
3557 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
3558}
3559
3560int vmw_validate_single_buffer(struct vmw_private *dev_priv,
3561 struct ttm_buffer_object *bo,
3562 bool interruptible,
3563 bool validate_as_mob)
3564{
3565 struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
3566 base);
3567 int ret;
3568
3569 if (vbo->pin_count > 0)
3570 return 0;
3571
3572 if (validate_as_mob)
3573 return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
3574 false);
3575
3576 /*
3577 * Put BO in VRAM if there is space, otherwise as a GMR.
3578 * If there is no space in VRAM and GMR ids are all used up,
3579 * start evicting GMRs to make room. If the DMA buffer can't be
3580 * used as a GMR, this will return -ENOMEM.
3581 */
3582
3583 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
3584 false);
3585 if (likely(ret == 0 || ret == -ERESTARTSYS))
3586 return ret;
3587
3588 /*
3589 * If that failed, try VRAM again, this time evicting
3590 * previous contents.
3591 */
3592
3593 ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
3594 return ret;
3595}
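
/*
 * Editor's note: the placement fallback above, summarized as a sketch. The
 * placement names are the driver's; the ordering is what the code implements:
 *
 *	if (vbo->pin_count > 0)          -> accept the current placement
 *	else if (validate_as_mob)        -> vmw_mob_placement only
 *	else try vmw_vram_gmr_placement  -> VRAM if it fits, otherwise a GMR
 *	else try vmw_vram_placement      -> VRAM, evicting previous contents
 *
 * -ERESTARTSYS from the first attempt is returned as-is so an interrupted
 * ioctl can be restarted instead of forcing an eviction pass.
 */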
3596
3597static int vmw_validate_buffers(struct vmw_private *dev_priv,
3598 struct vmw_sw_context *sw_context)
3599{
3600 struct vmw_validate_buffer *entry;
3601 int ret;
3602
3603 list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
3604 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
3605 true,
3606 entry->validate_as_mob);
3607 if (unlikely(ret != 0))
3608 return ret;
3609 }
3610 return 0;
3611}
3612
3613static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3614 uint32_t size)
3615{
3616 if (likely(sw_context->cmd_bounce_size >= size))
3617 return 0;
3618
3619 if (sw_context->cmd_bounce_size == 0)
3620 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3621
3622 while (sw_context->cmd_bounce_size < size) {
3623 sw_context->cmd_bounce_size =
3624 PAGE_ALIGN(sw_context->cmd_bounce_size +
3625 (sw_context->cmd_bounce_size >> 1));
3626 }
3627
3628 if (sw_context->cmd_bounce != NULL)
3629 vfree(sw_context->cmd_bounce);
3630
3631 sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3632
3633 if (sw_context->cmd_bounce == NULL) {
3634 DRM_ERROR("Failed to allocate command bounce buffer.\n");
3635 sw_context->cmd_bounce_size = 0;
3636 return -ENOMEM;
3637 }
3638
3639 return 0;
3640}
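
/*
 * Editor's note: a worked example of the growth policy above, assuming
 * VMWGFX_CMD_BOUNCE_INIT_SIZE is 32 KiB (the actual value lives in
 * vmwgfx_drv.h). Each step grows the size by roughly 1.5x and page-aligns it:
 *
 *	32768 -> 49152 -> 73728 -> 110592 -> ...
 *
 * so a 100 KiB user batch ends up with a 110592-byte bounce buffer after
 * three growth steps, allocated with vmalloc().
 */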
3641
3642/**
3643 * vmw_execbuf_fence_commands - create and submit a command stream fence
3644 *
3645 * Creates a fence object and submits a command stream marker.
3646 * If this fails for some reason, we sync the fifo and set @p_fence to NULL.
3647 * It is then safe to fence buffers with a NULL pointer.
3648 *
3649 * A user-space fence handle is created only if @p_handle is non-NULL, in
3650 * which case @file_priv must also be non-NULL.
3651 */
3652
3653int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3654 struct vmw_private *dev_priv,
3655 struct vmw_fence_obj **p_fence,
3656 uint32_t *p_handle)
3657{
3658 uint32_t sequence;
3659 int ret;
3660 bool synced = false;
3661
3662 /* p_handle implies file_priv. */
3663 BUG_ON(p_handle != NULL && file_priv == NULL);
3664
3665 ret = vmw_fifo_send_fence(dev_priv, &sequence);
3666 if (unlikely(ret != 0)) {
3667 DRM_ERROR("Fence submission error. Syncing.\n");
3668 synced = true;
3669 }
3670
3671 if (p_handle != NULL)
3672 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3673 sequence, p_fence, p_handle);
3674 else
3675 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3676
3677 if (unlikely(ret != 0 && !synced)) {
3678 (void) vmw_fallback_wait(dev_priv, false, false,
3679 sequence, false,
3680 VMW_FENCE_WAIT_TIMEOUT);
3681 *p_fence = NULL;
3682 }
3683
3684 return 0;
3685}
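
/*
 * Editor's sketch (illustrative, not driver code): typical use of the helper
 * above, mirroring vmw_execbuf_process() further down. A NULL fence is legal
 * because the fencing helpers accept it once the fifo has been synced:
 *
 *	struct vmw_fence_obj *fence = NULL;
 *	uint32_t handle;
 *
 *	(void) vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
 *					  user_fence_rep ? &handle : NULL);
 *	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
 *	if (fence)
 *		vmw_fence_obj_unreference(&fence);
 */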
3686
3687/**
3688 * vmw_execbuf_copy_fence_user - copy fence object information to
3689 * user-space.
3690 *
3691 * @dev_priv: Pointer to a vmw_private struct.
3692 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3693 * @ret: Return value from fence object creation.
3694 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
3695 * which the information should be copied.
3696 * @fence: Pointer to the fence object.
3697 * @fence_handle: User-space fence handle.
3698 *
3699 * This function copies fence information to user-space. If copying fails,
3700 * the user-space struct drm_vmw_fence_rep::error member is hopefully left
3701 * untouched, and if user-space has preloaded it with -EFAULT, the error
3702 * will be detected.
3703 * Also, if copying fails, user-space will be unable to signal the fence
3704 * object, so we wait for it immediately and then unreference the
3705 * user-space reference.
3706 */
3707void
3708vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3709 struct vmw_fpriv *vmw_fp,
3710 int ret,
3711 struct drm_vmw_fence_rep __user *user_fence_rep,
3712 struct vmw_fence_obj *fence,
3713 uint32_t fence_handle)
3714{
3715 struct drm_vmw_fence_rep fence_rep;
3716
3717 if (user_fence_rep == NULL)
3718 return;
3719
3720 memset(&fence_rep, 0, sizeof(fence_rep));
3721
3722 fence_rep.error = ret;
3723 if (ret == 0) {
3724 BUG_ON(fence == NULL);
3725
3726 fence_rep.handle = fence_handle;
3727 fence_rep.seqno = fence->base.seqno;
3728 vmw_update_seqno(dev_priv, &dev_priv->fifo);
3729 fence_rep.passed_seqno = dev_priv->last_read_seqno;
3730 }
3731
3732 /*
3733 * copy_to_user errors will be detected by user space not
3734 * seeing fence_rep::error filled in. Typically
3735 * user-space would have pre-set that member to -EFAULT.
3736 */
3737 ret = copy_to_user(user_fence_rep, &fence_rep,
3738 sizeof(fence_rep));
3739
3740 /*
3741 * User-space lost the fence object. We need to sync
3742 * and unreference the handle.
3743 */
3744 if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3745 ttm_ref_object_base_unref(vmw_fp->tfile,
3746 fence_handle, TTM_REF_USAGE);
3747 DRM_ERROR("Fence copy error. Syncing.\n");
3748 (void) vmw_fence_obj_wait(fence, false, false,
3749 VMW_FENCE_WAIT_TIMEOUT);
3750 }
3751}
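
/*
 * Editor's sketch (illustrative, not driver code): the user-space convention
 * the comment above relies on. The struct and ioctl come from the drm_vmw
 * UAPI; handle_missing_fence() is a hypothetical client-side fallback:
 *
 *	struct drm_vmw_fence_rep rep;
 *
 *	rep.error = -EFAULT;            // pre-set in case the kernel copy fails
 *	arg.fence_rep = (unsigned long) &rep;
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (rep.error)                  // still -EFAULT, or a creation error
 *		handle_missing_fence(); // e.g. fall back to a full sync
 */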
3752
3753/**
3754 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
3755 * the fifo.
3756 *
3757 * @dev_priv: Pointer to a device private structure.
3758 * @kernel_commands: Pointer to the unpatched command batch.
3759 * @command_size: Size of the unpatched command batch.
3760 * @sw_context: Structure holding the relocation lists.
3761 *
3762 * Side effects: If this function returns 0, then the command batch
3763 * pointed to by @kernel_commands will have been modified.
3764 */
3765static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3766 void *kernel_commands,
3767 u32 command_size,
3768 struct vmw_sw_context *sw_context)
3769{
3770 void *cmd;
3771
3772 if (sw_context->dx_ctx_node)
3773 cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
3774 sw_context->dx_ctx_node->res->id);
3775 else
3776 cmd = vmw_fifo_reserve(dev_priv, command_size);
3777 if (!cmd) {
3778 DRM_ERROR("Failed reserving fifo space for commands.\n");
3779 return -ENOMEM;
3780 }
3781
3782 vmw_apply_relocations(sw_context);
3783 memcpy(cmd, kernel_commands, command_size);
3784 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3785 vmw_resource_relocations_free(&sw_context->res_relocations);
3786 vmw_fifo_commit(dev_priv, command_size);
3787
3788 return 0;
3789}
3790
3791/**
3792 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
3793 * the command buffer manager.
3794 *
3795 * @dev_priv: Pointer to a device private structure.
3796 * @header: Opaque handle to the command buffer allocation.
3797 * @command_size: Size of the unpatched command batch.
3798 * @sw_context: Structure holding the relocation lists.
3799 *
3800 * Side effects: If this function returns 0, then the command buffer
3801 * represented by @header will have been modified.
3802 */
3803static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3804 struct vmw_cmdbuf_header *header,
3805 u32 command_size,
3806 struct vmw_sw_context *sw_context)
3807{
3808 u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
3809 SVGA3D_INVALID_ID);
3810 void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
3811 id, false, header);
3812
 if (IS_ERR(cmd)) {
 DRM_ERROR("Failed reserving cmdbuf space for commands.\n");
 return PTR_ERR(cmd);
 }

3813 vmw_apply_relocations(sw_context);
3814 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3815 vmw_resource_relocations_free(&sw_context->res_relocations);
3816 vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3817
3818 return 0;
3819}
3820
3821/**
3822 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3823 * submission using a command buffer.
3824 *
3825 * @dev_priv: Pointer to a device private structure.
3826 * @user_commands: User-space pointer to the commands to be submitted.
3827 * @command_size: Size of the unpatched command batch.
3828 * @header: Out parameter returning the opaque pointer to the command buffer.
3829 *
3830 * This function checks whether we can use the command buffer manager for
3831 * submission and if so, creates a command buffer of suitable size and
3832 * copies the user data into that buffer.
3833 *
3834 * On successful return, the function returns a pointer to the data in the
3835 * command buffer and *@header is set to non-NULL.
3836 * If command buffers could not be used, the function will return the value
3837 * of @kernel_commands on function call. That value may be NULL. In that case,
3838 * the value of *@header will be set to NULL.
3839 * If an error is encountered, the function will return a pointer error value.
3840 * If the function is interrupted by a signal while sleeping, it will return
3841 * -ERESTARTSYS casted to a pointer error value.
3842 */
3843static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3844 void __user *user_commands,
3845 void *kernel_commands,
3846 u32 command_size,
3847 struct vmw_cmdbuf_header **header)
3848{
3849 size_t cmdbuf_size;
3850 int ret;
3851
3852 *header = NULL;
3853 if (!dev_priv->cman || kernel_commands)
3854 return kernel_commands;
3855
3856 if (command_size > SVGA_CB_MAX_SIZE) {
3857 DRM_ERROR("Command buffer is too large.\n");
3858 return ERR_PTR(-EINVAL);
3859 }
3860
3861 /* If possible, add a little space for fencing. */
3862 cmdbuf_size = command_size + 512;
3863 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3864 kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
3865 true, header);
3866 if (IS_ERR(kernel_commands))
3867 return kernel_commands;
3868
3869 ret = copy_from_user(kernel_commands, user_commands,
3870 command_size);
3871 if (ret) {
3872 DRM_ERROR("Failed copying commands.\n");
3873 vmw_cmdbuf_header_free(*header);
3874 *header = NULL;
3875 return ERR_PTR(-EFAULT);
3876 }
3877
3878 return kernel_commands;
3879}
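
/*
 * Editor's sketch (illustrative, not driver code): how a caller consumes the
 * three outcomes documented above, mirroring vmw_execbuf_process():
 *
 *	struct vmw_cmdbuf_header *header;
 *	void *cmds;
 *
 *	cmds = vmw_execbuf_cmdbuf(dev_priv, user_commands, kernel_commands,
 *				  command_size, &header);
 *	if (IS_ERR(cmds))
 *		return PTR_ERR(cmds);   // includes -ERESTARTSYS
 *	// header != NULL means the batch already lives in a command buffer
 *	// and must either be submitted or freed with vmw_cmdbuf_header_free().
 */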
3880
3881static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3882 struct vmw_sw_context *sw_context,
3883 uint32_t handle)
3884{
3885 struct vmw_resource_val_node *ctx_node;
3886 struct vmw_resource *res;
3887 int ret;
3888
3889 if (handle == SVGA3D_INVALID_ID)
3890 return 0;
3891
3892 ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
3893 handle, user_context_converter,
3894 &res);
3895 if (unlikely(ret != 0)) {
3896 DRM_ERROR("Could not find or user DX context 0x%08x.\n",
3897 (unsigned) handle);
3898 return ret;
3899 }
3900
3901 ret = vmw_resource_val_add(sw_context, res, &ctx_node);
3902 if (unlikely(ret != 0))
3903 goto out_err;
3904
3905 sw_context->dx_ctx_node = ctx_node;
3906 sw_context->man = vmw_context_res_man(res);
3907out_err:
3908 vmw_resource_unreference(&res);
3909 return ret;
3910}
3911
3912int vmw_execbuf_process(struct drm_file *file_priv,
3913 struct vmw_private *dev_priv,
3914 void __user *user_commands,
3915 void *kernel_commands,
3916 uint32_t command_size,
3917 uint64_t throttle_us,
3918 uint32_t dx_context_handle,
3919 struct drm_vmw_fence_rep __user *user_fence_rep,
3920 struct vmw_fence_obj **out_fence)
3921{
3922 struct vmw_sw_context *sw_context = &dev_priv->ctx;
3923 struct vmw_fence_obj *fence = NULL;
3924 struct vmw_resource *error_resource;
3925 struct list_head resource_list;
3926 struct vmw_cmdbuf_header *header;
3927 struct ww_acquire_ctx ticket;
3928 uint32_t handle;
3929 int ret;
3930
3931 if (throttle_us) {
3932 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
3933 throttle_us);
3934
3935 if (ret)
3936 return ret;
3937 }
3938
3939 kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
3940 kernel_commands, command_size,
3941 &header);
3942 if (IS_ERR(kernel_commands))
3943 return PTR_ERR(kernel_commands);
3944
3945 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
3946 if (ret) {
3947 ret = -ERESTARTSYS;
3948 goto out_free_header;
3949 }
3950
3951 sw_context->kernel = false;
3952 if (kernel_commands == NULL) {
3953 ret = vmw_resize_cmd_bounce(sw_context, command_size);
3954 if (unlikely(ret != 0))
3955 goto out_unlock;
3956
3958 ret = copy_from_user(sw_context->cmd_bounce,
3959 user_commands, command_size);
3960
3961 if (unlikely(ret != 0)) {
3962 ret = -EFAULT;
3963 DRM_ERROR("Failed copying commands.\n");
3964 goto out_unlock;
3965 }
3966 kernel_commands = sw_context->cmd_bounce;
3967 } else if (!header)
3968 sw_context->kernel = true;
3969
3970 sw_context->fp = vmw_fpriv(file_priv);
3971 sw_context->cur_reloc = 0;
3972 sw_context->cur_val_buf = 0;
3973 INIT_LIST_HEAD(&sw_context->resource_list);
3974 INIT_LIST_HEAD(&sw_context->ctx_resource_list);
3975 sw_context->cur_query_bo = dev_priv->pinned_bo;
3976 sw_context->last_query_ctx = NULL;
3977 sw_context->needs_post_query_barrier = false;
3978 sw_context->dx_ctx_node = NULL;
3979 sw_context->dx_query_mob = NULL;
3980 sw_context->dx_query_ctx = NULL;
3981 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
3982 INIT_LIST_HEAD(&sw_context->validate_nodes);
3983 INIT_LIST_HEAD(&sw_context->res_relocations);
3984 if (sw_context->staged_bindings)
3985 vmw_binding_state_reset(sw_context->staged_bindings);
3986
3987 if (!sw_context->res_ht_initialized) {
3988 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
3989 if (unlikely(ret != 0))
3990 goto out_unlock;
3991 sw_context->res_ht_initialized = true;
3992 }
3993 INIT_LIST_HEAD(&sw_context->staged_cmd_res);
3994 INIT_LIST_HEAD(&resource_list);
3995 ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
3996 if (unlikely(ret != 0)) {
3997 list_splice_init(&sw_context->ctx_resource_list,
3998 &sw_context->resource_list);
3999 goto out_err_nores;
4000 }
4001
4002 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4003 command_size);
4004 /*
4005 * Merge the resource lists before checking the return status
4006 * from vmw_cmd_check_all so that all the open hash tables will
4007 * be handled properly even if vmw_cmd_check_all fails.
4008 */
4009 list_splice_init(&sw_context->ctx_resource_list,
4010 &sw_context->resource_list);
4011
4012 if (unlikely(ret != 0))
4013 goto out_err_nores;
4014
4015 ret = vmw_resources_reserve(sw_context);
4016 if (unlikely(ret != 0))
4017 goto out_err_nores;
4018
4019 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
4020 true, NULL);
4021 if (unlikely(ret != 0))
4022 goto out_err_nores;
4023
4024 ret = vmw_validate_buffers(dev_priv, sw_context);
4025 if (unlikely(ret != 0))
4026 goto out_err;
4027
4028 ret = vmw_resources_validate(sw_context);
4029 if (unlikely(ret != 0))
4030 goto out_err;
4031
4032 ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4033 if (unlikely(ret != 0)) {
4034 ret = -ERESTARTSYS;
4035 goto out_err;
4036 }
4037
4038 if (dev_priv->has_mob) {
4039 ret = vmw_rebind_contexts(sw_context);
4040 if (unlikely(ret != 0))
4041 goto out_unlock_binding;
4042 }
4043
4044 if (!header) {
4045 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4046 command_size, sw_context);
4047 } else {
4048 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4049 sw_context);
4050 header = NULL;
4051 }
4052 mutex_unlock(&dev_priv->binding_mutex);
4053 if (ret)
4054 goto out_err;
4055
4056 vmw_query_bo_switch_commit(dev_priv, sw_context);
4057 ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
4058 &fence,
4059 (user_fence_rep) ? &handle : NULL);
4060 /*
4061 * This error is harmless, because if fence submission fails,
4062 * vmw_fifo_send_fence will sync. The error will be propagated to
4063 * user-space in @user_fence_rep.
4064 */
4065
4066 if (ret != 0)
4067 DRM_ERROR("Fence submission error. Syncing.\n");
4068
4069 vmw_resources_unreserve(sw_context, false);
4070
4071 ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
4072 (void *) fence);
4073
4074 if (unlikely(dev_priv->pinned_bo != NULL &&
4075 !dev_priv->query_cid_valid))
4076 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
4077
4078 vmw_clear_validations(sw_context);
4079 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4080 user_fence_rep, fence, handle);
4081
4082 /* Don't unreference when handing fence out */
4083 if (unlikely(out_fence != NULL)) {
4084 *out_fence = fence;
4085 fence = NULL;
4086 } else if (likely(fence != NULL)) {
4087 vmw_fence_obj_unreference(&fence);
4088 }
4089
4090 list_splice_init(&sw_context->resource_list, &resource_list);
4091 vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4092 mutex_unlock(&dev_priv->cmdbuf_mutex);
4093
4094 /*
4095 * Unreference resources outside of the cmdbuf_mutex to
4096 * avoid deadlocks in resource destruction paths.
4097 */
4098 vmw_resource_list_unreference(sw_context, &resource_list);
4099
4100 return 0;
4101
4102out_unlock_binding:
4103 mutex_unlock(&dev_priv->binding_mutex);
4104out_err:
4105 ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
4106out_err_nores:
4107 vmw_resources_unreserve(sw_context, true);
4108 vmw_resource_relocations_free(&sw_context->res_relocations);
4109 vmw_free_relocations(sw_context);
4110 vmw_clear_validations(sw_context);
4111 if (unlikely(dev_priv->pinned_bo != NULL &&
4112 !dev_priv->query_cid_valid))
4113 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4114out_unlock:
4115 list_splice_init(&sw_context->resource_list, &resource_list);
4116 error_resource = sw_context->error_resource;
4117 sw_context->error_resource = NULL;
4118 vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4119 mutex_unlock(&dev_priv->cmdbuf_mutex);
4120
4121 /*
4122 * Unreference resources outside of the cmdbuf_mutex to
4123 * avoid deadlocks in resource destruction paths.
4124 */
4125 vmw_resource_list_unreference(sw_context, &resource_list);
4126 if (unlikely(error_resource != NULL))
4127 vmw_resource_unreference(&error_resource);
4128out_free_header:
4129 if (header)
4130 vmw_cmdbuf_header_free(header);
4131
4132 return ret;
4133}
4134
4135/**
4136 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4137 *
4138 * @dev_priv: The device private structure.
4139 *
4140 * This function is called to idle the fifo and unpin the query buffer
4141 * if the normal way to do this hits an error, which should typically be
4142 * extremely rare.
4143 */
4144static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4145{
4146 DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
4147
4148 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4149 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4150 if (dev_priv->dummy_query_bo_pinned) {
4151 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4152 dev_priv->dummy_query_bo_pinned = false;
4153 }
4154}
4155
4156
4157/**
4158 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4159 * query bo.
4160 *
4161 * @dev_priv: The device private structure.
4162 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
4163 * _after_ a query barrier that flushes all queries touching the current
4164 * buffer pointed to by @dev_priv->pinned_bo
4165 *
4166 * This function should be used to unpin the pinned query bo, or
4167 * as a query barrier when we need to make sure that all queries have
4168 * finished before the next fifo command. (For example on hardware
4169 * context destructions where the hardware may otherwise leak unfinished
4170 * queries).
4171 *
4172 * This function does not return any failure codes, but will attempt to do
4173 * safe unpinning in case of errors.
4174 *
4175 * The function will synchronize on the previous query barrier, and will
4176 * thus not finish until that barrier has executed.
4177 *
4178 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
4179 * before calling this function.
4180 */
4181void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4182 struct vmw_fence_obj *fence)
4183{
4184 int ret = 0;
4185 struct list_head validate_list;
4186 struct ttm_validate_buffer pinned_val, query_val;
4187 struct vmw_fence_obj *lfence = NULL;
4188 struct ww_acquire_ctx ticket;
4189
4190 if (dev_priv->pinned_bo == NULL)
4191 goto out_unlock;
4192
4193 INIT_LIST_HEAD(&validate_list);
4194
4195 pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
4196 pinned_val.shared = false;
4197 list_add_tail(&pinned_val.head, &validate_list);
4198
4199 query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
4200 query_val.shared = false;
4201 list_add_tail(&query_val.head, &validate_list);
4202
4203 ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
4204 false, NULL);
4205 if (unlikely(ret != 0)) {
4206 vmw_execbuf_unpin_panic(dev_priv);
4207 goto out_no_reserve;
4208 }
4209
4210 if (dev_priv->query_cid_valid) {
4211 BUG_ON(fence != NULL);
4212 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
4213 if (unlikely(ret != 0)) {
4214 vmw_execbuf_unpin_panic(dev_priv);
4215 goto out_no_emit;
4216 }
4217 dev_priv->query_cid_valid = false;
4218 }
4219
4220 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4221 if (dev_priv->dummy_query_bo_pinned) {
4222 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4223 dev_priv->dummy_query_bo_pinned = false;
4224 }
4225 if (fence == NULL) {
4226 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4227 NULL);
4228 fence = lfence;
4229 }
4230 ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
4231 if (lfence != NULL)
4232 vmw_fence_obj_unreference(&lfence);
4233
4234 ttm_bo_unref(&query_val.bo);
4235 ttm_bo_unref(&pinned_val.bo);
4236 vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4237 DRM_INFO("Dummy query bo pin count: %d\n",
4238 dev_priv->dummy_query_bo->pin_count);
4239
4240out_unlock:
4241 return;
4242
4243out_no_emit:
4244 ttm_eu_backoff_reservation(&ticket, &validate_list);
4245out_no_reserve:
4246 ttm_bo_unref(&query_val.bo);
4247 ttm_bo_unref(&pinned_val.bo);
4248 vmw_dmabuf_unreference(&dev_priv->pinned_bo);
4249}
4250
4251/**
4252 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
4253 * query bo.
4254 *
4255 * @dev_priv: The device private structure.
4256 *
4257 * This function should be used to unpin the pinned query bo, or
4258 * as a query barrier when we need to make sure that all queries have
4259 * finished before the next fifo command. (For example on hardware
4260 * context destructions where the hardware may otherwise leak unfinished
4261 * queries).
4262 *
4263 * This function does not return any failure codes, but will attempt to do
4264 * safe unpinning in case of errors.
4265 *
4266 * The function will synchronize on the previous query barrier, and will
4267 * thus not finish until that barrier has executed.
4268 */
4269void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4270{
4271 mutex_lock(&dev_priv->cmdbuf_mutex);
4272 if (dev_priv->query_cid_valid)
4273 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4274 mutex_unlock(&dev_priv->cmdbuf_mutex);
4275}
4276
4277int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
4278 struct drm_file *file_priv, size_t size)
4279{
4280 struct vmw_private *dev_priv = vmw_priv(dev);
4281 struct drm_vmw_execbuf_arg arg;
4282 int ret;
4283 static const size_t copy_offset[] = {
4284 offsetof(struct drm_vmw_execbuf_arg, context_handle),
4285 sizeof(struct drm_vmw_execbuf_arg)};
4286
4287 if (unlikely(size < copy_offset[0])) {
4288 DRM_ERROR("Invalid command size, ioctl %d\n",
4289 DRM_VMW_EXECBUF);
4290 return -EINVAL;
4291 }
4292
4293 if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
4294 return -EFAULT;
4295
4296 /*
4297 * Extend the ioctl argument while
4298 * maintaining backwards compatibility:
4299 * We take different code paths depending on the value of
4300 * arg.version.
4301 */
4302
4303 if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
4304 arg.version == 0)) {
4305 DRM_ERROR("Incorrect execbuf version.\n");
4306 return -EINVAL;
4307 }
4308
4309 if (arg.version > 1 &&
4310 copy_from_user(&arg.context_handle,
4311 (void __user *) (data + copy_offset[0]),
4312 copy_offset[arg.version - 1] -
4313 copy_offset[0]) != 0)
4314 return -EFAULT;
4315
4316 switch (arg.version) {
4317 case 1:
4318 arg.context_handle = (uint32_t) -1;
4319 break;
4320 case 2:
4321 if (arg.pad64 != 0) {
4322 DRM_ERROR("Unused IOCTL data not set to zero.\n");
4323 return -EINVAL;
4324 }
4325 break;
4326 default:
4327 break;
4328 }
4329
4330 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
4331 if (unlikely(ret != 0))
4332 return ret;
4333
4334 ret = vmw_execbuf_process(file_priv, dev_priv,
4335 (void __user *)(unsigned long)arg.commands,
4336 NULL, arg.command_size, arg.throttle_us,
4337 arg.context_handle,
4338 (void __user *)(unsigned long)arg.fence_rep,
4339 NULL);
4340 ttm_read_unlock(&dev_priv->reservation_sem);
4341 if (unlikely(ret != 0))
4342 return ret;
4343
4344 vmw_kms_cursor_post_execbuf(dev_priv);
4345
4346 return 0;
4347}
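
/*
 * Editor's note: a worked example of the versioned argument copy above. With
 * DRM_VMW_EXECBUF_VERSION >= 2, a version-2 struct drm_vmw_execbuf_arg is
 * fetched in two steps: everything up to (but not including) context_handle
 * first, then the remaining members:
 *
 *	copy_offset[0] = offsetof(struct drm_vmw_execbuf_arg, context_handle);
 *	copy_offset[1] = sizeof(struct drm_vmw_execbuf_arg);
 *
 * A version-1 client only provides the first chunk, so context_handle is
 * forced to (uint32_t) -1 (SVGA3D_INVALID_ID) and vmw_execbuf_tie_context()
 * treats the submission as a non-DX one.
 */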
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include "vmwgfx_reg.h"
30#include <drm/ttm/ttm_bo_api.h>
31#include <drm/ttm/ttm_placement.h>
32
33#define VMW_RES_HT_ORDER 12
34
35/**
36 * struct vmw_resource_relocation - Relocation info for resources
37 *
38 * @head: List head for the software context's relocation list.
39 * @res: Non-ref-counted pointer to the resource.
40 * @offset: Offset of 4 byte entries into the command buffer where the
41 * id that needs fixup is located.
42 */
43struct vmw_resource_relocation {
44 struct list_head head;
45 const struct vmw_resource *res;
46 unsigned long offset;
47};
48
49/**
50 * struct vmw_resource_val_node - Validation info for resources
51 *
52 * @head: List head for the software context's resource list.
53 * @hash: Hash entry for quick resource to val_node lookup.
54 * @res: Ref-counted pointer to the resource.
55 * @switch_backup: Boolean whether to switch backup buffer on unreserve.
56 * @new_backup: Refcounted pointer to the new backup buffer.
57 * @staged_bindings: If @res is a context, tracks bindings set up during
58 * the command batch. Otherwise NULL.
59 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
60 * @first_usage: Set to true the first time the resource is referenced in
61 * the command stream.
62 * @no_buffer_needed: The resource does not need a backup buffer allocated at
63 * reservation time; the command stream will provide one.
64 */
65struct vmw_resource_val_node {
66 struct list_head head;
67 struct drm_hash_item hash;
68 struct vmw_resource *res;
69 struct vmw_dma_buffer *new_backup;
70 struct vmw_ctx_binding_state *staged_bindings;
71 unsigned long new_backup_offset;
72 bool first_usage;
73 bool no_buffer_needed;
74};
75
76/**
77 * struct vmw_cmd_entry - Describe a command for the verifier
78 *
79 * @user_allow: Whether allowed from the execbuf ioctl.
80 * @gb_disable: Whether disabled if guest-backed objects are available.
81 * @gb_enable: Whether enabled iff guest-backed objects are available.
82 */
83struct vmw_cmd_entry {
84 int (*func) (struct vmw_private *, struct vmw_sw_context *,
85 SVGA3dCmdHeader *);
86 bool user_allow;
87 bool gb_disable;
88 bool gb_enable;
89};
90
91#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
92 [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
93 (_gb_disable), (_gb_enable)}
94
95/**
96 * vmw_resource_list_unreserve - Unreserve resources previously reserved for
97 * command submission.
98 *
99 * @list: List of resources to unreserve.
100 * @backoff: Whether command submission failed.
101 */
102static void vmw_resource_list_unreserve(struct list_head *list,
103 bool backoff)
104{
105 struct vmw_resource_val_node *val;
106
107 list_for_each_entry(val, list, head) {
108 struct vmw_resource *res = val->res;
109 struct vmw_dma_buffer *new_backup =
110 backoff ? NULL : val->new_backup;
111
112 /*
113 * Transfer staged context bindings to the
114 * persistent context binding tracker.
115 */
116 if (unlikely(val->staged_bindings)) {
117 if (!backoff) {
118 vmw_context_binding_state_transfer
119 (val->res, val->staged_bindings);
120 }
121 kfree(val->staged_bindings);
122 val->staged_bindings = NULL;
123 }
124 vmw_resource_unreserve(res, new_backup,
125 val->new_backup_offset);
126 vmw_dmabuf_unreference(&val->new_backup);
127 }
128}
129
130
131/**
132 * vmw_resource_val_add - Add a resource to the software context's
133 * resource list if it's not already on it.
134 *
135 * @sw_context: Pointer to the software context.
136 * @res: Pointer to the resource.
137 * @p_node: On successful return points to a valid pointer to a
138 * struct vmw_resource_val_node, if non-NULL on entry.
139 */
140static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
141 struct vmw_resource *res,
142 struct vmw_resource_val_node **p_node)
143{
144 struct vmw_resource_val_node *node;
145 struct drm_hash_item *hash;
146 int ret;
147
148 if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
149 &hash) == 0)) {
150 node = container_of(hash, struct vmw_resource_val_node, hash);
151 node->first_usage = false;
152 if (unlikely(p_node != NULL))
153 *p_node = node;
154 return 0;
155 }
156
157 node = kzalloc(sizeof(*node), GFP_KERNEL);
158 if (unlikely(node == NULL)) {
159 DRM_ERROR("Failed to allocate a resource validation "
160 "entry.\n");
161 return -ENOMEM;
162 }
163
164 node->hash.key = (unsigned long) res;
165 ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
166 if (unlikely(ret != 0)) {
167 DRM_ERROR("Failed to initialize a resource validation "
168 "entry.\n");
169 kfree(node);
170 return ret;
171 }
172 list_add_tail(&node->head, &sw_context->resource_list);
173 node->res = vmw_resource_reference(res);
174 node->first_usage = true;
175
176 if (unlikely(p_node != NULL))
177 *p_node = node;
178
179 return 0;
180}
181
182/**
183 * vmw_resource_context_res_add - Put resources previously bound to a context on
184 * the validation list
185 *
186 * @dev_priv: Pointer to a device private structure
187 * @sw_context: Pointer to a software context used for this command submission
188 * @ctx: Pointer to the context resource
189 *
190 * This function puts all resources that were previously bound to @ctx on
191 * the resource validation list. This is part of the context state reemission.
192 */
193static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
194 struct vmw_sw_context *sw_context,
195 struct vmw_resource *ctx)
196{
197 struct list_head *binding_list;
198 struct vmw_ctx_binding *entry;
199 int ret = 0;
200 struct vmw_resource *res;
201
202 mutex_lock(&dev_priv->binding_mutex);
203 binding_list = vmw_context_binding_list(ctx);
204
205 list_for_each_entry(entry, binding_list, ctx_list) {
206 res = vmw_resource_reference_unless_doomed(entry->bi.res);
207 if (unlikely(res == NULL))
208 continue;
209
210 ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
211 vmw_resource_unreference(&res);
212 if (unlikely(ret != 0))
213 break;
214 }
215
216 mutex_unlock(&dev_priv->binding_mutex);
217 return ret;
218}
219
220/**
221 * vmw_resource_relocation_add - Add a relocation to the relocation list
222 *
223 * @list: Pointer to head of relocation list.
224 * @res: The resource.
225 * @offset: Offset into the command buffer currently being parsed where the
226 * id that needs fixup is located. Granularity is 4 bytes.
227 */
228static int vmw_resource_relocation_add(struct list_head *list,
229 const struct vmw_resource *res,
230 unsigned long offset)
231{
232 struct vmw_resource_relocation *rel;
233
234 rel = kmalloc(sizeof(*rel), GFP_KERNEL);
235 if (unlikely(rel == NULL)) {
236 DRM_ERROR("Failed to allocate a resource relocation.\n");
237 return -ENOMEM;
238 }
239
240 rel->res = res;
241 rel->offset = offset;
242 list_add_tail(&rel->head, list);
243
244 return 0;
245}
246
247/**
248 * vmw_resource_relocations_free - Free all relocations on a list
249 *
250 * @list: Pointer to the head of the relocation list.
251 */
252static void vmw_resource_relocations_free(struct list_head *list)
253{
254 struct vmw_resource_relocation *rel, *n;
255
256 list_for_each_entry_safe(rel, n, list, head) {
257 list_del(&rel->head);
258 kfree(rel);
259 }
260}
261
262/**
263 * vmw_resource_relocations_apply - Apply all relocations on a list
264 *
265 * @cb: Pointer to the start of the command buffer being patched. This need
266 * not be the same buffer as the one being parsed when the relocation
267 * list was built, but the contents must be the same modulo the
268 * resource ids.
269 * @list: Pointer to the head of the relocation list.
270 */
271static void vmw_resource_relocations_apply(uint32_t *cb,
272 struct list_head *list)
273{
274 struct vmw_resource_relocation *rel;
275
276 list_for_each_entry(rel, list, head) {
277 if (likely(rel->res != NULL))
278 cb[rel->offset] = rel->res->id;
279 else
280 cb[rel->offset] = SVGA_3D_CMD_NOP;
281 }
282}
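
/*
 * Editor's sketch (illustrative, not driver code): the record-then-patch flow
 * the helpers above implement. While parsing, a command checker records where
 * a user-visible id sits in the batch; at submission time that slot is
 * rewritten with the device id the resource ended up with:
 *
 *	// parse time: id_loc points at e.g. cmd->body.cid inside the batch
 *	vmw_resource_relocation_add(&sw_context->res_relocations, res,
 *				    id_loc - sw_context->buf_start);
 *
 *	// submit time, on the copy actually sent to the device
 *	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
 *	vmw_resource_relocations_free(&sw_context->res_relocations);
 *
 * A relocation with a NULL resource is patched to SVGA_3D_CMD_NOP instead.
 */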
283
284static int vmw_cmd_invalid(struct vmw_private *dev_priv,
285 struct vmw_sw_context *sw_context,
286 SVGA3dCmdHeader *header)
287{
288 return capable(CAP_SYS_ADMIN) ? : -EINVAL;
289}
290
291static int vmw_cmd_ok(struct vmw_private *dev_priv,
292 struct vmw_sw_context *sw_context,
293 SVGA3dCmdHeader *header)
294{
295 return 0;
296}
297
298/**
299 * vmw_bo_to_validate_list - add a bo to a validate list
300 *
301 * @sw_context: The software context used for this command submission batch.
302 * @bo: The buffer object to add.
303 * @validate_as_mob: Validate this buffer as a MOB.
304 * @p_val_node: If non-NULL, will be updated with the validate node number
305 * on return.
306 *
307 * Returns -EINVAL if the limit of number of buffer objects per command
308 * submission is reached.
309 */
310static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
311 struct ttm_buffer_object *bo,
312 bool validate_as_mob,
313 uint32_t *p_val_node)
314{
315 uint32_t val_node;
316 struct vmw_validate_buffer *vval_buf;
317 struct ttm_validate_buffer *val_buf;
318 struct drm_hash_item *hash;
319 int ret;
320
321 if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
322 &hash) == 0)) {
323 vval_buf = container_of(hash, struct vmw_validate_buffer,
324 hash);
325 if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
326 DRM_ERROR("Inconsistent buffer usage.\n");
327 return -EINVAL;
328 }
329 val_buf = &vval_buf->base;
330 val_node = vval_buf - sw_context->val_bufs;
331 } else {
332 val_node = sw_context->cur_val_buf;
333 if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
334 DRM_ERROR("Max number of DMA buffers per submission "
335 "exceeded.\n");
336 return -EINVAL;
337 }
338 vval_buf = &sw_context->val_bufs[val_node];
339 vval_buf->hash.key = (unsigned long) bo;
340 ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
341 if (unlikely(ret != 0)) {
342 DRM_ERROR("Failed to initialize a buffer validation "
343 "entry.\n");
344 return ret;
345 }
346 ++sw_context->cur_val_buf;
347 val_buf = &vval_buf->base;
348 val_buf->bo = ttm_bo_reference(bo);
349 val_buf->reserved = false;
350 list_add_tail(&val_buf->head, &sw_context->validate_nodes);
351 vval_buf->validate_as_mob = validate_as_mob;
352 }
353
354 sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
355
356 if (p_val_node)
357 *p_val_node = val_node;
358
359 return 0;
360}
361
362/**
363 * vmw_resources_reserve - Reserve all resources on the sw_context's
364 * resource list.
365 *
366 * @sw_context: Pointer to the software context.
367 *
368 * Note that since VMware's command submission is currently protected by
369 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
370 * because only a single thread at once will attempt this.
371 */
372static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
373{
374 struct vmw_resource_val_node *val;
375 int ret;
376
377 list_for_each_entry(val, &sw_context->resource_list, head) {
378 struct vmw_resource *res = val->res;
379
380 ret = vmw_resource_reserve(res, val->no_buffer_needed);
381 if (unlikely(ret != 0))
382 return ret;
383
384 if (res->backup) {
385 struct ttm_buffer_object *bo = &res->backup->base;
386
387 ret = vmw_bo_to_validate_list
388 (sw_context, bo,
389 vmw_resource_needs_backup(res), NULL);
390
391 if (unlikely(ret != 0))
392 return ret;
393 }
394 }
395 return 0;
396}
397
398/**
399 * vmw_resources_validate - Validate all resources on the sw_context's
400 * resource list.
401 *
402 * @sw_context: Pointer to the software context.
403 *
404 * Before this function is called, all resource backup buffers must have
405 * been validated.
406 */
407static int vmw_resources_validate(struct vmw_sw_context *sw_context)
408{
409 struct vmw_resource_val_node *val;
410 int ret;
411
412 list_for_each_entry(val, &sw_context->resource_list, head) {
413 struct vmw_resource *res = val->res;
414
415 ret = vmw_resource_validate(res);
416 if (unlikely(ret != 0)) {
417 if (ret != -ERESTARTSYS)
418 DRM_ERROR("Failed to validate resource.\n");
419 return ret;
420 }
421 }
422 return 0;
423}
424
425/**
426 * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
427 * on the resource validate list unless it's already there.
428 *
429 * @dev_priv: Pointer to a device private structure.
430 * @sw_context: Pointer to the software context.
431 * @res_type: Resource type.
432 * @converter: User-space visible type-specific information.
433 * @id: user-space resource id handle.
434 * @id_loc: Pointer to the location in the command buffer currently being
435 * parsed from where the user-space resource id handle is located.
436 * @p_val: Pointer to pointer to resource validation node. Populated
437 * on exit.
438 */
439static int
440vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
441 struct vmw_sw_context *sw_context,
442 enum vmw_res_type res_type,
443 const struct vmw_user_resource_conv *converter,
444 uint32_t id,
445 uint32_t *id_loc,
446 struct vmw_resource_val_node **p_val)
447{
448 struct vmw_res_cache_entry *rcache =
449 &sw_context->res_cache[res_type];
450 struct vmw_resource *res;
451 struct vmw_resource_val_node *node;
452 int ret;
453
454 if (id == SVGA3D_INVALID_ID) {
455 if (p_val)
456 *p_val = NULL;
457 if (res_type == vmw_res_context) {
458 DRM_ERROR("Illegal context invalid id.\n");
459 return -EINVAL;
460 }
461 return 0;
462 }
463
464 /*
465 * Fastpath in case of repeated commands referencing the same
466 * resource
467 */
468
469 if (likely(rcache->valid && id == rcache->handle)) {
470 const struct vmw_resource *res = rcache->res;
471
472 rcache->node->first_usage = false;
473 if (p_val)
474 *p_val = rcache->node;
475
476 return vmw_resource_relocation_add
477 (&sw_context->res_relocations, res,
478 id_loc - sw_context->buf_start);
479 }
480
481 ret = vmw_user_resource_lookup_handle(dev_priv,
482 sw_context->fp->tfile,
483 id,
484 converter,
485 &res);
486 if (unlikely(ret != 0)) {
487 DRM_ERROR("Could not find or use resource 0x%08x.\n",
488 (unsigned) id);
489 dump_stack();
490 return ret;
491 }
492
493 rcache->valid = true;
494 rcache->res = res;
495 rcache->handle = id;
496
497 ret = vmw_resource_relocation_add(&sw_context->res_relocations,
498 res,
499 id_loc - sw_context->buf_start);
500 if (unlikely(ret != 0))
501 goto out_no_reloc;
502
503 ret = vmw_resource_val_add(sw_context, res, &node);
504 if (unlikely(ret != 0))
505 goto out_no_reloc;
506
507 rcache->node = node;
508 if (p_val)
509 *p_val = node;
510
511 if (dev_priv->has_mob && node->first_usage &&
512 res_type == vmw_res_context) {
513 ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
514 if (unlikely(ret != 0))
515 goto out_no_reloc;
516 node->staged_bindings =
517 kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
518 if (node->staged_bindings == NULL) {
519 DRM_ERROR("Failed to allocate context binding "
520 "information.\n");
521 ret = -ENOMEM;
 goto out_no_reloc;
522 }
523 INIT_LIST_HEAD(&node->staged_bindings->list);
524 }
525
526 vmw_resource_unreference(&res);
527 return 0;
528
529out_no_reloc:
530 BUG_ON(sw_context->error_resource != NULL);
531 sw_context->error_resource = res;
532
533 return ret;
534}
535
536/**
537 * vmw_cmd_res_check - Check that a resource is present and if so, put it
538 * on the resource validate list unless it's already there.
539 *
540 * @dev_priv: Pointer to a device private structure.
541 * @sw_context: Pointer to the software context.
542 * @res_type: Resource type.
543 * @converter: User-space visible type-specific information.
544 * @id_loc: Pointer to the location in the command buffer currently being
545 * parsed from where the user-space resource id handle is located.
546 * @p_val: Pointer to pointer to resource validation node. Populated
547 * on exit.
548 */
549static int
550vmw_cmd_res_check(struct vmw_private *dev_priv,
551 struct vmw_sw_context *sw_context,
552 enum vmw_res_type res_type,
553 const struct vmw_user_resource_conv *converter,
554 uint32_t *id_loc,
555 struct vmw_resource_val_node **p_val)
556{
557 return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
558 converter, *id_loc, id_loc, p_val);
559}
560
561/**
562 * vmw_rebind_contexts - Rebind all resources previously bound to
563 * referenced contexts.
564 *
565 * @sw_context: Pointer to the software context.
566 *
567 * Rebind context binding points that have been scrubbed because of eviction.
568 */
569static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
570{
571 struct vmw_resource_val_node *val;
572 int ret;
573
574 list_for_each_entry(val, &sw_context->resource_list, head) {
575 if (likely(!val->staged_bindings))
576 continue;
577
578 ret = vmw_context_rebind_all(val->res);
579 if (unlikely(ret != 0)) {
580 if (ret != -ERESTARTSYS)
581 DRM_ERROR("Failed to rebind context.\n");
582 return ret;
583 }
584 }
585
586 return 0;
587}
588
589/**
590 * vmw_cmd_cid_check - Check a command header for valid context information.
591 *
592 * @dev_priv: Pointer to a device private structure.
593 * @sw_context: Pointer to the software context.
594 * @header: A command header with an embedded user-space context handle.
595 *
596 * Convenience function: Call vmw_cmd_res_check with the user-space context
597 * handle embedded in @header.
598 */
599static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
600 struct vmw_sw_context *sw_context,
601 SVGA3dCmdHeader *header)
602{
603 struct vmw_cid_cmd {
604 SVGA3dCmdHeader header;
605 uint32_t cid;
606 } *cmd;
607
608 cmd = container_of(header, struct vmw_cid_cmd, header);
609 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
610 user_context_converter, &cmd->cid, NULL);
611}
612
613static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
614 struct vmw_sw_context *sw_context,
615 SVGA3dCmdHeader *header)
616{
617 struct vmw_sid_cmd {
618 SVGA3dCmdHeader header;
619 SVGA3dCmdSetRenderTarget body;
620 } *cmd;
621 struct vmw_resource_val_node *ctx_node;
622 struct vmw_resource_val_node *res_node;
623 int ret;
624
625 cmd = container_of(header, struct vmw_sid_cmd, header);
626
627 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
628 user_context_converter, &cmd->body.cid,
629 &ctx_node);
630 if (unlikely(ret != 0))
631 return ret;
632
633 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
634 user_surface_converter,
635 &cmd->body.target.sid, &res_node);
636 if (unlikely(ret != 0))
637 return ret;
638
639 if (dev_priv->has_mob) {
640 struct vmw_ctx_bindinfo bi;
641
642 bi.ctx = ctx_node->res;
643 bi.res = res_node ? res_node->res : NULL;
644 bi.bt = vmw_ctx_binding_rt;
645 bi.i1.rt_type = cmd->body.type;
646 return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
647 }
648
649 return 0;
650}
651
652static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
653 struct vmw_sw_context *sw_context,
654 SVGA3dCmdHeader *header)
655{
656 struct vmw_sid_cmd {
657 SVGA3dCmdHeader header;
658 SVGA3dCmdSurfaceCopy body;
659 } *cmd;
660 int ret;
661
662 cmd = container_of(header, struct vmw_sid_cmd, header);
663 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
664 user_surface_converter,
665 &cmd->body.src.sid, NULL);
666 if (unlikely(ret != 0))
667 return ret;
668 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
669 user_surface_converter,
670 &cmd->body.dest.sid, NULL);
671}
672
673static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
674 struct vmw_sw_context *sw_context,
675 SVGA3dCmdHeader *header)
676{
677 struct vmw_sid_cmd {
678 SVGA3dCmdHeader header;
679 SVGA3dCmdSurfaceStretchBlt body;
680 } *cmd;
681 int ret;
682
683 cmd = container_of(header, struct vmw_sid_cmd, header);
684 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
685 user_surface_converter,
686 &cmd->body.src.sid, NULL);
687 if (unlikely(ret != 0))
688 return ret;
689 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
690 user_surface_converter,
691 &cmd->body.dest.sid, NULL);
692}
693
694static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
695 struct vmw_sw_context *sw_context,
696 SVGA3dCmdHeader *header)
697{
698 struct vmw_sid_cmd {
699 SVGA3dCmdHeader header;
700 SVGA3dCmdBlitSurfaceToScreen body;
701 } *cmd;
702
703 cmd = container_of(header, struct vmw_sid_cmd, header);
704
705 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
706 user_surface_converter,
707 &cmd->body.srcImage.sid, NULL);
708}
709
710static int vmw_cmd_present_check(struct vmw_private *dev_priv,
711 struct vmw_sw_context *sw_context,
712 SVGA3dCmdHeader *header)
713{
714 struct vmw_sid_cmd {
715 SVGA3dCmdHeader header;
716 SVGA3dCmdPresent body;
717 } *cmd;
718
720 cmd = container_of(header, struct vmw_sid_cmd, header);
721
722 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
723 user_surface_converter, &cmd->body.sid,
724 NULL);
725}
726
727/**
728 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
729 *
730 * @dev_priv: The device private structure.
731 * @new_query_bo: The new buffer holding query results.
732 * @sw_context: The software context used for this command submission.
733 *
734 * This function checks whether @new_query_bo is suitable for holding
735 * query results, and if another buffer currently is pinned for query
736 * results. If so, the function prepares the state of @sw_context for
737 * switching pinned buffers after successful submission of the current
738 * command batch.
739 */
740static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
741 struct ttm_buffer_object *new_query_bo,
742 struct vmw_sw_context *sw_context)
743{
744 struct vmw_res_cache_entry *ctx_entry =
745 &sw_context->res_cache[vmw_res_context];
746 int ret;
747
748 BUG_ON(!ctx_entry->valid);
749 sw_context->last_query_ctx = ctx_entry->res;
750
751 if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
752
753 if (unlikely(new_query_bo->num_pages > 4)) {
754 DRM_ERROR("Query buffer too large.\n");
755 return -EINVAL;
756 }
757
758 if (unlikely(sw_context->cur_query_bo != NULL)) {
759 sw_context->needs_post_query_barrier = true;
760 ret = vmw_bo_to_validate_list(sw_context,
761 sw_context->cur_query_bo,
762 dev_priv->has_mob, NULL);
763 if (unlikely(ret != 0))
764 return ret;
765 }
766 sw_context->cur_query_bo = new_query_bo;
767
768 ret = vmw_bo_to_validate_list(sw_context,
769 dev_priv->dummy_query_bo,
770 dev_priv->has_mob, NULL);
771 if (unlikely(ret != 0))
772 return ret;
773
774 }
775
776 return 0;
777}
778
779
780/**
781 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
782 *
783 * @dev_priv: The device private structure.
784 * @sw_context: The software context used for this command submission batch.
785 *
786 * This function will check if we're switching query buffers, and will then,
787 * issue a dummy occlusion query wait used as a query barrier. When the fence
788 * object following that query wait has signaled, we are sure that all
789 * preceding queries have finished, and the old query buffer can be unpinned.
790 * However, since both the new query buffer and the old one are fenced with
791 * that fence, we can do an asynchronous unpin now, and be sure that the
792 * old query buffer won't be moved until the fence has signaled.
793 *
794 * As mentioned above, both the new and the old query buffers need to be fenced
795 * using a sequence emitted *after* calling this function.
796 */
797static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
798 struct vmw_sw_context *sw_context)
799{
800 /*
801 * The validate list should still hold references to all
802 * contexts here.
803 */
804
805 if (sw_context->needs_post_query_barrier) {
806 struct vmw_res_cache_entry *ctx_entry =
807 &sw_context->res_cache[vmw_res_context];
808 struct vmw_resource *ctx;
809 int ret;
810
811 BUG_ON(!ctx_entry->valid);
812 ctx = ctx_entry->res;
813
814 ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
815
816 if (unlikely(ret != 0))
817 DRM_ERROR("Out of fifo space for dummy query.\n");
818 }
819
820 if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
821 if (dev_priv->pinned_bo) {
822 vmw_bo_pin(dev_priv->pinned_bo, false);
823 ttm_bo_unref(&dev_priv->pinned_bo);
824 }
825
826 if (!sw_context->needs_post_query_barrier) {
827 vmw_bo_pin(sw_context->cur_query_bo, true);
828
829 /*
830 * We pin also the dummy_query_bo buffer so that we
831 * don't need to validate it when emitting
832 * dummy queries in context destroy paths.
833 */
834
835 vmw_bo_pin(dev_priv->dummy_query_bo, true);
836 dev_priv->dummy_query_bo_pinned = true;
837
838 BUG_ON(sw_context->last_query_ctx == NULL);
839 dev_priv->query_cid = sw_context->last_query_ctx->id;
840 dev_priv->query_cid_valid = true;
841 dev_priv->pinned_bo =
842 ttm_bo_reference(sw_context->cur_query_bo);
843 }
844 }
845}
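
/*
 * Editor's note: the ordering the two query-switch helpers rely on, written
 * out as the sequence vmw_execbuf_process() follows:
 *
 *	vmw_query_bo_switch_prepare()   // while parsing SVGA_3D_CMD_END(_GB)_QUERY
 *	... submit the patched batch ...
 *	vmw_query_bo_switch_commit()    // emits the dummy query barrier if needed
 *	vmw_execbuf_fence_commands()    // one fence covering old and new query BOs
 *	ttm_eu_fence_buffer_objects()   // both BOs now wait on that fence
 *
 * Only after that fence signals may the previously pinned query buffer move,
 * which is what makes the asynchronous unpin in the commit step safe.
 */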
846
847/**
848 * vmw_translate_mob_pointer - Prepare to translate a user-space buffer
849 * handle to a MOB id.
850 *
851 * @dev_priv: Pointer to a device private structure.
852 * @sw_context: The software context used for this command batch validation.
853 * @id: Pointer to the user-space handle to be translated.
854 * @vmw_bo_p: Points to a location that, on successful return will carry
855 * a reference-counted pointer to the DMA buffer identified by the
856 * user-space handle in @id.
857 *
858 * This function saves information needed to translate a user-space buffer
859 * handle to a MOB id. The translation does not take place immediately, but
860 * during a call to vmw_apply_relocations(). This function builds a relocation
861 * list and a list of buffers to validate. The former needs to be freed using
862 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
863 * needs to be freed using vmw_clear_validations.
864 */
865static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
866 struct vmw_sw_context *sw_context,
867 SVGAMobId *id,
868 struct vmw_dma_buffer **vmw_bo_p)
869{
870 struct vmw_dma_buffer *vmw_bo = NULL;
871 struct ttm_buffer_object *bo;
872 uint32_t handle = *id;
873 struct vmw_relocation *reloc;
874 int ret;
875
876 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
877 if (unlikely(ret != 0)) {
878 DRM_ERROR("Could not find or use MOB buffer.\n");
879 return -EINVAL;
880 }
881 bo = &vmw_bo->base;
882
883 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
884 DRM_ERROR("Max number relocations per submission"
885 " exceeded\n");
886 ret = -EINVAL;
887 goto out_no_reloc;
888 }
889
890 reloc = &sw_context->relocs[sw_context->cur_reloc++];
891 reloc->mob_loc = id;
892 reloc->location = NULL;
893
894 ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
895 if (unlikely(ret != 0))
896 goto out_no_reloc;
897
898 *vmw_bo_p = vmw_bo;
899 return 0;
900
901out_no_reloc:
902 vmw_dmabuf_unreference(&vmw_bo);
903 *vmw_bo_p = NULL;
904 return ret;
905}
906
907/**
908 * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
909 * handle to a valid SVGAGuestPtr
910 *
911 * @dev_priv: Pointer to a device private structure.
912 * @sw_context: The software context used for this command batch validation.
913 * @ptr: Pointer to the user-space handle to be translated.
914 * @vmw_bo_p: Points to a location that, on successful return will carry
915 * a reference-counted pointer to the DMA buffer identified by the
916 * user-space handle in @id.
917 *
918 * This function saves information needed to translate a user-space buffer
919 * handle to a valid SVGAGuestPtr. The translation does not take place
920 * immediately, but during a call to vmw_apply_relocations().
921 * This function builds a relocation list and a list of buffers to validate.
922 * The former needs to be freed using either vmw_apply_relocations() or
923 * vmw_free_relocations(). The latter needs to be freed using
924 * vmw_clear_validations.
925 */
926static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
927 struct vmw_sw_context *sw_context,
928 SVGAGuestPtr *ptr,
929 struct vmw_dma_buffer **vmw_bo_p)
930{
931 struct vmw_dma_buffer *vmw_bo = NULL;
932 struct ttm_buffer_object *bo;
933 uint32_t handle = ptr->gmrId;
934 struct vmw_relocation *reloc;
935 int ret;
936
937 ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
938 if (unlikely(ret != 0)) {
939 DRM_ERROR("Could not find or use GMR region.\n");
940 return -EINVAL;
941 }
942 bo = &vmw_bo->base;
943
944 if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
945 DRM_ERROR("Max number relocations per submission"
946 " exceeded\n");
947 ret = -EINVAL;
948 goto out_no_reloc;
949 }
950
951 reloc = &sw_context->relocs[sw_context->cur_reloc++];
952 reloc->location = ptr;
953
954 ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
955 if (unlikely(ret != 0))
956 goto out_no_reloc;
957
958 *vmw_bo_p = vmw_bo;
959 return 0;
960
961out_no_reloc:
962 vmw_dmabuf_unreference(&vmw_bo);
963 *vmw_bo_p = NULL;
964 return ret;
965}
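
/*
 * Editor's sketch (illustrative, not driver code): what the two translate
 * helpers above defer to vmw_apply_relocations(). The cmd->body.guest.ptr
 * field below is a hypothetical stand-in for wherever the SVGAGuestPtr sits
 * in a real command:
 *
 *	// parse time: remember where the guest pointer lives
 *	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
 *				      &cmd->body.guest.ptr, &vmw_bo);
 *
 *	// apply time, depending on where the BO was finally placed:
 *	//   VRAM:  ptr->gmrId = SVGA_GMR_FRAMEBUFFER; ptr->offset += bo->offset
 *	//   GMR:   ptr->gmrId = bo->mem.start
 *	//   MOB:   *reloc->mob_loc = bo->mem.start (via vmw_translate_mob_ptr)
 */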
966
967/**
968 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
969 *
970 * @dev_priv: Pointer to a device private struct.
971 * @sw_context: The software context used for this command submission.
972 * @header: Pointer to the command header in the command stream.
973 */
974static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
975 struct vmw_sw_context *sw_context,
976 SVGA3dCmdHeader *header)
977{
978 struct vmw_begin_gb_query_cmd {
979 SVGA3dCmdHeader header;
980 SVGA3dCmdBeginGBQuery q;
981 } *cmd;
982
983 cmd = container_of(header, struct vmw_begin_gb_query_cmd,
984 header);
985
986 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
987 user_context_converter, &cmd->q.cid,
988 NULL);
989}
990
991/**
992 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
993 *
994 * @dev_priv: Pointer to a device private struct.
995 * @sw_context: The software context used for this command submission.
996 * @header: Pointer to the command header in the command stream.
997 */
998static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
999 struct vmw_sw_context *sw_context,
1000 SVGA3dCmdHeader *header)
1001{
1002 struct vmw_begin_query_cmd {
1003 SVGA3dCmdHeader header;
1004 SVGA3dCmdBeginQuery q;
1005 } *cmd;
1006
1007 cmd = container_of(header, struct vmw_begin_query_cmd,
1008 header);
1009
1010 if (unlikely(dev_priv->has_mob)) {
1011 struct {
1012 SVGA3dCmdHeader header;
1013 SVGA3dCmdBeginGBQuery q;
1014 } gb_cmd;
1015
1016 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1017
1018 gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
1019 gb_cmd.header.size = cmd->header.size;
1020 gb_cmd.q.cid = cmd->q.cid;
1021 gb_cmd.q.type = cmd->q.type;
1022
1023 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1024 return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
1025 }
1026
1027 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1028 user_context_converter, &cmd->q.cid,
1029 NULL);
1030}
1031
1032/**
1033 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
1034 *
1035 * @dev_priv: Pointer to a device private struct.
1036 * @sw_context: The software context used for this command submission.
1037 * @header: Pointer to the command header in the command stream.
1038 */
1039static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
1040 struct vmw_sw_context *sw_context,
1041 SVGA3dCmdHeader *header)
1042{
1043 struct vmw_dma_buffer *vmw_bo;
1044 struct vmw_query_cmd {
1045 SVGA3dCmdHeader header;
1046 SVGA3dCmdEndGBQuery q;
1047 } *cmd;
1048 int ret;
1049
1050 cmd = container_of(header, struct vmw_query_cmd, header);
1051 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1052 if (unlikely(ret != 0))
1053 return ret;
1054
1055 ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1056 &cmd->q.mobid,
1057 &vmw_bo);
1058 if (unlikely(ret != 0))
1059 return ret;
1060
1061 ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
1062
1063 vmw_dmabuf_unreference(&vmw_bo);
1064 return ret;
1065}
1066
1067/**
1068 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
1069 *
1070 * @dev_priv: Pointer to a device private struct.
1071 * @sw_context: The software context used for this command submission.
1072 * @header: Pointer to the command header in the command stream.
1073 */
1074static int vmw_cmd_end_query(struct vmw_private *dev_priv,
1075 struct vmw_sw_context *sw_context,
1076 SVGA3dCmdHeader *header)
1077{
1078 struct vmw_dma_buffer *vmw_bo;
1079 struct vmw_query_cmd {
1080 SVGA3dCmdHeader header;
1081 SVGA3dCmdEndQuery q;
1082 } *cmd;
1083 int ret;
1084
1085 cmd = container_of(header, struct vmw_query_cmd, header);
1086 if (dev_priv->has_mob) {
1087 struct {
1088 SVGA3dCmdHeader header;
1089 SVGA3dCmdEndGBQuery q;
1090 } gb_cmd;
1091
1092 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1093
1094 gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
1095 gb_cmd.header.size = cmd->header.size;
1096 gb_cmd.q.cid = cmd->q.cid;
1097 gb_cmd.q.type = cmd->q.type;
1098 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1099 gb_cmd.q.offset = cmd->q.guestResult.offset;
1100
1101 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1102 return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
1103 }
1104
1105 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1106 if (unlikely(ret != 0))
1107 return ret;
1108
1109 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1110 &cmd->q.guestResult,
1111 &vmw_bo);
1112 if (unlikely(ret != 0))
1113 return ret;
1114
1115 ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);
1116
1117 vmw_dmabuf_unreference(&vmw_bo);
1118 return ret;
1119}
1120
1121/**
1122 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
1123 *
1124 * @dev_priv: Pointer to a device private struct.
1125 * @sw_context: The software context used for this command submission.
1126 * @header: Pointer to the command header in the command stream.
1127 */
1128static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
1129 struct vmw_sw_context *sw_context,
1130 SVGA3dCmdHeader *header)
1131{
1132 struct vmw_dma_buffer *vmw_bo;
1133 struct vmw_query_cmd {
1134 SVGA3dCmdHeader header;
1135 SVGA3dCmdWaitForGBQuery q;
1136 } *cmd;
1137 int ret;
1138
1139 cmd = container_of(header, struct vmw_query_cmd, header);
1140 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1141 if (unlikely(ret != 0))
1142 return ret;
1143
1144 ret = vmw_translate_mob_ptr(dev_priv, sw_context,
1145 &cmd->q.mobid,
1146 &vmw_bo);
1147 if (unlikely(ret != 0))
1148 return ret;
1149
1150 vmw_dmabuf_unreference(&vmw_bo);
1151 return 0;
1152}
1153
1154/**
1155 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
1156 *
1157 * @dev_priv: Pointer to a device private struct.
1158 * @sw_context: The software context used for this command submission.
1159 * @header: Pointer to the command header in the command stream.
1160 */
1161static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
1162 struct vmw_sw_context *sw_context,
1163 SVGA3dCmdHeader *header)
1164{
1165 struct vmw_dma_buffer *vmw_bo;
1166 struct vmw_query_cmd {
1167 SVGA3dCmdHeader header;
1168 SVGA3dCmdWaitForQuery q;
1169 } *cmd;
1170 int ret;
1171
1172 cmd = container_of(header, struct vmw_query_cmd, header);
1173 if (dev_priv->has_mob) {
1174 struct {
1175 SVGA3dCmdHeader header;
1176 SVGA3dCmdWaitForGBQuery q;
1177 } gb_cmd;
1178
1179 BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
1180
1181 gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
1182 gb_cmd.header.size = cmd->header.size;
1183 gb_cmd.q.cid = cmd->q.cid;
1184 gb_cmd.q.type = cmd->q.type;
1185 gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
1186 gb_cmd.q.offset = cmd->q.guestResult.offset;
1187
1188 memcpy(cmd, &gb_cmd, sizeof(*cmd));
1189 return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
1190 }
1191
1192 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1193 if (unlikely(ret != 0))
1194 return ret;
1195
1196 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1197 &cmd->q.guestResult,
1198 &vmw_bo);
1199 if (unlikely(ret != 0))
1200 return ret;
1201
1202 vmw_dmabuf_unreference(&vmw_bo);
1203 return 0;
1204}
1205
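/**
 * vmw_cmd_dma - Validate an SVGA_3D_CMD_SURFACE_DMA command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the DMA suffix, translates the guest pointer to the backing
 * buffer, clamps the transfer so it cannot reach past the end of that
 * buffer, validates the destination surface and lets the kms code snoop
 * cursor contents.
 */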
1206static int vmw_cmd_dma(struct vmw_private *dev_priv,
1207 struct vmw_sw_context *sw_context,
1208 SVGA3dCmdHeader *header)
1209{
1210 struct vmw_dma_buffer *vmw_bo = NULL;
1211 struct vmw_surface *srf = NULL;
1212 struct vmw_dma_cmd {
1213 SVGA3dCmdHeader header;
1214 SVGA3dCmdSurfaceDMA dma;
1215 } *cmd;
1216 int ret;
1217 SVGA3dCmdSurfaceDMASuffix *suffix;
1218 uint32_t bo_size;
1219
1220 cmd = container_of(header, struct vmw_dma_cmd, header);
1221 suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
1222 header->size - sizeof(*suffix));
1223
1224 	/* Make sure the device and the verifier stay in sync. */
1225 if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
1226 DRM_ERROR("Invalid DMA suffix size.\n");
1227 return -EINVAL;
1228 }
1229
1230 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1231 &cmd->dma.guest.ptr,
1232 &vmw_bo);
1233 if (unlikely(ret != 0))
1234 return ret;
1235
1236 /* Make sure DMA doesn't cross BO boundaries. */
1237 bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
1238 if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
1239 DRM_ERROR("Invalid DMA offset.\n");
1240 		ret = -EINVAL;
		goto out_no_surface;
1241 }
1242
1243 bo_size -= cmd->dma.guest.ptr.offset;
1244 if (unlikely(suffix->maximumOffset > bo_size))
1245 suffix->maximumOffset = bo_size;
1246
1247 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1248 user_surface_converter, &cmd->dma.host.sid,
1249 NULL);
1250 if (unlikely(ret != 0)) {
1251 if (unlikely(ret != -ERESTARTSYS))
1252 DRM_ERROR("could not find surface for DMA.\n");
1253 goto out_no_surface;
1254 }
1255
1256 srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
1257
1258 vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
1259 header);
1260
1261out_no_surface:
1262 vmw_dmabuf_unreference(&vmw_bo);
1263 return ret;
1264}
1265
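/**
 * vmw_cmd_draw - Validate an SVGA_3D_CMD_DRAW_PRIMITIVES command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks the context id, bounds-checks the vertex declaration and
 * primitive range counts against the command size, and validates the
 * surface id of every vertex array and index array referenced.
 */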
1266static int vmw_cmd_draw(struct vmw_private *dev_priv,
1267 struct vmw_sw_context *sw_context,
1268 SVGA3dCmdHeader *header)
1269{
1270 struct vmw_draw_cmd {
1271 SVGA3dCmdHeader header;
1272 SVGA3dCmdDrawPrimitives body;
1273 } *cmd;
1274 SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
1275 (unsigned long)header + sizeof(*cmd));
1276 SVGA3dPrimitiveRange *range;
1277 uint32_t i;
1278 uint32_t maxnum;
1279 int ret;
1280
1281 ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
1282 if (unlikely(ret != 0))
1283 return ret;
1284
1285 cmd = container_of(header, struct vmw_draw_cmd, header);
1286 maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
1287
1288 if (unlikely(cmd->body.numVertexDecls > maxnum)) {
1289 DRM_ERROR("Illegal number of vertex declarations.\n");
1290 return -EINVAL;
1291 }
1292
1293 for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
1294 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1295 user_surface_converter,
1296 &decl->array.surfaceId, NULL);
1297 if (unlikely(ret != 0))
1298 return ret;
1299 }
1300
1301 maxnum = (header->size - sizeof(cmd->body) -
1302 cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
1303 if (unlikely(cmd->body.numRanges > maxnum)) {
1304 DRM_ERROR("Illegal number of index ranges.\n");
1305 return -EINVAL;
1306 }
1307
1308 range = (SVGA3dPrimitiveRange *) decl;
1309 for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
1310 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1311 user_surface_converter,
1312 &range->indexArray.surfaceId, NULL);
1313 if (unlikely(ret != 0))
1314 return ret;
1315 }
1316 return 0;
1317}
1318
1319
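/**
 * vmw_cmd_tex_state - Validate an SVGA_3D_CMD_SETTEXTURESTATE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * Validates the context and, for each SVGA3D_TS_BIND_TEXTURE entry,
 * validates the bound surface. On guest-backed hardware the texture
 * binding is also staged on the context's binding state.
 */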
1320static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
1321 struct vmw_sw_context *sw_context,
1322 SVGA3dCmdHeader *header)
1323{
1324 struct vmw_tex_state_cmd {
1325 SVGA3dCmdHeader header;
1326 SVGA3dCmdSetTextureState state;
1327 } *cmd;
1328
1329 SVGA3dTextureState *last_state = (SVGA3dTextureState *)
1330 ((unsigned long) header + header->size + sizeof(header));
1331 SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
1332 ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
1333 struct vmw_resource_val_node *ctx_node;
1334 struct vmw_resource_val_node *res_node;
1335 int ret;
1336
1337 cmd = container_of(header, struct vmw_tex_state_cmd,
1338 header);
1339
1340 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1341 user_context_converter, &cmd->state.cid,
1342 &ctx_node);
1343 if (unlikely(ret != 0))
1344 return ret;
1345
1346 for (; cur_state < last_state; ++cur_state) {
1347 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
1348 continue;
1349
1350 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1351 user_surface_converter,
1352 &cur_state->value, &res_node);
1353 if (unlikely(ret != 0))
1354 return ret;
1355
1356 if (dev_priv->has_mob) {
1357 struct vmw_ctx_bindinfo bi;
1358
1359 bi.ctx = ctx_node->res;
1360 bi.res = res_node ? res_node->res : NULL;
1361 bi.bt = vmw_ctx_binding_tex;
1362 bi.i1.texture_stage = cur_state->stage;
1363 vmw_context_binding_add(ctx_node->staged_bindings,
1364 &bi);
1365 }
1366 }
1367
1368 return 0;
1369}
1370
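/**
 * vmw_cmd_check_define_gmrfb - Validate an SVGA_CMD_DEFINE_GMRFB command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 *
 * Translates the guest pointer of the GMRFB definition so that the
 * backing buffer is added to the relocation and validation lists.
 */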
1371static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1372 struct vmw_sw_context *sw_context,
1373 void *buf)
1374{
1375 struct vmw_dma_buffer *vmw_bo;
1376 int ret;
1377
1378 struct {
1379 uint32_t header;
1380 SVGAFifoCmdDefineGMRFB body;
1381 } *cmd = buf;
1382
1383 ret = vmw_translate_guest_ptr(dev_priv, sw_context,
1384 &cmd->body.ptr,
1385 &vmw_bo);
1386 if (unlikely(ret != 0))
1387 return ret;
1388
1389 vmw_dmabuf_unreference(&vmw_bo);
1390
1391 return ret;
1392}
1393
1394/**
1395 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1396 *
1397 * @dev_priv: Pointer to a device private struct.
1398 * @sw_context: The software context being used for this batch.
1399 * @res_type: The resource type.
1400 * @converter: Information about user-space binding for this resource type.
1401 * @res_id: Pointer to the user-space resource handle in the command stream.
1402 * @buf_id: Pointer to the user-space backup buffer handle in the command
1403 * stream.
1404 * @backup_offset: Offset of backup into MOB.
1405 *
1406 * This function prepares for registering a switch of backup buffers
1407 * in the resource metadata just prior to unreserving.
1408 */
1409static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1410 struct vmw_sw_context *sw_context,
1411 enum vmw_res_type res_type,
1412 const struct vmw_user_resource_conv
1413 *converter,
1414 uint32_t *res_id,
1415 uint32_t *buf_id,
1416 unsigned long backup_offset)
1417{
1418 int ret;
1419 struct vmw_dma_buffer *dma_buf;
1420 struct vmw_resource_val_node *val_node;
1421
1422 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1423 converter, res_id, &val_node);
1424 if (unlikely(ret != 0))
1425 return ret;
1426
1427 ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1428 if (unlikely(ret != 0))
1429 return ret;
1430
1431 if (val_node->first_usage)
1432 val_node->no_buffer_needed = true;
1433
1434 vmw_dmabuf_unreference(&val_node->new_backup);
1435 val_node->new_backup = dma_buf;
1436 val_node->new_backup_offset = backup_offset;
1437
1438 return 0;
1439}
1440
1441/**
1442 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1443 * command
1444 *
1445 * @dev_priv: Pointer to a device private struct.
1446 * @sw_context: The software context being used for this batch.
1447 * @header: Pointer to the command header in the command stream.
1448 */
1449static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1450 struct vmw_sw_context *sw_context,
1451 SVGA3dCmdHeader *header)
1452{
1453 struct vmw_bind_gb_surface_cmd {
1454 SVGA3dCmdHeader header;
1455 SVGA3dCmdBindGBSurface body;
1456 } *cmd;
1457
1458 cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1459
1460 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1461 user_surface_converter,
1462 &cmd->body.sid, &cmd->body.mobid,
1463 0);
1464}
1465
1466/**
1467 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1468 * command
1469 *
1470 * @dev_priv: Pointer to a device private struct.
1471 * @sw_context: The software context being used for this batch.
1472 * @header: Pointer to the command header in the command stream.
1473 */
1474static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1475 struct vmw_sw_context *sw_context,
1476 SVGA3dCmdHeader *header)
1477{
1478 struct vmw_gb_surface_cmd {
1479 SVGA3dCmdHeader header;
1480 SVGA3dCmdUpdateGBImage body;
1481 } *cmd;
1482
1483 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1484
1485 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1486 user_surface_converter,
1487 &cmd->body.image.sid, NULL);
1488}
1489
1490/**
1491 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1492 * command
1493 *
1494 * @dev_priv: Pointer to a device private struct.
1495 * @sw_context: The software context being used for this batch.
1496 * @header: Pointer to the command header in the command stream.
1497 */
1498static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1499 struct vmw_sw_context *sw_context,
1500 SVGA3dCmdHeader *header)
1501{
1502 struct vmw_gb_surface_cmd {
1503 SVGA3dCmdHeader header;
1504 SVGA3dCmdUpdateGBSurface body;
1505 } *cmd;
1506
1507 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1508
1509 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1510 user_surface_converter,
1511 &cmd->body.sid, NULL);
1512}
1513
1514/**
1515 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
1516 * command
1517 *
1518 * @dev_priv: Pointer to a device private struct.
1519 * @sw_context: The software context being used for this batch.
1520 * @header: Pointer to the command header in the command stream.
1521 */
1522static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1523 struct vmw_sw_context *sw_context,
1524 SVGA3dCmdHeader *header)
1525{
1526 struct vmw_gb_surface_cmd {
1527 SVGA3dCmdHeader header;
1528 SVGA3dCmdReadbackGBImage body;
1529 } *cmd;
1530
1531 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1532
1533 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1534 user_surface_converter,
1535 &cmd->body.image.sid, NULL);
1536}
1537
1538/**
1539 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
1540 * command
1541 *
1542 * @dev_priv: Pointer to a device private struct.
1543 * @sw_context: The software context being used for this batch.
1544 * @header: Pointer to the command header in the command stream.
1545 */
1546static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1547 struct vmw_sw_context *sw_context,
1548 SVGA3dCmdHeader *header)
1549{
1550 struct vmw_gb_surface_cmd {
1551 SVGA3dCmdHeader header;
1552 SVGA3dCmdReadbackGBSurface body;
1553 } *cmd;
1554
1555 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1556
1557 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1558 user_surface_converter,
1559 &cmd->body.sid, NULL);
1560}
1561
1562/**
1563 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1564 * command
1565 *
1566 * @dev_priv: Pointer to a device private struct.
1567 * @sw_context: The software context being used for this batch.
1568 * @header: Pointer to the command header in the command stream.
1569 */
1570static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1571 struct vmw_sw_context *sw_context,
1572 SVGA3dCmdHeader *header)
1573{
1574 struct vmw_gb_surface_cmd {
1575 SVGA3dCmdHeader header;
1576 SVGA3dCmdInvalidateGBImage body;
1577 } *cmd;
1578
1579 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1580
1581 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1582 user_surface_converter,
1583 &cmd->body.image.sid, NULL);
1584}
1585
1586/**
1587 * vmw_cmd_invalidate_gb_surface - Validate an
1588 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
1589 *
1590 * @dev_priv: Pointer to a device private struct.
1591 * @sw_context: The software context being used for this batch.
1592 * @header: Pointer to the command header in the command stream.
1593 */
1594static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1595 struct vmw_sw_context *sw_context,
1596 SVGA3dCmdHeader *header)
1597{
1598 struct vmw_gb_surface_cmd {
1599 SVGA3dCmdHeader header;
1600 SVGA3dCmdInvalidateGBSurface body;
1601 } *cmd;
1602
1603 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1604
1605 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1606 user_surface_converter,
1607 &cmd->body.sid, NULL);
1608}
1609
1610
1611/**
1612 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
1613 * command
1614 *
1615 * @dev_priv: Pointer to a device private struct.
1616 * @sw_context: The software context being used for this batch.
1617 * @header: Pointer to the command header in the command stream.
1618 */
1619static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1620 struct vmw_sw_context *sw_context,
1621 SVGA3dCmdHeader *header)
1622{
1623 struct vmw_shader_define_cmd {
1624 SVGA3dCmdHeader header;
1625 SVGA3dCmdDefineShader body;
1626 } *cmd;
1627 int ret;
1628 size_t size;
1629
1630 cmd = container_of(header, struct vmw_shader_define_cmd,
1631 header);
1632
1633 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1634 user_context_converter, &cmd->body.cid,
1635 NULL);
1636 if (unlikely(ret != 0))
1637 return ret;
1638
1639 if (unlikely(!dev_priv->has_mob))
1640 return 0;
1641
1642 size = cmd->header.size - sizeof(cmd->body);
1643 ret = vmw_compat_shader_add(sw_context->fp->shman,
1644 cmd->body.shid, cmd + 1,
1645 cmd->body.type, size,
1646 sw_context->fp->tfile,
1647 &sw_context->staged_shaders);
1648 if (unlikely(ret != 0))
1649 return ret;
1650
1651 return vmw_resource_relocation_add(&sw_context->res_relocations,
1652 NULL, &cmd->header.id -
1653 sw_context->buf_start);
1656}
1657
1658/**
1659 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
1660 * command
1661 *
1662 * @dev_priv: Pointer to a device private struct.
1663 * @sw_context: The software context being used for this batch.
1664 * @header: Pointer to the command header in the command stream.
1665 */
1666static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1667 struct vmw_sw_context *sw_context,
1668 SVGA3dCmdHeader *header)
1669{
1670 struct vmw_shader_destroy_cmd {
1671 SVGA3dCmdHeader header;
1672 SVGA3dCmdDestroyShader body;
1673 } *cmd;
1674 int ret;
1675
1676 cmd = container_of(header, struct vmw_shader_destroy_cmd,
1677 header);
1678
1679 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1680 user_context_converter, &cmd->body.cid,
1681 NULL);
1682 if (unlikely(ret != 0))
1683 return ret;
1684
1685 if (unlikely(!dev_priv->has_mob))
1686 return 0;
1687
1688 ret = vmw_compat_shader_remove(sw_context->fp->shman,
1689 cmd->body.shid,
1690 cmd->body.type,
1691 &sw_context->staged_shaders);
1692 if (unlikely(ret != 0))
1693 return ret;
1694
1695 return vmw_resource_relocation_add(&sw_context->res_relocations,
1696 NULL, &cmd->header.id -
1697 sw_context->buf_start);
1700}
1701
1702/**
1703 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
1704 * command
1705 *
1706 * @dev_priv: Pointer to a device private struct.
1707 * @sw_context: The software context being used for this batch.
1708 * @header: Pointer to the command header in the command stream.
1709 */
1710static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1711 struct vmw_sw_context *sw_context,
1712 SVGA3dCmdHeader *header)
1713{
1714 struct vmw_set_shader_cmd {
1715 SVGA3dCmdHeader header;
1716 SVGA3dCmdSetShader body;
1717 } *cmd;
1718 struct vmw_resource_val_node *ctx_node;
1719 int ret;
1720
1721 cmd = container_of(header, struct vmw_set_shader_cmd,
1722 header);
1723
1724 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1725 user_context_converter, &cmd->body.cid,
1726 &ctx_node);
1727 if (unlikely(ret != 0))
1728 return ret;
1729
1730 if (dev_priv->has_mob) {
1731 struct vmw_ctx_bindinfo bi;
1732 struct vmw_resource_val_node *res_node;
1733 u32 shid = cmd->body.shid;
1734
1735 if (shid != SVGA3D_INVALID_ID)
1736 (void) vmw_compat_shader_lookup(sw_context->fp->shman,
1737 cmd->body.type,
1738 &shid);
1739
1740 ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
1741 vmw_res_shader,
1742 user_shader_converter,
1743 shid,
1744 &cmd->body.shid, &res_node);
1745 if (unlikely(ret != 0))
1746 return ret;
1747
1748 bi.ctx = ctx_node->res;
1749 bi.res = res_node ? res_node->res : NULL;
1750 bi.bt = vmw_ctx_binding_shader;
1751 bi.i1.shader_type = cmd->body.type;
1752 return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
1753 }
1754
1755 return 0;
1756}
1757
1758/**
1759 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
1760 * command
1761 *
1762 * @dev_priv: Pointer to a device private struct.
1763 * @sw_context: The software context being used for this batch.
1764 * @header: Pointer to the command header in the command stream.
1765 */
1766static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
1767 struct vmw_sw_context *sw_context,
1768 SVGA3dCmdHeader *header)
1769{
1770 struct vmw_set_shader_const_cmd {
1771 SVGA3dCmdHeader header;
1772 SVGA3dCmdSetShaderConst body;
1773 } *cmd;
1774 int ret;
1775
1776 cmd = container_of(header, struct vmw_set_shader_const_cmd,
1777 header);
1778
1779 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1780 user_context_converter, &cmd->body.cid,
1781 NULL);
1782 if (unlikely(ret != 0))
1783 return ret;
1784
1785 if (dev_priv->has_mob)
1786 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
1787
1788 return 0;
1789}
1790
1791/**
1792 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
1793 * command
1794 *
1795 * @dev_priv: Pointer to a device private struct.
1796 * @sw_context: The software context being used for this batch.
1797 * @header: Pointer to the command header in the command stream.
1798 */
1799static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
1800 struct vmw_sw_context *sw_context,
1801 SVGA3dCmdHeader *header)
1802{
1803 struct vmw_bind_gb_shader_cmd {
1804 SVGA3dCmdHeader header;
1805 SVGA3dCmdBindGBShader body;
1806 } *cmd;
1807
1808 cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
1809 header);
1810
1811 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
1812 user_shader_converter,
1813 &cmd->body.shid, &cmd->body.mobid,
1814 cmd->body.offsetInBytes);
1815}
1816
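/**
 * vmw_cmd_check_not_3d - Verify a non-3D fifo command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 * @size: On input the remaining stream size; on output the size consumed
 * by this command.
 *
 * Determines the command size from the command id, rejects unknown or
 * truncated commands, only allows these commands from kernel clients,
 * and hands SVGA_CMD_DEFINE_GMRFB off for buffer translation.
 */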
1817static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1818 struct vmw_sw_context *sw_context,
1819 void *buf, uint32_t *size)
1820{
1821 uint32_t size_remaining = *size;
1822 uint32_t cmd_id;
1823
1824 cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1825 switch (cmd_id) {
1826 case SVGA_CMD_UPDATE:
1827 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
1828 break;
1829 case SVGA_CMD_DEFINE_GMRFB:
1830 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
1831 break;
1832 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
1833 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1834 break;
1835 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
1836 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1837 break;
1838 default:
1839 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
1840 return -EINVAL;
1841 }
1842
1843 if (*size > size_remaining) {
1844 DRM_ERROR("Invalid SVGA command (size mismatch):"
1845 " %u.\n", cmd_id);
1846 return -EINVAL;
1847 }
1848
1849 if (unlikely(!sw_context->kernel)) {
1850 		DRM_ERROR("Kernel-only SVGA command: %u.\n", cmd_id);
1851 return -EPERM;
1852 }
1853
1854 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
1855 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
1856
1857 return 0;
1858}
1859
1860static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1861 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
1862 false, false, false),
1863 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
1864 false, false, false),
1865 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
1866 true, false, false),
1867 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
1868 true, false, false),
1869 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
1870 true, false, false),
1871 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
1872 false, false, false),
1873 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
1874 false, false, false),
1875 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
1876 true, false, false),
1877 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
1878 true, false, false),
1879 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
1880 true, false, false),
1881 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
1882 &vmw_cmd_set_render_target_check, true, false, false),
1883 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
1884 true, false, false),
1885 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
1886 true, false, false),
1887 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
1888 true, false, false),
1889 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
1890 true, false, false),
1891 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
1892 true, false, false),
1893 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
1894 true, false, false),
1895 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
1896 true, false, false),
1897 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
1898 false, false, false),
1899 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
1900 true, false, false),
1901 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
1902 true, false, false),
1903 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
1904 true, false, false),
1905 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
1906 true, false, false),
1907 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
1908 true, false, false),
1909 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
1910 true, false, false),
1911 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
1912 true, false, false),
1913 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
1914 true, false, false),
1915 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
1916 true, false, false),
1917 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
1918 true, false, false),
1919 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
1920 &vmw_cmd_blt_surf_screen_check, false, false, false),
1921 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
1922 false, false, false),
1923 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
1924 false, false, false),
1925 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
1926 false, false, false),
1927 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
1928 false, false, false),
1929 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
1930 false, false, false),
1931 VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
1932 false, false, false),
1933 VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
1934 false, false, false),
1935 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
1936 false, false, false),
1937 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
1938 false, false, false),
1939 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
1940 false, false, false),
1941 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
1942 false, false, false),
1943 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
1944 false, false, false),
1945 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
1946 false, false, false),
1947 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
1948 false, false, true),
1949 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
1950 false, false, true),
1951 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
1952 false, false, true),
1953 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
1954 false, false, true),
1955 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
1956 false, false, true),
1957 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
1958 false, false, true),
1959 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
1960 false, false, true),
1961 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
1962 false, false, true),
1963 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
1964 true, false, true),
1965 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
1966 false, false, true),
1967 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
1968 true, false, true),
1969 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
1970 &vmw_cmd_update_gb_surface, true, false, true),
1971 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
1972 &vmw_cmd_readback_gb_image, true, false, true),
1973 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
1974 &vmw_cmd_readback_gb_surface, true, false, true),
1975 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
1976 &vmw_cmd_invalidate_gb_image, true, false, true),
1977 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
1978 &vmw_cmd_invalidate_gb_surface, true, false, true),
1979 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
1980 false, false, true),
1981 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
1982 false, false, true),
1983 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
1984 false, false, true),
1985 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
1986 false, false, true),
1987 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
1988 false, false, true),
1989 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
1990 false, false, true),
1991 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
1992 true, false, true),
1993 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
1994 false, false, true),
1995 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
1996 false, false, false),
1997 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
1998 true, false, true),
1999 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2000 true, false, true),
2001 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
2002 true, false, true),
2003 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
2004 true, false, true),
2005 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
2006 false, false, true),
2007 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
2008 false, false, true),
2009 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
2010 false, false, true),
2011 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
2012 false, false, true),
2013 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
2014 false, false, true),
2015 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
2016 false, false, true),
2017 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
2018 false, false, true),
2019 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
2020 false, false, true),
2021 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2022 false, false, true),
2023 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2024 false, false, true),
2025 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
2026 true, false, true)
2027};
2028
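/**
 * vmw_cmd_check - Verify a single command in the command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Pointer to the command in the command stream.
 * @size: On input the remaining stream size; on output the size consumed
 * by this command.
 *
 * Non-3D commands are handed to vmw_cmd_check_not_3d(). 3D commands are
 * looked up in vmw_cmd_entries[] by (id - SVGA_3D_CMD_BASE), checked for
 * size, privilege and guest-backed compatibility, and then passed to the
 * per-command verifier function.
 */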
2029static int vmw_cmd_check(struct vmw_private *dev_priv,
2030 struct vmw_sw_context *sw_context,
2031 void *buf, uint32_t *size)
2032{
2033 uint32_t cmd_id;
2034 uint32_t size_remaining = *size;
2035 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
2036 int ret;
2037 const struct vmw_cmd_entry *entry;
2038 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
2039
2040 cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
2041 	/* Handle any non-3D commands. */
2042 if (unlikely(cmd_id < SVGA_CMD_MAX))
2043 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
2044
2045
2046 cmd_id = le32_to_cpu(header->id);
2047 *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
2048
2049 cmd_id -= SVGA_3D_CMD_BASE;
2050 if (unlikely(*size > size_remaining))
2051 goto out_invalid;
2052
2053 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
2054 goto out_invalid;
2055
2056 entry = &vmw_cmd_entries[cmd_id];
2057 if (unlikely(!entry->func))
2058 goto out_invalid;
2059
2060 if (unlikely(!entry->user_allow && !sw_context->kernel))
2061 goto out_privileged;
2062
2063 if (unlikely(entry->gb_disable && gb))
2064 goto out_old;
2065
2066 if (unlikely(entry->gb_enable && !gb))
2067 goto out_new;
2068
2069 ret = entry->func(dev_priv, sw_context, header);
2070 if (unlikely(ret != 0))
2071 goto out_invalid;
2072
2073 return 0;
2074out_invalid:
2075 DRM_ERROR("Invalid SVGA3D command: %d\n",
2076 cmd_id + SVGA_3D_CMD_BASE);
2077 return -EINVAL;
2078out_privileged:
2079 DRM_ERROR("Privileged SVGA3D command: %d\n",
2080 cmd_id + SVGA_3D_CMD_BASE);
2081 return -EPERM;
2082out_old:
2083 DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
2084 cmd_id + SVGA_3D_CMD_BASE);
2085 return -EINVAL;
2086out_new:
2087 DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
2088 cmd_id + SVGA_3D_CMD_BASE);
2089 return -EINVAL;
2090}
2091
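/**
 * vmw_cmd_check_all - Verify the whole command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @buf: Start of the command stream.
 * @size: Size of the command stream in bytes.
 *
 * Walks the stream, calling vmw_cmd_check() on each command and advancing
 * by the size that call reports. Roughly, a valid 3D stream is laid out as
 *
 *	[SVGA3dCmdHeader][body][SVGA3dCmdHeader][body]...
 *
 * where each header's size field covers its body, and the stream must end
 * exactly on a command boundary.
 */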
2092static int vmw_cmd_check_all(struct vmw_private *dev_priv,
2093 struct vmw_sw_context *sw_context,
2094 void *buf,
2095 uint32_t size)
2096{
2097 int32_t cur_size = size;
2098 int ret;
2099
2100 sw_context->buf_start = buf;
2101
2102 while (cur_size > 0) {
2103 size = cur_size;
2104 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
2105 if (unlikely(ret != 0))
2106 return ret;
2107 buf = (void *)((unsigned long) buf + size);
2108 cur_size -= size;
2109 }
2110
2111 if (unlikely(cur_size != 0)) {
2112 DRM_ERROR("Command verifier out of sync.\n");
2113 return -EINVAL;
2114 }
2115
2116 return 0;
2117}
2118
2119static void vmw_free_relocations(struct vmw_sw_context *sw_context)
2120{
2121 sw_context->cur_reloc = 0;
2122}
2123
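/**
 * vmw_apply_relocations - Patch buffer locations into the command stream
 *
 * @sw_context: The software context used for this command submission.
 *
 * For each recorded relocation, writes the final buffer location according
 * to where the buffer object was placed during validation: VRAM buffers get
 * SVGA_GMR_FRAMEBUFFER plus the buffer offset, GMR buffers get the GMR id,
 * and MOB buffers have the mob id written through the saved mob_loc pointer.
 */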
2124static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
2125{
2126 uint32_t i;
2127 struct vmw_relocation *reloc;
2128 struct ttm_validate_buffer *validate;
2129 struct ttm_buffer_object *bo;
2130
2131 for (i = 0; i < sw_context->cur_reloc; ++i) {
2132 reloc = &sw_context->relocs[i];
2133 validate = &sw_context->val_bufs[reloc->index].base;
2134 bo = validate->bo;
2135 switch (bo->mem.mem_type) {
2136 case TTM_PL_VRAM:
2137 reloc->location->offset += bo->offset;
2138 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
2139 break;
2140 case VMW_PL_GMR:
2141 reloc->location->gmrId = bo->mem.start;
2142 break;
2143 case VMW_PL_MOB:
2144 *reloc->mob_loc = bo->mem.start;
2145 break;
2146 default:
2147 BUG();
2148 }
2149 }
2150 vmw_free_relocations(sw_context);
2151}
2152
2153/**
2154 * vmw_resource_list_unreference - Free up a resource list and unreference
2155 * all resources referenced by it.
2156 *
2157 * @list: The resource list.
2158 */
2159static void vmw_resource_list_unreference(struct list_head *list)
2160{
2161 struct vmw_resource_val_node *val, *val_next;
2162
2163 /*
2164 * Drop references to resources held during command submission.
2165 */
2166
2167 list_for_each_entry_safe(val, val_next, list, head) {
2168 list_del_init(&val->head);
2169 vmw_resource_unreference(&val->res);
2170 if (unlikely(val->staged_bindings))
2171 kfree(val->staged_bindings);
2172 kfree(val);
2173 }
2174}
2175
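/**
 * vmw_clear_validations - Drop buffer references collected during checking
 *
 * @sw_context: The software context used for this command submission.
 *
 * Releases the buffer objects on the validation list and removes their
 * hash entries, and removes the resource hash entries. The resource
 * references themselves are dropped later by vmw_resource_list_unreference().
 */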
2176static void vmw_clear_validations(struct vmw_sw_context *sw_context)
2177{
2178 struct vmw_validate_buffer *entry, *next;
2179 struct vmw_resource_val_node *val;
2180
2181 /*
2182 * Drop references to DMA buffers held during command submission.
2183 */
2184 list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
2185 base.head) {
2186 list_del(&entry->base.head);
2187 ttm_bo_unref(&entry->base.bo);
2188 (void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
2189 sw_context->cur_val_buf--;
2190 }
2191 BUG_ON(sw_context->cur_val_buf != 0);
2192
2193 list_for_each_entry(val, &sw_context->resource_list, head)
2194 (void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
2195}
2196
2197static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
2198 struct ttm_buffer_object *bo,
2199 bool validate_as_mob)
2200{
2201 int ret;
2202
2203
2204 /*
2205 * Don't validate pinned buffers.
2206 */
2207
2208 if (bo == dev_priv->pinned_bo ||
2209 (bo == dev_priv->dummy_query_bo &&
2210 dev_priv->dummy_query_bo_pinned))
2211 return 0;
2212
2213 if (validate_as_mob)
2214 return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
2215
2216 /**
2217 * Put BO in VRAM if there is space, otherwise as a GMR.
2218 * If there is no space in VRAM and GMR ids are all used up,
2219 * start evicting GMRs to make room. If the DMA buffer can't be
2220 * used as a GMR, this will return -ENOMEM.
2221 */
2222
2223 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
2224 if (likely(ret == 0 || ret == -ERESTARTSYS))
2225 return ret;
2226
2227 /**
2228 * If that failed, try VRAM again, this time evicting
2229 * previous contents.
2230 */
2231
2232 DRM_INFO("Falling through to VRAM.\n");
2233 ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
2234 return ret;
2235}
2236
2237static int vmw_validate_buffers(struct vmw_private *dev_priv,
2238 struct vmw_sw_context *sw_context)
2239{
2240 struct vmw_validate_buffer *entry;
2241 int ret;
2242
2243 list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
2244 ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
2245 entry->validate_as_mob);
2246 if (unlikely(ret != 0))
2247 return ret;
2248 }
2249 return 0;
2250}
2251
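/**
 * vmw_resize_cmd_bounce - Ensure the command bounce buffer is large enough
 *
 * @sw_context: The software context used for this command submission.
 * @size: Required size in bytes.
 *
 * Grows the bounce buffer used for copying in the user-space command
 * stream. The size grows by roughly 1.5x per step and is page-aligned,
 * and the old contents are not preserved. As a rough illustration only,
 * assuming a 32 KiB initial size and 4 KiB pages, a 100 KiB submission
 * would grow the buffer 32 KiB -> 48 KiB -> 72 KiB -> 108 KiB.
 */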
2252static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
2253 uint32_t size)
2254{
2255 if (likely(sw_context->cmd_bounce_size >= size))
2256 return 0;
2257
2258 if (sw_context->cmd_bounce_size == 0)
2259 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
2260
2261 while (sw_context->cmd_bounce_size < size) {
2262 sw_context->cmd_bounce_size =
2263 PAGE_ALIGN(sw_context->cmd_bounce_size +
2264 (sw_context->cmd_bounce_size >> 1));
2265 }
2266
2267 if (sw_context->cmd_bounce != NULL)
2268 vfree(sw_context->cmd_bounce);
2269
2270 sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
2271
2272 if (sw_context->cmd_bounce == NULL) {
2273 DRM_ERROR("Failed to allocate command bounce buffer.\n");
2274 sw_context->cmd_bounce_size = 0;
2275 return -ENOMEM;
2276 }
2277
2278 return 0;
2279}
2280
2281/**
2282 * vmw_execbuf_fence_commands - create and submit a command stream fence
2283 *
2284 * Creates a fence object and submits a command stream marker.
2285 * If this fails for some reason, we sync the fifo and return a NULL fence
2286 * pointer in @p_fence. It is then safe to fence buffers with a NULL pointer.
2287 *
2288 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
2289 * user-space fence handle is created; otherwise no handle is created.
2290 */
2291
2292int vmw_execbuf_fence_commands(struct drm_file *file_priv,
2293 struct vmw_private *dev_priv,
2294 struct vmw_fence_obj **p_fence,
2295 uint32_t *p_handle)
2296{
2297 uint32_t sequence;
2298 int ret;
2299 bool synced = false;
2300
2301 /* p_handle implies file_priv. */
2302 BUG_ON(p_handle != NULL && file_priv == NULL);
2303
2304 ret = vmw_fifo_send_fence(dev_priv, &sequence);
2305 if (unlikely(ret != 0)) {
2306 DRM_ERROR("Fence submission error. Syncing.\n");
2307 synced = true;
2308 }
2309
2310 if (p_handle != NULL)
2311 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
2312 sequence,
2313 DRM_VMW_FENCE_FLAG_EXEC,
2314 p_fence, p_handle);
2315 else
2316 ret = vmw_fence_create(dev_priv->fman, sequence,
2317 DRM_VMW_FENCE_FLAG_EXEC,
2318 p_fence);
2319
2320 if (unlikely(ret != 0 && !synced)) {
2321 (void) vmw_fallback_wait(dev_priv, false, false,
2322 sequence, false,
2323 VMW_FENCE_WAIT_TIMEOUT);
2324 *p_fence = NULL;
2325 }
2326
2327 return 0;
2328}
2329
2330/**
2331 * vmw_execbuf_copy_fence_user - copy fence object information to
2332 * user-space.
2333 *
2334 * @dev_priv: Pointer to a vmw_private struct.
2335 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
2336 * @ret: Return value from fence object creation.
2337 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
2338 * which the information should be copied.
2339 * @fence: Pointer to the fence object.
2340 * @fence_handle: User-space fence handle.
2341 *
2342 * This function copies fence information to user-space. If copying fails,
2343 * the user-space struct drm_vmw_fence_rep::error member is hopefully
2344 * left untouched, and if it was preloaded with -EFAULT by user-space,
2345 * the error will hopefully be detected.
2346 * Also, if copying fails, user-space will be unable to signal the fence
2347 * object, so we wait for it immediately and then unreference the
2348 * user-space reference.
2349 */
2350void
2351vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2352 struct vmw_fpriv *vmw_fp,
2353 int ret,
2354 struct drm_vmw_fence_rep __user *user_fence_rep,
2355 struct vmw_fence_obj *fence,
2356 uint32_t fence_handle)
2357{
2358 struct drm_vmw_fence_rep fence_rep;
2359
2360 if (user_fence_rep == NULL)
2361 return;
2362
2363 memset(&fence_rep, 0, sizeof(fence_rep));
2364
2365 fence_rep.error = ret;
2366 if (ret == 0) {
2367 BUG_ON(fence == NULL);
2368
2369 fence_rep.handle = fence_handle;
2370 fence_rep.seqno = fence->seqno;
2371 vmw_update_seqno(dev_priv, &dev_priv->fifo);
2372 fence_rep.passed_seqno = dev_priv->last_read_seqno;
2373 }
2374
2375 /*
2376 * copy_to_user errors will be detected by user space not
2377 * seeing fence_rep::error filled in. Typically
2378 * user-space would have pre-set that member to -EFAULT.
2379 */
2380 ret = copy_to_user(user_fence_rep, &fence_rep,
2381 sizeof(fence_rep));
2382
2383 /*
2384 * User-space lost the fence object. We need to sync
2385 * and unreference the handle.
2386 */
2387 if (unlikely(ret != 0) && (fence_rep.error == 0)) {
2388 ttm_ref_object_base_unref(vmw_fp->tfile,
2389 fence_handle, TTM_REF_USAGE);
2390 DRM_ERROR("Fence copy error. Syncing.\n");
2391 (void) vmw_fence_obj_wait(fence, fence->signal_mask,
2392 false, false,
2393 VMW_FENCE_WAIT_TIMEOUT);
2394 }
2395}
2396
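/**
 * vmw_execbuf_process - Validate and submit a command stream
 *
 * @file_priv: Pointer to the calling file.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space pointer to the command stream, used when
 * @kernel_commands is NULL.
 * @kernel_commands: Kernel pointer to an already-copied command stream,
 * or NULL.
 * @command_size: Size of the command stream in bytes.
 * @throttle_us: If non-zero, throttle the submission by this lag in
 * microseconds.
 * @user_fence_rep: Optional user-space address to receive fence info.
 * @out_fence: Optional location to return the fence object to the caller.
 *
 * Copies the command stream into the bounce buffer if needed, verifies it
 * with vmw_cmd_check_all(), reserves and validates all referenced resources
 * and buffers, applies relocations while copying the verified stream into
 * the fifo, commits it, emits a fence and copies fence information back to
 * user-space. Returns 0 on success or a negative error code on failure.
 */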
2397int vmw_execbuf_process(struct drm_file *file_priv,
2398 struct vmw_private *dev_priv,
2399 void __user *user_commands,
2400 void *kernel_commands,
2401 uint32_t command_size,
2402 uint64_t throttle_us,
2403 struct drm_vmw_fence_rep __user *user_fence_rep,
2404 struct vmw_fence_obj **out_fence)
2405{
2406 struct vmw_sw_context *sw_context = &dev_priv->ctx;
2407 struct vmw_fence_obj *fence = NULL;
2408 struct vmw_resource *error_resource;
2409 struct list_head resource_list;
2410 struct ww_acquire_ctx ticket;
2411 uint32_t handle;
2412 void *cmd;
2413 int ret;
2414
2415 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
2416 if (unlikely(ret != 0))
2417 return -ERESTARTSYS;
2418
2419 if (kernel_commands == NULL) {
2420 sw_context->kernel = false;
2421
2422 ret = vmw_resize_cmd_bounce(sw_context, command_size);
2423 if (unlikely(ret != 0))
2424 goto out_unlock;
2425
2426
2427 ret = copy_from_user(sw_context->cmd_bounce,
2428 user_commands, command_size);
2429
2430 if (unlikely(ret != 0)) {
2431 ret = -EFAULT;
2432 DRM_ERROR("Failed copying commands.\n");
2433 goto out_unlock;
2434 }
2435 kernel_commands = sw_context->cmd_bounce;
2436 } else
2437 sw_context->kernel = true;
2438
2439 sw_context->fp = vmw_fpriv(file_priv);
2440 sw_context->cur_reloc = 0;
2441 sw_context->cur_val_buf = 0;
2442 sw_context->fence_flags = 0;
2443 INIT_LIST_HEAD(&sw_context->resource_list);
2444 sw_context->cur_query_bo = dev_priv->pinned_bo;
2445 sw_context->last_query_ctx = NULL;
2446 sw_context->needs_post_query_barrier = false;
2447 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
2448 INIT_LIST_HEAD(&sw_context->validate_nodes);
2449 INIT_LIST_HEAD(&sw_context->res_relocations);
2450 if (!sw_context->res_ht_initialized) {
2451 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
2452 if (unlikely(ret != 0))
2453 goto out_unlock;
2454 sw_context->res_ht_initialized = true;
2455 }
2456 INIT_LIST_HEAD(&sw_context->staged_shaders);
2457
2458 INIT_LIST_HEAD(&resource_list);
2459 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
2460 command_size);
2461 if (unlikely(ret != 0))
2462 goto out_err_nores;
2463
2464 ret = vmw_resources_reserve(sw_context);
2465 if (unlikely(ret != 0))
2466 goto out_err_nores;
2467
2468 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
2469 if (unlikely(ret != 0))
2470 goto out_err;
2471
2472 ret = vmw_validate_buffers(dev_priv, sw_context);
2473 if (unlikely(ret != 0))
2474 goto out_err;
2475
2476 ret = vmw_resources_validate(sw_context);
2477 if (unlikely(ret != 0))
2478 goto out_err;
2479
2480 if (throttle_us) {
2481 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
2482 throttle_us);
2483
2484 if (unlikely(ret != 0))
2485 goto out_err;
2486 }
2487
2488 ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
2489 if (unlikely(ret != 0)) {
2490 ret = -ERESTARTSYS;
2491 goto out_err;
2492 }
2493
2494 if (dev_priv->has_mob) {
2495 ret = vmw_rebind_contexts(sw_context);
2496 if (unlikely(ret != 0))
2497 goto out_unlock_binding;
2498 }
2499
2500 cmd = vmw_fifo_reserve(dev_priv, command_size);
2501 if (unlikely(cmd == NULL)) {
2502 DRM_ERROR("Failed reserving fifo space for commands.\n");
2503 ret = -ENOMEM;
2504 goto out_unlock_binding;
2505 }
2506
2507 vmw_apply_relocations(sw_context);
2508 memcpy(cmd, kernel_commands, command_size);
2509
2510 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2511 vmw_resource_relocations_free(&sw_context->res_relocations);
2512
2513 vmw_fifo_commit(dev_priv, command_size);
2514
2515 vmw_query_bo_switch_commit(dev_priv, sw_context);
2516 ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
2517 &fence,
2518 (user_fence_rep) ? &handle : NULL);
2519 /*
2520 * This error is harmless, because if fence submission fails,
2521 * vmw_fifo_send_fence will sync. The error will be propagated to
2522 	 * user-space in @user_fence_rep.
2523 */
2524
2525 if (ret != 0)
2526 DRM_ERROR("Fence submission error. Syncing.\n");
2527
2528 vmw_resource_list_unreserve(&sw_context->resource_list, false);
2529 mutex_unlock(&dev_priv->binding_mutex);
2530
2531 ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2532 (void *) fence);
2533
2534 if (unlikely(dev_priv->pinned_bo != NULL &&
2535 !dev_priv->query_cid_valid))
2536 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
2537
2538 vmw_clear_validations(sw_context);
2539 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
2540 user_fence_rep, fence, handle);
2541
2542 /* Don't unreference when handing fence out */
2543 if (unlikely(out_fence != NULL)) {
2544 *out_fence = fence;
2545 fence = NULL;
2546 } else if (likely(fence != NULL)) {
2547 vmw_fence_obj_unreference(&fence);
2548 }
2549
2550 list_splice_init(&sw_context->resource_list, &resource_list);
2551 vmw_compat_shaders_commit(sw_context->fp->shman,
2552 &sw_context->staged_shaders);
2553 mutex_unlock(&dev_priv->cmdbuf_mutex);
2554
2555 /*
2556 * Unreference resources outside of the cmdbuf_mutex to
2557 * avoid deadlocks in resource destruction paths.
2558 */
2559 vmw_resource_list_unreference(&resource_list);
2560
2561 return 0;
2562
2563out_unlock_binding:
2564 mutex_unlock(&dev_priv->binding_mutex);
2565out_err:
2566 ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2567out_err_nores:
2568 vmw_resource_list_unreserve(&sw_context->resource_list, true);
2569 vmw_resource_relocations_free(&sw_context->res_relocations);
2570 vmw_free_relocations(sw_context);
2571 vmw_clear_validations(sw_context);
2572 if (unlikely(dev_priv->pinned_bo != NULL &&
2573 !dev_priv->query_cid_valid))
2574 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2575out_unlock:
2576 list_splice_init(&sw_context->resource_list, &resource_list);
2577 error_resource = sw_context->error_resource;
2578 sw_context->error_resource = NULL;
2579 vmw_compat_shaders_revert(sw_context->fp->shman,
2580 &sw_context->staged_shaders);
2581 mutex_unlock(&dev_priv->cmdbuf_mutex);
2582
2583 /*
2584 * Unreference resources outside of the cmdbuf_mutex to
2585 * avoid deadlocks in resource destruction paths.
2586 */
2587 vmw_resource_list_unreference(&resource_list);
2588 if (unlikely(error_resource != NULL))
2589 vmw_resource_unreference(&error_resource);
2590
2591 return ret;
2592}
2593
2594/**
2595 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
2596 *
2597 * @dev_priv: The device private structure.
2598 *
2599 * This function is called to idle the fifo and unpin the query buffer
2600 * if the normal way to do this hits an error, which should typically be
2601 * extremely rare.
2602 */
2603static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2604{
2605 DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
2606
2607 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
2608 vmw_bo_pin(dev_priv->pinned_bo, false);
2609 vmw_bo_pin(dev_priv->dummy_query_bo, false);
2610 dev_priv->dummy_query_bo_pinned = false;
2611}
2612
2613
2614/**
2615 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2616 * query bo.
2617 *
2618 * @dev_priv: The device private structure.
2619 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
2620 * _after_ a query barrier that flushes all queries touching the current
2621 * buffer pointed to by @dev_priv->pinned_bo.
2622 *
2623 * This function should be used to unpin the pinned query bo, or
2624 * as a query barrier when we need to make sure that all queries have
2625 * finished before the next fifo command. (For example on hardware
2626 * context destructions where the hardware may otherwise leak unfinished
2627 * queries).
2628 *
2629 * This function does not return any failure codes, but makes attempts
2630 * to do safe unpinning in case of errors.
2631 *
2632 * The function will synchronize on the previous query barrier, and will
2633 * thus not finish until that barrier has executed.
2634 *
2635 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
2636 * before calling this function.
2637 */
2638void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2639 struct vmw_fence_obj *fence)
2640{
2641 int ret = 0;
2642 struct list_head validate_list;
2643 struct ttm_validate_buffer pinned_val, query_val;
2644 struct vmw_fence_obj *lfence = NULL;
2645 struct ww_acquire_ctx ticket;
2646
2647 if (dev_priv->pinned_bo == NULL)
2648 goto out_unlock;
2649
2650 INIT_LIST_HEAD(&validate_list);
2651
2652 pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
2653 list_add_tail(&pinned_val.head, &validate_list);
2654
2655 query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
2656 list_add_tail(&query_val.head, &validate_list);
2657
2658 do {
2659 ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
2660 } while (ret == -ERESTARTSYS);
2661
2662 if (unlikely(ret != 0)) {
2663 vmw_execbuf_unpin_panic(dev_priv);
2664 goto out_no_reserve;
2665 }
2666
2667 if (dev_priv->query_cid_valid) {
2668 BUG_ON(fence != NULL);
2669 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
2670 if (unlikely(ret != 0)) {
2671 vmw_execbuf_unpin_panic(dev_priv);
2672 goto out_no_emit;
2673 }
2674 dev_priv->query_cid_valid = false;
2675 }
2676
2677 vmw_bo_pin(dev_priv->pinned_bo, false);
2678 vmw_bo_pin(dev_priv->dummy_query_bo, false);
2679 dev_priv->dummy_query_bo_pinned = false;
2680
2681 if (fence == NULL) {
2682 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2683 NULL);
2684 fence = lfence;
2685 }
2686 ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
2687 if (lfence != NULL)
2688 vmw_fence_obj_unreference(&lfence);
2689
2690 ttm_bo_unref(&query_val.bo);
2691 ttm_bo_unref(&pinned_val.bo);
2692 ttm_bo_unref(&dev_priv->pinned_bo);
2693
2694out_unlock:
2695 return;
2696
2697out_no_emit:
2698 ttm_eu_backoff_reservation(&ticket, &validate_list);
2699out_no_reserve:
2700 ttm_bo_unref(&query_val.bo);
2701 ttm_bo_unref(&pinned_val.bo);
2702 ttm_bo_unref(&dev_priv->pinned_bo);
2703}
2704
2705/**
2706 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2707 * query bo.
2708 *
2709 * @dev_priv: The device private structure.
2710 *
2711 * This function should be used to unpin the pinned query bo, or
2712 * as a query barrier when we need to make sure that all queries have
2713 * finished before the next fifo command. (For example on hardware
2714 * context destructions where the hardware may otherwise leak unfinished
2715 * queries).
2716 *
2717 * This function does not return any failure codes, but makes attempts
2718 * to do safe unpinning in case of errors.
2719 *
2720 * The function will synchronize on the previous query barrier, and will
2721 * thus not finish until that barrier has executed.
2722 */
2723void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
2724{
2725 mutex_lock(&dev_priv->cmdbuf_mutex);
2726 if (dev_priv->query_cid_valid)
2727 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2728 mutex_unlock(&dev_priv->cmdbuf_mutex);
2729}
2730
2731
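/*
 * Sketch of how user-space typically reaches vmw_execbuf_ioctl(), for
 * illustration only. It assumes the DRM_IOCTL_VMW_EXECBUF request code
 * from the vmwgfx uapi header and libdrm's drmIoctl(); only the argument
 * fields actually consumed below are shown:
 *
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands     = (unsigned long) cmd_buf,
 *		.command_size = cmd_bytes,
 *		.throttle_us  = 0,
 *		.fence_rep    = (unsigned long) &fence_rep,
 *		.version      = DRM_VMW_EXECBUF_VERSION,
 *	};
 *	ret = drmIoctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg);
 */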
2732int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
2733 struct drm_file *file_priv)
2734{
2735 struct vmw_private *dev_priv = vmw_priv(dev);
2736 struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
2737 int ret;
2738
2739 /*
2740 * This will allow us to extend the ioctl argument while
2741 * maintaining backwards compatibility:
2742 * We take different code paths depending on the value of
2743 * arg->version.
2744 */
2745
2746 if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
2747 DRM_ERROR("Incorrect execbuf version.\n");
2748 DRM_ERROR("You're running outdated experimental "
2749 			  "vmwgfx user-space drivers.\n");
2750 return -EINVAL;
2751 }
2752
2753 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
2754 if (unlikely(ret != 0))
2755 return ret;
2756
2757 ret = vmw_execbuf_process(file_priv, dev_priv,
2758 (void __user *)(unsigned long)arg->commands,
2759 NULL, arg->command_size, arg->throttle_us,
2760 (void __user *)(unsigned long)arg->fence_rep,
2761 NULL);
2762
2763 if (unlikely(ret != 0))
2764 goto out_unlock;
2765
2766 vmw_kms_cursor_post_execbuf(dev_priv);
2767
2768out_unlock:
2769 ttm_read_unlock(&dev_priv->reservation_sem);
2770 return ret;
2771}