/**************************************************************************
 *
 * Copyright © 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset into the command buffer, in units of 4-byte entries,
 * of the id that needs fixup.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};
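
/*
 * A minimal sketch of how a relocation is consumed (see
 * vmw_resource_relocations_apply() below): since @offset counts 4-byte
 * entries, a resource id located 16 bytes into the command buffer is
 * patched as
 *
 *	cb[4] = rel->res->id;
 *
 * while a NULL @res patches the location with SVGA_3D_CMD_NOP instead.
 */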

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @switching_backup: The command stream provides a new backup buffer for a
 * resource.
 * @no_buffer_needed: This means @switching_backup is true on first buffer
 * reference. So resource reservation does not need to allocate a backup
 * buffer for the resource.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	u32 first_usage : 1;
	u32 switching_backup : 1;
	u32 no_buffer_needed : 1;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back used to validate the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled only if guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable)}
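
/*
 * VMW_CMD_DEF() builds one entry of the command verifier table, indexed
 * by SVGA command id. A (hypothetical) entry might look like:
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *		    true, false, false),
 *
 * i.e. the command is allowed from the execbuf ioctl and its verifier is
 * used whether or not guest-backed objects are available.
 */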

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p);
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node);

/**
 * vmw_resources_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @sw_context: pointer to the software context
 * @backoff: Whether command submission failed.
 */
static void vmw_resources_unreserve(struct vmw_sw_context *sw_context,
				    bool backoff)
{
	struct vmw_resource_val_node *val;
	struct list_head *list = &sw_context->resource_list;

	if (sw_context->dx_query_mob && !backoff)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		bool switch_backup =
			(backoff) ? false : val->switching_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_binding_state_commit
					(vmw_context_binding_state(val->res),
					 val->staged_bindings);
			}

			if (val->staged_bindings != sw_context->staged_bindings)
				vmw_binding_state_free(val->staged_bindings);
			else
				sw_context->staged_bindings_inuse = false;
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, switch_backup, val->new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
 * added to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The validation context.
 * @node: The validation node holding this context.
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource_val_node *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings =
			vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged_bindings)) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = PTR_ERR(node->staged_bindings);
			node->staged_bindings = NULL;
			goto out_err;
		}
	} else {
		node->staged_bindings = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	return 0;
out_err:
	return ret;
}

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	node->res = vmw_resource_reference(res);
	node->first_usage = true;
	if (unlikely(p_node != NULL))
		*p_node = node;

	if (!dev_priv->has_mob) {
		list_add_tail(&node->head, &sw_context->resource_list);
		return 0;
	}

	switch (vmw_res_type(res)) {
	case vmw_res_context:
	case vmw_res_dx_context:
		list_add(&node->head, &sw_context->ctx_resource_list);
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node);
		break;
	case vmw_res_cotable:
		list_add_tail(&node->head, &sw_context->ctx_resource_list);
		break;
	default:
		list_add_tail(&node->head, &sw_context->resource_list);
		break;
	}

	return ret;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to
 * to the validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise
	 * it may be swapped out when the view is validated.
	 */
	ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL);
	if (ret)
		return ret;

	return vmw_resource_val_add(sw_context, view, NULL);
}

/**
 * vmw_view_id_val_add - Look up a view and add it, together with the
 * surface it's pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on,
 * or scheduled for creation on. If there is no DX context set, the function
 * will return -EINVAL. Otherwise returns 0 on success and a negative error
 * code on failure.
 */
static int vmw_view_id_val_add(struct vmw_sw_context *sw_context,
			       enum vmw_view_type view_type, u32 id)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return PTR_ERR(view);

	ret = vmw_view_res_val_add(sw_context, view);
	vmw_resource_unreference(&view);

	return ret;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_resource_val_add(sw_context, res, NULL);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		/* entry->res is not refcounted */
		res = vmw_resource_reference_unless_doomed(entry->res);
		if (unlikely(res == NULL))
			continue;

		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_resource_val_add(sw_context, entry->res,
						   NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_dma_buffer *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_bo_to_validate_list(sw_context,
						      dx_query_mob,
						      true, NULL);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head) {
		if (likely(rel->res != NULL))
			cb[rel->offset] = rel->res->id;
		else
			cb[rel->offset] = SVGA_3D_CMD_NOP;
	}
}

469
470static int vmw_cmd_invalid(struct vmw_private *dev_priv,
471 struct vmw_sw_context *sw_context,
472 SVGA3dCmdHeader *header)
473{
474 return capable(CAP_SYS_ADMIN) ? : -EINVAL;
475}
476
477static int vmw_cmd_ok(struct vmw_private *dev_priv,
478 struct vmw_sw_context *sw_context,
479 SVGA3dCmdHeader *header)
480{
481 return 0;
482}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @vbo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct vmw_dma_buffer *vbo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) vbo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) vbo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(&vbo->base);
		val_buf->shared = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret = 0;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, true, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (sw_context->dx_query_mob) {
		struct vmw_dma_buffer *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *backup = res->backup;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}

		/* Check if the resource switched backup buffer */
		if (backup && res->backup && (backup != res->backup)) {
			struct vmw_dma_buffer *vbo = res->backup;

			ret = vmw_bo_to_validate_list
				(sw_context, vbo,
				 vmw_resource_needs_backup(res), NULL);
			if (ret) {
				ttm_bo_unreserve(&vbo->base);
				return ret;
			}
		}
	}
	return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 uint32_t *id_loc,
				 struct vmw_resource *res,
				 struct vmw_resource_val_node **p_val)
{
	int ret;
	struct vmw_resource_val_node *node;

	if (p_val)
		*p_val = NULL;
	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id_loc - sw_context->buf_start);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		return ret;

	if (p_val)
		*p_val = node;

	return 0;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id_loc == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id_loc - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      *id_loc,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id_loc);
		dump_stack();
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id_loc;

	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc,
				    res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_dma_buffer *dx_query_mob;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindAllQuery body;
	} *cmd;

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL) {
		DRM_ERROR("Failed to rebind queries.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		if (unlikely(!val->staged_bindings))
			break;

		ret = vmw_binding_rebind_all
			(vmw_context_binding_state(val->res));
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->res);
		if (ret != 0)
			return ret;
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context
 * binding state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_cmdbuf_res_manager *man;
	u32 i;
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	man = sw_context->man;
	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_lookup(man, view_type, view_ids[i]);
			if (IS_ERR(view)) {
				DRM_ERROR("View not found.\n");
				return PTR_ERR(view);
			}

			ret = vmw_view_res_val_add(sw_context, view);
			if (ret) {
				DRM_ERROR("Could not add view to "
					  "validation list.\n");
				vmw_resource_unreference(&view);
				return ret;
			}
		}
		binding.bi.ctx = ctx_node->res;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
				shader_slot, binding.slot);
		if (view)
			vmw_resource_unreference(&view);
	}

	return 0;
}
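
/*
 * A (hypothetical) sketch of a caller validating a set-shader-resources
 * command, assuming the view ids follow the fixed-size command body in
 * the command stream:
 *
 *	ret = vmw_view_bindings_add(sw_context, vmw_view_sr,
 *				    vmw_ctx_binding_sr, shader_slot,
 *				    (uint32 *)&cmd[1], num_views,
 *				    cmd->body.startView);
 */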

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		uint32_t cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		DRM_ERROR("Illegal render target type %u.\n",
			  (unsigned) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;

		binding.bi.ctx = ctx_node->res;
		binding.bi.res = res_node ? res_node->res : NULL;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(ctx_node->staged_bindings,
				&binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBufferCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXPredCopyRegion body;
	} *cmd;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_dma_buffer *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_dmabuf_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_dmabuf_reference(sw_context->cur_query_bo);
		}
	}
}
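
/*
 * A rough sketch of the expected ordering in the submission path,
 * assuming the caller honors the fencing rule described above:
 *
 *	vmw_query_bo_switch_prepare(dev_priv, new_query_bo, sw_context);
 *	... validate and submit the command batch ...
 *	vmw_query_bo_switch_commit(dev_priv, sw_context);
 *	... emit a fence covering both the old and new query buffers ...
 */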

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
				     NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
				     NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, vmw_bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_dx_define_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineQuery q;
	} *cmd;
	int ret;
	struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *cotable_res;

	if (ctx_node == NULL) {
		DRM_ERROR("DX Context not set for query.\n");
		return -EINVAL;
	}

	cmd = container_of(header, struct vmw_dx_define_query_cmd, header);

	if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
	vmw_resource_unreference(&cotable_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID
 * with its backing MOB. In this function, we take the user mode
 * MOB ID and use vmw_translate_mob_ptr() to translate it to its
 * kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dx_bind_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindQuery q;
	} *cmd;
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
				    &vmw_bo);
	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->res;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		ret = -EINVAL;
		goto out_no_surface;	/* drop the vmw_bo reference */
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
		((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;

			binding.bi.ctx = ctx_node->res;
			binding.bi.res = res_node ? res_node->res : NULL;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
					0, binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}
1855
1856
1857/**
1858 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1859 * switching
1860 *
1861 * @dev_priv: Pointer to a device private struct.
1862 * @sw_context: The software context being used for this batch.
1863 * @val_node: The validation node representing the resource.
1864 * @buf_id: Pointer to the user-space backup buffer handle in the command
1865 * stream.
1866 * @backup_offset: Offset of backup into MOB.
1867 *
1868 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
1871 */
1872static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1873 struct vmw_sw_context *sw_context,
1874 struct vmw_resource_val_node *val_node,
1875 uint32_t *buf_id,
1876 unsigned long backup_offset)
1877{
1878 struct vmw_dma_buffer *dma_buf;
1879 int ret;
1880
1881 ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
1882 if (ret)
1883 return ret;
1884
1885 val_node->switching_backup = true;
1886 if (val_node->first_usage)
1887 val_node->no_buffer_needed = true;
1888
1889 vmw_dmabuf_unreference(&val_node->new_backup);
1890 val_node->new_backup = dma_buf;
1891 val_node->new_backup_offset = backup_offset;
1892
1893 return 0;
1894}
1895
1897/**
1898 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1899 *
1900 * @dev_priv: Pointer to a device private struct.
1901 * @sw_context: The software context being used for this batch.
1902 * @res_type: The resource type.
1903 * @converter: Information about user-space binding for this resource type.
1904 * @res_id: Pointer to the user-space resource handle in the command stream.
1905 * @buf_id: Pointer to the user-space backup buffer handle in the command
1906 * stream.
1907 * @backup_offset: Offset of backup into MOB.
1908 *
1909 * This function prepares for registering a switch of backup buffers
1910 * in the resource metadata just prior to unreserving. It's basically a wrapper
1911 * around vmw_cmd_res_switch_backup with a different interface.
1912 */
1913static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1914 struct vmw_sw_context *sw_context,
1915 enum vmw_res_type res_type,
1916 const struct vmw_user_resource_conv
1917 *converter,
1918 uint32_t *res_id,
1919 uint32_t *buf_id,
1920 unsigned long backup_offset)
1921{
1922 struct vmw_resource_val_node *val_node;
1923 int ret;
1924
1925 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1926 converter, res_id, &val_node);
1927 if (ret)
1928 return ret;
1929
1930 return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node,
1931 buf_id, backup_offset);
1932}
1933
1934/**
1935 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
1936 * command
1937 *
1938 * @dev_priv: Pointer to a device private struct.
1939 * @sw_context: The software context being used for this batch.
1940 * @header: Pointer to the command header in the command stream.
1941 */
1942static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1943 struct vmw_sw_context *sw_context,
1944 SVGA3dCmdHeader *header)
1945{
1946 struct vmw_bind_gb_surface_cmd {
1947 SVGA3dCmdHeader header;
1948 SVGA3dCmdBindGBSurface body;
1949 } *cmd;
1950
1951 cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
1952
1953 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1954 user_surface_converter,
1955 &cmd->body.sid, &cmd->body.mobid,
1956 0);
1957}
1958
1959/**
1960 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
1961 * command
1962 *
1963 * @dev_priv: Pointer to a device private struct.
1964 * @sw_context: The software context being used for this batch.
1965 * @header: Pointer to the command header in the command stream.
1966 */
1967static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1968 struct vmw_sw_context *sw_context,
1969 SVGA3dCmdHeader *header)
1970{
1971 struct vmw_gb_surface_cmd {
1972 SVGA3dCmdHeader header;
1973 SVGA3dCmdUpdateGBImage body;
1974 } *cmd;
1975
1976 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1977
1978 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1979 user_surface_converter,
1980 &cmd->body.image.sid, NULL);
1981}
1982
1983/**
1984 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
1985 * command
1986 *
1987 * @dev_priv: Pointer to a device private struct.
1988 * @sw_context: The software context being used for this batch.
1989 * @header: Pointer to the command header in the command stream.
1990 */
1991static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1992 struct vmw_sw_context *sw_context,
1993 SVGA3dCmdHeader *header)
1994{
1995 struct vmw_gb_surface_cmd {
1996 SVGA3dCmdHeader header;
1997 SVGA3dCmdUpdateGBSurface body;
1998 } *cmd;
1999
2000 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2001
2002 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2003 user_surface_converter,
2004 &cmd->body.sid, NULL);
2005}
2006
2007/**
2008 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
2009 * command
2010 *
2011 * @dev_priv: Pointer to a device private struct.
2012 * @sw_context: The software context being used for this batch.
2013 * @header: Pointer to the command header in the command stream.
2014 */
2015static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
2016 struct vmw_sw_context *sw_context,
2017 SVGA3dCmdHeader *header)
2018{
2019 struct vmw_gb_surface_cmd {
2020 SVGA3dCmdHeader header;
2021 SVGA3dCmdReadbackGBImage body;
2022 } *cmd;
2023
2024 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2025
2026 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2027 user_surface_converter,
2028 &cmd->body.image.sid, NULL);
2029}
2030
2031/**
2032 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
2033 * command
2034 *
2035 * @dev_priv: Pointer to a device private struct.
2036 * @sw_context: The software context being used for this batch.
2037 * @header: Pointer to the command header in the command stream.
2038 */
2039static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
2040 struct vmw_sw_context *sw_context,
2041 SVGA3dCmdHeader *header)
2042{
2043 struct vmw_gb_surface_cmd {
2044 SVGA3dCmdHeader header;
2045 SVGA3dCmdReadbackGBSurface body;
2046 } *cmd;
2047
2048 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2049
2050 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2051 user_surface_converter,
2052 &cmd->body.sid, NULL);
2053}
2054
2055/**
2056 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
2057 * command
2058 *
2059 * @dev_priv: Pointer to a device private struct.
2060 * @sw_context: The software context being used for this batch.
2061 * @header: Pointer to the command header in the command stream.
2062 */
2063static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
2064 struct vmw_sw_context *sw_context,
2065 SVGA3dCmdHeader *header)
2066{
2067 struct vmw_gb_surface_cmd {
2068 SVGA3dCmdHeader header;
2069 SVGA3dCmdInvalidateGBImage body;
2070 } *cmd;
2071
2072 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2073
2074 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2075 user_surface_converter,
2076 &cmd->body.image.sid, NULL);
2077}
2078
2079/**
2080 * vmw_cmd_invalidate_gb_surface - Validate an
2081 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
2082 *
2083 * @dev_priv: Pointer to a device private struct.
2084 * @sw_context: The software context being used for this batch.
2085 * @header: Pointer to the command header in the command stream.
2086 */
2087static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
2088 struct vmw_sw_context *sw_context,
2089 SVGA3dCmdHeader *header)
2090{
2091 struct vmw_gb_surface_cmd {
2092 SVGA3dCmdHeader header;
2093 SVGA3dCmdInvalidateGBSurface body;
2094 } *cmd;
2095
2096 cmd = container_of(header, struct vmw_gb_surface_cmd, header);
2097
2098 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2099 user_surface_converter,
2100 &cmd->body.sid, NULL);
2101}
2102
2104/**
2105 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
2106 * command
2107 *
2108 * @dev_priv: Pointer to a device private struct.
2109 * @sw_context: The software context being used for this batch.
2110 * @header: Pointer to the command header in the command stream.
2111 */
2112static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
2113 struct vmw_sw_context *sw_context,
2114 SVGA3dCmdHeader *header)
2115{
2116 struct vmw_shader_define_cmd {
2117 SVGA3dCmdHeader header;
2118 SVGA3dCmdDefineShader body;
2119 } *cmd;
2120 int ret;
2121 size_t size;
2122 struct vmw_resource_val_node *val;
2123
2124 cmd = container_of(header, struct vmw_shader_define_cmd,
2125 header);
2126
2127 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2128 user_context_converter, &cmd->body.cid,
2129 &val);
2130 if (unlikely(ret != 0))
2131 return ret;
2132
2133 if (unlikely(!dev_priv->has_mob))
2134 return 0;
2135
2136 size = cmd->header.size - sizeof(cmd->body);
2137 ret = vmw_compat_shader_add(dev_priv,
2138 vmw_context_res_man(val->res),
2139 cmd->body.shid, cmd + 1,
2140 cmd->body.type, size,
2141 &sw_context->staged_cmd_res);
2142 if (unlikely(ret != 0))
2143 return ret;
2144
2145 return vmw_resource_relocation_add(&sw_context->res_relocations,
2146 NULL, &cmd->header.id -
2147 sw_context->buf_start);
2150}
2151
2152/**
2153 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
2154 * command
2155 *
2156 * @dev_priv: Pointer to a device private struct.
2157 * @sw_context: The software context being used for this batch.
2158 * @header: Pointer to the command header in the command stream.
2159 */
2160static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
2161 struct vmw_sw_context *sw_context,
2162 SVGA3dCmdHeader *header)
2163{
2164 struct vmw_shader_destroy_cmd {
2165 SVGA3dCmdHeader header;
2166 SVGA3dCmdDestroyShader body;
2167 } *cmd;
2168 int ret;
2169 struct vmw_resource_val_node *val;
2170
2171 cmd = container_of(header, struct vmw_shader_destroy_cmd,
2172 header);
2173
2174 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2175 user_context_converter, &cmd->body.cid,
2176 &val);
2177 if (unlikely(ret != 0))
2178 return ret;
2179
2180 if (unlikely(!dev_priv->has_mob))
2181 return 0;
2182
2183 ret = vmw_shader_remove(vmw_context_res_man(val->res),
2184 cmd->body.shid,
2185 cmd->body.type,
2186 &sw_context->staged_cmd_res);
2187 if (unlikely(ret != 0))
2188 return ret;
2189
2190 return vmw_resource_relocation_add(&sw_context->res_relocations,
2191 NULL, &cmd->header.id -
2192 sw_context->buf_start);
2195}
2196
2197/**
2198 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
2199 * command
2200 *
2201 * @dev_priv: Pointer to a device private struct.
2202 * @sw_context: The software context being used for this batch.
2203 * @header: Pointer to the command header in the command stream.
2204 */
2205static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
2206 struct vmw_sw_context *sw_context,
2207 SVGA3dCmdHeader *header)
2208{
2209 struct vmw_set_shader_cmd {
2210 SVGA3dCmdHeader header;
2211 SVGA3dCmdSetShader body;
2212 } *cmd;
2213 struct vmw_resource_val_node *ctx_node, *res_node = NULL;
2214 struct vmw_ctx_bindinfo_shader binding;
2215 struct vmw_resource *res = NULL;
2216 int ret;
2217
2218 cmd = container_of(header, struct vmw_set_shader_cmd,
2219 header);
2220
2221 if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
2222 DRM_ERROR("Illegal shader type %u.\n",
2223 (unsigned) cmd->body.type);
2224 return -EINVAL;
2225 }
2226
2227 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2228 user_context_converter, &cmd->body.cid,
2229 &ctx_node);
2230 if (unlikely(ret != 0))
2231 return ret;
2232
2233 if (!dev_priv->has_mob)
2234 return 0;
2235
2236 if (cmd->body.shid != SVGA3D_INVALID_ID) {
2237 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2238 cmd->body.shid,
2239 cmd->body.type);
2240
2241 if (!IS_ERR(res)) {
2242 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
2243 &cmd->body.shid, res,
2244 &res_node);
2245 vmw_resource_unreference(&res);
2246 if (unlikely(ret != 0))
2247 return ret;
2248 }
2249 }
2250
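	/*
	 * The shader was not found among those defined earlier in this
	 * context's command stream, so fall back to interpreting shid as
	 * a user-space shader object handle.
	 */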
2251 if (!res_node) {
2252 ret = vmw_cmd_res_check(dev_priv, sw_context,
2253 vmw_res_shader,
2254 user_shader_converter,
2255 &cmd->body.shid, &res_node);
2256 if (unlikely(ret != 0))
2257 return ret;
2258 }
2259
2260 binding.bi.ctx = ctx_node->res;
2261 binding.bi.res = res_node ? res_node->res : NULL;
2262 binding.bi.bt = vmw_ctx_binding_shader;
2263 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2264 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2265 binding.shader_slot, 0);
2266 return 0;
2267}
2268
2269/**
2270 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
2271 * command
2272 *
2273 * @dev_priv: Pointer to a device private struct.
2274 * @sw_context: The software context being used for this batch.
2275 * @header: Pointer to the command header in the command stream.
2276 */
2277static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2278 struct vmw_sw_context *sw_context,
2279 SVGA3dCmdHeader *header)
2280{
2281 struct vmw_set_shader_const_cmd {
2282 SVGA3dCmdHeader header;
2283 SVGA3dCmdSetShaderConst body;
2284 } *cmd;
2285 int ret;
2286
2287 cmd = container_of(header, struct vmw_set_shader_const_cmd,
2288 header);
2289
2290 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2291 user_context_converter, &cmd->body.cid,
2292 NULL);
2293 if (unlikely(ret != 0))
2294 return ret;
2295
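	/*
	 * On guest-backed devices the legacy command is translated in
	 * place: only the command id is rewritten and the payload is
	 * reused as an inline GB shader-constant update.
	 */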
2296 if (dev_priv->has_mob)
2297 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2298
2299 return 0;
2300}
2301
2302/**
2303 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
2304 * command
2305 *
2306 * @dev_priv: Pointer to a device private struct.
2307 * @sw_context: The software context being used for this batch.
2308 * @header: Pointer to the command header in the command stream.
2309 */
2310static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2311 struct vmw_sw_context *sw_context,
2312 SVGA3dCmdHeader *header)
2313{
2314 struct vmw_bind_gb_shader_cmd {
2315 SVGA3dCmdHeader header;
2316 SVGA3dCmdBindGBShader body;
2317 } *cmd;
2318
2319 cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
2320 header);
2321
2322 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2323 user_shader_converter,
2324 &cmd->body.shid, &cmd->body.mobid,
2325 cmd->body.offsetInBytes);
2326}
2327
2328/**
2329 * vmw_cmd_dx_set_single_constant_buffer - Validate an
2330 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2331 *
2332 * @dev_priv: Pointer to a device private struct.
2333 * @sw_context: The software context being used for this batch.
2334 * @header: Pointer to the command header in the command stream.
2335 */
2336static int
2337vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2338 struct vmw_sw_context *sw_context,
2339 SVGA3dCmdHeader *header)
2340{
2341 struct {
2342 SVGA3dCmdHeader header;
2343 SVGA3dCmdDXSetSingleConstantBuffer body;
2344 } *cmd;
2345 struct vmw_resource_val_node *res_node = NULL;
2346 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2347 struct vmw_ctx_bindinfo_cb binding;
2348 int ret;
2349
2350 if (unlikely(ctx_node == NULL)) {
2351 DRM_ERROR("DX Context not set.\n");
2352 return -EINVAL;
2353 }
2354
2355 cmd = container_of(header, typeof(*cmd), header);
2356 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2357 user_surface_converter,
2358 &cmd->body.sid, &res_node);
2359 if (unlikely(ret != 0))
2360 return ret;
2361
2362 binding.bi.ctx = ctx_node->res;
2363 binding.bi.res = res_node ? res_node->res : NULL;
2364 binding.bi.bt = vmw_ctx_binding_cb;
2365 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2366 binding.offset = cmd->body.offsetInBytes;
2367 binding.size = cmd->body.sizeInBytes;
2368 binding.slot = cmd->body.slot;
2369
2370 if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
2371 binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2372 DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
2373 (unsigned) cmd->body.type,
2374 (unsigned) binding.slot);
2375 return -EINVAL;
2376 }
2377
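	/*
	 * Stage the binding on the context's validation node; staged
	 * bindings are committed to the device context once the whole
	 * command stream has validated.
	 */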
2378 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2379 binding.shader_slot, binding.slot);
2380
2381 return 0;
2382}
2383
2384/**
2385 * vmw_cmd_dx_set_shader_res - Validate an
2386 * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
2387 *
2388 * @dev_priv: Pointer to a device private struct.
2389 * @sw_context: The software context being used for this batch.
2390 * @header: Pointer to the command header in the command stream.
2391 */
2392static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2393 struct vmw_sw_context *sw_context,
2394 SVGA3dCmdHeader *header)
2395{
2396 struct {
2397 SVGA3dCmdHeader header;
2398 SVGA3dCmdDXSetShaderResources body;
2399 } *cmd = container_of(header, typeof(*cmd), header);
2400 u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2401 sizeof(SVGA3dShaderResourceViewId);
2402
2403 if ((u64) cmd->body.startView + (u64) num_sr_view >
2404 (u64) SVGA3D_DX_MAX_SRVIEWS ||
2405 cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2406 DRM_ERROR("Invalid shader binding.\n");
2407 return -EINVAL;
2408 }
2409
2410 return vmw_view_bindings_add(sw_context, vmw_view_sr,
2411 vmw_ctx_binding_sr,
2412 cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2413 (void *) &cmd[1], num_sr_view,
2414 cmd->body.startView);
2415}
2416
2417/**
2418 * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
2419 * command
2420 *
2421 * @dev_priv: Pointer to a device private struct.
2422 * @sw_context: The software context being used for this batch.
2423 * @header: Pointer to the command header in the command stream.
2424 */
2425static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2426 struct vmw_sw_context *sw_context,
2427 SVGA3dCmdHeader *header)
2428{
2429 struct {
2430 SVGA3dCmdHeader header;
2431 SVGA3dCmdDXSetShader body;
2432 } *cmd;
2433 struct vmw_resource *res = NULL;
2434 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2435 struct vmw_ctx_bindinfo_shader binding;
2436 int ret = 0;
2437
2438 if (unlikely(ctx_node == NULL)) {
2439 DRM_ERROR("DX Context not set.\n");
2440 return -EINVAL;
2441 }
2442
2443 cmd = container_of(header, typeof(*cmd), header);
2444
2445 if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
2446 DRM_ERROR("Illegal shader type %u.\n",
2447 (unsigned) cmd->body.type);
2448 return -EINVAL;
2449 }
2450
2451 if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2452 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2453 if (IS_ERR(res)) {
2454 DRM_ERROR("Could not find shader for binding.\n");
2455 return PTR_ERR(res);
2456 }
2457
2458 ret = vmw_resource_val_add(sw_context, res, NULL);
2459 if (ret)
2460 goto out_unref;
2461 }
2462
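	/*
	 * With an SVGA3D_INVALID_ID shader id, res stays NULL and the
	 * staged binding records an unbind of the shader slot.
	 */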
2463 binding.bi.ctx = ctx_node->res;
2464 binding.bi.res = res;
2465 binding.bi.bt = vmw_ctx_binding_dx_shader;
2466 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2467
2468 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2469 binding.shader_slot, 0);
2470out_unref:
2471 if (res)
2472 vmw_resource_unreference(&res);
2473
2474 return ret;
2475}
2476
2477/**
 * vmw_cmd_dx_set_vertex_buffers - Validate an
2479 * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
2480 *
2481 * @dev_priv: Pointer to a device private struct.
2482 * @sw_context: The software context being used for this batch.
2483 * @header: Pointer to the command header in the command stream.
2484 */
2485static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2486 struct vmw_sw_context *sw_context,
2487 SVGA3dCmdHeader *header)
2488{
2489 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2490 struct vmw_ctx_bindinfo_vb binding;
2491 struct vmw_resource_val_node *res_node;
2492 struct {
2493 SVGA3dCmdHeader header;
2494 SVGA3dCmdDXSetVertexBuffers body;
2495 SVGA3dVertexBuffer buf[];
2496 } *cmd;
2497 int i, ret, num;
2498
2499 if (unlikely(ctx_node == NULL)) {
2500 DRM_ERROR("DX Context not set.\n");
2501 return -EINVAL;
2502 }
2503
2504 cmd = container_of(header, typeof(*cmd), header);
2505 num = (cmd->header.size - sizeof(cmd->body)) /
2506 sizeof(SVGA3dVertexBuffer);
2507 if ((u64)num + (u64)cmd->body.startBuffer >
2508 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2509 DRM_ERROR("Invalid number of vertex buffers.\n");
2510 return -EINVAL;
2511 }
2512
2513 for (i = 0; i < num; i++) {
2514 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2515 user_surface_converter,
2516 &cmd->buf[i].sid, &res_node);
2517 if (unlikely(ret != 0))
2518 return ret;
2519
2520 binding.bi.ctx = ctx_node->res;
2521 binding.bi.bt = vmw_ctx_binding_vb;
2522 binding.bi.res = ((res_node) ? res_node->res : NULL);
2523 binding.offset = cmd->buf[i].offset;
2524 binding.stride = cmd->buf[i].stride;
2525 binding.slot = i + cmd->body.startBuffer;
2526
2527 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2528 0, binding.slot);
2529 }
2530
2531 return 0;
2532}
2533
2534/**
 * vmw_cmd_dx_set_index_buffer - Validate an
 * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command.
2537 *
2538 * @dev_priv: Pointer to a device private struct.
2539 * @sw_context: The software context being used for this batch.
2540 * @header: Pointer to the command header in the command stream.
2541 */
2542static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2543 struct vmw_sw_context *sw_context,
2544 SVGA3dCmdHeader *header)
2545{
2546 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2547 struct vmw_ctx_bindinfo_ib binding;
2548 struct vmw_resource_val_node *res_node;
2549 struct {
2550 SVGA3dCmdHeader header;
2551 SVGA3dCmdDXSetIndexBuffer body;
2552 } *cmd;
2553 int ret;
2554
2555 if (unlikely(ctx_node == NULL)) {
2556 DRM_ERROR("DX Context not set.\n");
2557 return -EINVAL;
2558 }
2559
2560 cmd = container_of(header, typeof(*cmd), header);
2561 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2562 user_surface_converter,
2563 &cmd->body.sid, &res_node);
2564 if (unlikely(ret != 0))
2565 return ret;
2566
2567 binding.bi.ctx = ctx_node->res;
2568 binding.bi.res = ((res_node) ? res_node->res : NULL);
2569 binding.bi.bt = vmw_ctx_binding_ib;
2570 binding.offset = cmd->body.offset;
2571 binding.format = cmd->body.format;
2572
2573 vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0);
2574
2575 return 0;
2576}
2577
2578/**
 * vmw_cmd_dx_set_rendertargets - Validate an
2580 * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
2581 *
2582 * @dev_priv: Pointer to a device private struct.
2583 * @sw_context: The software context being used for this batch.
2584 * @header: Pointer to the command header in the command stream.
2585 */
2586static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2587 struct vmw_sw_context *sw_context,
2588 SVGA3dCmdHeader *header)
2589{
2590 struct {
2591 SVGA3dCmdHeader header;
2592 SVGA3dCmdDXSetRenderTargets body;
2593 } *cmd = container_of(header, typeof(*cmd), header);
2594 int ret;
2595 u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2596 sizeof(SVGA3dRenderTargetViewId);
2597
2598 if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
2599 DRM_ERROR("Invalid DX Rendertarget binding.\n");
2600 return -EINVAL;
2601 }
2602
2603 ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
2604 vmw_ctx_binding_ds, 0,
2605 &cmd->body.depthStencilViewId, 1, 0);
2606 if (ret)
2607 return ret;
2608
2609 return vmw_view_bindings_add(sw_context, vmw_view_rt,
2610 vmw_ctx_binding_dx_rt, 0,
2611 (void *)&cmd[1], num_rt_view, 0);
2612}
2613
2614/**
2615 * vmw_cmd_dx_clear_rendertarget_view - Validate an
2616 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2617 *
2618 * @dev_priv: Pointer to a device private struct.
2619 * @sw_context: The software context being used for this batch.
2620 * @header: Pointer to the command header in the command stream.
2621 */
2622static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2623 struct vmw_sw_context *sw_context,
2624 SVGA3dCmdHeader *header)
2625{
2626 struct {
2627 SVGA3dCmdHeader header;
2628 SVGA3dCmdDXClearRenderTargetView body;
2629 } *cmd = container_of(header, typeof(*cmd), header);
2630
2631 return vmw_view_id_val_add(sw_context, vmw_view_rt,
2632 cmd->body.renderTargetViewId);
2633}
2634
2635/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate an
2637 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2638 *
2639 * @dev_priv: Pointer to a device private struct.
2640 * @sw_context: The software context being used for this batch.
2641 * @header: Pointer to the command header in the command stream.
2642 */
2643static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2644 struct vmw_sw_context *sw_context,
2645 SVGA3dCmdHeader *header)
2646{
2647 struct {
2648 SVGA3dCmdHeader header;
2649 SVGA3dCmdDXClearDepthStencilView body;
2650 } *cmd = container_of(header, typeof(*cmd), header);
2651
2652 return vmw_view_id_val_add(sw_context, vmw_view_ds,
2653 cmd->body.depthStencilViewId);
2654}
2655
2656static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2657 struct vmw_sw_context *sw_context,
2658 SVGA3dCmdHeader *header)
2659{
2660 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2661 struct vmw_resource_val_node *srf_node;
2662 struct vmw_resource *res;
2663 enum vmw_view_type view_type;
2664 int ret;
2665 /*
2666 * This is based on the fact that all affected define commands have
2667 * the same initial command body layout.
2668 */
2669 struct {
2670 SVGA3dCmdHeader header;
2671 uint32 defined_id;
2672 uint32 sid;
2673 } *cmd;
2674
2675 if (unlikely(ctx_node == NULL)) {
2676 DRM_ERROR("DX Context not set.\n");
2677 return -EINVAL;
2678 }
2679
2680 view_type = vmw_view_cmd_to_type(header->id);
2681 cmd = container_of(header, typeof(*cmd), header);
2682 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2683 user_surface_converter,
2684 &cmd->sid, &srf_node);
2685 if (unlikely(ret != 0))
2686 return ret;
2687
2688 res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]);
2689 ret = vmw_cotable_notify(res, cmd->defined_id);
2690 vmw_resource_unreference(&res);
2691 if (unlikely(ret != 0))
2692 return ret;
2693
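	/*
	 * Stage the new view. vmw_view_add() keeps a copy of the complete
	 * define command (header included) so the define can be emitted
	 * to the device when the view resource is validated.
	 */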
2694 return vmw_view_add(sw_context->man,
2695 ctx_node->res,
2696 srf_node->res,
2697 view_type,
2698 cmd->defined_id,
2699 header,
2700 header->size + sizeof(*header),
2701 &sw_context->staged_cmd_res);
2702}
2703
2704/**
2705 * vmw_cmd_dx_set_so_targets - Validate an
2706 * SVGA_3D_CMD_DX_SET_SOTARGETS command.
2707 *
2708 * @dev_priv: Pointer to a device private struct.
2709 * @sw_context: The software context being used for this batch.
2710 * @header: Pointer to the command header in the command stream.
2711 */
2712static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2713 struct vmw_sw_context *sw_context,
2714 SVGA3dCmdHeader *header)
2715{
2716 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2717 struct vmw_ctx_bindinfo_so binding;
2718 struct vmw_resource_val_node *res_node;
2719 struct {
2720 SVGA3dCmdHeader header;
2721 SVGA3dCmdDXSetSOTargets body;
2722 SVGA3dSoTarget targets[];
2723 } *cmd;
2724 int i, ret, num;
2725
2726 if (unlikely(ctx_node == NULL)) {
2727 DRM_ERROR("DX Context not set.\n");
2728 return -EINVAL;
2729 }
2730
2731 cmd = container_of(header, typeof(*cmd), header);
2732 num = (cmd->header.size - sizeof(cmd->body)) /
2733 sizeof(SVGA3dSoTarget);
2734
2735 if (num > SVGA3D_DX_MAX_SOTARGETS) {
2736 DRM_ERROR("Invalid DX SO binding.\n");
2737 return -EINVAL;
2738 }
2739
2740 for (i = 0; i < num; i++) {
2741 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2742 user_surface_converter,
2743 &cmd->targets[i].sid, &res_node);
2744 if (unlikely(ret != 0))
2745 return ret;
2746
2747 binding.bi.ctx = ctx_node->res;
2748 binding.bi.res = ((res_node) ? res_node->res : NULL);
		binding.bi.bt = vmw_ctx_binding_so;
2750 binding.offset = cmd->targets[i].offset;
2751 binding.size = cmd->targets[i].sizeInBytes;
2752 binding.slot = i;
2753
2754 vmw_binding_add(ctx_node->staged_bindings, &binding.bi,
2755 0, binding.slot);
2756 }
2757
2758 return 0;
2759}
2760
2761static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2762 struct vmw_sw_context *sw_context,
2763 SVGA3dCmdHeader *header)
2764{
2765 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2766 struct vmw_resource *res;
2767 /*
2768 * This is based on the fact that all affected define commands have
2769 * the same initial command body layout.
2770 */
2771 struct {
2772 SVGA3dCmdHeader header;
2773 uint32 defined_id;
2774 } *cmd;
2775 enum vmw_so_type so_type;
2776 int ret;
2777
2778 if (unlikely(ctx_node == NULL)) {
2779 DRM_ERROR("DX Context not set.\n");
2780 return -EINVAL;
2781 }
2782
2783 so_type = vmw_so_cmd_to_type(header->id);
2784 res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]);
2785 cmd = container_of(header, typeof(*cmd), header);
2786 ret = vmw_cotable_notify(res, cmd->defined_id);
2787 vmw_resource_unreference(&res);
2788
2789 return ret;
2790}
2791
2792/**
2793 * vmw_cmd_dx_check_subresource - Validate an
2794 * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
2795 *
2796 * @dev_priv: Pointer to a device private struct.
2797 * @sw_context: The software context being used for this batch.
2798 * @header: Pointer to the command header in the command stream.
2799 */
2800static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2801 struct vmw_sw_context *sw_context,
2802 SVGA3dCmdHeader *header)
2803{
2804 struct {
2805 SVGA3dCmdHeader header;
2806 union {
2807 SVGA3dCmdDXReadbackSubResource r_body;
2808 SVGA3dCmdDXInvalidateSubResource i_body;
2809 SVGA3dCmdDXUpdateSubResource u_body;
2810 SVGA3dSurfaceId sid;
2811 };
2812 } *cmd;
2813
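	/*
	 * The union relies on all three subresource command bodies
	 * starting with the surface id; the BUILD_BUG_ONs below verify
	 * that layout assumption at compile time.
	 */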
2814 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2815 offsetof(typeof(*cmd), sid));
2816 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2817 offsetof(typeof(*cmd), sid));
2818 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2819 offsetof(typeof(*cmd), sid));
2820
2821 cmd = container_of(header, typeof(*cmd), header);
2822
2823 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2824 user_surface_converter,
2825 &cmd->sid, NULL);
2826}
2827
2828static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2829 struct vmw_sw_context *sw_context,
2830 SVGA3dCmdHeader *header)
2831{
2832 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2833
2834 if (unlikely(ctx_node == NULL)) {
2835 DRM_ERROR("DX Context not set.\n");
2836 return -EINVAL;
2837 }
2838
2839 return 0;
2840}
2841
2842/**
2843 * vmw_cmd_dx_view_remove - validate a view remove command and
2844 * schedule the view resource for removal.
2845 *
2846 * @dev_priv: Pointer to a device private struct.
2847 * @sw_context: The software context being used for this batch.
2848 * @header: Pointer to the command header in the command stream.
2849 *
2850 * Check that the view exists, and if it was not created using this
2851 * command batch, make sure it's validated (present in the device) so that
2852 * the remove command will not confuse the device.
2853 */
2854static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2855 struct vmw_sw_context *sw_context,
2856 SVGA3dCmdHeader *header)
2857{
2858 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2859 struct {
2860 SVGA3dCmdHeader header;
2861 union vmw_view_destroy body;
2862 } *cmd = container_of(header, typeof(*cmd), header);
2863 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2864 struct vmw_resource *view;
2865 int ret;
2866
2867 if (!ctx_node) {
2868 DRM_ERROR("DX Context not set.\n");
2869 return -EINVAL;
2870 }
2871
2872 ret = vmw_view_remove(sw_context->man,
2873 cmd->body.view_id, view_type,
2874 &sw_context->staged_cmd_res,
2875 &view);
2876 if (ret || !view)
2877 return ret;
2878
2879 /*
2880 * Add view to the validate list iff it was not created using this
2881 * command batch.
2882 */
2883 return vmw_view_res_val_add(sw_context, view);
2884}
2885
2886/**
2887 * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
2888 * command
2889 *
2890 * @dev_priv: Pointer to a device private struct.
2891 * @sw_context: The software context being used for this batch.
2892 * @header: Pointer to the command header in the command stream.
2893 */
2894static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2895 struct vmw_sw_context *sw_context,
2896 SVGA3dCmdHeader *header)
2897{
2898 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2899 struct vmw_resource *res;
2900 struct {
2901 SVGA3dCmdHeader header;
2902 SVGA3dCmdDXDefineShader body;
2903 } *cmd = container_of(header, typeof(*cmd), header);
2904 int ret;
2905
2906 if (!ctx_node) {
2907 DRM_ERROR("DX Context not set.\n");
2908 return -EINVAL;
2909 }
2910
2911 res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER);
2912 ret = vmw_cotable_notify(res, cmd->body.shaderId);
2913 vmw_resource_unreference(&res);
2914 if (ret)
2915 return ret;
2916
2917 return vmw_dx_shader_add(sw_context->man, ctx_node->res,
2918 cmd->body.shaderId, cmd->body.type,
2919 &sw_context->staged_cmd_res);
2920}
2921
2922/**
2923 * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
2924 * command
2925 *
2926 * @dev_priv: Pointer to a device private struct.
2927 * @sw_context: The software context being used for this batch.
2928 * @header: Pointer to the command header in the command stream.
2929 */
2930static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2931 struct vmw_sw_context *sw_context,
2932 SVGA3dCmdHeader *header)
2933{
2934 struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node;
2935 struct {
2936 SVGA3dCmdHeader header;
2937 SVGA3dCmdDXDestroyShader body;
2938 } *cmd = container_of(header, typeof(*cmd), header);
2939 int ret;
2940
2941 if (!ctx_node) {
2942 DRM_ERROR("DX Context not set.\n");
2943 return -EINVAL;
2944 }
2945
2946 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2947 &sw_context->staged_cmd_res);
2948 if (ret)
2949 DRM_ERROR("Could not find shader to remove.\n");
2950
2951 return ret;
2952}
2953
2954/**
2955 * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
2956 * command
2957 *
2958 * @dev_priv: Pointer to a device private struct.
2959 * @sw_context: The software context being used for this batch.
2960 * @header: Pointer to the command header in the command stream.
2961 */
2962static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2963 struct vmw_sw_context *sw_context,
2964 SVGA3dCmdHeader *header)
2965{
2966 struct vmw_resource_val_node *ctx_node;
2967 struct vmw_resource_val_node *res_node;
2968 struct vmw_resource *res;
2969 struct {
2970 SVGA3dCmdHeader header;
2971 SVGA3dCmdDXBindShader body;
2972 } *cmd = container_of(header, typeof(*cmd), header);
2973 int ret;
2974
2975 if (cmd->body.cid != SVGA3D_INVALID_ID) {
2976 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2977 user_context_converter,
2978 &cmd->body.cid, &ctx_node);
2979 if (ret)
2980 return ret;
2981 } else {
2982 ctx_node = sw_context->dx_ctx_node;
2983 if (!ctx_node) {
2984 DRM_ERROR("DX Context not set.\n");
2985 return -EINVAL;
2986 }
2987 }
2988
2989 res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res),
2990 cmd->body.shid, 0);
2991 if (IS_ERR(res)) {
2992 DRM_ERROR("Could not find shader to bind.\n");
2993 return PTR_ERR(res);
2994 }
2995
2996 ret = vmw_resource_val_add(sw_context, res, &res_node);
2997 if (ret) {
2998 DRM_ERROR("Error creating resource validation node.\n");
2999 goto out_unref;
3000 }
3001
3003 ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node,
3004 &cmd->body.mobid,
3005 cmd->body.offsetInBytes);
3006out_unref:
3007 vmw_resource_unreference(&res);
3008
3009 return ret;
3010}
3011
3012/**
3013 * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
3014 *
3015 * @dev_priv: Pointer to a device private struct.
3016 * @sw_context: The software context being used for this batch.
3017 * @header: Pointer to the command header in the command stream.
3018 */
3019static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
3020 struct vmw_sw_context *sw_context,
3021 SVGA3dCmdHeader *header)
3022{
3023 struct {
3024 SVGA3dCmdHeader header;
3025 SVGA3dCmdDXGenMips body;
3026 } *cmd = container_of(header, typeof(*cmd), header);
3027
3028 return vmw_view_id_val_add(sw_context, vmw_view_sr,
3029 cmd->body.shaderResourceViewId);
3030}
3031
3032static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3033 struct vmw_sw_context *sw_context,
3034 void *buf, uint32_t *size)
3035{
3036 uint32_t size_remaining = *size;
3037 uint32_t cmd_id;
3038
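	/*
	 * Legacy (non-3D) SVGA FIFO commands carry no size field, so each
	 * recognized command id maps to a fixed size that is reported
	 * back through *size for the caller to advance the stream.
	 */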
3039 cmd_id = ((uint32_t *)buf)[0];
3040 switch (cmd_id) {
3041 case SVGA_CMD_UPDATE:
3042 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3043 break;
3044 case SVGA_CMD_DEFINE_GMRFB:
3045 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3046 break;
3047 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3048 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3049 break;
3050 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3052 break;
3053 default:
3054 DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
3055 return -EINVAL;
3056 }
3057
3058 if (*size > size_remaining) {
3059 DRM_ERROR("Invalid SVGA command (size mismatch):"
3060 " %u.\n", cmd_id);
3061 return -EINVAL;
3062 }
3063
3064 if (unlikely(!sw_context->kernel)) {
3065 DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
3066 return -EPERM;
3067 }
3068
3069 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3070 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3071
3072 return 0;
3073}
3074
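/*
 * Dispatch table for SVGA 3D commands, indexed by command id offset from
 * SVGA_3D_CMD_BASE. The three booleans following each handler correspond
 * to the user_allow, gb_disable and gb_enable fields of struct
 * vmw_cmd_entry.
 */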
3075static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3076 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3077 false, false, false),
3078 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3079 false, false, false),
3080 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3081 true, false, false),
3082 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3083 true, false, false),
3084 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3085 true, false, false),
3086 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3087 false, false, false),
3088 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3089 false, false, false),
3090 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3091 true, false, false),
3092 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3093 true, false, false),
3094 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3095 true, false, false),
3096 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3097 &vmw_cmd_set_render_target_check, true, false, false),
3098 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3099 true, false, false),
3100 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3101 true, false, false),
3102 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3103 true, false, false),
3104 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3105 true, false, false),
3106 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3107 true, false, false),
3108 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3109 true, false, false),
3110 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3111 true, false, false),
3112 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3113 false, false, false),
3114 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3115 true, false, false),
3116 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3117 true, false, false),
3118 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3119 true, false, false),
3120 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3121 true, false, false),
3122 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3123 true, false, false),
3124 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3125 true, false, false),
3126 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3127 true, false, false),
3128 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3129 true, false, false),
3130 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3131 true, false, false),
3132 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3133 true, false, false),
3134 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3135 &vmw_cmd_blt_surf_screen_check, false, false, false),
3136 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3137 false, false, false),
3138 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3139 false, false, false),
3140 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3141 false, false, false),
3142 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3143 false, false, false),
3144 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3145 false, false, false),
3146 VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
3147 false, false, false),
3148 VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
3149 false, false, false),
3150 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
3151 false, false, false),
3152 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
3153 false, false, false),
3154 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
3155 false, false, false),
3156 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
3157 false, false, false),
3158 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
3159 false, false, false),
3160 VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
3161 false, false, false),
3162 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3163 false, false, true),
3164 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3165 false, false, true),
3166 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3167 false, false, true),
3168 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3169 false, false, true),
3170 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3171 false, false, true),
3172 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3173 false, false, true),
3174 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3175 false, false, true),
3176 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3177 false, false, true),
3178 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3179 true, false, true),
3180 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3181 false, false, true),
3182 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3183 true, false, true),
3184 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3185 &vmw_cmd_update_gb_surface, true, false, true),
3186 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3187 &vmw_cmd_readback_gb_image, true, false, true),
3188 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3189 &vmw_cmd_readback_gb_surface, true, false, true),
3190 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3191 &vmw_cmd_invalidate_gb_image, true, false, true),
3192 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3193 &vmw_cmd_invalidate_gb_surface, true, false, true),
3194 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3195 false, false, true),
3196 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3197 false, false, true),
3198 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3199 false, false, true),
3200 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3201 false, false, true),
3202 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3203 false, false, true),
3204 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3205 false, false, true),
3206 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3207 true, false, true),
3208 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3209 false, false, true),
3210 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3211 false, false, false),
3212 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3213 true, false, true),
3214 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3215 true, false, true),
3216 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3217 true, false, true),
3218 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3219 true, false, true),
3220 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3221 false, false, true),
3222 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3223 false, false, true),
3224 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3225 false, false, true),
3226 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3227 false, false, true),
3228 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3229 false, false, true),
3230 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3231 false, false, true),
3232 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3233 false, false, true),
3234 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3235 false, false, true),
3236 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3237 false, false, true),
3238 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3239 false, false, true),
3240 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3241 true, false, true),
3242 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3243 false, false, true),
3244 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3245 false, false, true),
3246 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3247 false, false, true),
3248 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3249 false, false, true),
3250
3251 /*
3252 * DX commands
3253 */
3254 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3255 false, false, true),
3256 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3257 false, false, true),
3258 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3259 false, false, true),
3260 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3261 false, false, true),
3262 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3263 false, false, true),
3264 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3265 &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3266 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3267 &vmw_cmd_dx_set_shader_res, true, false, true),
3268 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3269 true, false, true),
3270 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3271 true, false, true),
3272 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3273 true, false, true),
3274 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3275 true, false, true),
3276 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3277 true, false, true),
3278 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3279 &vmw_cmd_dx_cid_check, true, false, true),
3280 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3281 true, false, true),
3282 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3283 &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3284 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3285 &vmw_cmd_dx_set_index_buffer, true, false, true),
3286 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3287 &vmw_cmd_dx_set_rendertargets, true, false, true),
3288 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3289 true, false, true),
3290 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3291 &vmw_cmd_dx_cid_check, true, false, true),
3292 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3293 &vmw_cmd_dx_cid_check, true, false, true),
3294 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3295 true, false, true),
3296 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3297 true, false, true),
3298 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3299 true, false, true),
3300 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3301 &vmw_cmd_dx_cid_check, true, false, true),
3302 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3303 true, false, true),
3304 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3305 true, false, true),
3306 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3307 true, false, true),
3308 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3309 true, false, true),
3310 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3311 true, false, true),
3312 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3313 true, false, true),
3314 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3315 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3316 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3317 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3318 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3319 true, false, true),
3320 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3321 true, false, true),
3322 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3323 &vmw_cmd_dx_check_subresource, true, false, true),
3324 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3325 &vmw_cmd_dx_check_subresource, true, false, true),
3326 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3327 &vmw_cmd_dx_check_subresource, true, false, true),
3328 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3329 &vmw_cmd_dx_view_define, true, false, true),
3330 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3331 &vmw_cmd_dx_view_remove, true, false, true),
3332 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3333 &vmw_cmd_dx_view_define, true, false, true),
3334 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3335 &vmw_cmd_dx_view_remove, true, false, true),
3336 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3337 &vmw_cmd_dx_view_define, true, false, true),
3338 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3339 &vmw_cmd_dx_view_remove, true, false, true),
3340 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3341 &vmw_cmd_dx_so_define, true, false, true),
3342 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3343 &vmw_cmd_dx_cid_check, true, false, true),
3344 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3345 &vmw_cmd_dx_so_define, true, false, true),
3346 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3347 &vmw_cmd_dx_cid_check, true, false, true),
3348 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3349 &vmw_cmd_dx_so_define, true, false, true),
3350 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3351 &vmw_cmd_dx_cid_check, true, false, true),
3352 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3353 &vmw_cmd_dx_so_define, true, false, true),
3354 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3355 &vmw_cmd_dx_cid_check, true, false, true),
3356 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3357 &vmw_cmd_dx_so_define, true, false, true),
3358 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3359 &vmw_cmd_dx_cid_check, true, false, true),
3360 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3361 &vmw_cmd_dx_define_shader, true, false, true),
3362 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3363 &vmw_cmd_dx_destroy_shader, true, false, true),
3364 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3365 &vmw_cmd_dx_bind_shader, true, false, true),
3366 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3367 &vmw_cmd_dx_so_define, true, false, true),
3368 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3369 &vmw_cmd_dx_cid_check, true, false, true),
3370 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
3371 true, false, true),
3372 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3373 &vmw_cmd_dx_set_so_targets, true, false, true),
3374 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3375 &vmw_cmd_dx_cid_check, true, false, true),
3376 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3377 &vmw_cmd_dx_cid_check, true, false, true),
3378 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3379 &vmw_cmd_buffer_copy_check, true, false, true),
3380 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3381 &vmw_cmd_pred_copy_check, true, false, true),
3382};
3383
3384static int vmw_cmd_check(struct vmw_private *dev_priv,
3385 struct vmw_sw_context *sw_context,
3386 void *buf, uint32_t *size)
3387{
3388 uint32_t cmd_id;
3389 uint32_t size_remaining = *size;
3390 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3391 int ret;
3392 const struct vmw_cmd_entry *entry;
3393 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3394
3395 cmd_id = ((uint32_t *)buf)[0];
3396 /* Handle any none 3D commands */
3397 if (unlikely(cmd_id < SVGA_CMD_MAX))
3398 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3399
3401 cmd_id = header->id;
3402 *size = header->size + sizeof(SVGA3dCmdHeader);
3403
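	/*
	 * 3D command ids start at SVGA_3D_CMD_BASE; the dispatch table
	 * is indexed by the offset from that base.
	 */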
3404 cmd_id -= SVGA_3D_CMD_BASE;
3405 if (unlikely(*size > size_remaining))
3406 goto out_invalid;
3407
3408 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3409 goto out_invalid;
3410
3411 entry = &vmw_cmd_entries[cmd_id];
3412 if (unlikely(!entry->func))
3413 goto out_invalid;
3414
3415 if (unlikely(!entry->user_allow && !sw_context->kernel))
3416 goto out_privileged;
3417
3418 if (unlikely(entry->gb_disable && gb))
3419 goto out_old;
3420
3421 if (unlikely(entry->gb_enable && !gb))
3422 goto out_new;
3423
3424 ret = entry->func(dev_priv, sw_context, header);
3425 if (unlikely(ret != 0))
3426 goto out_invalid;
3427
3428 return 0;
3429out_invalid:
3430 DRM_ERROR("Invalid SVGA3D command: %d\n",
3431 cmd_id + SVGA_3D_CMD_BASE);
3432 return -EINVAL;
3433out_privileged:
3434 DRM_ERROR("Privileged SVGA3D command: %d\n",
3435 cmd_id + SVGA_3D_CMD_BASE);
3436 return -EPERM;
3437out_old:
3438 DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
3439 cmd_id + SVGA_3D_CMD_BASE);
3440 return -EINVAL;
3441out_new:
3442 DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
3443 cmd_id + SVGA_3D_CMD_BASE);
3444 return -EINVAL;
3445}
3446
3447static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3448 struct vmw_sw_context *sw_context,
3449 void *buf,
3450 uint32_t size)
3451{
3452 int32_t cur_size = size;
3453 int ret;
3454
3455 sw_context->buf_start = buf;
3456
3457 while (cur_size > 0) {
3458 size = cur_size;
3459 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3460 if (unlikely(ret != 0))
3461 return ret;
3462 buf = (void *)((unsigned long) buf + size);
3463 cur_size -= size;
3464 }
3465
3466 if (unlikely(cur_size != 0)) {
3467 DRM_ERROR("Command verifier out of sync.\n");
3468 return -EINVAL;
3469 }
3470
3471 return 0;
3472}
3473
3474static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3475{
3476 sw_context->cur_reloc = 0;
3477}
3478
3479static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3480{
3481 uint32_t i;
3482 struct vmw_relocation *reloc;
3483 struct ttm_validate_buffer *validate;
3484 struct ttm_buffer_object *bo;
3485
3486 for (i = 0; i < sw_context->cur_reloc; ++i) {
3487 reloc = &sw_context->relocs[i];
3488 validate = &sw_context->val_bufs[reloc->index].base;
3489 bo = validate->bo;
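		/*
		 * Patch the location in the command stream with the final
		 * device address: VRAM offsets are relative to the
		 * framebuffer GMR, while GMR and MOB placements use the
		 * memory node start as their id.
		 */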
3490 switch (bo->mem.mem_type) {
3491 case TTM_PL_VRAM:
3492 reloc->location->offset += bo->offset;
3493 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3494 break;
3495 case VMW_PL_GMR:
3496 reloc->location->gmrId = bo->mem.start;
3497 break;
3498 case VMW_PL_MOB:
3499 *reloc->mob_loc = bo->mem.start;
3500 break;
3501 default:
3502 BUG();
3503 }
3504 }
3505 vmw_free_relocations(sw_context);
3506}
3507
3508/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @sw_context: The software context owning any staged binding state.
 * @list: The resource list.
3513 */
3514static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context,
3515 struct list_head *list)
3516{
3517 struct vmw_resource_val_node *val, *val_next;
3518
3519 /*
3520 * Drop references to resources held during command submission.
3521 */
3522
3523 list_for_each_entry_safe(val, val_next, list, head) {
3524 list_del_init(&val->head);
3525 vmw_resource_unreference(&val->res);
3526
3527 if (val->staged_bindings) {
3528 if (val->staged_bindings != sw_context->staged_bindings)
3529 vmw_binding_state_free(val->staged_bindings);
3530 else
3531 sw_context->staged_bindings_inuse = false;
3532 val->staged_bindings = NULL;
3533 }
3534
3535 kfree(val);
3536 }
3537}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}

int vmw_validate_single_buffer(struct vmw_private *dev_priv,
			       struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool validate_as_mob)
{
	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
						  base);
	int ret;

	if (vbo->pin_count > 0)
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
				       false);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
			      false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 true,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
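
/*
 * Example of the growth policy above, assuming a 4 KiB PAGE_SIZE and an
 * initial bounce size of 32 KiB (the assumed value of
 * VMWGFX_CMD_BOUNCE_INIT_SIZE): a 100 KiB batch grows the buffer
 * 32768 -> 49152 -> 73728 -> 110592 bytes, i.e. roughly 1.5x per step,
 * page-aligned, so repeated submissions of similar size do not
 * reallocate every time.
 */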

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
 * user-space handle is created; otherwise no handle is created.
 */

int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}
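
/*
 * Minimal caller sketch (mirroring the use in this file): fence creation
 * errors are absorbed above, so callers may fence buffer objects with the
 * resulting pointer even when it is NULL.
 *
 *	struct vmw_fence_obj *fence = NULL;
 *
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
 *	if (fence != NULL)
 *		vmw_fence_obj_unreference(&fence);
 */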

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is left untouched;
 * if user-space has preloaded it with -EFAULT, the error can then be
 * detected there.
 * Also, if copying fails, user-space will be unable to signal the fence
 * object, so we wait for it immediately and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}
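
/*
 * Sketch of the expected user-space pattern (illustrative, not part of
 * this driver): pre-set the error member before the ioctl so that a
 * failed copy_to_user() above remains observable.
 *
 *	struct drm_vmw_fence_rep rep;
 *
 *	rep.error = -EFAULT;
 *	arg.fence_rep = (unsigned long) &rep;
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (rep.error != 0)
 *		;	// the kernel never filled in the fence info
 */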

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
 * the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch
 * pointed to by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands,
				   u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
					  sw_context->dx_ctx_node->res->id);
	else
		cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (!cmd) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		return -ENOMEM;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_fifo_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
 * the command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->res->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
				       id, false, header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to a kernel copy of the commands, or NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and
 * copies the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 * If command buffers could not be used, the function will return the value
 * of @kernel_commands on function call. That value may be NULL. In that case,
 * the value of *@header will be set to NULL.
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands,
				u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	if (command_size > SVGA_CB_MAX_SIZE) {
		DRM_ERROR("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
					   true, header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands,
			     command_size);
	if (ret) {
		DRM_ERROR("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource *res;
	int ret;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile,
					      handle, user_context_converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use DX context 0x%08x.\n",
			  (unsigned) handle);
		return ret;
	}

	ret = vmw_resource_val_add(sw_context, res, &ctx_node);
	if (unlikely(ret != 0))
		goto out_err;

	sw_context->dx_ctx_node = ctx_node;
	sw_context->man = vmw_context_res_man(res);
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct vmw_cmdbuf_header *header;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	int ret;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (ret)
			return ret;
	}

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands))
		return PTR_ERR(kernel_commands);

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);

		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else if (!header)
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	INIT_LIST_HEAD(&sw_context->ctx_resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	INIT_LIST_HEAD(&resource_list);
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0)) {
		list_splice_init(&sw_context->ctx_resource_list,
				 &sw_context->resource_list);
		goto out_err_nores;
	}

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	/*
	 * Merge the resource lists before checking the return status
	 * from vmw_cmd_check_all so that all the open hashtabs will
	 * be handled properly even if vmw_cmd_check_all fails.
	 */
	list_splice_init(&sw_context->ctx_resource_list,
			 &sw_context->resource_list);

	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
				     true, NULL);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @user_fence_rep.
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resources_unreserve(sw_context, false);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resources_unreserve(sw_context, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(sw_context, &resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before
 * the next fifo command. (For example on hardware context destructions
 * where the hardware may otherwise leak unfinished queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(&dev_priv->pinned_bo->base);
	pinned_val.shared = false;
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(&dev_priv->dummy_query_bo->base);
	query_val.shared = false;
	list_add_tail(&query_val.head, &validate_list);

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
				     false, NULL);
	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
	DRM_INFO("Dummy query bo pin count: %d\n",
		 dev_priv->dummy_query_bo->pin_count);

out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	vmw_dmabuf_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before
 * the next fifo command. (For example on hardware context destructions
 * where the hardware may otherwise leak unfinished queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
		      struct drm_file *file_priv, size_t size)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg arg;
	int ret;
	static const size_t copy_offset[] = {
		offsetof(struct drm_vmw_execbuf_arg, context_handle),
		sizeof(struct drm_vmw_execbuf_arg)};

	if (unlikely(size < copy_offset[0])) {
		DRM_ERROR("Invalid command size, ioctl %d\n",
			  DRM_VMW_EXECBUF);
		return -EINVAL;
	}

	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
		return -EFAULT;

	/*
	 * Extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg.version.
	 */

	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
		     arg.version == 0)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	if (arg.version > 1 &&
	    copy_from_user(&arg.context_handle,
			   (void __user *) (data + copy_offset[0]),
			   copy_offset[arg.version - 1] -
			   copy_offset[0]) != 0)
		return -EFAULT;

	switch (arg.version) {
	case 1:
		arg.context_handle = (uint32_t) -1;
		break;
	case 2:
		if (arg.pad64 != 0) {
			DRM_ERROR("Unused IOCTL data not set to zero.\n");
			return -EINVAL;
		}
		break;
	default:
		break;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg.commands,
				  NULL, arg.command_size, arg.throttle_us,
				  arg.context_handle,
				  (void __user *)(unsigned long)arg.fence_rep,
				  NULL);
	ttm_read_unlock(&dev_priv->reservation_sem);
	if (unlikely(ret != 0))
		return ret;

	vmw_kms_cursor_post_execbuf(dev_priv);

	return 0;
}
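
/*
 * Illustration of the versioned argument handling above (a sketch, not
 * taken from the uapi headers): a version 1 drm_vmw_execbuf_arg ends right
 * before the context_handle member, so only copy_offset[0] bytes are
 * copied and context_handle is defaulted to an invalid id. A version 2
 * caller also supplies context_handle (and must zero pad64):
 *
 *	struct drm_vmw_execbuf_arg arg = { 0 };
 *
 *	arg.version = 2;
 *	arg.commands = (unsigned long) cmd_buf;
 *	arg.command_size = cmd_size;
 *	arg.fence_rep = (unsigned long) &rep;
 *	arg.context_handle = dx_ctx_handle;
 */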
1// SPDX-License-Identifier: GPL-2.0 OR MIT
2/**************************************************************************
3 *
4 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27#include <linux/sync_file.h>
28
29#include "vmwgfx_drv.h"
30#include "vmwgfx_reg.h"
31#include <drm/ttm/ttm_bo_api.h>
32#include <drm/ttm/ttm_placement.h>
33#include "vmwgfx_so.h"
34#include "vmwgfx_binding.h"
35
36#define VMW_RES_HT_ORDER 12
37
38/*
39 * Helper macro to get dx_ctx_node if available otherwise print an error
40 * message. This is for use in command verifier function where if dx_ctx_node
41 * is not set then command is invalid.
42 */
43#define VMW_GET_CTX_NODE(__sw_context) \
44({ \
45 __sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \
46 VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
47 __sw_context->dx_ctx_node; \
48 }); \
49})
50
51#define VMW_DECLARE_CMD_VAR(__var, __type) \
52 struct { \
53 SVGA3dCmdHeader header; \
54 __type body; \
55 } __var
56
57/**
58 * struct vmw_relocation - Buffer object relocation
59 *
60 * @head: List head for the command submission context's relocation list
61 * @vbo: Non ref-counted pointer to buffer object
62 * @mob_loc: Pointer to location for mob id to be modified
63 * @location: Pointer to location for guest pointer to be modified
64 */
65struct vmw_relocation {
66 struct list_head head;
67 struct vmw_buffer_object *vbo;
68 union {
69 SVGAMobId *mob_loc;
70 SVGAGuestPtr *location;
71 };
72};
73
74/**
75 * enum vmw_resource_relocation_type - Relocation type for resources
76 *
77 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
78 * command stream is replaced with the actual id after validation.
79 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
80 * with a NOP.
81 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
82 * validation is -1, the command is replaced with a NOP. Otherwise no action.
83 * @vmw_res_rel_max: Last value in the enum - used for error checking
84*/
85enum vmw_resource_relocation_type {
86 vmw_res_rel_normal,
87 vmw_res_rel_nop,
88 vmw_res_rel_cond_nop,
89 vmw_res_rel_max
90};
91
92/**
93 * struct vmw_resource_relocation - Relocation info for resources
94 *
95 * @head: List head for the software context's relocation list.
96 * @res: Non-ref-counted pointer to the resource.
97 * @offset: Offset of single byte entries into the command buffer where the id
98 * that needs fixup is located.
99 * @rel_type: Type of relocation.
100 */
101struct vmw_resource_relocation {
102 struct list_head head;
103 const struct vmw_resource *res;
104 u32 offset:29;
105 enum vmw_resource_relocation_type rel_type:3;
106};
107
108/**
109 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
110 *
111 * @head: List head of context list
112 * @ctx: The context resource
113 * @cur: The context's persistent binding state
114 * @staged: The binding state changes of this command buffer
115 */
116struct vmw_ctx_validation_info {
117 struct list_head head;
118 struct vmw_resource *ctx;
119 struct vmw_ctx_binding_state *cur;
120 struct vmw_ctx_binding_state *staged;
121};
122
123/**
124 * struct vmw_cmd_entry - Describe a command for the verifier
125 *
126 * @func: Call-back to handle the command.
127 * @user_allow: Whether allowed from the execbuf ioctl.
128 * @gb_disable: Whether disabled if guest-backed objects are available.
129 * @gb_enable: Whether enabled iff guest-backed objects are available.
130 * @cmd_name: Name of the command.
131 */
132struct vmw_cmd_entry {
133 int (*func) (struct vmw_private *, struct vmw_sw_context *,
134 SVGA3dCmdHeader *);
135 bool user_allow;
136 bool gb_disable;
137 bool gb_enable;
138 const char *cmd_name;
139};
140
141#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
142 [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
143 (_gb_disable), (_gb_enable), #_cmd}
144
145static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
146 struct vmw_sw_context *sw_context,
147 struct vmw_resource *ctx);
148static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
149 struct vmw_sw_context *sw_context,
150 SVGAMobId *id,
151 struct vmw_buffer_object **vmw_bo_p);
152/**
153 * vmw_ptr_diff - Compute the offset from a to b in bytes
154 *
155 * @a: A starting pointer.
156 * @b: A pointer offset in the same address space.
157 *
158 * Returns: The offset in bytes between the two pointers.
159 */
160static size_t vmw_ptr_diff(void *a, void *b)
161{
162 return (unsigned long) b - (unsigned long) a;
163}
164
165/**
166 * vmw_execbuf_bindings_commit - Commit modified binding state
167 *
168 * @sw_context: The command submission context
169 * @backoff: Whether this is part of the error path and binding state changes
170 * should be ignored
171 */
172static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
173 bool backoff)
174{
175 struct vmw_ctx_validation_info *entry;
176
177 list_for_each_entry(entry, &sw_context->ctx_list, head) {
178 if (!backoff)
179 vmw_binding_state_commit(entry->cur, entry->staged);
180
181 if (entry->staged != sw_context->staged_bindings)
182 vmw_binding_state_free(entry->staged);
183 else
184 sw_context->staged_bindings_inuse = false;
185 }
186
187 /* List entries are freed with the validation context */
188 INIT_LIST_HEAD(&sw_context->ctx_list);
189}
190
191/**
192 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
193 *
194 * @sw_context: The command submission context
195 */
196static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
197{
198 if (sw_context->dx_query_mob)
199 vmw_context_bind_dx_query(sw_context->dx_query_ctx,
200 sw_context->dx_query_mob);
201}
202
203/**
204 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
205 * the validate list.
206 *
207 * @dev_priv: Pointer to the device private:
208 * @sw_context: The command submission context
209 * @res: Pointer to the resource
210 * @node: The validation node holding the context resource metadata
211 */
212static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
213 struct vmw_sw_context *sw_context,
214 struct vmw_resource *res,
215 struct vmw_ctx_validation_info *node)
216{
217 int ret;
218
219 ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
220 if (unlikely(ret != 0))
221 goto out_err;
222
223 if (!sw_context->staged_bindings) {
224 sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
225 if (IS_ERR(sw_context->staged_bindings)) {
226 ret = PTR_ERR(sw_context->staged_bindings);
227 sw_context->staged_bindings = NULL;
228 goto out_err;
229 }
230 }
231
232 if (sw_context->staged_bindings_inuse) {
233 node->staged = vmw_binding_state_alloc(dev_priv);
234 if (IS_ERR(node->staged)) {
235 ret = PTR_ERR(node->staged);
236 node->staged = NULL;
237 goto out_err;
238 }
239 } else {
240 node->staged = sw_context->staged_bindings;
241 sw_context->staged_bindings_inuse = true;
242 }
243
244 node->ctx = res;
245 node->cur = vmw_context_binding_state(res);
246 list_add_tail(&node->head, &sw_context->ctx_list);
247
248 return 0;
249
250out_err:
251 return ret;
252}
253
254/**
255 * vmw_execbuf_res_size - calculate extra size fore the resource validation node
256 *
257 * @dev_priv: Pointer to the device private struct.
258 * @res_type: The resource type.
259 *
260 * Guest-backed contexts and DX contexts require extra size to store execbuf
261 * private information in the validation node. Typically the binding manager
262 * associated data structures.
263 *
264 * Returns: The extra size requirement based on resource type.
265 */
266static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
267 enum vmw_res_type res_type)
268{
269 return (res_type == vmw_res_dx_context ||
270 (res_type == vmw_res_context && dev_priv->has_mob)) ?
271 sizeof(struct vmw_ctx_validation_info) : 0;
272}
273
274/**
275 * vmw_execbuf_rcache_update - Update a resource-node cache entry
276 *
277 * @rcache: Pointer to the entry to update.
278 * @res: Pointer to the resource.
279 * @private: Pointer to the execbuf-private space in the resource validation
280 * node.
281 */
282static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
283 struct vmw_resource *res,
284 void *private)
285{
286 rcache->res = res;
287 rcache->private = private;
288 rcache->valid = 1;
289 rcache->valid_handle = 0;
290}
291
292/**
293 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
294 * rcu-protected pointer to the validation list.
295 *
296 * @sw_context: Pointer to the software context.
297 * @res: Unreferenced rcu-protected pointer to the resource.
298 * @dirty: Whether to change dirty status.
299 *
300 * Returns: 0 on success. Negative error code on failure. Typical error codes
301 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
302 */
303static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
304 struct vmw_resource *res,
305 u32 dirty)
306{
307 struct vmw_private *dev_priv = res->dev_priv;
308 int ret;
309 enum vmw_res_type res_type = vmw_res_type(res);
310 struct vmw_res_cache_entry *rcache;
311 struct vmw_ctx_validation_info *ctx_info;
312 bool first_usage;
313 unsigned int priv_size;
314
315 rcache = &sw_context->res_cache[res_type];
316 if (likely(rcache->valid && rcache->res == res)) {
317 if (dirty)
318 vmw_validation_res_set_dirty(sw_context->ctx,
319 rcache->private, dirty);
320 vmw_user_resource_noref_release();
321 return 0;
322 }
323
324 priv_size = vmw_execbuf_res_size(dev_priv, res_type);
325 ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
326 dirty, (void **)&ctx_info,
327 &first_usage);
328 vmw_user_resource_noref_release();
329 if (ret)
330 return ret;
331
332 if (priv_size && first_usage) {
333 ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
334 ctx_info);
335 if (ret) {
336 VMW_DEBUG_USER("Failed first usage context setup.\n");
337 return ret;
338 }
339 }
340
341 vmw_execbuf_rcache_update(rcache, res, ctx_info);
342 return 0;
343}
344
345/**
346 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
347 * validation list if it's not already on it
348 *
349 * @sw_context: Pointer to the software context.
350 * @res: Pointer to the resource.
351 * @dirty: Whether to change dirty status.
352 *
353 * Returns: Zero on success. Negative error code on failure.
354 */
355static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
356 struct vmw_resource *res,
357 u32 dirty)
358{
359 struct vmw_res_cache_entry *rcache;
360 enum vmw_res_type res_type = vmw_res_type(res);
361 void *ptr;
362 int ret;
363
364 rcache = &sw_context->res_cache[res_type];
365 if (likely(rcache->valid && rcache->res == res)) {
366 if (dirty)
367 vmw_validation_res_set_dirty(sw_context->ctx,
368 rcache->private, dirty);
369 return 0;
370 }
371
372 ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
373 &ptr, NULL);
374 if (ret)
375 return ret;
376
377 vmw_execbuf_rcache_update(rcache, res, ptr);
378
379 return 0;
380}
381
382/**
383 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
384 * validation list
385 *
386 * @sw_context: The software context holding the validation list.
387 * @view: Pointer to the view resource.
388 *
389 * Returns 0 if success, negative error code otherwise.
390 */
391static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
392 struct vmw_resource *view)
393{
394 int ret;
395
396 /*
397 * First add the resource the view is pointing to, otherwise it may be
398 * swapped out when the view is validated.
399 */
400 ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
401 vmw_view_dirtying(view));
402 if (ret)
403 return ret;
404
405 return vmw_execbuf_res_noctx_val_add(sw_context, view,
406 VMW_RES_DIRTY_NONE);
407}
408
409/**
410 * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
411 * to to the validation list.
412 *
413 * @sw_context: The software context holding the validation list.
414 * @view_type: The view type to look up.
415 * @id: view id of the view.
416 *
417 * The view is represented by a view id and the DX context it's created on, or
418 * scheduled for creation on. If there is no DX context set, the function will
419 * return an -EINVAL error pointer.
420 *
421 * Returns: Unreferenced pointer to the resource on success, negative error
422 * pointer on failure.
423 */
424static struct vmw_resource *
425vmw_view_id_val_add(struct vmw_sw_context *sw_context,
426 enum vmw_view_type view_type, u32 id)
427{
428 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
429 struct vmw_resource *view;
430 int ret;
431
432 if (!ctx_node)
433 return ERR_PTR(-EINVAL);
434
435 view = vmw_view_lookup(sw_context->man, view_type, id);
436 if (IS_ERR(view))
437 return view;
438
439 ret = vmw_view_res_val_add(sw_context, view);
440 if (ret)
441 return ERR_PTR(ret);
442
443 return view;
444}
445
446/**
447 * vmw_resource_context_res_add - Put resources previously bound to a context on
448 * the validation list
449 *
450 * @dev_priv: Pointer to a device private structure
451 * @sw_context: Pointer to a software context used for this command submission
452 * @ctx: Pointer to the context resource
453 *
454 * This function puts all resources that were previously bound to @ctx on the
455 * resource validation list. This is part of the context state reemission
456 */
457static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
458 struct vmw_sw_context *sw_context,
459 struct vmw_resource *ctx)
460{
461 struct list_head *binding_list;
462 struct vmw_ctx_bindinfo *entry;
463 int ret = 0;
464 struct vmw_resource *res;
465 u32 i;
466 u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
467 SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
468
469 /* Add all cotables to the validation list. */
470 if (has_sm4_context(dev_priv) &&
471 vmw_res_type(ctx) == vmw_res_dx_context) {
472 for (i = 0; i < cotable_max; ++i) {
473 res = vmw_context_cotable(ctx, i);
474 if (IS_ERR(res))
475 continue;
476
477 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
478 VMW_RES_DIRTY_SET);
479 if (unlikely(ret != 0))
480 return ret;
481 }
482 }
483
484 /* Add all resources bound to the context to the validation list */
485 mutex_lock(&dev_priv->binding_mutex);
486 binding_list = vmw_context_binding_list(ctx);
487
488 list_for_each_entry(entry, binding_list, ctx_list) {
489 if (vmw_res_type(entry->res) == vmw_res_view)
490 ret = vmw_view_res_val_add(sw_context, entry->res);
491 else
492 ret = vmw_execbuf_res_noctx_val_add
493 (sw_context, entry->res,
494 vmw_binding_dirtying(entry->bt));
495 if (unlikely(ret != 0))
496 break;
497 }
498
499 if (has_sm4_context(dev_priv) &&
500 vmw_res_type(ctx) == vmw_res_dx_context) {
501 struct vmw_buffer_object *dx_query_mob;
502
503 dx_query_mob = vmw_context_get_dx_query_mob(ctx);
504 if (dx_query_mob)
505 ret = vmw_validation_add_bo(sw_context->ctx,
506 dx_query_mob, true, false);
507 }
508
509 mutex_unlock(&dev_priv->binding_mutex);
510 return ret;
511}
512
513/**
514 * vmw_resource_relocation_add - Add a relocation to the relocation list
515 *
516 * @sw_context: Pointer to the software context.
517 * @res: The resource.
518 * @offset: Offset into the command buffer currently being parsed where the id
519 * that needs fixup is located. Granularity is one byte.
520 * @rel_type: Relocation type.
521 */
522static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
523 const struct vmw_resource *res,
524 unsigned long offset,
525 enum vmw_resource_relocation_type
526 rel_type)
527{
528 struct vmw_resource_relocation *rel;
529
530 rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
531 if (unlikely(!rel)) {
532 VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
533 return -ENOMEM;
534 }
535
536 rel->res = res;
537 rel->offset = offset;
538 rel->rel_type = rel_type;
539 list_add_tail(&rel->head, &sw_context->res_relocations);
540
541 return 0;
542}
543
544/**
545 * vmw_resource_relocations_free - Free all relocations on a list
546 *
547 * @list: Pointer to the head of the relocation list
548 */
549static void vmw_resource_relocations_free(struct list_head *list)
550{
551 /* Memory is validation context memory, so no need to free it */
552 INIT_LIST_HEAD(list);
553}
554
555/**
556 * vmw_resource_relocations_apply - Apply all relocations on a list
557 *
558 * @cb: Pointer to the start of the command buffer bein patch. This need not be
559 * the same buffer as the one being parsed when the relocation list was built,
560 * but the contents must be the same modulo the resource ids.
561 * @list: Pointer to the head of the relocation list.
562 */
563static void vmw_resource_relocations_apply(uint32_t *cb,
564 struct list_head *list)
565{
566 struct vmw_resource_relocation *rel;
567
568 /* Validate the struct vmw_resource_relocation member size */
569 BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
570 BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
571
572 list_for_each_entry(rel, list, head) {
573 u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
574 switch (rel->rel_type) {
575 case vmw_res_rel_normal:
576 *addr = rel->res->id;
577 break;
578 case vmw_res_rel_nop:
579 *addr = SVGA_3D_CMD_NOP;
580 break;
581 default:
582 if (rel->res->id == -1)
583 *addr = SVGA_3D_CMD_NOP;
584 break;
585 }
586 }
587}
588
589static int vmw_cmd_invalid(struct vmw_private *dev_priv,
590 struct vmw_sw_context *sw_context,
591 SVGA3dCmdHeader *header)
592{
593 return -EINVAL;
594}
595
596static int vmw_cmd_ok(struct vmw_private *dev_priv,
597 struct vmw_sw_context *sw_context,
598 SVGA3dCmdHeader *header)
599{
600 return 0;
601}
602
603/**
604 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
605 * list.
606 *
607 * @sw_context: Pointer to the software context.
608 *
609 * Note that since vmware's command submission currently is protected by the
610 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
611 * only a single thread at once will attempt this.
612 */
613static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
614{
615 int ret;
616
617 ret = vmw_validation_res_reserve(sw_context->ctx, true);
618 if (ret)
619 return ret;
620
621 if (sw_context->dx_query_mob) {
622 struct vmw_buffer_object *expected_dx_query_mob;
623
624 expected_dx_query_mob =
625 vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
626 if (expected_dx_query_mob &&
627 expected_dx_query_mob != sw_context->dx_query_mob) {
628 ret = -EINVAL;
629 }
630 }
631
632 return ret;
633}
634
635/**
636 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
637 * resource validate list unless it's already there.
638 *
639 * @dev_priv: Pointer to a device private structure.
640 * @sw_context: Pointer to the software context.
641 * @res_type: Resource type.
642 * @dirty: Whether to change dirty status.
643 * @converter: User-space visisble type specific information.
644 * @id_loc: Pointer to the location in the command buffer currently being parsed
645 * from where the user-space resource id handle is located.
646 * @p_res: Pointer to pointer to resource validalidation node. Populated on
647 * exit.
648 */
649static int
650vmw_cmd_res_check(struct vmw_private *dev_priv,
651 struct vmw_sw_context *sw_context,
652 enum vmw_res_type res_type,
653 u32 dirty,
654 const struct vmw_user_resource_conv *converter,
655 uint32_t *id_loc,
656 struct vmw_resource **p_res)
657{
658 struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
659 struct vmw_resource *res;
660 int ret;
661
662 if (p_res)
663 *p_res = NULL;
664
665 if (*id_loc == SVGA3D_INVALID_ID) {
666 if (res_type == vmw_res_context) {
667 VMW_DEBUG_USER("Illegal context invalid id.\n");
668 return -EINVAL;
669 }
670 return 0;
671 }
672
673 if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
674 res = rcache->res;
675 if (dirty)
676 vmw_validation_res_set_dirty(sw_context->ctx,
677 rcache->private, dirty);
678 } else {
679 unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
680
681 ret = vmw_validation_preload_res(sw_context->ctx, size);
682 if (ret)
683 return ret;
684
685 res = vmw_user_resource_noref_lookup_handle
686 (dev_priv, sw_context->fp->tfile, *id_loc, converter);
687 if (IS_ERR(res)) {
688 VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
689 (unsigned int) *id_loc);
690 return PTR_ERR(res);
691 }
692
693 ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
694 if (unlikely(ret != 0))
695 return ret;
696
697 if (rcache->valid && rcache->res == res) {
698 rcache->valid_handle = true;
699 rcache->handle = *id_loc;
700 }
701 }
702
703 ret = vmw_resource_relocation_add(sw_context, res,
704 vmw_ptr_diff(sw_context->buf_start,
705 id_loc),
706 vmw_res_rel_normal);
707 if (p_res)
708 *p_res = res;
709
710 return 0;
711}
712
713/**
714 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
715 *
716 * @ctx_res: context the query belongs to
717 *
718 * This function assumes binding_mutex is held.
719 */
720static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
721{
722 struct vmw_private *dev_priv = ctx_res->dev_priv;
723 struct vmw_buffer_object *dx_query_mob;
724 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
725
726 dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
727
728 if (!dx_query_mob || dx_query_mob->dx_query_ctx)
729 return 0;
730
731 cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
732 if (cmd == NULL)
733 return -ENOMEM;
734
735 cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
736 cmd->header.size = sizeof(cmd->body);
737 cmd->body.cid = ctx_res->id;
738 cmd->body.mobid = dx_query_mob->base.resource->start;
739 vmw_cmd_commit(dev_priv, sizeof(*cmd));
740
741 vmw_context_bind_dx_query(ctx_res, dx_query_mob);
742
743 return 0;
744}
745
746/**
747 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
748 * contexts.
749 *
750 * @sw_context: Pointer to the software context.
751 *
752 * Rebind context binding points that have been scrubbed because of eviction.
753 */
754static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
755{
756 struct vmw_ctx_validation_info *val;
757 int ret;
758
759 list_for_each_entry(val, &sw_context->ctx_list, head) {
760 ret = vmw_binding_rebind_all(val->cur);
761 if (unlikely(ret != 0)) {
762 if (ret != -ERESTARTSYS)
763 VMW_DEBUG_USER("Failed to rebind context.\n");
764 return ret;
765 }
766
767 ret = vmw_rebind_all_dx_query(val->ctx);
768 if (ret != 0) {
769 VMW_DEBUG_USER("Failed to rebind queries.\n");
770 return ret;
771 }
772 }
773
774 return 0;
775}
776
777/**
778 * vmw_view_bindings_add - Add an array of view bindings to a context binding
779 * state tracker.
780 *
781 * @sw_context: The execbuf state used for this command.
782 * @view_type: View type for the bindings.
783 * @binding_type: Binding type for the bindings.
784 * @shader_slot: The shader slot to user for the bindings.
785 * @view_ids: Array of view ids to be bound.
786 * @num_views: Number of view ids in @view_ids.
787 * @first_slot: The binding slot to be used for the first view id in @view_ids.
788 */
789static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
790 enum vmw_view_type view_type,
791 enum vmw_ctx_binding_type binding_type,
792 uint32 shader_slot,
793 uint32 view_ids[], u32 num_views,
794 u32 first_slot)
795{
796 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
797 u32 i;
798
799 if (!ctx_node)
800 return -EINVAL;
801
802 for (i = 0; i < num_views; ++i) {
803 struct vmw_ctx_bindinfo_view binding;
804 struct vmw_resource *view = NULL;
805
806 if (view_ids[i] != SVGA3D_INVALID_ID) {
807 view = vmw_view_id_val_add(sw_context, view_type,
808 view_ids[i]);
809 if (IS_ERR(view)) {
810 VMW_DEBUG_USER("View not found.\n");
811 return PTR_ERR(view);
812 }
813 }
814 binding.bi.ctx = ctx_node->ctx;
815 binding.bi.res = view;
816 binding.bi.bt = binding_type;
817 binding.shader_slot = shader_slot;
818 binding.slot = first_slot + i;
819 vmw_binding_add(ctx_node->staged, &binding.bi,
820 shader_slot, binding.slot);
821 }
822
823 return 0;
824}
825
826/**
827 * vmw_cmd_cid_check - Check a command header for valid context information.
828 *
829 * @dev_priv: Pointer to a device private structure.
830 * @sw_context: Pointer to the software context.
831 * @header: A command header with an embedded user-space context handle.
832 *
833 * Convenience function: Call vmw_cmd_res_check with the user-space context
834 * handle embedded in @header.
835 */
836static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
837 struct vmw_sw_context *sw_context,
838 SVGA3dCmdHeader *header)
839{
840 VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
841 container_of(header, typeof(*cmd), header);
842
843 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
844 VMW_RES_DIRTY_SET, user_context_converter,
845 &cmd->body, NULL);
846}
847
848/**
849 * vmw_execbuf_info_from_res - Get the private validation metadata for a
850 * recently validated resource
851 *
852 * @sw_context: Pointer to the command submission context
853 * @res: The resource
854 *
855 * The resource pointed to by @res needs to be present in the command submission
856 * context's resource cache and hence the last resource of that type to be
857 * processed by the validation code.
858 *
859 * Return: a pointer to the private metadata of the resource, or NULL if it
860 * wasn't found
861 */
862static struct vmw_ctx_validation_info *
863vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
864 struct vmw_resource *res)
865{
866 struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and whether another buffer is currently pinned for query results.
 * If so, the function prepares the state of @sw_context for switching pinned
 * buffers after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.resource->num_pages > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
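
/*
 * Illustrative note: this helper and vmw_query_bo_switch_commit() below
 * bracket a command batch. Prepare is invoked while the batch is being
 * checked (see vmw_cmd_end_query()), and commit once the batch has been
 * submitted, after which both the old and the new query buffer must be
 * fenced as described in the comment on vmw_query_bo_switch_commit().
 */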

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the old
 * query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and the old query buffers need to be
 * fenced using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We also pin the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy
			 * queries in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
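
/*
 * Sketch of the deferred translation set up above (illustrative, assuming
 * the fixup pass described in the kernel-doc comment): once the buffer has a
 * guaranteed MOB id, vmw_apply_relocations() rewrites the user-space handle
 * in place through reloc->mob_loc, so by the time the batch reaches the
 * device the command stream carries the kernel-mode MOB id instead of the
 * user-space handle.
 */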

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use GMR region.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}
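
/*
 * Note on the conversion above: on guest-backed hardware the legacy
 * BEGIN_QUERY command is patched in place into its guest-backed equivalent
 * and re-dispatched, which relies on both commands having the same size
 * (hence the BUG_ON). vmw_cmd_end_query() and vmw_cmd_wait_query() below use
 * the same pattern.
 */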

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.base.size;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);

	return 0;
}
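
/*
 * Worked example for the clamping above (illustrative numbers only): with a
 * 0x1000-byte buffer object and cmd->body.guest.ptr.offset == 0xc00, bo_size
 * ends up as 0x400, so a user-supplied suffix->maximumOffset of, say, 0x800
 * is clamped to 0x400 and the DMA can never address past the end of the
 * buffer object.
 */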

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, typeof(*cmd), header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		VMW_DEBUG_USER("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
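
/*
 * Layout parsed above (as implied by the two maxnum computations): the
 * variable-size command consists of the fixed body, followed by
 * numVertexDecls SVGA3dVertexDecl entries, followed by numRanges
 * SVGA3dPrimitiveRange entries, and both array counts are validated against
 * what actually fits within header->size.
 */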

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
		((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
				       (unsigned int) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_buffer_object *vmw_bo;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
				       &vmw_bo);
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: Pointer to the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_validation_res_switch_backup() with a different interface.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res, uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_buffer_object *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup() with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter, uint32_t *res_id, uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
					 backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter, &cmd->body.sid,
				     &cmd->body.mobid, 0);
}

/**
 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
	int ret;
	size_t size;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1, cmd->body.type,
				    size, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}
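
/*
 * Note on the NOP relocation above: with MOBs available the shader defined
 * here is managed entirely by the kernel as a guest-backed "compat" shader,
 * so a vmw_res_rel_nop relocation is registered for the command id, allowing
 * the define command to be patched into a no-op before the batch reaches the
 * device.
 */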

/**
 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
	int ret;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
				cmd->body.type, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *ctx, *res = NULL;
	struct vmw_ctx_validation_info *ctx_info;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		/*
		 * This is the compat shader path - Per device guest-backed
		 * shaders, but user-space thinks it's per context host-
		 * backed shaders.
		 */
		res = vmw_shader_lookup(vmw_context_res_man(ctx),
					cmd->body.shid, cmd->body.type);
		if (!IS_ERR(res)) {
			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_NONE);
			if (unlikely(ret != 0))
				return ret;

			ret = vmw_resource_relocation_add
				(sw_context, res,
				 vmw_ptr_diff(sw_context->buf_start,
					      &cmd->body.shid),
				 vmw_res_rel_normal);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					VMW_RES_DIRTY_NONE,
					user_shader_converter, &cmd->body.shid,
					&res);
		if (unlikely(ret != 0))
			return ret;
	}

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;

	binding.bi.ctx = ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}
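
/*
 * Summary of the lookup order above: a valid shid is first tried as a
 * per-context "compat" guest-backed shader; only if that lookup fails is the
 * id treated as a regular user-space shader handle and translated through
 * user_shader_converter.
 */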

/**
 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter, &cmd->body.shid,
				     &cmd->body.mobid, cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
	SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
		SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;

	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= max_shader_num ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
			       (unsigned int) cmd->body.type,
			       (unsigned int) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
			binding.slot);

	return 0;
}
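
/*
 * Note on the range check above: SM5 devices expose additional shader stages
 * beyond the DX10 set, so has_sm5_context() selects the larger
 * SVGA3D_NUM_SHADERTYPE limit, while the constant buffer slot itself is
 * always bounded by SVGA3D_DX_MAX_CONSTBUFFERS.
 */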

/**
 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
		container_of(header, typeof(*cmd), header);
	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;

	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= max_allowed) {
		VMW_DEBUG_USER("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}

/**
 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= max_allowed ||
	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
						    VMW_RES_DIRTY_NONE);
		if (ret)
			return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validate SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cmd->buf[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = res;
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}
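
/*
 * Note on the bounds check above: num and startBuffer are both 32-bit, so
 * their sum is evaluated in u64 to rule out wrap-around before comparing
 * against SVGA3D_DX_MAX_VERTEXBUFFERS; vmw_cmd_dx_set_shader_res() uses the
 * same idiom for startView.
 */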

/**
 * vmw_cmd_dx_set_index_buffer - Validate
 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
		container_of(header, typeof(*cmd), header);
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);
	int ret;

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
				    0, &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
				     num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
				  cmd->body.renderTargetViewId);

	return PTR_ERR_OR_ZERO(ret);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
				  cmd->body.depthStencilViewId);

	return PTR_ERR_OR_ZERO(ret);
}

static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *srf;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have the
	 * same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (!ctx_node)
		return -EINVAL;

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
		VMW_DEBUG_USER("Invalid surface id.\n");
		return -EINVAL;
	}
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->sid, &srf);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
			    cmd->defined_id, header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}
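
/*
 * Note: besides notifying the cotable, the whole define command (header
 * included, hence header->size + sizeof(*header)) is handed to vmw_view_add()
 * above, which keeps a copy of it so that the view can be recreated later if
 * needed (see vmw_view_add()).
 */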
2473
2474/**
2475 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2476 *
2477 * @dev_priv: Pointer to a device private struct.
2478 * @sw_context: The software context being used for this batch.
2479 * @header: Pointer to the command header in the command stream.
2480 */
2481static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2482 struct vmw_sw_context *sw_context,
2483 SVGA3dCmdHeader *header)
2484{
2485 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2486 struct vmw_ctx_bindinfo_so_target binding;
2487 struct vmw_resource *res;
2488 struct {
2489 SVGA3dCmdHeader header;
2490 SVGA3dCmdDXSetSOTargets body;
2491 SVGA3dSoTarget targets[];
2492 } *cmd;
2493 int i, ret, num;
2494
2495 if (!ctx_node)
2496 return -EINVAL;
2497
2498 cmd = container_of(header, typeof(*cmd), header);
2499 num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2500
2501 if (num > SVGA3D_DX_MAX_SOTARGETS) {
2502 VMW_DEBUG_USER("Invalid DX SO binding.\n");
2503 return -EINVAL;
2504 }
2505
2506 for (i = 0; i < num; i++) {
2507 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2508 VMW_RES_DIRTY_SET,
2509 user_surface_converter,
2510 &cmd->targets[i].sid, &res);
2511 if (unlikely(ret != 0))
2512 return ret;
2513
2514 binding.bi.ctx = ctx_node->ctx;
2515 binding.bi.res = res;
2516 binding.bi.bt = vmw_ctx_binding_so_target;
2517 binding.offset = cmd->targets[i].offset;
2518 binding.size = cmd->targets[i].sizeInBytes;
2519 binding.slot = i;
2520
2521 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2522 }
2523
2524 return 0;
2525}
2526
2527static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2528 struct vmw_sw_context *sw_context,
2529 SVGA3dCmdHeader *header)
2530{
2531 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2532 struct vmw_resource *res;
2533 /*
2534 * This is based on the fact that all affected define commands have
2535 * the same initial command body layout.
2536 */
2537 struct {
2538 SVGA3dCmdHeader header;
2539 uint32 defined_id;
2540 } *cmd;
2541 enum vmw_so_type so_type;
2542 int ret;
2543
2544 if (!ctx_node)
2545 return -EINVAL;
2546
2547 so_type = vmw_so_cmd_to_type(header->id);
2548 res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2549 if (IS_ERR(res))
2550 return PTR_ERR(res);
2551 cmd = container_of(header, typeof(*cmd), header);
2552 ret = vmw_cotable_notify(res, cmd->defined_id);
2553
2554 return ret;
2555}
2556
2557/**
2558 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2559 * command
2560 *
2561 * @dev_priv: Pointer to a device private struct.
2562 * @sw_context: The software context being used for this batch.
2563 * @header: Pointer to the command header in the command stream.
2564 */
2565static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2566 struct vmw_sw_context *sw_context,
2567 SVGA3dCmdHeader *header)
2568{
2569 struct {
2570 SVGA3dCmdHeader header;
2571 union {
2572 SVGA3dCmdDXReadbackSubResource r_body;
2573 SVGA3dCmdDXInvalidateSubResource i_body;
2574 SVGA3dCmdDXUpdateSubResource u_body;
2575 SVGA3dSurfaceId sid;
2576 };
2577 } *cmd;
2578
2579 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2580 offsetof(typeof(*cmd), sid));
2581 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2582 offsetof(typeof(*cmd), sid));
2583 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2584 offsetof(typeof(*cmd), sid));
2585
2586 cmd = container_of(header, typeof(*cmd), header);
2587 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2588 VMW_RES_DIRTY_NONE, user_surface_converter,
2589 &cmd->sid, NULL);
2590}
2591
2592static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2593 struct vmw_sw_context *sw_context,
2594 SVGA3dCmdHeader *header)
2595{
2596 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2597
2598 if (!ctx_node)
2599 return -EINVAL;
2600
2601 return 0;
2602}
2603
2604/**
2605 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view
2606 * resource for removal.
2607 *
2608 * @dev_priv: Pointer to a device private struct.
2609 * @sw_context: The software context being used for this batch.
2610 * @header: Pointer to the command header in the command stream.
2611 *
2612 * Check that the view exists, and if it was not created using this command
2613 * batch, conditionally make this command a NOP.
2614 */
2615static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2616 struct vmw_sw_context *sw_context,
2617 SVGA3dCmdHeader *header)
2618{
2619 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2620 struct {
2621 SVGA3dCmdHeader header;
2622 union vmw_view_destroy body;
2623 } *cmd = container_of(header, typeof(*cmd), header);
2624 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2625 struct vmw_resource *view;
2626 int ret;
2627
2628 if (!ctx_node)
2629 return -EINVAL;
2630
2631 ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2632 &sw_context->staged_cmd_res, &view);
2633 if (ret || !view)
2634 return ret;
2635
2636 /*
2637 * If the view wasn't created during this command batch, it might
2638 * have been removed due to a context swapout, so add a
2639 * relocation to conditionally make this command a NOP to avoid
2640 * device errors.
2641 */
2642 return vmw_resource_relocation_add(sw_context, view,
2643 vmw_ptr_diff(sw_context->buf_start,
2644 &cmd->header.id),
2645 vmw_res_rel_cond_nop);
2646}
2647
2648/**
2649 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2650 *
2651 * @dev_priv: Pointer to a device private struct.
2652 * @sw_context: The software context being used for this batch.
2653 * @header: Pointer to the command header in the command stream.
2654 */
2655static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2656 struct vmw_sw_context *sw_context,
2657 SVGA3dCmdHeader *header)
2658{
2659 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2660 struct vmw_resource *res;
2661 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2662 container_of(header, typeof(*cmd), header);
2663 int ret;
2664
2665 if (!ctx_node)
2666 return -EINVAL;
2667
2668 res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2669 ret = vmw_cotable_notify(res, cmd->body.shaderId);
2670 if (ret)
2671 return ret;
2672
2673 return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2674 cmd->body.shaderId, cmd->body.type,
2675 &sw_context->staged_cmd_res);
2676}
2677
2678/**
2679 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2680 *
2681 * @dev_priv: Pointer to a device private struct.
2682 * @sw_context: The software context being used for this batch.
2683 * @header: Pointer to the command header in the command stream.
2684 */
2685static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2686 struct vmw_sw_context *sw_context,
2687 SVGA3dCmdHeader *header)
2688{
2689 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2690 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2691 container_of(header, typeof(*cmd), header);
2692 int ret;
2693
2694 if (!ctx_node)
2695 return -EINVAL;
2696
2697 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2698 &sw_context->staged_cmd_res);
2699
2700 return ret;
2701}
2702
2703/**
2704 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2705 *
2706 * @dev_priv: Pointer to a device private struct.
2707 * @sw_context: The software context being used for this batch.
2708 * @header: Pointer to the command header in the command stream.
2709 */
2710static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2711 struct vmw_sw_context *sw_context,
2712 SVGA3dCmdHeader *header)
2713{
2714 struct vmw_resource *ctx;
2715 struct vmw_resource *res;
2716 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2717 container_of(header, typeof(*cmd), header);
2718 int ret;
2719
2720 if (cmd->body.cid != SVGA3D_INVALID_ID) {
2721 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2722 VMW_RES_DIRTY_SET,
2723 user_context_converter, &cmd->body.cid,
2724 &ctx);
2725 if (ret)
2726 return ret;
2727 } else {
2728 struct vmw_ctx_validation_info *ctx_node =
2729 VMW_GET_CTX_NODE(sw_context);
2730
2731 if (!ctx_node)
2732 return -EINVAL;
2733
2734 ctx = ctx_node->ctx;
2735 }
2736
2737 res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2738 if (IS_ERR(res)) {
2739 VMW_DEBUG_USER("Could not find shader to bind.\n");
2740 return PTR_ERR(res);
2741 }
2742
2743 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
2744 VMW_RES_DIRTY_NONE);
2745 if (ret) {
2746 VMW_DEBUG_USER("Error creating resource validation node.\n");
2747 return ret;
2748 }
2749
2750 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2751 &cmd->body.mobid,
2752 cmd->body.offsetInBytes);
2753}
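
/*
 * Illustrative note (sketch of the helper's behavior, not new code):
 * vmw_cmd_res_switch_backup() looks up the buffer object named by
 * cmd->body.mobid, records a relocation so that the id is patched to the
 * real MOB id at submission time, and schedules the shader to switch to
 * that backup buffer at cmd->body.offsetInBytes when it is unreserved.
 */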
2754
2755/**
2756 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2757 *
2758 * @dev_priv: Pointer to a device private struct.
2759 * @sw_context: The software context being used for this batch.
2760 * @header: Pointer to the command header in the command stream.
2761 */
2762static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2763 struct vmw_sw_context *sw_context,
2764 SVGA3dCmdHeader *header)
2765{
2766 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2767 container_of(header, typeof(*cmd), header);
2768 struct vmw_resource *view;
2769 struct vmw_res_cache_entry *rcache;
2770
2771 view = vmw_view_id_val_add(sw_context, vmw_view_sr,
2772 cmd->body.shaderResourceViewId);
2773 if (IS_ERR(view))
2774 return PTR_ERR(view);
2775
2776	/*
2777	 * Normally the shader-resource view is not gpu-dirtying, but for
2778	 * this particular command it is.
2779	 * So mark the last looked-up surface, which is the surface
2780	 * the view points to, gpu-dirty.
2781	 */
2782 rcache = &sw_context->res_cache[vmw_res_surface];
2783 vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
2784 VMW_RES_DIRTY_SET);
2785 return 0;
2786}
2787
2788/**
2789 * vmw_cmd_dx_transfer_from_buffer - Validate
2790 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2791 *
2792 * @dev_priv: Pointer to a device private struct.
2793 * @sw_context: The software context being used for this batch.
2794 * @header: Pointer to the command header in the command stream.
2795 */
2796static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2797 struct vmw_sw_context *sw_context,
2798 SVGA3dCmdHeader *header)
2799{
2800 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2801 container_of(header, typeof(*cmd), header);
2802 int ret;
2803
2804 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2805 VMW_RES_DIRTY_NONE, user_surface_converter,
2806 &cmd->body.srcSid, NULL);
2807 if (ret != 0)
2808 return ret;
2809
2810 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2811 VMW_RES_DIRTY_SET, user_surface_converter,
2812 &cmd->body.destSid, NULL);
2813}
2814
2815/**
2816 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2817 *
2818 * @dev_priv: Pointer to a device private struct.
2819 * @sw_context: The software context being used for this batch.
2820 * @header: Pointer to the command header in the command stream.
2821 */
2822static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2823 struct vmw_sw_context *sw_context,
2824 SVGA3dCmdHeader *header)
2825{
2826 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2827 container_of(header, typeof(*cmd), header);
2828
2829 if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2830 return -EINVAL;
2831
2832 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2833 VMW_RES_DIRTY_SET, user_surface_converter,
2834 &cmd->body.surface.sid, NULL);
2835}
2836
2837static int vmw_cmd_sm5(struct vmw_private *dev_priv,
2838 struct vmw_sw_context *sw_context,
2839 SVGA3dCmdHeader *header)
2840{
2841 if (!has_sm5_context(dev_priv))
2842 return -EINVAL;
2843
2844 return 0;
2845}
2846
2847static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
2848 struct vmw_sw_context *sw_context,
2849 SVGA3dCmdHeader *header)
2850{
2851 if (!has_sm5_context(dev_priv))
2852 return -EINVAL;
2853
2854 return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
2855}
2856
2857static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
2858 struct vmw_sw_context *sw_context,
2859 SVGA3dCmdHeader *header)
2860{
2861 if (!has_sm5_context(dev_priv))
2862 return -EINVAL;
2863
2864 return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
2865}
2866
2867static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
2868 struct vmw_sw_context *sw_context,
2869 SVGA3dCmdHeader *header)
2870{
2871 struct {
2872 SVGA3dCmdHeader header;
2873 SVGA3dCmdDXClearUAViewUint body;
2874 } *cmd = container_of(header, typeof(*cmd), header);
2875 struct vmw_resource *ret;
2876
2877 if (!has_sm5_context(dev_priv))
2878 return -EINVAL;
2879
2880 ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2881 cmd->body.uaViewId);
2882
2883 return PTR_ERR_OR_ZERO(ret);
2884}
2885
2886static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
2887 struct vmw_sw_context *sw_context,
2888 SVGA3dCmdHeader *header)
2889{
2890 struct {
2891 SVGA3dCmdHeader header;
2892 SVGA3dCmdDXClearUAViewFloat body;
2893 } *cmd = container_of(header, typeof(*cmd), header);
2894 struct vmw_resource *ret;
2895
2896 if (!has_sm5_context(dev_priv))
2897 return -EINVAL;
2898
2899 ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2900 cmd->body.uaViewId);
2901
2902 return PTR_ERR_OR_ZERO(ret);
2903}
2904
2905static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
2906 struct vmw_sw_context *sw_context,
2907 SVGA3dCmdHeader *header)
2908{
2909 struct {
2910 SVGA3dCmdHeader header;
2911 SVGA3dCmdDXSetUAViews body;
2912 } *cmd = container_of(header, typeof(*cmd), header);
2913 u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2914 sizeof(SVGA3dUAViewId);
2915 int ret;
2916
2917 if (!has_sm5_context(dev_priv))
2918 return -EINVAL;
2919
2920 if (num_uav > SVGA3D_MAX_UAVIEWS) {
2921 VMW_DEBUG_USER("Invalid UAV binding.\n");
2922 return -EINVAL;
2923 }
2924
2925 ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2926 vmw_ctx_binding_uav, 0, (void *)&cmd[1],
2927 num_uav, 0);
2928 if (ret)
2929 return ret;
2930
2931 vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
2932 cmd->body.uavSpliceIndex);
2933
2934 return ret;
2935}
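
/*
 * Worked example (illustrative): a SET_UA_VIEWS command binding two views
 * carries the view ids in a variable-length tail after the fixed body, so
 * with header.size == sizeof(body) + 2 * sizeof(SVGA3dUAViewId) the
 * computation above yields num_uav == 2, and (void *)&cmd[1] points at the
 * first SVGA3dUAViewId of that tail.
 */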
2936
2937static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
2938 struct vmw_sw_context *sw_context,
2939 SVGA3dCmdHeader *header)
2940{
2941 struct {
2942 SVGA3dCmdHeader header;
2943 SVGA3dCmdDXSetCSUAViews body;
2944 } *cmd = container_of(header, typeof(*cmd), header);
2945 u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2946 sizeof(SVGA3dUAViewId);
2947 int ret;
2948
2949 if (!has_sm5_context(dev_priv))
2950 return -EINVAL;
2951
2952 if (num_uav > SVGA3D_MAX_UAVIEWS) {
2953 VMW_DEBUG_USER("Invalid UAV binding.\n");
2954 return -EINVAL;
2955 }
2956
2957 ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2958 vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
2959 num_uav, 0);
2960 if (ret)
2961 return ret;
2962
2963 vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
2964 cmd->body.startIndex);
2965
2966 return ret;
2967}
2968
2969static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
2970 struct vmw_sw_context *sw_context,
2971 SVGA3dCmdHeader *header)
2972{
2973 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2974 struct vmw_resource *res;
2975 struct {
2976 SVGA3dCmdHeader header;
2977 SVGA3dCmdDXDefineStreamOutputWithMob body;
2978 } *cmd = container_of(header, typeof(*cmd), header);
2979 int ret;
2980
2981 if (!has_sm5_context(dev_priv))
2982 return -EINVAL;
2983
2984 if (!ctx_node) {
2985 DRM_ERROR("DX Context not set.\n");
2986 return -EINVAL;
2987 }
2988
2989 res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
2990 ret = vmw_cotable_notify(res, cmd->body.soid);
2991 if (ret)
2992 return ret;
2993
2994 return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
2995 cmd->body.soid,
2996 &sw_context->staged_cmd_res);
2997}
2998
2999static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
3000 struct vmw_sw_context *sw_context,
3001 SVGA3dCmdHeader *header)
3002{
3003 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3004 struct vmw_resource *res;
3005 struct {
3006 SVGA3dCmdHeader header;
3007 SVGA3dCmdDXDestroyStreamOutput body;
3008 } *cmd = container_of(header, typeof(*cmd), header);
3009
3010 if (!ctx_node) {
3011 DRM_ERROR("DX Context not set.\n");
3012 return -EINVAL;
3013 }
3014
3015 /*
3016	 * When the device does not support SM5, the streamoutput-with-mob
3017	 * commands are not available to user-space. Simply return in this case.
3018 */
3019 if (!has_sm5_context(dev_priv))
3020 return 0;
3021
3022 /*
3023	 * On an SM5-capable device, if the lookup fails then user-space
3024	 * probably used the old streamoutput define command. Return without an error.
3025 */
3026 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3027 cmd->body.soid);
3028 if (IS_ERR(res))
3029 return 0;
3030
3031 return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
3032 &sw_context->staged_cmd_res);
3033}
3034
3035static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
3036 struct vmw_sw_context *sw_context,
3037 SVGA3dCmdHeader *header)
3038{
3039 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3040 struct vmw_resource *res;
3041 struct {
3042 SVGA3dCmdHeader header;
3043 SVGA3dCmdDXBindStreamOutput body;
3044 } *cmd = container_of(header, typeof(*cmd), header);
3045 int ret;
3046
3047 if (!has_sm5_context(dev_priv))
3048 return -EINVAL;
3049
3050 if (!ctx_node) {
3051 DRM_ERROR("DX Context not set.\n");
3052 return -EINVAL;
3053 }
3054
3055 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3056 cmd->body.soid);
3057 if (IS_ERR(res)) {
3058 DRM_ERROR("Could not find streamoutput to bind.\n");
3059 return PTR_ERR(res);
3060 }
3061
3062 vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
3063
3064 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
3065 VMW_RES_DIRTY_NONE);
3066 if (ret) {
3067 DRM_ERROR("Error creating resource validation node.\n");
3068 return ret;
3069 }
3070
3071 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
3072 &cmd->body.mobid,
3073 cmd->body.offsetInBytes);
3074}
3075
3076static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
3077 struct vmw_sw_context *sw_context,
3078 SVGA3dCmdHeader *header)
3079{
3080 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3081 struct vmw_resource *res;
3082 struct vmw_ctx_bindinfo_so binding;
3083 struct {
3084 SVGA3dCmdHeader header;
3085 SVGA3dCmdDXSetStreamOutput body;
3086 } *cmd = container_of(header, typeof(*cmd), header);
3087 int ret;
3088
3089 if (!ctx_node) {
3090 DRM_ERROR("DX Context not set.\n");
3091 return -EINVAL;
3092 }
3093
3094 if (cmd->body.soid == SVGA3D_INVALID_ID)
3095 return 0;
3096
3097 /*
3098	 * When the device does not support SM5, the streamoutput-with-mob
3099	 * commands are not available to user-space. Simply return in this case.
3100 */
3101 if (!has_sm5_context(dev_priv))
3102 return 0;
3103
3104 /*
3105	 * On an SM5-capable device, if the lookup fails then user-space
3106	 * probably used the old streamoutput define command. Return without an error.
3107 */
3108 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3109 cmd->body.soid);
3110	if (IS_ERR(res))
3111		return 0;
3113
3114 ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
3115 VMW_RES_DIRTY_NONE);
3116 if (ret) {
3117 DRM_ERROR("Error creating resource validation node.\n");
3118 return ret;
3119 }
3120
3121 binding.bi.ctx = ctx_node->ctx;
3122 binding.bi.res = res;
3123 binding.bi.bt = vmw_ctx_binding_so;
3124	binding.slot = 0; /* Only one SO is set on a context at a time. */
3125
3126 vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
3127 binding.slot);
3128
3129 return ret;
3130}
3131
3132static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
3133 struct vmw_sw_context *sw_context,
3134 SVGA3dCmdHeader *header)
3135{
3136 struct vmw_draw_indexed_instanced_indirect_cmd {
3137 SVGA3dCmdHeader header;
3138 SVGA3dCmdDXDrawIndexedInstancedIndirect body;
3139 } *cmd = container_of(header, typeof(*cmd), header);
3140
3141 if (!has_sm5_context(dev_priv))
3142 return -EINVAL;
3143
3144 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3145 VMW_RES_DIRTY_NONE, user_surface_converter,
3146 &cmd->body.argsBufferSid, NULL);
3147}
3148
3149static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
3150 struct vmw_sw_context *sw_context,
3151 SVGA3dCmdHeader *header)
3152{
3153 struct vmw_draw_instanced_indirect_cmd {
3154 SVGA3dCmdHeader header;
3155 SVGA3dCmdDXDrawInstancedIndirect body;
3156 } *cmd = container_of(header, typeof(*cmd), header);
3157
3158 if (!has_sm5_context(dev_priv))
3159 return -EINVAL;
3160
3161 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3162 VMW_RES_DIRTY_NONE, user_surface_converter,
3163 &cmd->body.argsBufferSid, NULL);
3164}
3165
3166static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
3167 struct vmw_sw_context *sw_context,
3168 SVGA3dCmdHeader *header)
3169{
3170 struct vmw_dispatch_indirect_cmd {
3171 SVGA3dCmdHeader header;
3172 SVGA3dCmdDXDispatchIndirect body;
3173 } *cmd = container_of(header, typeof(*cmd), header);
3174
3175 if (!has_sm5_context(dev_priv))
3176 return -EINVAL;
3177
3178 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3179 VMW_RES_DIRTY_NONE, user_surface_converter,
3180 &cmd->body.argsBufferSid, NULL);
3181}
3182
3183static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3184 struct vmw_sw_context *sw_context,
3185 void *buf, uint32_t *size)
3186{
3187 uint32_t size_remaining = *size;
3188 uint32_t cmd_id;
3189
3190 cmd_id = ((uint32_t *)buf)[0];
3191 switch (cmd_id) {
3192 case SVGA_CMD_UPDATE:
3193 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3194 break;
3195 case SVGA_CMD_DEFINE_GMRFB:
3196 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3197 break;
3198 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3199 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3200 break;
3201 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3202		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3203 break;
3204 default:
3205 VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
3206 return -EINVAL;
3207 }
3208
3209 if (*size > size_remaining) {
3210 VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
3211 cmd_id);
3212 return -EINVAL;
3213 }
3214
3215 if (unlikely(!sw_context->kernel)) {
3216 VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
3217 return -EPERM;
3218 }
3219
3220 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3221 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3222
3223 return 0;
3224}
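
/*
 * Layout assumed above (illustrative): a non-3D SVGA fifo command is a
 * 32-bit command id immediately followed by its body struct, e.g. for
 * SVGA_CMD_UPDATE:
 *
 *	uint32_t cmd_id;		-- SVGA_CMD_UPDATE
 *	SVGAFifoCmdUpdate body;		-- x, y, width, height
 *
 * which is why *size is computed as sizeof(uint32_t) + sizeof(body)
 * rather than taken from an SVGA3dCmdHeader.
 */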
3225
3226static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3227 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3228 false, false, false),
3229 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3230 false, false, false),
3231 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3232 true, false, false),
3233 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3234 true, false, false),
3235 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3236 true, false, false),
3237 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3238 false, false, false),
3239 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3240 false, false, false),
3241 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3242 true, false, false),
3243 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3244 true, false, false),
3245 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3246 true, false, false),
3247 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3248 &vmw_cmd_set_render_target_check, true, false, false),
3249 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3250 true, false, false),
3251 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3252 true, false, false),
3253 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3254 true, false, false),
3255 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3256 true, false, false),
3257 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3258 true, false, false),
3259 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3260 true, false, false),
3261 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3262 true, false, false),
3263 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3264 false, false, false),
3265 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3266 true, false, false),
3267 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3268 true, false, false),
3269 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3270 true, false, false),
3271 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3272 true, false, false),
3273 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3274 true, false, false),
3275 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3276 true, false, false),
3277 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3278 true, false, false),
3279 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3280 true, false, false),
3281 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3282 true, false, false),
3283 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3284 true, false, false),
3285 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3286 &vmw_cmd_blt_surf_screen_check, false, false, false),
3287 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3288 false, false, false),
3289 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3290 false, false, false),
3291 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3292 false, false, false),
3293 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3294 false, false, false),
3295 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3296 false, false, false),
3297 VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
3298 false, false, false),
3299 VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
3300 false, false, false),
3301 VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
3302 VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
3303 VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
3304 VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
3305 VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
3306 VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
3307 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3308 false, false, true),
3309 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3310 false, false, true),
3311 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3312 false, false, true),
3313 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3314 false, false, true),
3315 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3316 false, false, true),
3317 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3318 false, false, true),
3319 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3320 false, false, true),
3321 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3322 false, false, true),
3323 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3324 true, false, true),
3325 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3326 false, false, true),
3327 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3328 true, false, true),
3329 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3330 &vmw_cmd_update_gb_surface, true, false, true),
3331 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3332 &vmw_cmd_readback_gb_image, true, false, true),
3333 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3334 &vmw_cmd_readback_gb_surface, true, false, true),
3335 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3336 &vmw_cmd_invalidate_gb_image, true, false, true),
3337 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3338 &vmw_cmd_invalidate_gb_surface, true, false, true),
3339 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3340 false, false, true),
3341 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3342 false, false, true),
3343 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3344 false, false, true),
3345 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3346 false, false, true),
3347 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3348 false, false, true),
3349 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3350 false, false, true),
3351 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3352 true, false, true),
3353 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3354 false, false, true),
3355 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3356 false, false, false),
3357 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3358 true, false, true),
3359 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3360 true, false, true),
3361 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3362 true, false, true),
3363 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3364 true, false, true),
3365 VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3366 true, false, true),
3367 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3368 false, false, true),
3369 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3370 false, false, true),
3371 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3372 false, false, true),
3373 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3374 false, false, true),
3375 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3376 false, false, true),
3377 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3378 false, false, true),
3379 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3380 false, false, true),
3381 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3382 false, false, true),
3383 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3384 false, false, true),
3385 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3386 false, false, true),
3387 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3388 true, false, true),
3389 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3390 false, false, true),
3391 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3392 false, false, true),
3393 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3394 false, false, true),
3395 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3396 false, false, true),
3397
3398 /* SM commands */
3399 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3400 false, false, true),
3401 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3402 false, false, true),
3403 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3404 false, false, true),
3405 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3406 false, false, true),
3407 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3408 false, false, true),
3409 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3410 &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3411 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3412 &vmw_cmd_dx_set_shader_res, true, false, true),
3413 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3414 true, false, true),
3415 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3416 true, false, true),
3417 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3418 true, false, true),
3419 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3420 true, false, true),
3421 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3422 true, false, true),
3423 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3424 &vmw_cmd_dx_cid_check, true, false, true),
3425 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3426 true, false, true),
3427 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3428 &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3429 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3430 &vmw_cmd_dx_set_index_buffer, true, false, true),
3431 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3432 &vmw_cmd_dx_set_rendertargets, true, false, true),
3433 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3434 true, false, true),
3435 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3436 &vmw_cmd_dx_cid_check, true, false, true),
3437 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3438 &vmw_cmd_dx_cid_check, true, false, true),
3439 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3440 true, false, true),
3441 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3442 true, false, true),
3443 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3444 true, false, true),
3445 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3446 &vmw_cmd_dx_cid_check, true, false, true),
3447 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3448 true, false, true),
3449 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3450 true, false, true),
3451 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3452 true, false, true),
3453 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3454 true, false, true),
3455 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3456 true, false, true),
3457 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3458 true, false, true),
3459 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3460 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3461 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3462 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3463 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3464 true, false, true),
3465 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3466 true, false, true),
3467 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3468 &vmw_cmd_dx_check_subresource, true, false, true),
3469 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3470 &vmw_cmd_dx_check_subresource, true, false, true),
3471 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3472 &vmw_cmd_dx_check_subresource, true, false, true),
3473 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3474 &vmw_cmd_dx_view_define, true, false, true),
3475 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3476 &vmw_cmd_dx_view_remove, true, false, true),
3477 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3478 &vmw_cmd_dx_view_define, true, false, true),
3479 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3480 &vmw_cmd_dx_view_remove, true, false, true),
3481 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3482 &vmw_cmd_dx_view_define, true, false, true),
3483 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3484 &vmw_cmd_dx_view_remove, true, false, true),
3485 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3486 &vmw_cmd_dx_so_define, true, false, true),
3487 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3488 &vmw_cmd_dx_cid_check, true, false, true),
3489 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3490 &vmw_cmd_dx_so_define, true, false, true),
3491 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3492 &vmw_cmd_dx_cid_check, true, false, true),
3493 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3494 &vmw_cmd_dx_so_define, true, false, true),
3495 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3496 &vmw_cmd_dx_cid_check, true, false, true),
3497 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3498 &vmw_cmd_dx_so_define, true, false, true),
3499 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3500 &vmw_cmd_dx_cid_check, true, false, true),
3501 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3502 &vmw_cmd_dx_so_define, true, false, true),
3503 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3504 &vmw_cmd_dx_cid_check, true, false, true),
3505 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3506 &vmw_cmd_dx_define_shader, true, false, true),
3507 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3508 &vmw_cmd_dx_destroy_shader, true, false, true),
3509 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3510 &vmw_cmd_dx_bind_shader, true, false, true),
3511 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3512 &vmw_cmd_dx_so_define, true, false, true),
3513 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3514 &vmw_cmd_dx_destroy_streamoutput, true, false, true),
3515 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
3516 &vmw_cmd_dx_set_streamoutput, true, false, true),
3517 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3518 &vmw_cmd_dx_set_so_targets, true, false, true),
3519 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3520 &vmw_cmd_dx_cid_check, true, false, true),
3521 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3522 &vmw_cmd_dx_cid_check, true, false, true),
3523 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3524 &vmw_cmd_buffer_copy_check, true, false, true),
3525 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3526 &vmw_cmd_pred_copy_check, true, false, true),
3527 VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3528 &vmw_cmd_dx_transfer_from_buffer,
3529 true, false, true),
3530 VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3531 true, false, true),
3532
3533 /*
3534 * SM5 commands
3535 */
3536 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
3537 true, false, true),
3538 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
3539 true, false, true),
3540 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
3541 true, false, true),
3542 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
3543 &vmw_cmd_clear_uav_float, true, false, true),
3544 VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
3545 false, true),
3546 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
3547 true),
3548 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
3549 &vmw_cmd_indexed_instanced_indirect, true, false, true),
3550 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
3551 &vmw_cmd_instanced_indirect, true, false, true),
3552 VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
3553 VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
3554 &vmw_cmd_dispatch_indirect, true, false, true),
3555 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
3556 false, true),
3557 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
3558 &vmw_cmd_sm5_view_define, true, false, true),
3559 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
3560 &vmw_cmd_dx_define_streamoutput, true, false, true),
3561 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
3562 &vmw_cmd_dx_bind_streamoutput, true, false, true),
3563};
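
/*
 * Reading the table above (illustrative): the three booleans of each
 * VMW_CMD_DEF entry are, in order, user_allow, gb_disable and gb_enable,
 * matching the checks in vmw_cmd_check() below. For example
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma, true, false, false)
 *
 * marks a command that is allowed from the execbuf ioctl, is not disabled
 * when guest-backed objects are available, and does not require them.
 */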
3564
3565bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3566{
3567 u32 cmd_id = ((u32 *) buf)[0];
3568
3569 if (cmd_id >= SVGA_CMD_MAX) {
3570 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3571 const struct vmw_cmd_entry *entry;
3572
3573 *size = header->size + sizeof(SVGA3dCmdHeader);
3574 cmd_id = header->id;
3575 if (cmd_id >= SVGA_3D_CMD_MAX)
3576 return false;
3577
3578 cmd_id -= SVGA_3D_CMD_BASE;
3579 entry = &vmw_cmd_entries[cmd_id];
3580 *cmd = entry->cmd_name;
3581 return true;
3582 }
3583
3584 switch (cmd_id) {
3585 case SVGA_CMD_UPDATE:
3586 *cmd = "SVGA_CMD_UPDATE";
3587 *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3588 break;
3589 case SVGA_CMD_DEFINE_GMRFB:
3590 *cmd = "SVGA_CMD_DEFINE_GMRFB";
3591 *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3592 break;
3593 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3594 *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3595 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3596 break;
3597 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3598 *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3599		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
3600 break;
3601 default:
3602 *cmd = "UNKNOWN";
3603 *size = 0;
3604 return false;
3605 }
3606
3607 return true;
3608}
3609
3610static int vmw_cmd_check(struct vmw_private *dev_priv,
3611 struct vmw_sw_context *sw_context, void *buf,
3612 uint32_t *size)
3613{
3614 uint32_t cmd_id;
3615 uint32_t size_remaining = *size;
3616 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3617 int ret;
3618 const struct vmw_cmd_entry *entry;
3619 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3620
3621 cmd_id = ((uint32_t *)buf)[0];
3622	/* Handle any non-3D commands */
3623 if (unlikely(cmd_id < SVGA_CMD_MAX))
3624 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3625
3626
3627 cmd_id = header->id;
3628 *size = header->size + sizeof(SVGA3dCmdHeader);
3629
3630 cmd_id -= SVGA_3D_CMD_BASE;
3631 if (unlikely(*size > size_remaining))
3632 goto out_invalid;
3633
3634 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3635 goto out_invalid;
3636
3637 entry = &vmw_cmd_entries[cmd_id];
3638 if (unlikely(!entry->func))
3639 goto out_invalid;
3640
3641 if (unlikely(!entry->user_allow && !sw_context->kernel))
3642 goto out_privileged;
3643
3644 if (unlikely(entry->gb_disable && gb))
3645 goto out_old;
3646
3647 if (unlikely(entry->gb_enable && !gb))
3648 goto out_new;
3649
3650 ret = entry->func(dev_priv, sw_context, header);
3651 if (unlikely(ret != 0)) {
3652 VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3653 cmd_id + SVGA_3D_CMD_BASE, ret);
3654 return ret;
3655 }
3656
3657 return 0;
3658out_invalid:
3659 VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3660 cmd_id + SVGA_3D_CMD_BASE);
3661 return -EINVAL;
3662out_privileged:
3663 VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3664 cmd_id + SVGA_3D_CMD_BASE);
3665 return -EPERM;
3666out_old:
3667 VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3668 cmd_id + SVGA_3D_CMD_BASE);
3669 return -EINVAL;
3670out_new:
3671 VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3672 cmd_id + SVGA_3D_CMD_BASE);
3673 return -EINVAL;
3674}
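
/*
 * Illustrative note: a 3D command is an SVGA3dCmdHeader { u32 id; u32 size; }
 * followed by size bytes of body, so the full command size computed above is
 * header->size + sizeof(SVGA3dCmdHeader), and the verifier table index is
 * simply id - SVGA_3D_CMD_BASE.
 */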
3675
3676static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3677 struct vmw_sw_context *sw_context, void *buf,
3678 uint32_t size)
3679{
3680 int32_t cur_size = size;
3681 int ret;
3682
3683 sw_context->buf_start = buf;
3684
3685 while (cur_size > 0) {
3686 size = cur_size;
3687 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3688 if (unlikely(ret != 0))
3689 return ret;
3690 buf = (void *)((unsigned long) buf + size);
3691 cur_size -= size;
3692 }
3693
3694 if (unlikely(cur_size != 0)) {
3695 VMW_DEBUG_USER("Command verifier out of sync.\n");
3696 return -EINVAL;
3697 }
3698
3699 return 0;
3700}
3701
3702static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3703{
3704 /* Memory is validation context memory, so no need to free it */
3705 INIT_LIST_HEAD(&sw_context->bo_relocations);
3706}
3707
3708static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3709{
3710 struct vmw_relocation *reloc;
3711 struct ttm_buffer_object *bo;
3712
3713 list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3714 bo = &reloc->vbo->base;
3715 switch (bo->resource->mem_type) {
3716 case TTM_PL_VRAM:
3717 reloc->location->offset += bo->resource->start << PAGE_SHIFT;
3718 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3719 break;
3720 case VMW_PL_GMR:
3721 reloc->location->gmrId = bo->resource->start;
3722 break;
3723 case VMW_PL_MOB:
3724 *reloc->mob_loc = bo->resource->start;
3725 break;
3726 default:
3727 BUG();
3728 }
3729 }
3730 vmw_free_relocations(sw_context);
3731}
3732
3733static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3734 uint32_t size)
3735{
3736 if (likely(sw_context->cmd_bounce_size >= size))
3737 return 0;
3738
3739 if (sw_context->cmd_bounce_size == 0)
3740 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3741
3742 while (sw_context->cmd_bounce_size < size) {
3743 sw_context->cmd_bounce_size =
3744 PAGE_ALIGN(sw_context->cmd_bounce_size +
3745 (sw_context->cmd_bounce_size >> 1));
3746 }
3747
3748 vfree(sw_context->cmd_bounce);
3749 sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3750
3751 if (sw_context->cmd_bounce == NULL) {
3752 VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3753 sw_context->cmd_bounce_size = 0;
3754 return -ENOMEM;
3755 }
3756
3757 return 0;
3758}
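
/*
 * Illustrative growth sequence (assuming VMWGFX_CMD_BOUNCE_INIT_SIZE is
 * 32 KiB and 4 KiB pages): a request for 100 KiB grows the bounce buffer
 * as PAGE_ALIGN(size + size / 2), i.e. 32K -> 48K -> 72K -> 108K. The old
 * contents are discarded, since the buffer is only a staging area for the
 * next copy_from_user() of the command stream.
 */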
3759
3760/*
3761 * vmw_execbuf_fence_commands - create and submit a command stream fence
3762 *
3763 * Creates a fence object and submits a command stream marker.
3764 * If this fails for some reason, we sync the fifo and set *p_fence to NULL.
3765 * It is then safe to fence buffers with a NULL pointer.
3766 *
3767 * If @p_handle is not NULL, @file_priv must also not be NULL, and a
3768 * user-space handle is created; otherwise no handle is created.
3769 */
3770
3771int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3772 struct vmw_private *dev_priv,
3773 struct vmw_fence_obj **p_fence,
3774 uint32_t *p_handle)
3775{
3776 uint32_t sequence;
3777 int ret;
3778 bool synced = false;
3779
3780 /* p_handle implies file_priv. */
3781 BUG_ON(p_handle != NULL && file_priv == NULL);
3782
3783 ret = vmw_cmd_send_fence(dev_priv, &sequence);
3784 if (unlikely(ret != 0)) {
3785 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3786 synced = true;
3787 }
3788
3789 if (p_handle != NULL)
3790 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3791 sequence, p_fence, p_handle);
3792 else
3793 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3794
3795 if (unlikely(ret != 0 && !synced)) {
3796 (void) vmw_fallback_wait(dev_priv, false, false, sequence,
3797 false, VMW_FENCE_WAIT_TIMEOUT);
3798 *p_fence = NULL;
3799 }
3800
3801 return ret;
3802}
3803
3804/**
3805 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3806 *
3807 * @dev_priv: Pointer to a vmw_private struct.
3808 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3809 * @ret: Return value from fence object creation.
3810 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3811 * the information should be copied.
3812 * @fence: Pointer to the fence object.
3813 * @fence_handle: User-space fence handle.
3814 * @out_fence_fd: Exported file descriptor for the fence; -1 if not used.
3815 * @sync_file: Only used to clean up in case of an error in this function.
3816 *
3817 * This function copies fence information to user-space. If copying fails, the
3818 * user-space struct drm_vmw_fence_rep::error member is hopefully left
3819 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
3820 * will hopefully be detected.
3821 *
3822 * Also, if copying fails, user-space will be unable to signal the fence
3823 * object, so we wait for it immediately and then unreference the user-space reference.
3824 */
3825void
3826vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3827 struct vmw_fpriv *vmw_fp, int ret,
3828 struct drm_vmw_fence_rep __user *user_fence_rep,
3829 struct vmw_fence_obj *fence, uint32_t fence_handle,
3830 int32_t out_fence_fd, struct sync_file *sync_file)
3831{
3832 struct drm_vmw_fence_rep fence_rep;
3833
3834 if (user_fence_rep == NULL)
3835 return;
3836
3837 memset(&fence_rep, 0, sizeof(fence_rep));
3838
3839 fence_rep.error = ret;
3840 fence_rep.fd = out_fence_fd;
3841 if (ret == 0) {
3842 BUG_ON(fence == NULL);
3843
3844 fence_rep.handle = fence_handle;
3845 fence_rep.seqno = fence->base.seqno;
3846 vmw_update_seqno(dev_priv);
3847 fence_rep.passed_seqno = dev_priv->last_read_seqno;
3848 }
3849
3850 /*
3851 * copy_to_user errors will be detected by user space not seeing
3852 * fence_rep::error filled in. Typically user-space would have pre-set
3853 * that member to -EFAULT.
3854 */
3855 ret = copy_to_user(user_fence_rep, &fence_rep,
3856 sizeof(fence_rep));
3857
3858 /*
3859 * User-space lost the fence object. We need to sync and unreference the
3860 * handle.
3861 */
3862 if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3863 if (sync_file)
3864 fput(sync_file->file);
3865
3866 if (fence_rep.fd != -1) {
3867 put_unused_fd(fence_rep.fd);
3868 fence_rep.fd = -1;
3869 }
3870
3871 ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
3872 TTM_REF_USAGE);
3873 VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3874 (void) vmw_fence_obj_wait(fence, false, false,
3875 VMW_FENCE_WAIT_TIMEOUT);
3876 }
3877}
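
/*
 * Hypothetical user-space counterpart (sketch only, not part of this
 * driver): callers that want to detect a lost fence pre-set the error
 * member before the ioctl:
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *	arg.fence_rep = (unsigned long)&rep;
 *	drmCommandWriteRead(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (rep.error == -EFAULT)
 *		-- the copy above never happened; the fence is unusable
 *
 * matching the convention described in the kernel-doc above.
 */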
3878
3879/**
3880 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3881 *
3882 * @dev_priv: Pointer to a device private structure.
3883 * @kernel_commands: Pointer to the unpatched command batch.
3884 * @command_size: Size of the unpatched command batch.
3885 * @sw_context: Structure holding the relocation lists.
3886 *
3887 * Side effects: If this function returns 0, then the command batch pointed to
3888 * by @kernel_commands will have been modified.
3889 */
3890static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3891 void *kernel_commands, u32 command_size,
3892 struct vmw_sw_context *sw_context)
3893{
3894 void *cmd;
3895
3896 if (sw_context->dx_ctx_node)
3897 cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
3898 sw_context->dx_ctx_node->ctx->id);
3899 else
3900 cmd = VMW_CMD_RESERVE(dev_priv, command_size);
3901
3902 if (!cmd)
3903 return -ENOMEM;
3904
3905 vmw_apply_relocations(sw_context);
3906 memcpy(cmd, kernel_commands, command_size);
3907 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3908 vmw_resource_relocations_free(&sw_context->res_relocations);
3909 vmw_cmd_commit(dev_priv, command_size);
3910
3911 return 0;
3912}
3913
3914/**
3915 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3916 * command buffer manager.
3917 *
3918 * @dev_priv: Pointer to a device private structure.
3919 * @header: Opaque handle to the command buffer allocation.
3920 * @command_size: Size of the unpatched command batch.
3921 * @sw_context: Structure holding the relocation lists.
3922 *
3923 * Side effects: If this function returns 0, then the command buffer represented
3924 * by @header will have been modified.
3925 */
3926static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3927 struct vmw_cmdbuf_header *header,
3928 u32 command_size,
3929 struct vmw_sw_context *sw_context)
3930{
3931 u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3932 SVGA3D_INVALID_ID);
3933 void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3934 header);
3935
3936 vmw_apply_relocations(sw_context);
3937 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3938 vmw_resource_relocations_free(&sw_context->res_relocations);
3939 vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3940
3941 return 0;
3942}
3943
3944/**
3945 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3946 * submission using a command buffer.
3947 *
3948 * @dev_priv: Pointer to a device private structure.
3949 * @user_commands: User-space pointer to the commands to be submitted.
3950 * @command_size: Size of the unpatched command batch.
3951 * @header: Out parameter returning the opaque pointer to the command buffer.
3952 *
3953 * This function checks whether we can use the command buffer manager for
3954 * submission and if so, creates a command buffer of suitable size and copies
3955 * the user data into that buffer.
3956 *
3957 * On successful return, the function returns a pointer to the data in the
3958 * command buffer and *@header is set to non-NULL.
3959 *
3960 * If the command buffer manager could not be used, the function returns the
3961 * value of @kernel_commands it was called with. That value may be
3962 * NULL. In that case, the value of *@header will be set to NULL.
3963 *
3964 * If an error is encountered, the function will return a pointer error value.
3965 * If the function is interrupted by a signal while sleeping, it will return
3966 * -ERESTARTSYS cast to a pointer error value.
3967 */
3968static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3969 void __user *user_commands,
3970 void *kernel_commands, u32 command_size,
3971 struct vmw_cmdbuf_header **header)
3972{
3973 size_t cmdbuf_size;
3974 int ret;
3975
3976 *header = NULL;
3977 if (command_size > SVGA_CB_MAX_SIZE) {
3978 VMW_DEBUG_USER("Command buffer is too large.\n");
3979 return ERR_PTR(-EINVAL);
3980 }
3981
3982 if (!dev_priv->cman || kernel_commands)
3983 return kernel_commands;
3984
3985 /* If possible, add a little space for fencing. */
3986 cmdbuf_size = command_size + 512;
3987 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3988 kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
3989 header);
3990 if (IS_ERR(kernel_commands))
3991 return kernel_commands;
3992
3993 ret = copy_from_user(kernel_commands, user_commands, command_size);
3994 if (ret) {
3995 VMW_DEBUG_USER("Failed copying commands.\n");
3996 vmw_cmdbuf_header_free(*header);
3997 *header = NULL;
3998 return ERR_PTR(-EFAULT);
3999 }
4000
4001 return kernel_commands;
4002}
4003
4004static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4005 struct vmw_sw_context *sw_context,
4006 uint32_t handle)
4007{
4008 struct vmw_resource *res;
4009 int ret;
4010 unsigned int size;
4011
4012 if (handle == SVGA3D_INVALID_ID)
4013 return 0;
4014
4015 size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
4016 ret = vmw_validation_preload_res(sw_context->ctx, size);
4017 if (ret)
4018 return ret;
4019
4020 res = vmw_user_resource_noref_lookup_handle
4021 (dev_priv, sw_context->fp->tfile, handle,
4022 user_context_converter);
4023 if (IS_ERR(res)) {
4024		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
4025 (unsigned int) handle);
4026 return PTR_ERR(res);
4027 }
4028
4029 ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
4030 if (unlikely(ret != 0))
4031 return ret;
4032
4033 sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
4034 sw_context->man = vmw_context_res_man(res);
4035
4036 return 0;
4037}
4038
4039int vmw_execbuf_process(struct drm_file *file_priv,
4040 struct vmw_private *dev_priv,
4041 void __user *user_commands, void *kernel_commands,
4042 uint32_t command_size, uint64_t throttle_us,
4043 uint32_t dx_context_handle,
4044 struct drm_vmw_fence_rep __user *user_fence_rep,
4045 struct vmw_fence_obj **out_fence, uint32_t flags)
4046{
4047 struct vmw_sw_context *sw_context = &dev_priv->ctx;
4048 struct vmw_fence_obj *fence = NULL;
4049 struct vmw_cmdbuf_header *header;
4050 uint32_t handle = 0;
4051 int ret;
4052 int32_t out_fence_fd = -1;
4053 struct sync_file *sync_file = NULL;
4054 DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
4055
4056 vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
4057
4058 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4059 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
4060 if (out_fence_fd < 0) {
4061 VMW_DEBUG_USER("Failed to get a fence fd.\n");
4062 return out_fence_fd;
4063 }
4064 }
4065
4066	if (throttle_us)
4067		VMW_DEBUG_USER("Throttling is no longer supported.\n");
4069
4070 kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4071 kernel_commands, command_size,
4072 &header);
4073 if (IS_ERR(kernel_commands)) {
4074 ret = PTR_ERR(kernel_commands);
4075 goto out_free_fence_fd;
4076 }
4077
4078 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4079 if (ret) {
4080 ret = -ERESTARTSYS;
4081 goto out_free_header;
4082 }
4083
4084 sw_context->kernel = false;
4085 if (kernel_commands == NULL) {
4086 ret = vmw_resize_cmd_bounce(sw_context, command_size);
4087 if (unlikely(ret != 0))
4088 goto out_unlock;
4089
4090 ret = copy_from_user(sw_context->cmd_bounce, user_commands,
4091 command_size);
4092 if (unlikely(ret != 0)) {
4093 ret = -EFAULT;
4094 VMW_DEBUG_USER("Failed copying commands.\n");
4095 goto out_unlock;
4096 }
4097
4098 kernel_commands = sw_context->cmd_bounce;
4099 } else if (!header) {
4100 sw_context->kernel = true;
4101 }
4102
4103 sw_context->fp = vmw_fpriv(file_priv);
4104 INIT_LIST_HEAD(&sw_context->ctx_list);
4105 sw_context->cur_query_bo = dev_priv->pinned_bo;
4106 sw_context->last_query_ctx = NULL;
4107 sw_context->needs_post_query_barrier = false;
4108 sw_context->dx_ctx_node = NULL;
4109 sw_context->dx_query_mob = NULL;
4110 sw_context->dx_query_ctx = NULL;
4111 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4112 INIT_LIST_HEAD(&sw_context->res_relocations);
4113 INIT_LIST_HEAD(&sw_context->bo_relocations);
4114
4115 if (sw_context->staged_bindings)
4116 vmw_binding_state_reset(sw_context->staged_bindings);
4117
4118 if (!sw_context->res_ht_initialized) {
4119 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
4120 if (unlikely(ret != 0))
4121 goto out_unlock;
4122
4123 sw_context->res_ht_initialized = true;
4124 }
4125
4126 INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4127 sw_context->ctx = &val_ctx;
4128 ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4129 if (unlikely(ret != 0))
4130 goto out_err_nores;
4131
4132 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4133 command_size);
4134 if (unlikely(ret != 0))
4135 goto out_err_nores;
4136
4137 ret = vmw_resources_reserve(sw_context);
4138 if (unlikely(ret != 0))
4139 goto out_err_nores;
4140
4141 ret = vmw_validation_bo_reserve(&val_ctx, true);
4142 if (unlikely(ret != 0))
4143 goto out_err_nores;
4144
4145 ret = vmw_validation_bo_validate(&val_ctx, true);
4146 if (unlikely(ret != 0))
4147 goto out_err;
4148
4149 ret = vmw_validation_res_validate(&val_ctx, true);
4150 if (unlikely(ret != 0))
4151 goto out_err;
4152
4153 vmw_validation_drop_ht(&val_ctx);
4154
4155 ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4156 if (unlikely(ret != 0)) {
4157 ret = -ERESTARTSYS;
4158 goto out_err;
4159 }
4160
4161 if (dev_priv->has_mob) {
4162 ret = vmw_rebind_contexts(sw_context);
4163 if (unlikely(ret != 0))
4164 goto out_unlock_binding;
4165 }
4166
4167 if (!header) {
4168 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4169 command_size, sw_context);
4170 } else {
4171 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4172 sw_context);
4173 header = NULL;
4174 }
4175 mutex_unlock(&dev_priv->binding_mutex);
4176 if (ret)
4177 goto out_err;
4178
4179 vmw_query_bo_switch_commit(dev_priv, sw_context);
4180 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
4181 (user_fence_rep) ? &handle : NULL);
4182 /*
4183 * This error is harmless, because if fence submission fails,
4184	 * vmw_execbuf_fence_commands() will sync. The error will be propagated
4185	 * to user-space in @user_fence_rep.
4186 */
4187 if (ret != 0)
4188 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
4189
4190 vmw_execbuf_bindings_commit(sw_context, false);
4191 vmw_bind_dx_query_mob(sw_context);
4192 vmw_validation_res_unreserve(&val_ctx, false);
4193
4194 vmw_validation_bo_fence(sw_context->ctx, fence);
4195
4196 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4197 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
4198
4199 /*
4200 * If anything fails here, give up trying to export the fence and do a
4201 * sync since the user mode will not be able to sync the fence itself.
4202 * This ensures we are still functionally correct.
4203 */
4204	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4205		sync_file = sync_file_create(&fence->base);
4207 if (!sync_file) {
4208 VMW_DEBUG_USER("Sync file create failed for fence\n");
4209 put_unused_fd(out_fence_fd);
4210 out_fence_fd = -1;
4211
4212 (void) vmw_fence_obj_wait(fence, false, false,
4213 VMW_FENCE_WAIT_TIMEOUT);
4214 } else {
4215 /* Link the fence with the FD created earlier */
4216 fd_install(out_fence_fd, sync_file->file);
4217 }
4218 }
4219
4220 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4221 user_fence_rep, fence, handle, out_fence_fd,
4222 sync_file);
4223
4224 /* Don't unreference when handing fence out */
4225 if (unlikely(out_fence != NULL)) {
4226 *out_fence = fence;
4227 fence = NULL;
4228 } else if (likely(fence != NULL)) {
4229 vmw_fence_obj_unreference(&fence);
4230 }
4231
4232 vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4233 mutex_unlock(&dev_priv->cmdbuf_mutex);
4234
4235 /*
4236 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4237 * in resource destruction paths.
4238 */
4239 vmw_validation_unref_lists(&val_ctx);
4240
4241 return 0;
4242
4243out_unlock_binding:
4244 mutex_unlock(&dev_priv->binding_mutex);
4245out_err:
4246 vmw_validation_bo_backoff(&val_ctx);
4247out_err_nores:
4248 vmw_execbuf_bindings_commit(sw_context, true);
4249 vmw_validation_res_unreserve(&val_ctx, true);
4250 vmw_resource_relocations_free(&sw_context->res_relocations);
4251 vmw_free_relocations(sw_context);
4252 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4253 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4254out_unlock:
4255 vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4256 vmw_validation_drop_ht(&val_ctx);
4257 WARN_ON(!list_empty(&sw_context->ctx_list));
4258 mutex_unlock(&dev_priv->cmdbuf_mutex);
4259
4260 /*
4261 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4262 * in resource destruction paths.
4263 */
4264 vmw_validation_unref_lists(&val_ctx);
4265out_free_header:
4266 if (header)
4267 vmw_cmdbuf_header_free(header);
4268out_free_fence_fd:
4269 if (out_fence_fd >= 0)
4270 put_unused_fd(out_fence_fd);
4271
4272 return ret;
4273}
4274
4275/**
4276 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4277 *
4278 * @dev_priv: The device private structure.
4279 *
4280 * This function is called to idle the fifo and unpin the query buffer if the
4281 * normal way to do this hits an error, which should typically be extremely
4282 * rare.
4283 */
4284static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4285{
4286 VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
4287
4288 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4289 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4290 if (dev_priv->dummy_query_bo_pinned) {
4291 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4292 dev_priv->dummy_query_bo_pinned = false;
4293 }
4294}
4295
4296
4297/**
4298 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
4299 * bo.
4300 *
4301 * @dev_priv: The device private structure.
4302 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
4303 * query barrier that flushes all queries touching the current buffer pointed to
4304 * by @dev_priv->pinned_bo
4305 *
4306 * This function should be used to unpin the pinned query bo, or as a query
4307 * barrier when we need to make sure that all queries have finished before the
4308 * next fifo command (for example on hardware context destruction, where the
4309 * hardware may otherwise leak unfinished queries).
4310 *
4311 * This function does not return any failure codes, but makes attempts to do
4312 * safe unpinning in case of errors.
4313 *
4314 * The function will synchronize on the previous query barrier, and will thus
4315 * not finish until that barrier has executed.
4316 *
4317 * the @dev_priv->cmdbuf_mutex needs to be held by the current thread before
4318 * calling this function.
4319 */
4320void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4321 struct vmw_fence_obj *fence)
4322{
4323 int ret = 0;
4324 struct vmw_fence_obj *lfence = NULL;
4325 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
4326
4327 if (dev_priv->pinned_bo == NULL)
4328 goto out_unlock;
4329
4330 ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
4331 false);
4332 if (ret)
4333 goto out_no_reserve;
4334
4335 ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
4336 false);
4337 if (ret)
4338 goto out_no_reserve;
4339
4340 ret = vmw_validation_bo_reserve(&val_ctx, false);
4341 if (ret)
4342 goto out_no_reserve;
4343
4344 if (dev_priv->query_cid_valid) {
4345 BUG_ON(fence != NULL);
4346 ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
4347 if (ret)
4348 goto out_no_emit;
4349 dev_priv->query_cid_valid = false;
4350 }
4351
4352 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4353 if (dev_priv->dummy_query_bo_pinned) {
4354 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4355 dev_priv->dummy_query_bo_pinned = false;
4356 }
4357 if (fence == NULL) {
4358 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4359 NULL);
4360 fence = lfence;
4361 }
4362 vmw_validation_bo_fence(&val_ctx, fence);
4363 if (lfence != NULL)
4364 vmw_fence_obj_unreference(&lfence);
4365
4366 vmw_validation_unref_lists(&val_ctx);
4367 vmw_bo_unreference(&dev_priv->pinned_bo);
4368
4369out_unlock:
4370 return;
4371out_no_emit:
4372 vmw_validation_bo_backoff(&val_ctx);
4373out_no_reserve:
4374 vmw_validation_unref_lists(&val_ctx);
4375 vmw_execbuf_unpin_panic(dev_priv);
4376 vmw_bo_unreference(&dev_priv->pinned_bo);
4377}
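
/*
 * Hedged usage sketch, assuming a hypothetical caller: with
 * @dev_priv->cmdbuf_mutex held and @dev_priv->query_cid_valid already
 * false, a fence issued after an explicit query barrier lets the
 * function reuse that barrier instead of emitting a dummy query:
 *
 *	mutex_lock(&dev_priv->cmdbuf_mutex);
 *	__vmw_execbuf_release_pinned_bo(dev_priv, barrier_fence);
 *	mutex_unlock(&dev_priv->cmdbuf_mutex);
 *
 * Here barrier_fence is illustrative. When no such fence exists, pass
 * NULL and the function emits a dummy query and fences it itself, as
 * vmw_execbuf_release_pinned_bo() below does.
 */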

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = data;
	int ret;
	struct dma_fence *in_fence = NULL;

	/*
	 * Extend the ioctl argument while maintaining backwards compatibility:
	 * We take different code paths depending on the value of arg->version.
	 *
	 * Note: The ioctl argument is extended and zero-padded by core DRM.
	 */
	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
		     arg->version == 0)) {
		VMW_DEBUG_USER("Incorrect execbuf version.\n");
		return -EINVAL;
	}

	switch (arg->version) {
	case 1:
		/* For v1, core DRM has extended and zero-padded the data. */
		arg->context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		/* For v2 and later, core DRM has correctly copied it. */
		break;
	}
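
	/*
	 * Hedged userspace sketch, illustrative only: a v2+ client fills
	 * in the context handle explicitly, while a v1 client submits the
	 * shorter legacy struct and relies on core DRM zero-padding plus
	 * the -1 substitution above. This assumes libdrm's
	 * drmCommandWrite() and the DRM_VMW_EXECBUF command index from
	 * the vmwgfx uAPI; cmd_buf, cmd_size and ctx_handle are
	 * placeholders.
	 *
	 *	struct drm_vmw_execbuf_arg arg = {
	 *		.commands = (uintptr_t)cmd_buf,
	 *		.command_size = cmd_size,
	 *		.version = DRM_VMW_EXECBUF_VERSION,
	 *		.context_handle = ctx_handle,
	 *	};
	 *
	 *	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
	 */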

	/* If a fence FD was imported from elsewhere, wait on it. */
	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg->imported_fence_fd);

		if (!in_fence) {
			VMW_DEBUG_USER("Cannot get imported fence\n");
			return -EINVAL;
		}

		ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
		if (ret)
			goto out;
	}
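
	/*
	 * Hedged userspace sketch, illustrative only: two submissions can
	 * be ordered by exporting a fence FD from the first and importing
	 * it into the second. The exported FD is assumed to come back
	 * through the fd member of the struct drm_vmw_fence_rep pointed
	 * to by first.fence_rep; first, second and exported_fd are
	 * placeholders.
	 *
	 *	first.flags |= DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD;
	 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &first, sizeof(first));
	 *
	 *	second.flags |= DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD;
	 *	second.imported_fence_fd = exported_fd;
	 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &second, sizeof(second));
	 */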

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  arg->context_handle,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL, arg->flags);

	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);
	return ret;
}