// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"

#define VMW_RES_HT_ORDER 12

/*
 * Helper macro to get the dx_ctx_node if available; otherwise print an error
 * message. This is for use in command verifier functions where, if the
 * dx_ctx_node is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context) \
({ \
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
		__sw_context->dx_ctx_node; \
	}); \
})
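
/*
 * Typical use in a command verifier (a sketch based on call sites later in
 * this file): the macro logs and evaluates to NULL when no DX context has
 * been set, so callers bail out with -EINVAL.
 *
 *	struct vmw_ctx_validation_info *ctx_node =
 *		VMW_GET_CTX_NODE(sw_context);
 *
 *	if (!ctx_node)
 *		return -EINVAL;
 */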

#define VMW_DECLARE_CMD_VAR(__var, __type) \
	struct { \
		SVGA3dCmdHeader header; \
		__type body; \
	} __var
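
/*
 * Example usage (as seen in the verifiers below): declare a typed view of a
 * command and recover it from its embedded header.
 *
 *	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
 *
 *	cmd = container_of(header, typeof(*cmd), header);
 */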

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Byte offset into the command buffer where the id that needs fixup
 * is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};
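
/*
 * Note: the bitfield widths above are compile-time checked in
 * vmw_resource_relocations_apply() below; @offset must be able to address
 * any byte in a command buffer (SVGA_CB_MAX_SIZE < 1 << 29) and @rel_type
 * must hold all values of enum vmw_resource_relocation_type.
 */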

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: The command verifier callback.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled if guest-backed objects are available.
 * @cmd_name: The command name, for debug output.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable), #_cmd}
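
/*
 * A sketch of how VMW_CMD_DEF is used to build the command verifier table:
 * entries are indexed by SVGA3D command id, e.g.
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *		    true, false, false),
 *
 * expands to a designated initializer at index
 * SVGA_3D_CMD_SURFACE_COPY - SVGA_3D_CMD_BASE.
 */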

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}
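
/*
 * Example: the verifiers below use vmw_ptr_diff() to record where in the
 * command stream a resource id lives, relative to the start of the buffer:
 *
 *	vmw_resource_relocation_add(sw_context, res,
 *				    vmw_ptr_diff(sw_context->buf_start, id_loc),
 *				    vmw_res_rel_normal);
 */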

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added
 * to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the context resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node. Typically the binding manager
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

/**
 * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
 * RCU-protected pointer to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced RCU-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		vmw_user_resource_noref_release();
		return 0;
	}

	priv_size = vmw_execbuf_res_size(dev_priv, res_type);
	ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
					  dirty, (void **)&ctx_info,
					  &first_usage);
	vmw_user_resource_noref_release();
	if (ret)
		return ret;

	if (priv_size && first_usage) {
		ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
					      ctx_info);
		if (ret) {
			VMW_DEBUG_USER("Failed first usage context setup.\n");
			return ret;
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

/**
 * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
 * validation list if it's not already on it
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @dirty: Whether to change dirty status.
 *
 * Returns: Zero on success. Negative error code on failure.
 */
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
					 struct vmw_resource *res,
					 u32 dirty)
{
	struct vmw_res_cache_entry *rcache;
	enum vmw_res_type res_type = vmw_res_type(res);
	void *ptr;
	int ret;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
					  &ptr, NULL);
	if (ret)
		return ret;

	vmw_execbuf_rcache_update(rcache, res, ptr);

	return 0;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to, to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
					    vmw_view_dirtying(view));
	if (ret)
		return ret;

	return vmw_execbuf_res_noctx_val_add(sw_context, view,
					     VMW_RES_DIRTY_NONE);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to, to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state re-emission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;

	/* Add all cotables to the validation list. */
	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_SET);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_noctx_val_add
				(sw_context, entry->res,
				 vmw_binding_dirtying(entry->bt));
		if (unlikely(ret != 0))
			break;
	}

	if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context holding the relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission is currently protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on
 * the resource validation list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_res: Pointer to a location where, on exit, a pointer to the validated
 * resource is stored. May be NULL.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		res = vmw_user_resource_noref_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
		if (unlikely(ret != 0))
			return ret;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (ret)
		return ret;

	if (p_res)
		*p_res = res;

	return 0;
}
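
/*
 * Typical verifier usage (taken from the checks below): look up the context
 * handle embedded in a command and add it to the validation list.
 *
 *	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
 *				VMW_RES_DIRTY_SET, user_context_converter,
 *				&cmd->body.cid, &ctx);
 */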

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.mem.start;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}
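
/*
 * Hypothetical usage sketch (identifiers here are illustrative, not taken
 * from this excerpt): a DX verifier binding an array of shader-resource
 * views might call
 *
 *	ret = vmw_view_bindings_add(sw_context, vmw_view_sr,
 *				    vmw_ctx_binding_sr, shader_slot,
 *				    view_ids, num_views, first_slot);
 */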

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command
 * submission context's resource cache, and hence be the last resource of that
 * type to have been processed by the validation code.
 *
 * Return: A pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and whether another buffer is currently pinned for query results.
 * If so, the function prepares the state of @sw_context for switching pinned
 * buffers after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->base.num_pages > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the old
 * query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy
			 * queries in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
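
/*
 * Example (from vmw_cmd_dx_bind_query() below): translate the user-space MOB
 * handle embedded in a command so it can be patched later.
 *
 *	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
 *				    &vmw_bo);
 */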

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
	if (IS_ERR(vmw_bo)) {
		VMW_DEBUG_USER("Could not find or use GMR region.\n");
		return PTR_ERR(vmw_bo);
	}

	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	vmw_user_bo_noref_release();
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}
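
/*
 * Example (from vmw_cmd_end_query() below): translate the guest-pointer
 * handle that a legacy query command carries.
 *
 *	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
 *				      &cmd->body.guestResult, &vmw_bo);
 */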

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}
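
/*
 * Note the pattern above, repeated for the end/wait variants below: when the
 * device uses MOBs, a legacy query command is converted in place into its
 * guest-backed equivalent, and the guest-backed verifier is then re-run on
 * the rewritten header. The BUG_ON() guards the memcpy() by asserting that
 * both command layouts have the same size.
 */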

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("Could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);

	return 0;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, typeof(*cmd), header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		VMW_DEBUG_USER("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
		((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
				       (unsigned int) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_buffer_object *vmw_bo;
	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
				       &vmw_bo);
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: Pointer to the resource whose backup buffer is switched.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res, uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_buffer_object *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper
 * around vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter, uint32_t *res_id, uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
					 backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter, &cmd->body.sid,
				     &cmd->body.mobid, 0);
}

/**
 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
	int ret;
	size_t size;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1, cmd->body.type,
				    size, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

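	/*
	 * The shader is now managed by the kernel as a guest-backed compat
	 * shader, so replace the original SHADER_DEFINE in the device command
	 * stream with a NOP.
	 */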
	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
	int ret;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
				cmd->body.type, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *ctx, *res = NULL;
	struct vmw_ctx_validation_info *ctx_info;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		/*
		 * This is the compat shader path - Per device guest-backed
		 * shaders, but user-space thinks it's per context host-
		 * backed shaders.
		 */
		res = vmw_shader_lookup(vmw_context_res_man(ctx),
					cmd->body.shid, cmd->body.type);
		if (!IS_ERR(res)) {
			ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
							    VMW_RES_DIRTY_NONE);
			if (unlikely(ret != 0))
				return ret;

			ret = vmw_resource_relocation_add
				(sw_context, res,
				 vmw_ptr_diff(sw_context->buf_start,
					      &cmd->body.shid),
				 vmw_res_rel_normal);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					VMW_RES_DIRTY_NONE,
					user_shader_converter, &cmd->body.shid,
					&res);
		if (unlikely(ret != 0))
			return ret;
	}

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;

	binding.bi.ctx = ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, NULL);
	if (unlikely(ret != 0))
		return ret;

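	/* On guest-backed devices, rewrite the command into its GB variant. */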
	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter, &cmd->body.shid,
				     &cmd->body.mobid, cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
			       (unsigned int) cmd->body.type,
			       (unsigned int) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
			binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
		container_of(header, typeof(*cmd), header);
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
		VMW_DEBUG_USER("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}

/**
 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX ||
	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
						    VMW_RES_DIRTY_NONE);
		if (ret)
			return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validate SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cmd->buf[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = res;
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_dx_set_index_buffer - Validate SVGA_3D_CMD_DX_SET_INDEX_BUFFER
 * command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
		container_of(header, typeof(*cmd), header);
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);
	int ret;

	if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
				    0, &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
				     num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
		container_of(header, typeof(*cmd), header);

	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
					   cmd->body.renderTargetViewId));
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
		container_of(header, typeof(*cmd), header);

	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
					   cmd->body.depthStencilViewId));
}

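/*
 * vmw_cmd_dx_view_define - Validate the SVGA_3D_CMD_DX_DEFINE_*_VIEW group
 * of commands. All of these share the initial command body layout assumed
 * below; the matching cotable is notified of the new view id and the view
 * addition is staged via the software context's staged command resources.
 */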
static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *srf;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have the
	 * same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (!ctx_node)
		return -EINVAL;

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
		VMW_DEBUG_USER("Invalid surface id.\n");
		return -EINVAL;
	}
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->sid, &srf);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
			    cmd->defined_id, header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_so binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		VMW_DEBUG_USER("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_SET,
					user_surface_converter,
					&cmd->targets[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_so;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

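/*
 * vmw_cmd_dx_so_define - Validate the SVGA_3D_CMD_DX_DEFINE_* state-object
 * commands (element layout, blend, depth-stencil, rasterizer and sampler
 * state, and stream output), all of which share the initial command body
 * layout assumed below.
 */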
static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->sid, NULL);
}

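/*
 * vmw_cmd_dx_cid_check - Validate a DX command that needs no per-resource
 * checking beyond the presence of a DX context for this batch.
 */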
static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);

	if (!ctx_node)
		return -EINVAL;

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - Validate a view remove command and schedule the
 * view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this command
 * batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res, &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * device errors.
	 */
	return vmw_resource_relocation_add(sw_context, view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}

/**
 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					VMW_RES_DIRTY_SET,
					user_context_converter, &cmd->body.cid,
					&ctx);
		if (ret)
			return ret;
	} else {
		struct vmw_ctx_validation_info *ctx_node =
			VMW_GET_CTX_NODE(sw_context);

		if (!ctx_node)
			return -EINVAL;

		ctx = ctx_node->ctx;
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
					    VMW_RES_DIRTY_NONE);
	if (ret) {
		VMW_DEBUG_USER("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
		container_of(header, typeof(*cmd), header);

	return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
					   cmd->body.shaderResourceViewId));
}

/**
 * vmw_cmd_dx_transfer_from_buffer - Validate
 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
		container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.destSid, NULL);
}

/**
 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
		container_of(header, typeof(*cmd), header);

	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.surface.sid, NULL);
}

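/*
 * vmw_cmd_check_not_3d - Validate a legacy (non-3D) SVGA command. Only a
 * small set of kernel-only commands is accepted, and SVGA_CMD_DEFINE_GMRFB
 * additionally needs its guest pointer translated.
 */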
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
			       cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

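/*
 * Per-command dispatch table. For each 3D command, VMW_CMD_DEF() names the
 * verifier function followed by three booleans which, judging by their use
 * in vmw_cmd_check() below, correspond to user_allow, gb_disable and
 * gb_enable: whether user-space may issue the command, and whether the
 * command is rejected when the device does (or does not) have guest-backed
 * objects.
 */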
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/* SM commands */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
		    &vmw_cmd_dx_transfer_from_buffer,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
		    true, false, true),
};

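/*
 * vmw_cmd_describe - Look up a printable name and the size of the command
 * at @buf. Returns false if the command cannot be identified.
 */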
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		*cmd = "UNKNOWN";
		*size = 0;
		return false;
	}

	return true;
}

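/*
 * vmw_cmd_check - Validate a single command in the batch: legacy commands
 * are dispatched to vmw_cmd_check_not_3d(); 3D commands are bounds-checked
 * against the remaining batch size and run through the per-command verifier
 * from vmw_cmd_entries[].
 */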
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context, void *buf,
			 uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
			       cmd_id + SVGA_3D_CMD_BASE, ret);
		return ret;
	}

	return 0;
out_invalid:
	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

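/*
 * vmw_cmd_check_all - Walk the entire command batch, letting vmw_cmd_check()
 * validate each command and advance by the size it reports.
 */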
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context, void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		VMW_DEBUG_USER("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(&sw_context->bo_relocations);
}

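/*
 * Patch the command stream with the final buffer placements: VRAM-placed
 * buffers become offsets into the framebuffer GMR, while GMR and MOB
 * placements use the backing store's start id.
 */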
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->base;
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

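/*
 * Grow the bounce buffer used for copying and patching the command batch.
 * The buffer grows by roughly 1.5x per iteration, page-aligned; the old
 * contents are discarded (vfree before vmalloc), so callers must (re-)copy
 * the batch afterwards.
 */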
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * @file_priv: Pointer to the calling struct drm_file. Must be non-NULL if
 * @p_handle is non-NULL.
 * @dev_priv: Pointer to a device private struct.
 * @p_fence: Location in which to return the created fence object.
 * @p_handle: If non-NULL, location in which to return a user-space handle
 * for the fence; otherwise no handle is created.
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
					 false, VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}

3394
3395/**
3396 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3397 *
3398 * @dev_priv: Pointer to a vmw_private struct.
3399 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3400 * @ret: Return value from fence object creation.
3401 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3402 * the information should be copied.
3403 * @fence: Pointer to the fenc object.
3404 * @fence_handle: User-space fence handle.
 * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
3406 * @sync_file: Only used to clean up in case of an error in this function.
3407 *
 * This function copies fence information to user-space. If copying fails, the
 * user-space struct drm_vmw_fence_rep::error member should be left untouched,
 * and if user-space has preloaded it with -EFAULT, the failure can then be
 * detected there.
 *
 * Also, if copying fails, user-space will be unable to signal the fence
 * object, so we wait for it immediately and then unreference the user-space
 * reference.
3415 */
3416void
3417vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3418 struct vmw_fpriv *vmw_fp, int ret,
3419 struct drm_vmw_fence_rep __user *user_fence_rep,
3420 struct vmw_fence_obj *fence, uint32_t fence_handle,
3421 int32_t out_fence_fd, struct sync_file *sync_file)
3422{
3423 struct drm_vmw_fence_rep fence_rep;
3424
3425 if (user_fence_rep == NULL)
3426 return;
3427
3428 memset(&fence_rep, 0, sizeof(fence_rep));
3429
3430 fence_rep.error = ret;
3431 fence_rep.fd = out_fence_fd;
3432 if (ret == 0) {
3433 BUG_ON(fence == NULL);
3434
3435 fence_rep.handle = fence_handle;
3436 fence_rep.seqno = fence->base.seqno;
3437 vmw_update_seqno(dev_priv, &dev_priv->fifo);
3438 fence_rep.passed_seqno = dev_priv->last_read_seqno;
3439 }
3440
3441 /*
3442 * copy_to_user errors will be detected by user space not seeing
3443 * fence_rep::error filled in. Typically user-space would have pre-set
3444 * that member to -EFAULT.
3445 */
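	/*
	 * A hypothetical user-space sketch of that convention (illustrative
	 * only, not part of this driver):
	 *
	 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
	 *
	 *	arg.fence_rep = (unsigned long) &rep;
	 *	// ... submit via the execbuf ioctl ...
	 *	if (rep.error != 0)
	 *		// fence info never arrived; don't use rep.handle
	 */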
3446 ret = copy_to_user(user_fence_rep, &fence_rep,
3447 sizeof(fence_rep));
3448
3449 /*
3450 * User-space lost the fence object. We need to sync and unreference the
3451 * handle.
3452 */
3453 if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3454 if (sync_file)
3455 fput(sync_file->file);
3456
3457 if (fence_rep.fd != -1) {
3458 put_unused_fd(fence_rep.fd);
3459 fence_rep.fd = -1;
3460 }
3461
3462 ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
3463 TTM_REF_USAGE);
3464 VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3465 (void) vmw_fence_obj_wait(fence, false, false,
3466 VMW_FENCE_WAIT_TIMEOUT);
3467 }
3468}
3469
3470/**
3471 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3472 *
3473 * @dev_priv: Pointer to a device private structure.
3474 * @kernel_commands: Pointer to the unpatched command batch.
3475 * @command_size: Size of the unpatched command batch.
3476 * @sw_context: Structure holding the relocation lists.
3477 *
3478 * Side effects: If this function returns 0, then the command batch pointed to
3479 * by @kernel_commands will have been modified.
3480 */
3481static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3482 void *kernel_commands, u32 command_size,
3483 struct vmw_sw_context *sw_context)
3484{
3485 void *cmd;
3486
3487 if (sw_context->dx_ctx_node)
3488 cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
3489 sw_context->dx_ctx_node->ctx->id);
3490 else
3491 cmd = VMW_FIFO_RESERVE(dev_priv, command_size);
3492
3493 if (!cmd)
3494 return -ENOMEM;
3495
3496 vmw_apply_relocations(sw_context);
3497 memcpy(cmd, kernel_commands, command_size);
3498 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3499 vmw_resource_relocations_free(&sw_context->res_relocations);
3500 vmw_fifo_commit(dev_priv, command_size);
3501
3502 return 0;
3503}
3504
3505/**
3506 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3507 * command buffer manager.
3508 *
3509 * @dev_priv: Pointer to a device private structure.
3510 * @header: Opaque handle to the command buffer allocation.
3511 * @command_size: Size of the unpatched command batch.
3512 * @sw_context: Structure holding the relocation lists.
3513 *
3514 * Side effects: If this function returns 0, then the command buffer represented
3515 * by @header will have been modified.
3516 */
3517static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3518 struct vmw_cmdbuf_header *header,
3519 u32 command_size,
3520 struct vmw_sw_context *sw_context)
3521{
3522 u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3523 SVGA3D_INVALID_ID);
3524 void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3525 header);
3526
3527 vmw_apply_relocations(sw_context);
3528 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3529 vmw_resource_relocations_free(&sw_context->res_relocations);
3530 vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3531
3532 return 0;
3533}
3534
3535/**
3536 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3537 * submission using a command buffer.
3538 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to an already copied-in kernel command batch, or
 * NULL if none is available yet.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
3543 *
3544 * This function checks whether we can use the command buffer manager for
3545 * submission and if so, creates a command buffer of suitable size and copies
3546 * the user data into that buffer.
3547 *
3548 * On successful return, the function returns a pointer to the data in the
3549 * command buffer and *@header is set to non-NULL.
3550 *
 * If command buffers could not be used, the function returns the value of
 * @kernel_commands as passed in. That value may be NULL. In that case, the
 * value of *@header will be set to NULL.
 *
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
3558 */
3559static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
3560 void __user *user_commands,
3561 void *kernel_commands, u32 command_size,
3562 struct vmw_cmdbuf_header **header)
3563{
3564 size_t cmdbuf_size;
3565 int ret;
3566
3567 *header = NULL;
3568 if (command_size > SVGA_CB_MAX_SIZE) {
3569 VMW_DEBUG_USER("Command buffer is too large.\n");
3570 return ERR_PTR(-EINVAL);
3571 }
3572
3573 if (!dev_priv->cman || kernel_commands)
3574 return kernel_commands;
3575
3576 /* If possible, add a little space for fencing. */
3577 cmdbuf_size = command_size + 512;
3578 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
3579 kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
3580 header);
3581 if (IS_ERR(kernel_commands))
3582 return kernel_commands;
3583
3584 ret = copy_from_user(kernel_commands, user_commands, command_size);
3585 if (ret) {
3586 VMW_DEBUG_USER("Failed copying commands.\n");
3587 vmw_cmdbuf_header_free(*header);
3588 *header = NULL;
3589 return ERR_PTR(-EFAULT);
3590 }
3591
3592 return kernel_commands;
3593}
3594
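/**
 * vmw_execbuf_tie_context - Look up and validate the DX context of a submission
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The command submission context.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID if the
 * submission is not tied to a DX context.
 *
 * On success, adds the context to the validation list and sets up
 * @sw_context->dx_ctx_node and @sw_context->man for the rest of command
 * verification.
 */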
3595static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
3596 struct vmw_sw_context *sw_context,
3597 uint32_t handle)
3598{
3599 struct vmw_resource *res;
3600 int ret;
3601 unsigned int size;
3602
3603 if (handle == SVGA3D_INVALID_ID)
3604 return 0;
3605
3606 size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
3607 ret = vmw_validation_preload_res(sw_context->ctx, size);
3608 if (ret)
3609 return ret;
3610
3611 res = vmw_user_resource_noref_lookup_handle
3612 (dev_priv, sw_context->fp->tfile, handle,
3613 user_context_converter);
3614 if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
3616 (unsigned int) handle);
3617 return PTR_ERR(res);
3618 }
3619
3620 ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
3621 if (unlikely(ret != 0))
3622 return ret;
3623
3624 sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
3625 sw_context->man = vmw_context_res_man(res);
3626
3627 return 0;
3628}
3629
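/**
 * vmw_execbuf_process - Verify, patch, submit and fence a command batch
 *
 * @file_priv: The calling file. May be NULL if no user-space fence handle is
 * requested.
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the command batch, or NULL.
 * @kernel_commands: Kernel-space command batch, or NULL if it should be copied
 * in from @user_commands.
 * @command_size: Size of the command batch in bytes.
 * @throttle_us: If non-zero, throttle the submission until the command stream
 * lag is roughly this many microseconds.
 * @dx_context_handle: Handle of the DX context to tie the submission to, or
 * SVGA3D_INVALID_ID.
 * @user_fence_rep: User-space address of a struct drm_vmw_fence_rep to receive
 * fence information, or NULL.
 * @out_fence: If non-NULL, the created fence object is returned here instead
 * of being unreferenced.
 * @flags: Bitwise OR of DRM_VMW_EXECBUF_FLAG_* flags.
 */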
3630int vmw_execbuf_process(struct drm_file *file_priv,
3631 struct vmw_private *dev_priv,
3632 void __user *user_commands, void *kernel_commands,
3633 uint32_t command_size, uint64_t throttle_us,
3634 uint32_t dx_context_handle,
3635 struct drm_vmw_fence_rep __user *user_fence_rep,
3636 struct vmw_fence_obj **out_fence, uint32_t flags)
3637{
3638 struct vmw_sw_context *sw_context = &dev_priv->ctx;
3639 struct vmw_fence_obj *fence = NULL;
3640 struct vmw_cmdbuf_header *header;
3641 uint32_t handle = 0;
3642 int ret;
3643 int32_t out_fence_fd = -1;
3644 struct sync_file *sync_file = NULL;
3645 DECLARE_VAL_CONTEXT(val_ctx, &sw_context->res_ht, 1);
3646
3647 vmw_validation_set_val_mem(&val_ctx, &dev_priv->vvm);
3648
3649 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3650 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
3651 if (out_fence_fd < 0) {
3652 VMW_DEBUG_USER("Failed to get a fence fd.\n");
3653 return out_fence_fd;
3654 }
3655 }
3656
3657 if (throttle_us) {
3658 ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
3659 throttle_us);
3660
3661 if (ret)
3662 goto out_free_fence_fd;
3663 }
3664
3665 kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
3666 kernel_commands, command_size,
3667 &header);
3668 if (IS_ERR(kernel_commands)) {
3669 ret = PTR_ERR(kernel_commands);
3670 goto out_free_fence_fd;
3671 }
3672
3673 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
3674 if (ret) {
3675 ret = -ERESTARTSYS;
3676 goto out_free_header;
3677 }
3678
3679 sw_context->kernel = false;
3680 if (kernel_commands == NULL) {
3681 ret = vmw_resize_cmd_bounce(sw_context, command_size);
3682 if (unlikely(ret != 0))
3683 goto out_unlock;
3684
3685 ret = copy_from_user(sw_context->cmd_bounce, user_commands,
3686 command_size);
3687 if (unlikely(ret != 0)) {
3688 ret = -EFAULT;
3689 VMW_DEBUG_USER("Failed copying commands.\n");
3690 goto out_unlock;
3691 }
3692
3693 kernel_commands = sw_context->cmd_bounce;
3694 } else if (!header) {
3695 sw_context->kernel = true;
3696 }
3697
3698 sw_context->fp = vmw_fpriv(file_priv);
3699 INIT_LIST_HEAD(&sw_context->ctx_list);
3700 sw_context->cur_query_bo = dev_priv->pinned_bo;
3701 sw_context->last_query_ctx = NULL;
3702 sw_context->needs_post_query_barrier = false;
3703 sw_context->dx_ctx_node = NULL;
3704 sw_context->dx_query_mob = NULL;
3705 sw_context->dx_query_ctx = NULL;
3706 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
3707 INIT_LIST_HEAD(&sw_context->res_relocations);
3708 INIT_LIST_HEAD(&sw_context->bo_relocations);
3709
3710 if (sw_context->staged_bindings)
3711 vmw_binding_state_reset(sw_context->staged_bindings);
3712
3713 if (!sw_context->res_ht_initialized) {
3714 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
3715 if (unlikely(ret != 0))
3716 goto out_unlock;
3717
3718 sw_context->res_ht_initialized = true;
3719 }
3720
3721 INIT_LIST_HEAD(&sw_context->staged_cmd_res);
3722 sw_context->ctx = &val_ctx;
3723 ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
3724 if (unlikely(ret != 0))
3725 goto out_err_nores;
3726
3727 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
3728 command_size);
3729 if (unlikely(ret != 0))
3730 goto out_err_nores;
3731
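	/*
	 * Ordering matters here: resources are reserved first, then buffer
	 * objects are reserved and validated, and only after that are the
	 * resources themselves validated, with their backing buffers already
	 * in place.
	 */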
3732 ret = vmw_resources_reserve(sw_context);
3733 if (unlikely(ret != 0))
3734 goto out_err_nores;
3735
3736 ret = vmw_validation_bo_reserve(&val_ctx, true);
3737 if (unlikely(ret != 0))
3738 goto out_err_nores;
3739
3740 ret = vmw_validation_bo_validate(&val_ctx, true);
3741 if (unlikely(ret != 0))
3742 goto out_err;
3743
3744 ret = vmw_validation_res_validate(&val_ctx, true);
3745 if (unlikely(ret != 0))
3746 goto out_err;
3747
3748 vmw_validation_drop_ht(&val_ctx);
3749
3750 ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
3751 if (unlikely(ret != 0)) {
3752 ret = -ERESTARTSYS;
3753 goto out_err;
3754 }
3755
3756 if (dev_priv->has_mob) {
3757 ret = vmw_rebind_contexts(sw_context);
3758 if (unlikely(ret != 0))
3759 goto out_unlock_binding;
3760 }
3761
3762 if (!header) {
3763 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
3764 command_size, sw_context);
3765 } else {
3766 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
3767 sw_context);
3768 header = NULL;
3769 }
3770 mutex_unlock(&dev_priv->binding_mutex);
3771 if (ret)
3772 goto out_err;
3773
3774 vmw_query_bo_switch_commit(dev_priv, sw_context);
3775 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
3776 (user_fence_rep) ? &handle : NULL);
3777 /*
3778 * This error is harmless, because if fence submission fails,
3779 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @user_fence_rep.
3781 */
3782 if (ret != 0)
3783 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3784
3785 vmw_execbuf_bindings_commit(sw_context, false);
3786 vmw_bind_dx_query_mob(sw_context);
3787 vmw_validation_res_unreserve(&val_ctx, false);
3788
3789 vmw_validation_bo_fence(sw_context->ctx, fence);
3790
3791 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
3792 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
3793
3794 /*
3795 * If anything fails here, give up trying to export the fence and do a
3796 * sync since the user mode will not be able to sync the fence itself.
3797 * This ensures we are still functionally correct.
3798 */
3799 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
3800
3801 sync_file = sync_file_create(&fence->base);
3802 if (!sync_file) {
3803 VMW_DEBUG_USER("Sync file create failed for fence\n");
3804 put_unused_fd(out_fence_fd);
3805 out_fence_fd = -1;
3806
3807 (void) vmw_fence_obj_wait(fence, false, false,
3808 VMW_FENCE_WAIT_TIMEOUT);
3809 } else {
3810 /* Link the fence with the FD created earlier */
3811 fd_install(out_fence_fd, sync_file->file);
3812 }
3813 }
3814
3815 vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
3816 user_fence_rep, fence, handle, out_fence_fd,
3817 sync_file);
3818
3819 /* Don't unreference when handing fence out */
3820 if (unlikely(out_fence != NULL)) {
3821 *out_fence = fence;
3822 fence = NULL;
3823 } else if (likely(fence != NULL)) {
3824 vmw_fence_obj_unreference(&fence);
3825 }
3826
3827 vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
3828 mutex_unlock(&dev_priv->cmdbuf_mutex);
3829
3830 /*
3831 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
3832 * in resource destruction paths.
3833 */
3834 vmw_validation_unref_lists(&val_ctx);
3835
3836 return 0;
3837
3838out_unlock_binding:
3839 mutex_unlock(&dev_priv->binding_mutex);
3840out_err:
3841 vmw_validation_bo_backoff(&val_ctx);
3842out_err_nores:
3843 vmw_execbuf_bindings_commit(sw_context, true);
3844 vmw_validation_res_unreserve(&val_ctx, true);
3845 vmw_resource_relocations_free(&sw_context->res_relocations);
3846 vmw_free_relocations(sw_context);
3847 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
3848 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
3849out_unlock:
3850 vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
3851 vmw_validation_drop_ht(&val_ctx);
3852 WARN_ON(!list_empty(&sw_context->ctx_list));
3853 mutex_unlock(&dev_priv->cmdbuf_mutex);
3854
3855 /*
3856 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
3857 * in resource destruction paths.
3858 */
3859 vmw_validation_unref_lists(&val_ctx);
3860out_free_header:
3861 if (header)
3862 vmw_cmdbuf_header_free(header);
3863out_free_fence_fd:
3864 if (out_fence_fd >= 0)
3865 put_unused_fd(out_fence_fd);
3866
3867 return ret;
3868}
3869
3870/**
3871 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
3872 *
3873 * @dev_priv: The device private structure.
3874 *
3875 * This function is called to idle the fifo and unpin the query buffer if the
3876 * normal way to do this hits an error, which should typically be extremely
3877 * rare.
3878 */
3879static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
3880{
3881 VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
3882
3883 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
3884 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
3885 if (dev_priv->dummy_query_bo_pinned) {
3886 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
3887 dev_priv->dummy_query_bo_pinned = false;
3888 }
3889}
3890
3891
3892/**
3893 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
3894 * bo.
3895 *
3896 * @dev_priv: The device private structure.
3897 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
3898 * query barrier that flushes all queries touching the current buffer pointed to
3899 * by @dev_priv->pinned_bo
3900 *
3901 * This function should be used to unpin the pinned query bo, or as a query
3902 * barrier when we need to make sure that all queries have finished before the
3903 * next fifo command. (For example on hardware context destructions where the
3904 * hardware may otherwise leak unfinished queries).
3905 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
3908 *
3909 * The function will synchronize on the previous query barrier, and will thus
3910 * not finish until that barrier has executed.
3911 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
3913 * calling this function.
3914 */
3915void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
3916 struct vmw_fence_obj *fence)
3917{
3918 int ret = 0;
3919 struct vmw_fence_obj *lfence = NULL;
3920 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
3921
3922 if (dev_priv->pinned_bo == NULL)
3923 goto out_unlock;
3924
3925 ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
3926 false);
3927 if (ret)
3928 goto out_no_reserve;
3929
3930 ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
3931 false);
3932 if (ret)
3933 goto out_no_reserve;
3934
3935 ret = vmw_validation_bo_reserve(&val_ctx, false);
3936 if (ret)
3937 goto out_no_reserve;
3938
3939 if (dev_priv->query_cid_valid) {
3940 BUG_ON(fence != NULL);
3941 ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
3942 if (ret)
3943 goto out_no_emit;
3944 dev_priv->query_cid_valid = false;
3945 }
3946
3947 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
3948 if (dev_priv->dummy_query_bo_pinned) {
3949 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
3950 dev_priv->dummy_query_bo_pinned = false;
3951 }
3952 if (fence == NULL) {
3953 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
3954 NULL);
3955 fence = lfence;
3956 }
3957 vmw_validation_bo_fence(&val_ctx, fence);
3958 if (lfence != NULL)
3959 vmw_fence_obj_unreference(&lfence);
3960
3961 vmw_validation_unref_lists(&val_ctx);
3962 vmw_bo_unreference(&dev_priv->pinned_bo);
3963
3964out_unlock:
3965 return;
3966out_no_emit:
3967 vmw_validation_bo_backoff(&val_ctx);
3968out_no_reserve:
3969 vmw_validation_unref_lists(&val_ctx);
3970 vmw_execbuf_unpin_panic(dev_priv);
3971 vmw_bo_unreference(&dev_priv->pinned_bo);
3972}
3973
3974/**
3975 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
3976 *
3977 * @dev_priv: The device private structure.
3978 *
3979 * This function should be used to unpin the pinned query bo, or as a query
3980 * barrier when we need to make sure that all queries have finished before the
3981 * next fifo command. (For example on hardware context destructions where the
3982 * hardware may otherwise leak unfinished queries).
3983 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
3986 *
3987 * The function will synchronize on the previous query barrier, and will thus
3988 * not finish until that barrier has executed.
3989 */
3990void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
3991{
3992 mutex_lock(&dev_priv->cmdbuf_mutex);
3993 if (dev_priv->query_cid_valid)
3994 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
3995 mutex_unlock(&dev_priv->cmdbuf_mutex);
3996}
3997
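/**
 * vmw_execbuf_ioctl - Entry point for the execbuf ioctl
 *
 * @dev: The drm device.
 * @data: The ioctl argument, a struct drm_vmw_execbuf_arg.
 * @file_priv: The calling file.
 *
 * Validates the ioctl argument version, optionally waits on an imported fence
 * fd, and hands the batch off to vmw_execbuf_process().
 */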
3998int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
3999 struct drm_file *file_priv)
4000{
4001 struct vmw_private *dev_priv = vmw_priv(dev);
4002 struct drm_vmw_execbuf_arg *arg = data;
4003 int ret;
4004 struct dma_fence *in_fence = NULL;
4005
4006 /*
4007 * Extend the ioctl argument while maintaining backwards compatibility:
4008 * We take different code paths depending on the value of arg->version.
4009 *
4010 * Note: The ioctl argument is extended and zeropadded by core DRM.
4011 */
4012 if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
4013 arg->version == 0)) {
4014 VMW_DEBUG_USER("Incorrect execbuf version.\n");
4015 return -EINVAL;
4016 }
4017
4018 switch (arg->version) {
4019 case 1:
4020 /* For v1 core DRM have extended + zeropadded the data */
4021 arg->context_handle = (uint32_t) -1;
4022 break;
4023 case 2:
4024 default:
4025 /* For v2 and later core DRM would have correctly copied it */
4026 break;
4027 }
4028
4029 /* If imported a fence FD from elsewhere, then wait on it */
4030 if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4031 in_fence = sync_file_get_fence(arg->imported_fence_fd);
4032
4033 if (!in_fence) {
4034 VMW_DEBUG_USER("Cannot get imported fence\n");
4035 return -EINVAL;
4036 }
4037
4038 ret = vmw_wait_dma_fence(dev_priv->fman, in_fence);
4039 if (ret)
4040 goto out;
4041 }
4042
	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		goto out;
4046
4047 ret = vmw_execbuf_process(file_priv, dev_priv,
4048 (void __user *)(unsigned long)arg->commands,
4049 NULL, arg->command_size, arg->throttle_us,
4050 arg->context_handle,
4051 (void __user *)(unsigned long)arg->fence_rep,
4052 NULL, arg->flags);
4053
4054 ttm_read_unlock(&dev_priv->reservation_sem);
4055 if (unlikely(ret != 0))
4056 goto out;
4057
4058 vmw_kms_cursor_post_execbuf(dev_priv);
4059
4060out:
4061 if (in_fence)
4062 dma_fence_put(in_fence);
4063 return ret;
4064}
1// SPDX-License-Identifier: GPL-2.0 OR MIT
2/**************************************************************************
3 *
4 * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27#include "vmwgfx_binding.h"
28#include "vmwgfx_bo.h"
29#include "vmwgfx_drv.h"
30#include "vmwgfx_mksstat.h"
31#include "vmwgfx_so.h"
32
33#include <drm/ttm/ttm_bo.h>
34#include <drm/ttm/ttm_placement.h>
35
36#include <linux/sync_file.h>
37#include <linux/hashtable.h>
38
39/*
40 * Helper macro to get dx_ctx_node if available otherwise print an error
41 * message. This is for use in command verifier function where if dx_ctx_node
42 * is not set then command is invalid.
43 */
44#define VMW_GET_CTX_NODE(__sw_context) \
45({ \
46 __sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \
47 VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
48 __sw_context->dx_ctx_node; \
49 }); \
50})
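
/*
 * Typical use in a DX command verifier (illustrative; mirrors the callers
 * below):
 *
 *	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
 *
 *	if (!ctx_node)
 *		return -EINVAL;
 */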
51
52#define VMW_DECLARE_CMD_VAR(__var, __type) \
53 struct { \
54 SVGA3dCmdHeader header; \
55 __type body; \
56 } __var
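
/*
 * Example declaration of a pointer to a header + body command, as the command
 * checkers below do:
 *
 *	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
 *
 *	cmd = container_of(header, typeof(*cmd), header);
 */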
57
58/**
59 * struct vmw_relocation - Buffer object relocation
60 *
61 * @head: List head for the command submission context's relocation list
62 * @vbo: Non ref-counted pointer to buffer object
63 * @mob_loc: Pointer to location for mob id to be modified
64 * @location: Pointer to location for guest pointer to be modified
65 */
66struct vmw_relocation {
67 struct list_head head;
68 struct vmw_bo *vbo;
69 union {
70 SVGAMobId *mob_loc;
71 SVGAGuestPtr *location;
72 };
73};
74
75/**
76 * enum vmw_resource_relocation_type - Relocation type for resources
77 *
78 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
79 * command stream is replaced with the actual id after validation.
80 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
81 * with a NOP.
82 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
83 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking.
 */
86enum vmw_resource_relocation_type {
87 vmw_res_rel_normal,
88 vmw_res_rel_nop,
89 vmw_res_rel_cond_nop,
90 vmw_res_rel_max
91};
92
93/**
94 * struct vmw_resource_relocation - Relocation info for resources
95 *
96 * @head: List head for the software context's relocation list.
97 * @res: Non-ref-counted pointer to the resource.
98 * @offset: Offset of single byte entries into the command buffer where the id
99 * that needs fixup is located.
100 * @rel_type: Type of relocation.
101 */
102struct vmw_resource_relocation {
103 struct list_head head;
104 const struct vmw_resource *res;
105 u32 offset:29;
106 enum vmw_resource_relocation_type rel_type:3;
107};
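
/*
 * Note on the bitfields above: 29 bits of @offset are enough to address any
 * byte in a command buffer, and 3 bits of @rel_type are enough for all
 * relocation types; vmw_resource_relocations_apply() enforces both with
 * BUILD_BUG_ON() checks against SVGA_CB_MAX_SIZE and vmw_res_rel_max.
 */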
108
109/**
110 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
111 *
112 * @head: List head of context list
113 * @ctx: The context resource
114 * @cur: The context's persistent binding state
115 * @staged: The binding state changes of this command buffer
116 */
117struct vmw_ctx_validation_info {
118 struct list_head head;
119 struct vmw_resource *ctx;
120 struct vmw_ctx_binding_state *cur;
121 struct vmw_ctx_binding_state *staged;
122};
123
124/**
125 * struct vmw_cmd_entry - Describe a command for the verifier
126 *
127 * @func: Call-back to handle the command.
128 * @user_allow: Whether allowed from the execbuf ioctl.
129 * @gb_disable: Whether disabled if guest-backed objects are available.
130 * @gb_enable: Whether enabled iff guest-backed objects are available.
131 * @cmd_name: Name of the command.
132 */
133struct vmw_cmd_entry {
134 int (*func) (struct vmw_private *, struct vmw_sw_context *,
135 SVGA3dCmdHeader *);
136 bool user_allow;
137 bool gb_disable;
138 bool gb_enable;
139 const char *cmd_name;
140};
141
142#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
143 [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
144 (_gb_disable), (_gb_enable), #_cmd}
145
146static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
147 struct vmw_sw_context *sw_context,
148 struct vmw_resource *ctx);
149static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
150 struct vmw_sw_context *sw_context,
151 SVGAMobId *id,
152 struct vmw_bo **vmw_bo_p);
153/**
154 * vmw_ptr_diff - Compute the offset from a to b in bytes
155 *
156 * @a: A starting pointer.
157 * @b: A pointer offset in the same address space.
158 *
159 * Returns: The offset in bytes between the two pointers.
160 */
161static size_t vmw_ptr_diff(void *a, void *b)
162{
163 return (unsigned long) b - (unsigned long) a;
164}
165
166/**
167 * vmw_execbuf_bindings_commit - Commit modified binding state
168 *
169 * @sw_context: The command submission context
170 * @backoff: Whether this is part of the error path and binding state changes
171 * should be ignored
172 */
173static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
174 bool backoff)
175{
176 struct vmw_ctx_validation_info *entry;
177
178 list_for_each_entry(entry, &sw_context->ctx_list, head) {
179 if (!backoff)
180 vmw_binding_state_commit(entry->cur, entry->staged);
181
182 if (entry->staged != sw_context->staged_bindings)
183 vmw_binding_state_free(entry->staged);
184 else
185 sw_context->staged_bindings_inuse = false;
186 }
187
188 /* List entries are freed with the validation context */
189 INIT_LIST_HEAD(&sw_context->ctx_list);
190}
191
192/**
193 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
194 *
195 * @sw_context: The command submission context
196 */
197static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
198{
199 if (sw_context->dx_query_mob)
200 vmw_context_bind_dx_query(sw_context->dx_query_ctx,
201 sw_context->dx_query_mob);
202}
203
204/**
205 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
206 * the validate list.
207 *
 * @dev_priv: Pointer to the device private.
209 * @sw_context: The command submission context
210 * @res: Pointer to the resource
211 * @node: The validation node holding the context resource metadata
212 */
213static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
214 struct vmw_sw_context *sw_context,
215 struct vmw_resource *res,
216 struct vmw_ctx_validation_info *node)
217{
218 int ret;
219
220 ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
221 if (unlikely(ret != 0))
222 goto out_err;
223
224 if (!sw_context->staged_bindings) {
225 sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
226 if (IS_ERR(sw_context->staged_bindings)) {
227 ret = PTR_ERR(sw_context->staged_bindings);
228 sw_context->staged_bindings = NULL;
229 goto out_err;
230 }
231 }
232
233 if (sw_context->staged_bindings_inuse) {
234 node->staged = vmw_binding_state_alloc(dev_priv);
235 if (IS_ERR(node->staged)) {
236 ret = PTR_ERR(node->staged);
237 node->staged = NULL;
238 goto out_err;
239 }
240 } else {
241 node->staged = sw_context->staged_bindings;
242 sw_context->staged_bindings_inuse = true;
243 }
244
245 node->ctx = res;
246 node->cur = vmw_context_binding_state(res);
247 list_add_tail(&node->head, &sw_context->ctx_list);
248
249 return 0;
250
251out_err:
252 return ret;
253}
254
255/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
257 *
258 * @dev_priv: Pointer to the device private struct.
259 * @res_type: The resource type.
260 *
261 * Guest-backed contexts and DX contexts require extra size to store execbuf
262 * private information in the validation node. Typically the binding manager
263 * associated data structures.
264 *
265 * Returns: The extra size requirement based on resource type.
266 */
267static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
268 enum vmw_res_type res_type)
269{
270 return (res_type == vmw_res_dx_context ||
271 (res_type == vmw_res_context && dev_priv->has_mob)) ?
272 sizeof(struct vmw_ctx_validation_info) : 0;
273}
274
275/**
276 * vmw_execbuf_rcache_update - Update a resource-node cache entry
277 *
278 * @rcache: Pointer to the entry to update.
279 * @res: Pointer to the resource.
280 * @private: Pointer to the execbuf-private space in the resource validation
281 * node.
282 */
283static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
284 struct vmw_resource *res,
285 void *private)
286{
287 rcache->res = res;
288 rcache->private = private;
289 rcache->valid = 1;
290 rcache->valid_handle = 0;
291}
292
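/**
 * enum vmw_val_add_flags - Modifiers for vmw_execbuf_res_val_add()
 *
 * @vmw_val_add_flag_none: Reserve execbuf-private metadata space for the
 * resource if its type requires it (see vmw_execbuf_res_size()).
 * @vmw_val_add_flag_noctx: Add the resource without any execbuf-private
 * metadata, skipping the first-usage context setup.
 */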
293enum vmw_val_add_flags {
294 vmw_val_add_flag_none = 0,
295 vmw_val_add_flag_noctx = 1 << 0,
296};
297
298/**
299 * vmw_execbuf_res_val_add - Add a resource to the validation list.
300 *
301 * @sw_context: Pointer to the software context.
302 * @res: Unreferenced rcu-protected pointer to the resource.
303 * @dirty: Whether to change dirty status.
304 * @flags: specifies whether to use the context or not
305 *
306 * Returns: 0 on success. Negative error code on failure. Typical error codes
307 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
308 */
309static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
310 struct vmw_resource *res,
311 u32 dirty,
312 u32 flags)
313{
314 struct vmw_private *dev_priv = res->dev_priv;
315 int ret;
316 enum vmw_res_type res_type = vmw_res_type(res);
317 struct vmw_res_cache_entry *rcache;
318 struct vmw_ctx_validation_info *ctx_info;
319 bool first_usage;
320 unsigned int priv_size;
321
322 rcache = &sw_context->res_cache[res_type];
323 if (likely(rcache->valid && rcache->res == res)) {
324 if (dirty)
325 vmw_validation_res_set_dirty(sw_context->ctx,
326 rcache->private, dirty);
327 return 0;
328 }
329
330 if ((flags & vmw_val_add_flag_noctx) != 0) {
331 ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
332 (void **)&ctx_info, NULL);
333 if (ret)
334 return ret;
335
336 } else {
337 priv_size = vmw_execbuf_res_size(dev_priv, res_type);
338 ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
339 dirty, (void **)&ctx_info,
340 &first_usage);
341 if (ret)
342 return ret;
343
344 if (priv_size && first_usage) {
345 ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
346 ctx_info);
347 if (ret) {
348 VMW_DEBUG_USER("Failed first usage context setup.\n");
349 return ret;
350 }
351 }
352 }
353
354 vmw_execbuf_rcache_update(rcache, res, ctx_info);
355 return 0;
356}
357
358/**
 * vmw_view_res_val_add - Add a view, and the surface it's pointing to, to
 * the validation list
361 *
362 * @sw_context: The software context holding the validation list.
363 * @view: Pointer to the view resource.
364 *
365 * Returns 0 if success, negative error code otherwise.
366 */
367static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
368 struct vmw_resource *view)
369{
370 int ret;
371
372 /*
373 * First add the resource the view is pointing to, otherwise it may be
374 * swapped out when the view is validated.
375 */
376 ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
377 vmw_view_dirtying(view), vmw_val_add_flag_noctx);
378 if (ret)
379 return ret;
380
381 return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
382 vmw_val_add_flag_noctx);
383}
384
385/**
 * vmw_view_id_val_add - Look up a view and add it, and the surface it's
 * pointing to, to the validation list.
388 *
389 * @sw_context: The software context holding the validation list.
390 * @view_type: The view type to look up.
391 * @id: view id of the view.
392 *
393 * The view is represented by a view id and the DX context it's created on, or
394 * scheduled for creation on. If there is no DX context set, the function will
395 * return an -EINVAL error pointer.
396 *
397 * Returns: Unreferenced pointer to the resource on success, negative error
398 * pointer on failure.
399 */
400static struct vmw_resource *
401vmw_view_id_val_add(struct vmw_sw_context *sw_context,
402 enum vmw_view_type view_type, u32 id)
403{
404 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
405 struct vmw_resource *view;
406 int ret;
407
408 if (!ctx_node)
409 return ERR_PTR(-EINVAL);
410
411 view = vmw_view_lookup(sw_context->man, view_type, id);
412 if (IS_ERR(view))
413 return view;
414
415 ret = vmw_view_res_val_add(sw_context, view);
416 if (ret)
417 return ERR_PTR(ret);
418
419 return view;
420}
421
422/**
423 * vmw_resource_context_res_add - Put resources previously bound to a context on
424 * the validation list
425 *
426 * @dev_priv: Pointer to a device private structure
427 * @sw_context: Pointer to a software context used for this command submission
428 * @ctx: Pointer to the context resource
429 *
430 * This function puts all resources that were previously bound to @ctx on the
431 * resource validation list. This is part of the context state reemission
432 */
433static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
434 struct vmw_sw_context *sw_context,
435 struct vmw_resource *ctx)
436{
437 struct list_head *binding_list;
438 struct vmw_ctx_bindinfo *entry;
439 int ret = 0;
440 struct vmw_resource *res;
441 u32 i;
442 u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
443 SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
444
445 /* Add all cotables to the validation list. */
446 if (has_sm4_context(dev_priv) &&
447 vmw_res_type(ctx) == vmw_res_dx_context) {
448 for (i = 0; i < cotable_max; ++i) {
449 res = vmw_context_cotable(ctx, i);
450 if (IS_ERR_OR_NULL(res))
451 continue;
452
453 ret = vmw_execbuf_res_val_add(sw_context, res,
454 VMW_RES_DIRTY_SET,
455 vmw_val_add_flag_noctx);
456 if (unlikely(ret != 0))
457 return ret;
458 }
459 }
460
461 /* Add all resources bound to the context to the validation list */
462 mutex_lock(&dev_priv->binding_mutex);
463 binding_list = vmw_context_binding_list(ctx);
464
465 list_for_each_entry(entry, binding_list, ctx_list) {
466 if (vmw_res_type(entry->res) == vmw_res_view)
467 ret = vmw_view_res_val_add(sw_context, entry->res);
468 else
469 ret = vmw_execbuf_res_val_add(sw_context, entry->res,
470 vmw_binding_dirtying(entry->bt),
471 vmw_val_add_flag_noctx);
472 if (unlikely(ret != 0))
473 break;
474 }
475
476 if (has_sm4_context(dev_priv) &&
477 vmw_res_type(ctx) == vmw_res_dx_context) {
478 struct vmw_bo *dx_query_mob;
479
480 dx_query_mob = vmw_context_get_dx_query_mob(ctx);
481 if (dx_query_mob) {
482 vmw_bo_placement_set(dx_query_mob,
483 VMW_BO_DOMAIN_MOB,
484 VMW_BO_DOMAIN_MOB);
485 ret = vmw_validation_add_bo(sw_context->ctx,
486 dx_query_mob);
487 }
488 }
489
490 mutex_unlock(&dev_priv->binding_mutex);
491 return ret;
492}
493
494/**
495 * vmw_resource_relocation_add - Add a relocation to the relocation list
496 *
497 * @sw_context: Pointer to the software context.
498 * @res: The resource.
499 * @offset: Offset into the command buffer currently being parsed where the id
500 * that needs fixup is located. Granularity is one byte.
501 * @rel_type: Relocation type.
502 */
503static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
504 const struct vmw_resource *res,
505 unsigned long offset,
506 enum vmw_resource_relocation_type
507 rel_type)
508{
509 struct vmw_resource_relocation *rel;
510
511 rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
512 if (unlikely(!rel)) {
513 VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
514 return -ENOMEM;
515 }
516
517 rel->res = res;
518 rel->offset = offset;
519 rel->rel_type = rel_type;
520 list_add_tail(&rel->head, &sw_context->res_relocations);
521
522 return 0;
523}
524
525/**
526 * vmw_resource_relocations_free - Free all relocations on a list
527 *
528 * @list: Pointer to the head of the relocation list
529 */
530static void vmw_resource_relocations_free(struct list_head *list)
531{
532 /* Memory is validation context memory, so no need to free it */
533 INIT_LIST_HEAD(list);
534}
535
536/**
537 * vmw_resource_relocations_apply - Apply all relocations on a list
538 *
 * @cb: Pointer to the start of the command buffer being patched. This need not be
540 * the same buffer as the one being parsed when the relocation list was built,
541 * but the contents must be the same modulo the resource ids.
542 * @list: Pointer to the head of the relocation list.
543 */
544static void vmw_resource_relocations_apply(uint32_t *cb,
545 struct list_head *list)
546{
547 struct vmw_resource_relocation *rel;
548
549 /* Validate the struct vmw_resource_relocation member size */
550 BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
551 BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));
552
553 list_for_each_entry(rel, list, head) {
554 u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
555 switch (rel->rel_type) {
556 case vmw_res_rel_normal:
557 *addr = rel->res->id;
558 break;
559 case vmw_res_rel_nop:
560 *addr = SVGA_3D_CMD_NOP;
561 break;
562 default:
563 if (rel->res->id == -1)
564 *addr = SVGA_3D_CMD_NOP;
565 break;
566 }
567 }
568}
569
570static int vmw_cmd_invalid(struct vmw_private *dev_priv,
571 struct vmw_sw_context *sw_context,
572 SVGA3dCmdHeader *header)
573{
574 return -EINVAL;
575}
576
577static int vmw_cmd_ok(struct vmw_private *dev_priv,
578 struct vmw_sw_context *sw_context,
579 SVGA3dCmdHeader *header)
580{
581 return 0;
582}
583
584/**
585 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
586 * list.
587 *
588 * @sw_context: Pointer to the software context.
589 *
 * Note that since VMware's command submission currently is protected by the
591 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
592 * only a single thread at once will attempt this.
593 */
594static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
595{
596 int ret;
597
598 ret = vmw_validation_res_reserve(sw_context->ctx, true);
599 if (ret)
600 return ret;
601
602 if (sw_context->dx_query_mob) {
603 struct vmw_bo *expected_dx_query_mob;
604
605 expected_dx_query_mob =
606 vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
607 if (expected_dx_query_mob &&
608 expected_dx_query_mob != sw_context->dx_query_mob) {
609 ret = -EINVAL;
610 }
611 }
612
613 return ret;
614}
615
616/**
617 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
618 * resource validate list unless it's already there.
619 *
620 * @dev_priv: Pointer to a device private structure.
621 * @sw_context: Pointer to the software context.
622 * @res_type: Resource type.
623 * @dirty: Whether to change dirty status.
624 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed where the user-space resource id handle is located.
 * @p_res: If non-NULL, populated on exit with a non-reference-counted pointer
 * to the looked-up resource.
629 */
630static int
631vmw_cmd_res_check(struct vmw_private *dev_priv,
632 struct vmw_sw_context *sw_context,
633 enum vmw_res_type res_type,
634 u32 dirty,
635 const struct vmw_user_resource_conv *converter,
636 uint32_t *id_loc,
637 struct vmw_resource **p_res)
638{
639 struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
640 struct vmw_resource *res;
641 int ret = 0;
642 bool needs_unref = false;
643
644 if (p_res)
645 *p_res = NULL;
646
647 if (*id_loc == SVGA3D_INVALID_ID) {
648 if (res_type == vmw_res_context) {
649 VMW_DEBUG_USER("Illegal context invalid id.\n");
650 return -EINVAL;
651 }
652 return 0;
653 }
654
655 if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
656 res = rcache->res;
657 if (dirty)
658 vmw_validation_res_set_dirty(sw_context->ctx,
659 rcache->private, dirty);
660 } else {
661 unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
662
663 ret = vmw_validation_preload_res(sw_context->ctx, size);
664 if (ret)
665 return ret;
666
667 ret = vmw_user_resource_lookup_handle
668 (dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
669 if (ret != 0) {
670 VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
671 (unsigned int) *id_loc);
672 return ret;
673 }
674 needs_unref = true;
675
676 ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
677 if (unlikely(ret != 0))
678 goto res_check_done;
679
680 if (rcache->valid && rcache->res == res) {
681 rcache->valid_handle = true;
682 rcache->handle = *id_loc;
683 }
684 }
685
686 ret = vmw_resource_relocation_add(sw_context, res,
687 vmw_ptr_diff(sw_context->buf_start,
688 id_loc),
689 vmw_res_rel_normal);
690 if (p_res)
691 *p_res = res;
692
693res_check_done:
694 if (needs_unref)
695 vmw_resource_unreference(&res);
696
697 return ret;
698}
699
700/**
701 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
702 *
703 * @ctx_res: context the query belongs to
704 *
705 * This function assumes binding_mutex is held.
706 */
707static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
708{
709 struct vmw_private *dev_priv = ctx_res->dev_priv;
710 struct vmw_bo *dx_query_mob;
711 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
712
713 dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
714
715 if (!dx_query_mob || dx_query_mob->dx_query_ctx)
716 return 0;
717
718 cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
719 if (cmd == NULL)
720 return -ENOMEM;
721
722 cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
723 cmd->header.size = sizeof(cmd->body);
724 cmd->body.cid = ctx_res->id;
725 cmd->body.mobid = dx_query_mob->tbo.resource->start;
726 vmw_cmd_commit(dev_priv, sizeof(*cmd));
727
728 vmw_context_bind_dx_query(ctx_res, dx_query_mob);
729
730 return 0;
731}
732
733/**
734 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
735 * contexts.
736 *
737 * @sw_context: Pointer to the software context.
738 *
739 * Rebind context binding points that have been scrubbed because of eviction.
740 */
741static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
742{
743 struct vmw_ctx_validation_info *val;
744 int ret;
745
746 list_for_each_entry(val, &sw_context->ctx_list, head) {
747 ret = vmw_binding_rebind_all(val->cur);
748 if (unlikely(ret != 0)) {
749 if (ret != -ERESTARTSYS)
750 VMW_DEBUG_USER("Failed to rebind context.\n");
751 return ret;
752 }
753
754 ret = vmw_rebind_all_dx_query(val->ctx);
755 if (ret != 0) {
756 VMW_DEBUG_USER("Failed to rebind queries.\n");
757 return ret;
758 }
759 }
760
761 return 0;
762}
763
764/**
765 * vmw_view_bindings_add - Add an array of view bindings to a context binding
766 * state tracker.
767 *
768 * @sw_context: The execbuf state used for this command.
769 * @view_type: View type for the bindings.
770 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
772 * @view_ids: Array of view ids to be bound.
773 * @num_views: Number of view ids in @view_ids.
774 * @first_slot: The binding slot to be used for the first view id in @view_ids.
775 */
776static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
777 enum vmw_view_type view_type,
778 enum vmw_ctx_binding_type binding_type,
779 uint32 shader_slot,
780 uint32 view_ids[], u32 num_views,
781 u32 first_slot)
782{
783 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
784 u32 i;
785
786 if (!ctx_node)
787 return -EINVAL;
788
789 for (i = 0; i < num_views; ++i) {
790 struct vmw_ctx_bindinfo_view binding;
791 struct vmw_resource *view = NULL;
792
793 if (view_ids[i] != SVGA3D_INVALID_ID) {
794 view = vmw_view_id_val_add(sw_context, view_type,
795 view_ids[i]);
796 if (IS_ERR(view)) {
797 VMW_DEBUG_USER("View not found.\n");
798 return PTR_ERR(view);
799 }
800 }
801 binding.bi.ctx = ctx_node->ctx;
802 binding.bi.res = view;
803 binding.bi.bt = binding_type;
804 binding.shader_slot = shader_slot;
805 binding.slot = first_slot + i;
806 vmw_binding_add(ctx_node->staged, &binding.bi,
807 shader_slot, binding.slot);
808 }
809
810 return 0;
811}
812
813/**
814 * vmw_cmd_cid_check - Check a command header for valid context information.
815 *
816 * @dev_priv: Pointer to a device private structure.
817 * @sw_context: Pointer to the software context.
818 * @header: A command header with an embedded user-space context handle.
819 *
820 * Convenience function: Call vmw_cmd_res_check with the user-space context
821 * handle embedded in @header.
822 */
823static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
824 struct vmw_sw_context *sw_context,
825 SVGA3dCmdHeader *header)
826{
827 VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
828 container_of(header, typeof(*cmd), header);
829
830 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
831 VMW_RES_DIRTY_SET, user_context_converter,
832 &cmd->body, NULL);
833}
834
835/**
836 * vmw_execbuf_info_from_res - Get the private validation metadata for a
837 * recently validated resource
838 *
839 * @sw_context: Pointer to the command submission context
840 * @res: The resource
841 *
 * The resource pointed to by @res needs to be present in the command submission
 * context's resource cache, and hence must be the last resource of that type
 * to have been processed by the validation code.
845 *
846 * Return: a pointer to the private metadata of the resource, or NULL if it
847 * wasn't found
848 */
849static struct vmw_ctx_validation_info *
850vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
851 struct vmw_resource *res)
852{
853 struct vmw_res_cache_entry *rcache =
854 &sw_context->res_cache[vmw_res_type(res)];
855
856 if (rcache->valid && rcache->res == res)
857 return rcache->private;
858
859 WARN_ON_ONCE(true);
860 return NULL;
861}
862
863static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
864 struct vmw_sw_context *sw_context,
865 SVGA3dCmdHeader *header)
866{
867 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
868 struct vmw_resource *ctx;
869 struct vmw_resource *res;
870 int ret;
871
872 cmd = container_of(header, typeof(*cmd), header);
873
874 if (cmd->body.type >= SVGA3D_RT_MAX) {
875 VMW_DEBUG_USER("Illegal render target type %u.\n",
876 (unsigned int) cmd->body.type);
877 return -EINVAL;
878 }
879
880 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
881 VMW_RES_DIRTY_SET, user_context_converter,
882 &cmd->body.cid, &ctx);
883 if (unlikely(ret != 0))
884 return ret;
885
886 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
887 VMW_RES_DIRTY_SET, user_surface_converter,
888 &cmd->body.target.sid, &res);
889 if (unlikely(ret))
890 return ret;
891
892 if (dev_priv->has_mob) {
893 struct vmw_ctx_bindinfo_view binding;
894 struct vmw_ctx_validation_info *node;
895
896 node = vmw_execbuf_info_from_res(sw_context, ctx);
897 if (!node)
898 return -EINVAL;
899
900 binding.bi.ctx = ctx;
901 binding.bi.res = res;
902 binding.bi.bt = vmw_ctx_binding_rt;
903 binding.slot = cmd->body.type;
904 vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
905 }
906
907 return 0;
908}
909
910static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
911 struct vmw_sw_context *sw_context,
912 SVGA3dCmdHeader *header)
913{
914 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
915 int ret;
916
917 cmd = container_of(header, typeof(*cmd), header);
918
919 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
920 VMW_RES_DIRTY_NONE, user_surface_converter,
921 &cmd->body.src.sid, NULL);
922 if (ret)
923 return ret;
924
925 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
926 VMW_RES_DIRTY_SET, user_surface_converter,
927 &cmd->body.dest.sid, NULL);
928}
929
930static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
931 struct vmw_sw_context *sw_context,
932 SVGA3dCmdHeader *header)
933{
934 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
935 int ret;
936
937 cmd = container_of(header, typeof(*cmd), header);
938 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
939 VMW_RES_DIRTY_NONE, user_surface_converter,
940 &cmd->body.src, NULL);
941 if (ret != 0)
942 return ret;
943
944 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
945 VMW_RES_DIRTY_SET, user_surface_converter,
946 &cmd->body.dest, NULL);
947}
948
949static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
950 struct vmw_sw_context *sw_context,
951 SVGA3dCmdHeader *header)
952{
953 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
954 int ret;
955
956 cmd = container_of(header, typeof(*cmd), header);
957 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
958 VMW_RES_DIRTY_NONE, user_surface_converter,
959 &cmd->body.srcSid, NULL);
960 if (ret != 0)
961 return ret;
962
963 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
964 VMW_RES_DIRTY_SET, user_surface_converter,
965 &cmd->body.dstSid, NULL);
966}
967
968static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
969 struct vmw_sw_context *sw_context,
970 SVGA3dCmdHeader *header)
971{
972 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
973 int ret;
974
975 cmd = container_of(header, typeof(*cmd), header);
976 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
977 VMW_RES_DIRTY_NONE, user_surface_converter,
978 &cmd->body.src.sid, NULL);
979 if (unlikely(ret != 0))
980 return ret;
981
982 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
983 VMW_RES_DIRTY_SET, user_surface_converter,
984 &cmd->body.dest.sid, NULL);
985}
986
987static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
988 struct vmw_sw_context *sw_context,
989 SVGA3dCmdHeader *header)
990{
991 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
992 container_of(header, typeof(*cmd), header);
993
994 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
995 VMW_RES_DIRTY_NONE, user_surface_converter,
996 &cmd->body.srcImage.sid, NULL);
997}
998
999static int vmw_cmd_present_check(struct vmw_private *dev_priv,
1000 struct vmw_sw_context *sw_context,
1001 SVGA3dCmdHeader *header)
1002{
1003 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
1004 container_of(header, typeof(*cmd), header);
1005
1006 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1007 VMW_RES_DIRTY_NONE, user_surface_converter,
1008 &cmd->body.sid, NULL);
1009}
1010
1011/**
1012 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
1013 *
1014 * @dev_priv: The device private structure.
1015 * @new_query_bo: The new buffer holding query results.
1016 * @sw_context: The software context used for this command submission.
1017 *
1018 * This function checks whether @new_query_bo is suitable for holding query
1019 * results, and if another buffer currently is pinned for query results. If so,
1020 * the function prepares the state of @sw_context for switching pinned buffers
1021 * after successful submission of the current command batch.
1022 */
1023static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
1024 struct vmw_bo *new_query_bo,
1025 struct vmw_sw_context *sw_context)
1026{
1027 struct vmw_res_cache_entry *ctx_entry =
1028 &sw_context->res_cache[vmw_res_context];
1029 int ret;
1030
1031 BUG_ON(!ctx_entry->valid);
1032 sw_context->last_query_ctx = ctx_entry->res;
1033
1034 if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
1035
1036 if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) {
1037 VMW_DEBUG_USER("Query buffer too large.\n");
1038 return -EINVAL;
1039 }
1040
1041 if (unlikely(sw_context->cur_query_bo != NULL)) {
1042 sw_context->needs_post_query_barrier = true;
1043 vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo);
1044 ret = vmw_validation_add_bo(sw_context->ctx,
1045 sw_context->cur_query_bo);
1046 if (unlikely(ret != 0))
1047 return ret;
1048 }
1049 sw_context->cur_query_bo = new_query_bo;
1050
1051 vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo);
1052 ret = vmw_validation_add_bo(sw_context->ctx,
1053 dev_priv->dummy_query_bo);
1054 if (unlikely(ret != 0))
1055 return ret;
1056 }
1057
1058 return 0;
1059}
1060
1061/**
1062 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
1063 *
1064 * @dev_priv: The device private structure.
1065 * @sw_context: The software context used for this command submission batch.
1066 *
1067 * This function will check if we're switching query buffers, and will then,
1068 * issue a dummy occlusion query wait used as a query barrier. When the fence
1069 * object following that query wait has signaled, we are sure that all preceding
1070 * queries have finished, and the old query buffer can be unpinned. However,
1071 * since both the new query buffer and the old one are fenced with that fence,
 * we can do an asynchronous unpin now, and be sure that the old query buffer
1073 * won't be moved until the fence has signaled.
1074 *
 * As mentioned above, both the new and the old query buffers need to be fenced
1076 * using a sequence emitted *after* calling this function.
1077 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

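	/*
	 * If the query buffer changed, unpin and drop our reference to the
	 * previously pinned buffer. Unless a post-query barrier is pending,
	 * pin the new buffer (and the dummy query buffer) and record the
	 * context the last query belongs to.
	 */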
	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We also pin the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy queries
			 * in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_bo **vmw_bo_p)
{
	struct vmw_bo *vmw_bo, *tmp_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
		return ret;
	}
	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
	tmp_bo = vmw_bo;
	vmw_user_bo_unref(&tmp_bo);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_bo **vmw_bo_p)
{
	struct vmw_bo *vmw_bo, *tmp_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
		return ret;
	}
	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
	tmp_bo = vmw_bo;
	vmw_user_bo_unref(&tmp_bo);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	if (IS_ERR_OR_NULL(cotable_res))
		return cotable_res ? PTR_ERR(cotable_res) : -EINVAL;
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_bo *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

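	/*
	 * On guest-backed devices, rewrite the legacy query command in place
	 * into its guest-backed equivalent and validate that instead. The
	 * two command structs are the same size, which the BUG_ON below
	 * asserts, so the in-place memcpy is safe.
	 */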
	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->tbo.base.size;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

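	/*
	 * Clamp the suffix's maximumOffset so the DMA operation cannot read
	 * or write past the end of the buffer object.
	 */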
	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);

	return 0;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, typeof(*cmd), header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

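	/*
	 * The command body is followed by an array of vertex declarations
	 * and then an array of primitive ranges; bound both counts by what
	 * actually fits within the command size.
	 */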
	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		VMW_DEBUG_USER("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
		((unsigned long) header + header->size + sizeof(*header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

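	/*
	 * Walk the texture states following the command body; only
	 * SVGA3D_TS_BIND_TEXTURE entries reference a surface that needs
	 * validation and, on guest-backed devices, a context binding.
	 */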
	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
				       (unsigned int) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_bo *vmw_bo;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
				       &vmw_bo);
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: Pointer to the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res, uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_bo *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper around
 * vmw_cmd_res_switch_backup() with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter, uint32_t *res_id, uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
					 backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter, &cmd->body.sid,
				     &cmd->body.mobid, 0);
}

/**
 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
	int ret;
	size_t size;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1, cmd->body.type,
				    size, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
	int ret;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
				cmd->body.type, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *ctx, *res = NULL;
	struct vmw_ctx_validation_info *ctx_info;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		/*
		 * This is the compat shader path - Per device guest-backed
		 * shaders, but user-space thinks it's per context host-
		 * backed shaders.
		 */
		res = vmw_shader_lookup(vmw_context_res_man(ctx),
					cmd->body.shid, cmd->body.type);
		if (!IS_ERR(res)) {
			ret = vmw_execbuf_res_val_add(sw_context, res,
						      VMW_RES_DIRTY_NONE,
						      vmw_val_add_flag_noctx);
			if (unlikely(ret != 0))
				return ret;

			ret = vmw_resource_relocation_add
				(sw_context, res,
				 vmw_ptr_diff(sw_context->buf_start,
					      &cmd->body.shid),
				 vmw_res_rel_normal);
			if (unlikely(ret != 0))
				return ret;
		}
	}

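	/*
	 * If no compat shader was found, fall back to looking the shader up
	 * through the user-space handle converter.
	 */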
	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					VMW_RES_DIRTY_NONE,
					user_shader_converter, &cmd->body.shid,
					&res);
		if (unlikely(ret != 0))
			return ret;
	}

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;

	binding.bi.ctx = ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter, &cmd->body.shid,
				     &cmd->body.mobid, cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);

	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
	    cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
			       (unsigned int) cmd->body.type,
			       (unsigned int) cmd->body.slot);
		return -EINVAL;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
			binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_constant_buffer_offset - Validate
 * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);

	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 shader_slot;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
			       (unsigned int) cmd->body.slot);
		return -EINVAL;
	}

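	/*
	 * The per-shader-type variants of this command have consecutive
	 * command ids, so the shader slot can be derived from the distance
	 * to the vertex-shader variant.
	 */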
	shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
	vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
				     cmd->body.slot, cmd->body.offsetInBytes);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
		container_of(header, typeof(*cmd), header);

	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
		VMW_DEBUG_USER("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}

/**
 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_val_add(sw_context, res,
					      VMW_RES_DIRTY_NONE,
					      vmw_val_add_flag_noctx);
		if (ret)
			return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
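	/* Do the range check in 64 bits to avoid arithmetic overflow. */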
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cmd->buf[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = res;
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_dx_set_index_buffer - Validate
 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
		container_of(header, typeof(*cmd), header);
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);
	int ret;

	if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) {
		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
				    0, &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
				     num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
				  cmd->body.renderTargetViewId);

	return PTR_ERR_OR_ZERO(ret);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
				  cmd->body.depthStencilViewId);

	return PTR_ERR_OR_ZERO(ret);
}

static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *srf;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have the
	 * same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (!ctx_node)
		return -EINVAL;

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
		VMW_DEBUG_USER("Invalid surface id.\n");
		return -EINVAL;
	}
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->sid, &srf);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
	if (IS_ERR_OR_NULL(res))
		return res ? PTR_ERR(res) : -EINVAL;
	ret = vmw_cotable_notify(res, cmd->defined_id);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
			    cmd->defined_id, header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_so_target binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		VMW_DEBUG_USER("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_SET,
					user_surface_converter,
					&cmd->targets[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_so_target;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
	if (IS_ERR_OR_NULL(res))
		return res ? PTR_ERR(res) : -EINVAL;
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

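	/*
	 * All three command bodies start with the surface id, so the union
	 * member sid aliases it; the BUILD_BUG_ONs below assert that the
	 * offsets really match.
	 */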
	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->sid, NULL);
}

static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);

	if (!ctx_node)
		return -EINVAL;

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the
 * view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this command
 * batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res, &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * device errors.
	 */
	return vmw_resource_relocation_add(sw_context, view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}

/**
 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
	if (IS_ERR_OR_NULL(res))
		return res ? PTR_ERR(res) : -EINVAL;
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

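	/*
	 * The shader can be looked up either through an explicitly given
	 * context id or, if the id is SVGA3D_INVALID_ID, through the
	 * command buffer's current DX context.
	 */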
	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					VMW_RES_DIRTY_SET,
					user_context_converter, &cmd->body.cid,
					&ctx);
		if (ret)
			return ret;
	} else {
		struct vmw_ctx_validation_info *ctx_node =
			VMW_GET_CTX_NODE(sw_context);

		if (!ctx_node)
			return -EINVAL;

		ctx = ctx_node->ctx;
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
				      vmw_val_add_flag_noctx);
	if (ret) {
		VMW_DEBUG_USER("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}

2784/**
2785 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2786 *
2787 * @dev_priv: Pointer to a device private struct.
2788 * @sw_context: The software context being used for this batch.
2789 * @header: Pointer to the command header in the command stream.
2790 */
2791static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2792 struct vmw_sw_context *sw_context,
2793 SVGA3dCmdHeader *header)
2794{
2795 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2796 container_of(header, typeof(*cmd), header);
2797 struct vmw_resource *view;
2798 struct vmw_res_cache_entry *rcache;
2799
2800 view = vmw_view_id_val_add(sw_context, vmw_view_sr,
2801 cmd->body.shaderResourceViewId);
2802 if (IS_ERR(view))
2803 return PTR_ERR(view);
2804
	/*
	 * Normally the shader-resource view is not gpu-dirtying, but for
	 * this particular command it is. Mark the last looked-up surface,
	 * which is the surface the view points to, as gpu-dirty.
	 */
2811 rcache = &sw_context->res_cache[vmw_res_surface];
2812 vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
2813 VMW_RES_DIRTY_SET);
2814 return 0;
2815}
2816
2817/**
2818 * vmw_cmd_dx_transfer_from_buffer - Validate
2819 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2820 *
2821 * @dev_priv: Pointer to a device private struct.
2822 * @sw_context: The software context being used for this batch.
2823 * @header: Pointer to the command header in the command stream.
2824 */
2825static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2826 struct vmw_sw_context *sw_context,
2827 SVGA3dCmdHeader *header)
2828{
2829 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2830 container_of(header, typeof(*cmd), header);
2831 int ret;
2832
2833 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2834 VMW_RES_DIRTY_NONE, user_surface_converter,
2835 &cmd->body.srcSid, NULL);
2836 if (ret != 0)
2837 return ret;
2838
2839 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2840 VMW_RES_DIRTY_SET, user_surface_converter,
2841 &cmd->body.destSid, NULL);
2842}
2843
2844/**
2845 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2846 *
2847 * @dev_priv: Pointer to a device private struct.
2848 * @sw_context: The software context being used for this batch.
2849 * @header: Pointer to the command header in the command stream.
2850 */
2851static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2852 struct vmw_sw_context *sw_context,
2853 SVGA3dCmdHeader *header)
2854{
2855 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2856 container_of(header, typeof(*cmd), header);
2857
2858 if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2859 return -EINVAL;
2860
2861 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2862 VMW_RES_DIRTY_SET, user_surface_converter,
2863 &cmd->body.surface.sid, NULL);
2864}
2865
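/**
 * vmw_cmd_sm5 - Validate commands that need only an SM5 capability check
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */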
2866static int vmw_cmd_sm5(struct vmw_private *dev_priv,
2867 struct vmw_sw_context *sw_context,
2868 SVGA3dCmdHeader *header)
2869{
2870 if (!has_sm5_context(dev_priv))
2871 return -EINVAL;
2872
2873 return 0;
2874}
2875
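/**
 * vmw_cmd_sm5_view_define - Validate an SM5 view define command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks for SM5 support and then defers to vmw_cmd_dx_view_define().
 */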
2876static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
2877 struct vmw_sw_context *sw_context,
2878 SVGA3dCmdHeader *header)
2879{
2880 if (!has_sm5_context(dev_priv))
2881 return -EINVAL;
2882
2883 return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
2884}
2885
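/**
 * vmw_cmd_sm5_view_remove - Validate an SM5 view remove command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Checks for SM5 support and then defers to vmw_cmd_dx_view_remove().
 */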
2886static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
2887 struct vmw_sw_context *sw_context,
2888 SVGA3dCmdHeader *header)
2889{
2890 if (!has_sm5_context(dev_priv))
2891 return -EINVAL;
2892
2893 return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
2894}
2895
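/**
 * vmw_cmd_clear_uav_uint - Validate SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */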
2896static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
2897 struct vmw_sw_context *sw_context,
2898 SVGA3dCmdHeader *header)
2899{
2900 struct {
2901 SVGA3dCmdHeader header;
2902 SVGA3dCmdDXClearUAViewUint body;
2903 } *cmd = container_of(header, typeof(*cmd), header);
2904 struct vmw_resource *ret;
2905
2906 if (!has_sm5_context(dev_priv))
2907 return -EINVAL;
2908
2909 ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2910 cmd->body.uaViewId);
2911
2912 return PTR_ERR_OR_ZERO(ret);
2913}
2914
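/**
 * vmw_cmd_clear_uav_float - Validate SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */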
2915static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
2916 struct vmw_sw_context *sw_context,
2917 SVGA3dCmdHeader *header)
2918{
2919 struct {
2920 SVGA3dCmdHeader header;
2921 SVGA3dCmdDXClearUAViewFloat body;
2922 } *cmd = container_of(header, typeof(*cmd), header);
2923 struct vmw_resource *ret;
2924
2925 if (!has_sm5_context(dev_priv))
2926 return -EINVAL;
2927
2928 ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2929 cmd->body.uaViewId);
2930
2931 return PTR_ERR_OR_ZERO(ret);
2932}
2933
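/**
 * vmw_cmd_set_uav - Validate SVGA_3D_CMD_DX_SET_UA_VIEWS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * The command body is followed by a variable number of SVGA3dUAViewId
 * entries; the count is derived from the header size.
 */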
2934static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
2935 struct vmw_sw_context *sw_context,
2936 SVGA3dCmdHeader *header)
2937{
2938 struct {
2939 SVGA3dCmdHeader header;
2940 SVGA3dCmdDXSetUAViews body;
2941 } *cmd = container_of(header, typeof(*cmd), header);
2942 u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2943 sizeof(SVGA3dUAViewId);
2944 int ret;
2945
2946 if (!has_sm5_context(dev_priv))
2947 return -EINVAL;
2948
2949 if (num_uav > vmw_max_num_uavs(dev_priv)) {
2950 VMW_DEBUG_USER("Invalid UAV binding.\n");
2951 return -EINVAL;
2952 }
2953
2954 ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2955 vmw_ctx_binding_uav, 0, (void *)&cmd[1],
2956 num_uav, 0);
2957 if (ret)
2958 return ret;
2959
2960 vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
2961 cmd->body.uavSpliceIndex);
2962
2963 return ret;
2964}
2965
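/**
 * vmw_cmd_set_cs_uav - Validate SVGA_3D_CMD_DX_SET_CS_UA_VIEWS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */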
2966static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
2967 struct vmw_sw_context *sw_context,
2968 SVGA3dCmdHeader *header)
2969{
2970 struct {
2971 SVGA3dCmdHeader header;
2972 SVGA3dCmdDXSetCSUAViews body;
2973 } *cmd = container_of(header, typeof(*cmd), header);
2974 u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2975 sizeof(SVGA3dUAViewId);
2976 int ret;
2977
2978 if (!has_sm5_context(dev_priv))
2979 return -EINVAL;
2980
2981 if (num_uav > vmw_max_num_uavs(dev_priv)) {
2982 VMW_DEBUG_USER("Invalid UAV binding.\n");
2983 return -EINVAL;
2984 }
2985
2986 ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2987 vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
2988 num_uav, 0);
2989 if (ret)
2990 return ret;
2991
2992 vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
2993 cmd->body.startIndex);
2994
2995 return ret;
2996}
2997
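/**
 * vmw_cmd_dx_define_streamoutput - Validate
 * SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */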
2998static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
2999 struct vmw_sw_context *sw_context,
3000 SVGA3dCmdHeader *header)
3001{
3002 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3003 struct vmw_resource *res;
3004 struct {
3005 SVGA3dCmdHeader header;
3006 SVGA3dCmdDXDefineStreamOutputWithMob body;
3007 } *cmd = container_of(header, typeof(*cmd), header);
3008 int ret;
3009
3010 if (!has_sm5_context(dev_priv))
3011 return -EINVAL;
3012
3013 if (!ctx_node) {
3014 DRM_ERROR("DX Context not set.\n");
3015 return -EINVAL;
3016 }
3017
3018 res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
3019 if (IS_ERR_OR_NULL(res))
3020 return res ? PTR_ERR(res) : -EINVAL;
3021 ret = vmw_cotable_notify(res, cmd->body.soid);
3022 if (ret)
3023 return ret;
3024
3025 return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
3026 cmd->body.soid,
3027 &sw_context->staged_cmd_res);
3028}
3029
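/**
 * vmw_cmd_dx_destroy_streamoutput - Validate
 * SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */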
3030static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
3031 struct vmw_sw_context *sw_context,
3032 SVGA3dCmdHeader *header)
3033{
3034 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3035 struct vmw_resource *res;
3036 struct {
3037 SVGA3dCmdHeader header;
3038 SVGA3dCmdDXDestroyStreamOutput body;
3039 } *cmd = container_of(header, typeof(*cmd), header);
3040
3041 if (!ctx_node) {
3042 DRM_ERROR("DX Context not set.\n");
3043 return -EINVAL;
3044 }
3045
	/*
	 * When the device does not support SM5, the streamoutput-with-mob
	 * commands are not available to user-space. Simply return in this
	 * case.
	 */
3050 if (!has_sm5_context(dev_priv))
3051 return 0;
3052
	/*
	 * On an SM5-capable device, a failed lookup means user-space probably
	 * used the old streamoutput define command. Return without an error.
	 */
3057 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3058 cmd->body.soid);
3059 if (IS_ERR(res))
3060 return 0;
3061
3062 return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
3063 &sw_context->staged_cmd_res);
3064}
3065
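/**
 * vmw_cmd_dx_bind_streamoutput - Validate SVGA_3D_CMD_DX_BIND_STREAMOUTPUT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */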
3066static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
3067 struct vmw_sw_context *sw_context,
3068 SVGA3dCmdHeader *header)
3069{
3070 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3071 struct vmw_resource *res;
3072 struct {
3073 SVGA3dCmdHeader header;
3074 SVGA3dCmdDXBindStreamOutput body;
3075 } *cmd = container_of(header, typeof(*cmd), header);
3076 int ret;
3077
3078 if (!has_sm5_context(dev_priv))
3079 return -EINVAL;
3080
3081 if (!ctx_node) {
3082 DRM_ERROR("DX Context not set.\n");
3083 return -EINVAL;
3084 }
3085
3086 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3087 cmd->body.soid);
3088 if (IS_ERR(res)) {
3089 DRM_ERROR("Could not find streamoutput to bind.\n");
3090 return PTR_ERR(res);
3091 }
3092
3093 vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
3094
3095 ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3096 vmw_val_add_flag_noctx);
3097 if (ret) {
3098 DRM_ERROR("Error creating resource validation node.\n");
3099 return ret;
3100 }
3101
3102 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
3103 &cmd->body.mobid,
3104 cmd->body.offsetInBytes);
3105}
3106
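/**
 * vmw_cmd_dx_set_streamoutput - Validate SVGA_3D_CMD_DX_SET_STREAMOUTPUT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */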
3107static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
3108 struct vmw_sw_context *sw_context,
3109 SVGA3dCmdHeader *header)
3110{
3111 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3112 struct vmw_resource *res;
3113 struct vmw_ctx_bindinfo_so binding;
3114 struct {
3115 SVGA3dCmdHeader header;
3116 SVGA3dCmdDXSetStreamOutput body;
3117 } *cmd = container_of(header, typeof(*cmd), header);
3118 int ret;
3119
3120 if (!ctx_node) {
3121 DRM_ERROR("DX Context not set.\n");
3122 return -EINVAL;
3123 }
3124
3125 if (cmd->body.soid == SVGA3D_INVALID_ID)
3126 return 0;
3127
	/*
	 * When the device does not support SM5, the streamoutput-with-mob
	 * commands are not available to user-space. Simply return in this
	 * case.
	 */
3132 if (!has_sm5_context(dev_priv))
3133 return 0;
3134
	/*
	 * On an SM5-capable device, a failed lookup means user-space probably
	 * used the old streamoutput define command. Return without an error.
	 */
3139 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3140 cmd->body.soid);
	if (IS_ERR(res))
		return 0;
3144
3145 ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3146 vmw_val_add_flag_noctx);
3147 if (ret) {
3148 DRM_ERROR("Error creating resource validation node.\n");
3149 return ret;
3150 }
3151
3152 binding.bi.ctx = ctx_node->ctx;
3153 binding.bi.res = res;
3154 binding.bi.bt = vmw_ctx_binding_so;
3155 binding.slot = 0; /* Only one SO set to context at a time. */
3156
3157 vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
3158 binding.slot);
3159
3160 return ret;
3161}
3162
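/**
 * vmw_cmd_indexed_instanced_indirect - Validate
 * SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */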
3163static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
3164 struct vmw_sw_context *sw_context,
3165 SVGA3dCmdHeader *header)
3166{
3167 struct vmw_draw_indexed_instanced_indirect_cmd {
3168 SVGA3dCmdHeader header;
3169 SVGA3dCmdDXDrawIndexedInstancedIndirect body;
3170 } *cmd = container_of(header, typeof(*cmd), header);
3171
3172 if (!has_sm5_context(dev_priv))
3173 return -EINVAL;
3174
3175 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3176 VMW_RES_DIRTY_NONE, user_surface_converter,
3177 &cmd->body.argsBufferSid, NULL);
3178}
3179
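/**
 * vmw_cmd_instanced_indirect - Validate
 * SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */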
3180static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
3181 struct vmw_sw_context *sw_context,
3182 SVGA3dCmdHeader *header)
3183{
3184 struct vmw_draw_instanced_indirect_cmd {
3185 SVGA3dCmdHeader header;
3186 SVGA3dCmdDXDrawInstancedIndirect body;
3187 } *cmd = container_of(header, typeof(*cmd), header);
3188
3189 if (!has_sm5_context(dev_priv))
3190 return -EINVAL;
3191
3192 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3193 VMW_RES_DIRTY_NONE, user_surface_converter,
3194 &cmd->body.argsBufferSid, NULL);
3195}
3196
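/**
 * vmw_cmd_dispatch_indirect - Validate SVGA_3D_CMD_DX_DISPATCH_INDIRECT
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */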
3197static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
3198 struct vmw_sw_context *sw_context,
3199 SVGA3dCmdHeader *header)
3200{
3201 struct vmw_dispatch_indirect_cmd {
3202 SVGA3dCmdHeader header;
3203 SVGA3dCmdDXDispatchIndirect body;
3204 } *cmd = container_of(header, typeof(*cmd), header);
3205
3206 if (!has_sm5_context(dev_priv))
3207 return -EINVAL;
3208
3209 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3210 VMW_RES_DIRTY_NONE, user_surface_converter,
3211 &cmd->body.argsBufferSid, NULL);
3212}
3213
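/**
 * vmw_cmd_check_not_3d - Validate a command from the non-3D SVGA command range
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: Bytes remaining in the command stream. Out: Size of this command.
 *
 * Non-3D commands are only allowed from kernel clients. For
 * SVGA_CMD_DEFINE_GMRFB, the GMRFB definition is checked as well.
 */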
3214static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3215 struct vmw_sw_context *sw_context,
3216 void *buf, uint32_t *size)
3217{
3218 uint32_t size_remaining = *size;
3219 uint32_t cmd_id;
3220
3221 cmd_id = ((uint32_t *)buf)[0];
3222 switch (cmd_id) {
3223 case SVGA_CMD_UPDATE:
3224 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3225 break;
3226 case SVGA_CMD_DEFINE_GMRFB:
3227 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3228 break;
3229 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3230 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3231 break;
3232 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
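		/* SVGAFifoCmdBlitScreenToGMRFB has the same size and layout. */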
3233 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3234 break;
3235 default:
3236 VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
3237 return -EINVAL;
3238 }
3239
3240 if (*size > size_remaining) {
3241 VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
3242 cmd_id);
3243 return -EINVAL;
3244 }
3245
3246 if (unlikely(!sw_context->kernel)) {
3247 VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
3248 return -EPERM;
3249 }
3250
3251 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3252 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3253
3254 return 0;
3255}
3256
3257static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3258 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3259 false, false, false),
3260 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3261 false, false, false),
3262 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3263 true, false, false),
3264 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3265 true, false, false),
3266 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3267 true, false, false),
3268 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3269 false, false, false),
3270 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3271 false, false, false),
3272 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3273 true, false, false),
3274 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3275 true, false, false),
3276 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3277 true, false, false),
3278 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3279 &vmw_cmd_set_render_target_check, true, false, false),
3280 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3281 true, false, false),
3282 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3283 true, false, false),
3284 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3285 true, false, false),
3286 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3287 true, false, false),
3288 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3289 true, false, false),
3290 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3291 true, false, false),
3292 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3293 true, false, false),
3294 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3295 false, false, false),
3296 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3297 true, false, false),
3298 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3299 true, false, false),
3300 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3301 true, false, false),
3302 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3303 true, false, false),
3304 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3305 true, false, false),
3306 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3307 true, false, false),
3308 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3309 true, false, false),
3310 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3311 true, false, false),
3312 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3313 true, false, false),
3314 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3315 true, false, false),
3316 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3317 &vmw_cmd_blt_surf_screen_check, false, false, false),
3318 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3319 false, false, false),
3320 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3321 false, false, false),
3322 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3323 false, false, false),
3324 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3325 false, false, false),
3326 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3327 false, false, false),
3328 VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
3329 false, false, false),
3330 VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
3331 false, false, false),
3332 VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
3333 VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
3334 VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
3335 VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
3336 VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
3337 VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
3338 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3339 false, false, true),
3340 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3341 false, false, true),
3342 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3343 false, false, true),
3344 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3345 false, false, true),
3346 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3347 false, false, true),
3348 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3349 false, false, true),
3350 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3351 false, false, true),
3352 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3353 false, false, true),
3354 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3355 true, false, true),
3356 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3357 false, false, true),
3358 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3359 true, false, true),
3360 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3361 &vmw_cmd_update_gb_surface, true, false, true),
3362 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3363 &vmw_cmd_readback_gb_image, true, false, true),
3364 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3365 &vmw_cmd_readback_gb_surface, true, false, true),
3366 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3367 &vmw_cmd_invalidate_gb_image, true, false, true),
3368 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3369 &vmw_cmd_invalidate_gb_surface, true, false, true),
3370 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3371 false, false, true),
3372 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3373 false, false, true),
3374 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3375 false, false, true),
3376 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3377 false, false, true),
3378 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3379 false, false, true),
3380 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3381 false, false, true),
3382 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3383 true, false, true),
3384 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3385 false, false, true),
3386 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3387 false, false, false),
3388 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3389 true, false, true),
3390 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3391 true, false, true),
3392 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3393 true, false, true),
3394 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3395 true, false, true),
3396 VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3397 true, false, true),
3398 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3399 false, false, true),
3400 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3401 false, false, true),
3402 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3403 false, false, true),
3404 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3405 false, false, true),
3406 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3407 false, false, true),
3408 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3409 false, false, true),
3410 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3411 false, false, true),
3412 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3413 false, false, true),
3414 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3415 false, false, true),
3416 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3417 false, false, true),
3418 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3419 true, false, true),
3420 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3421 false, false, true),
3422 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3423 false, false, true),
3424 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3425 false, false, true),
3426 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3427 false, false, true),
3428
3429 /* SM commands */
3430 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3431 false, false, true),
3432 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3433 false, false, true),
3434 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3435 false, false, true),
3436 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3437 false, false, true),
3438 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3439 false, false, true),
3440 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3441 &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3442 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3443 &vmw_cmd_dx_set_shader_res, true, false, true),
3444 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3445 true, false, true),
3446 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3447 true, false, true),
3448 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3449 true, false, true),
3450 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3451 true, false, true),
3452 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3453 true, false, true),
3454 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3455 &vmw_cmd_dx_cid_check, true, false, true),
3456 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3457 true, false, true),
3458 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3459 &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3460 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3461 &vmw_cmd_dx_set_index_buffer, true, false, true),
3462 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3463 &vmw_cmd_dx_set_rendertargets, true, false, true),
3464 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3465 true, false, true),
3466 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3467 &vmw_cmd_dx_cid_check, true, false, true),
3468 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3469 &vmw_cmd_dx_cid_check, true, false, true),
3470 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3471 true, false, true),
3472 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3473 true, false, true),
3474 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3475 true, false, true),
3476 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3477 &vmw_cmd_dx_cid_check, true, false, true),
3478 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3479 true, false, true),
3480 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3481 true, false, true),
3482 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3483 true, false, true),
3484 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3485 true, false, true),
3486 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3487 true, false, true),
3488 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3489 true, false, true),
3490 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3491 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3492 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3493 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3494 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3495 true, false, true),
3496 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3497 true, false, true),
3498 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3499 &vmw_cmd_dx_check_subresource, true, false, true),
3500 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3501 &vmw_cmd_dx_check_subresource, true, false, true),
3502 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3503 &vmw_cmd_dx_check_subresource, true, false, true),
3504 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3505 &vmw_cmd_dx_view_define, true, false, true),
3506 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3507 &vmw_cmd_dx_view_remove, true, false, true),
3508 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3509 &vmw_cmd_dx_view_define, true, false, true),
3510 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3511 &vmw_cmd_dx_view_remove, true, false, true),
3512 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3513 &vmw_cmd_dx_view_define, true, false, true),
3514 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3515 &vmw_cmd_dx_view_remove, true, false, true),
3516 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3517 &vmw_cmd_dx_so_define, true, false, true),
3518 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3519 &vmw_cmd_dx_cid_check, true, false, true),
3520 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3521 &vmw_cmd_dx_so_define, true, false, true),
3522 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3523 &vmw_cmd_dx_cid_check, true, false, true),
3524 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3525 &vmw_cmd_dx_so_define, true, false, true),
3526 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3527 &vmw_cmd_dx_cid_check, true, false, true),
3528 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3529 &vmw_cmd_dx_so_define, true, false, true),
3530 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3531 &vmw_cmd_dx_cid_check, true, false, true),
3532 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3533 &vmw_cmd_dx_so_define, true, false, true),
3534 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3535 &vmw_cmd_dx_cid_check, true, false, true),
3536 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3537 &vmw_cmd_dx_define_shader, true, false, true),
3538 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3539 &vmw_cmd_dx_destroy_shader, true, false, true),
3540 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3541 &vmw_cmd_dx_bind_shader, true, false, true),
3542 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3543 &vmw_cmd_dx_so_define, true, false, true),
3544 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3545 &vmw_cmd_dx_destroy_streamoutput, true, false, true),
3546 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
3547 &vmw_cmd_dx_set_streamoutput, true, false, true),
3548 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3549 &vmw_cmd_dx_set_so_targets, true, false, true),
3550 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3551 &vmw_cmd_dx_cid_check, true, false, true),
3552 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3553 &vmw_cmd_dx_cid_check, true, false, true),
3554 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3555 &vmw_cmd_buffer_copy_check, true, false, true),
3556 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3557 &vmw_cmd_pred_copy_check, true, false, true),
3558 VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3559 &vmw_cmd_dx_transfer_from_buffer,
3560 true, false, true),
3561 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
3562 &vmw_cmd_dx_set_constant_buffer_offset,
3563 true, false, true),
3564 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
3565 &vmw_cmd_dx_set_constant_buffer_offset,
3566 true, false, true),
3567 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
3568 &vmw_cmd_dx_set_constant_buffer_offset,
3569 true, false, true),
3570 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
3571 &vmw_cmd_dx_set_constant_buffer_offset,
3572 true, false, true),
3573 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
3574 &vmw_cmd_dx_set_constant_buffer_offset,
3575 true, false, true),
3576 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
3577 &vmw_cmd_dx_set_constant_buffer_offset,
3578 true, false, true),
3579 VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3580 true, false, true),
3581
3582 /*
3583 * SM5 commands
3584 */
3585 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
3586 true, false, true),
3587 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
3588 true, false, true),
3589 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
3590 true, false, true),
3591 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
3592 &vmw_cmd_clear_uav_float, true, false, true),
3593 VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
3594 false, true),
3595 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
3596 true),
3597 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
3598 &vmw_cmd_indexed_instanced_indirect, true, false, true),
3599 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
3600 &vmw_cmd_instanced_indirect, true, false, true),
3601 VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
3602 VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
3603 &vmw_cmd_dispatch_indirect, true, false, true),
3604 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
3605 false, true),
3606 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
3607 &vmw_cmd_sm5_view_define, true, false, true),
3608 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
3609 &vmw_cmd_dx_define_streamoutput, true, false, true),
3610 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
3611 &vmw_cmd_dx_bind_streamoutput, true, false, true),
3612 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
3613 &vmw_cmd_dx_so_define, true, false, true),
3614 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V4,
3615 &vmw_cmd_invalid, false, false, true),
3616};
3617
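/**
 * vmw_cmd_describe - Look up the name and size of a command in a buffer
 *
 * @buf: Pointer to the command.
 * @size: Out: Size of the command, including any header.
 * @cmd: Out: Name of the command, or "UNKNOWN".
 *
 * Returns true if the command was recognized, false otherwise.
 */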
3618bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3619{
3620 u32 cmd_id = ((u32 *) buf)[0];
3621
3622 if (cmd_id >= SVGA_CMD_MAX) {
3623 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3624 const struct vmw_cmd_entry *entry;
3625
3626 *size = header->size + sizeof(SVGA3dCmdHeader);
3627 cmd_id = header->id;
3628 if (cmd_id >= SVGA_3D_CMD_MAX)
3629 return false;
3630
3631 cmd_id -= SVGA_3D_CMD_BASE;
3632 entry = &vmw_cmd_entries[cmd_id];
3633 *cmd = entry->cmd_name;
3634 return true;
3635 }
3636
3637 switch (cmd_id) {
3638 case SVGA_CMD_UPDATE:
3639 *cmd = "SVGA_CMD_UPDATE";
3640 *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3641 break;
3642 case SVGA_CMD_DEFINE_GMRFB:
3643 *cmd = "SVGA_CMD_DEFINE_GMRFB";
3644 *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3645 break;
3646 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3647 *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3648 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3649 break;
3650 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3651 *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3652 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3653 break;
3654 default:
3655 *cmd = "UNKNOWN";
3656 *size = 0;
3657 return false;
3658 }
3659
3660 return true;
3661}
3662
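/**
 * vmw_cmd_check - Validate a single command in the command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: Bytes remaining in the command stream. Out: Size of this command.
 *
 * Looks up the command in vmw_cmd_entries, checks the privilege and
 * guest-backed requirements, and then calls the per-command verifier.
 */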
3663static int vmw_cmd_check(struct vmw_private *dev_priv,
3664 struct vmw_sw_context *sw_context, void *buf,
3665 uint32_t *size)
3666{
3667 uint32_t cmd_id;
3668 uint32_t size_remaining = *size;
3669 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3670 int ret;
3671 const struct vmw_cmd_entry *entry;
3672 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3673
3674 cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
3676 if (unlikely(cmd_id < SVGA_CMD_MAX))
3677 return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
3678
3680 cmd_id = header->id;
3681 *size = header->size + sizeof(SVGA3dCmdHeader);
3682
3683 cmd_id -= SVGA_3D_CMD_BASE;
3684 if (unlikely(*size > size_remaining))
3685 goto out_invalid;
3686
3687 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3688 goto out_invalid;
3689
3690 entry = &vmw_cmd_entries[cmd_id];
3691 if (unlikely(!entry->func))
3692 goto out_invalid;
3693
3694 if (unlikely(!entry->user_allow && !sw_context->kernel))
3695 goto out_privileged;
3696
3697 if (unlikely(entry->gb_disable && gb))
3698 goto out_old;
3699
3700 if (unlikely(entry->gb_enable && !gb))
3701 goto out_new;
3702
3703 ret = entry->func(dev_priv, sw_context, header);
3704 if (unlikely(ret != 0)) {
3705 VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3706 cmd_id + SVGA_3D_CMD_BASE, ret);
3707 return ret;
3708 }
3709
3710 return 0;
3711out_invalid:
3712 VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3713 cmd_id + SVGA_3D_CMD_BASE);
3714 return -EINVAL;
3715out_privileged:
3716 VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3717 cmd_id + SVGA_3D_CMD_BASE);
3718 return -EPERM;
3719out_old:
3720 VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3721 cmd_id + SVGA_3D_CMD_BASE);
3722 return -EINVAL;
3723out_new:
3724 VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3725 cmd_id + SVGA_3D_CMD_BASE);
3726 return -EINVAL;
3727}
3728
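/**
 * vmw_cmd_check_all - Validate all commands in the command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the start of the command stream.
 * @size: Size of the command stream in bytes.
 *
 * Repeatedly calls vmw_cmd_check() until the whole stream has been verified.
 */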
3729static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3730 struct vmw_sw_context *sw_context, void *buf,
3731 uint32_t size)
3732{
3733 int32_t cur_size = size;
3734 int ret;
3735
3736 sw_context->buf_start = buf;
3737
3738 while (cur_size > 0) {
3739 size = cur_size;
3740 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3741 if (unlikely(ret != 0))
3742 return ret;
3743 buf = (void *)((unsigned long) buf + size);
3744 cur_size -= size;
3745 }
3746
3747 if (unlikely(cur_size != 0)) {
3748 VMW_DEBUG_USER("Command verifier out of sync.\n");
3749 return -EINVAL;
3750 }
3751
3752 return 0;
3753}
3754
3755static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3756{
3757 /* Memory is validation context memory, so no need to free it */
3758 INIT_LIST_HEAD(&sw_context->bo_relocations);
3759}
3760
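/**
 * vmw_apply_relocations - Patch buffer-object locations in the command stream
 *
 * @sw_context: The software context holding the relocation list.
 *
 * For each relocation, write the validated buffer object's VRAM offset,
 * GMR id or MOB id into the command stream location it points to.
 */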
3761static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3762{
3763 struct vmw_relocation *reloc;
3764 struct ttm_buffer_object *bo;
3765
3766 list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3767 bo = &reloc->vbo->tbo;
3768 switch (bo->resource->mem_type) {
3769 case TTM_PL_VRAM:
3770 reloc->location->offset += bo->resource->start << PAGE_SHIFT;
3771 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3772 break;
3773 case VMW_PL_GMR:
3774 reloc->location->gmrId = bo->resource->start;
3775 break;
3776 case VMW_PL_MOB:
3777 *reloc->mob_loc = bo->resource->start;
3778 break;
3779 default:
3780 BUG();
3781 }
3782 }
3783 vmw_free_relocations(sw_context);
3784}
3785
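/**
 * vmw_resize_cmd_bounce - Grow the command bounce buffer if needed
 *
 * @sw_context: The software context owning the bounce buffer.
 * @size: Minimum required size in bytes.
 *
 * The buffer is grown geometrically to limit the number of reallocations.
 */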
3786static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3787 uint32_t size)
3788{
3789 if (likely(sw_context->cmd_bounce_size >= size))
3790 return 0;
3791
3792 if (sw_context->cmd_bounce_size == 0)
3793 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3794
3795 while (sw_context->cmd_bounce_size < size) {
3796 sw_context->cmd_bounce_size =
3797 PAGE_ALIGN(sw_context->cmd_bounce_size +
3798 (sw_context->cmd_bounce_size >> 1));
3799 }
3800
3801 vfree(sw_context->cmd_bounce);
3802 sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3803
3804 if (sw_context->cmd_bounce == NULL) {
3805 VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3806 sw_context->cmd_bounce_size = 0;
3807 return -ENOMEM;
3808 }
3809
3810 return 0;
3811}
3812
/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * @file_priv: Pointer to the calling file. Must be non-NULL if @p_handle is
 * non-NULL.
 * @dev_priv: Pointer to a device private struct.
 * @p_fence: Output pointer to the created fence object.
 * @p_handle: If non-NULL, a user-space handle for the fence is created and
 * returned here.
 *
 * Creates a fence object and submits a command stream marker. If this fails
 * for some reason, we sync the fifo and return NULL. It is then safe to fence
 * buffers with a NULL pointer.
 */
3824int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3825 struct vmw_private *dev_priv,
3826 struct vmw_fence_obj **p_fence,
3827 uint32_t *p_handle)
3828{
3829 uint32_t sequence;
3830 int ret;
3831 bool synced = false;
3832
3833 /* p_handle implies file_priv. */
3834 BUG_ON(p_handle != NULL && file_priv == NULL);
3835
3836 ret = vmw_cmd_send_fence(dev_priv, &sequence);
3837 if (unlikely(ret != 0)) {
3838 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3839 synced = true;
3840 }
3841
3842 if (p_handle != NULL)
3843 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3844 sequence, p_fence, p_handle);
3845 else
3846 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3847
3848 if (unlikely(ret != 0 && !synced)) {
3849 (void) vmw_fallback_wait(dev_priv, false, false, sequence,
3850 false, VMW_FENCE_WAIT_TIMEOUT);
3851 *p_fence = NULL;
3852 }
3853
3854 return ret;
3855}
3856
3857/**
3858 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3859 *
3860 * @dev_priv: Pointer to a vmw_private struct.
3861 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3862 * @ret: Return value from fence object creation.
3863 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3864 * the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
3868 *
3869 * This function copies fence information to user-space. If copying fails, the
3870 * user-space struct drm_vmw_fence_rep::error member is hopefully left
3871 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
3872 * will hopefully be detected.
3873 *
3874 * Also if copying fails, user-space will be unable to signal the fence object
3875 * so we wait for it immediately, and then unreference the user-space reference.
3876 */
3877int
3878vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3879 struct vmw_fpriv *vmw_fp, int ret,
3880 struct drm_vmw_fence_rep __user *user_fence_rep,
3881 struct vmw_fence_obj *fence, uint32_t fence_handle,
3882 int32_t out_fence_fd)
3883{
3884 struct drm_vmw_fence_rep fence_rep;
3885
3886 if (user_fence_rep == NULL)
3887 return 0;
3888
3889 memset(&fence_rep, 0, sizeof(fence_rep));
3890
3891 fence_rep.error = ret;
3892 fence_rep.fd = out_fence_fd;
3893 if (ret == 0) {
3894 BUG_ON(fence == NULL);
3895
3896 fence_rep.handle = fence_handle;
3897 fence_rep.seqno = fence->base.seqno;
3898 vmw_update_seqno(dev_priv);
3899 fence_rep.passed_seqno = dev_priv->last_read_seqno;
3900 }
3901
3902 /*
3903 * copy_to_user errors will be detected by user space not seeing
3904 * fence_rep::error filled in. Typically user-space would have pre-set
3905 * that member to -EFAULT.
3906 */
3907 ret = copy_to_user(user_fence_rep, &fence_rep,
3908 sizeof(fence_rep));
3909
3910 /*
3911 * User-space lost the fence object. We need to sync and unreference the
3912 * handle.
3913 */
3914 if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3915 ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
3916 VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3917 (void) vmw_fence_obj_wait(fence, false, false,
3918 VMW_FENCE_WAIT_TIMEOUT);
3919 }
3920
3921 return ret ? -EFAULT : 0;
3922}
3923
3924/**
3925 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3926 *
3927 * @dev_priv: Pointer to a device private structure.
3928 * @kernel_commands: Pointer to the unpatched command batch.
3929 * @command_size: Size of the unpatched command batch.
3930 * @sw_context: Structure holding the relocation lists.
3931 *
3932 * Side effects: If this function returns 0, then the command batch pointed to
3933 * by @kernel_commands will have been modified.
3934 */
3935static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3936 void *kernel_commands, u32 command_size,
3937 struct vmw_sw_context *sw_context)
3938{
3939 void *cmd;
3940
3941 if (sw_context->dx_ctx_node)
3942 cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
3943 sw_context->dx_ctx_node->ctx->id);
3944 else
3945 cmd = VMW_CMD_RESERVE(dev_priv, command_size);
3946
3947 if (!cmd)
3948 return -ENOMEM;
3949
3950 vmw_apply_relocations(sw_context);
3951 memcpy(cmd, kernel_commands, command_size);
3952 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3953 vmw_resource_relocations_free(&sw_context->res_relocations);
3954 vmw_cmd_commit(dev_priv, command_size);
3955
3956 return 0;
3957}
3958
3959/**
3960 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3961 * command buffer manager.
3962 *
3963 * @dev_priv: Pointer to a device private structure.
3964 * @header: Opaque handle to the command buffer allocation.
3965 * @command_size: Size of the unpatched command batch.
3966 * @sw_context: Structure holding the relocation lists.
3967 *
3968 * Side effects: If this function returns 0, then the command buffer represented
3969 * by @header will have been modified.
3970 */
3971static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3972 struct vmw_cmdbuf_header *header,
3973 u32 command_size,
3974 struct vmw_sw_context *sw_context)
3975{
3976 u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3977 SVGA3D_INVALID_ID);
3978 void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3979 header);
3980
3981 vmw_apply_relocations(sw_context);
3982 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3983 vmw_resource_relocations_free(&sw_context->res_relocations);
3984 vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3985
3986 return 0;
3987}
3988
/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel-space pointer to the commands, or NULL. If command
 * buffers cannot be used, the function returns this value unchanged; it may
 * be NULL, in which case *@header is set to NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and copies
 * the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 *
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
4013static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
4014 void __user *user_commands,
4015 void *kernel_commands, u32 command_size,
4016 struct vmw_cmdbuf_header **header)
4017{
4018 size_t cmdbuf_size;
4019 int ret;
4020
4021 *header = NULL;
4022 if (command_size > SVGA_CB_MAX_SIZE) {
4023 VMW_DEBUG_USER("Command buffer is too large.\n");
4024 return ERR_PTR(-EINVAL);
4025 }
4026
4027 if (!dev_priv->cman || kernel_commands)
4028 return kernel_commands;
4029
4030 /* If possible, add a little space for fencing. */
4031 cmdbuf_size = command_size + 512;
4032 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
4033 kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
4034 header);
4035 if (IS_ERR(kernel_commands))
4036 return kernel_commands;
4037
4038 ret = copy_from_user(kernel_commands, user_commands, command_size);
4039 if (ret) {
4040 VMW_DEBUG_USER("Failed copying commands.\n");
4041 vmw_cmdbuf_header_free(*header);
4042 *header = NULL;
4043 return ERR_PTR(-EFAULT);
4044 }
4045
4046 return kernel_commands;
4047}
4048
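/**
 * vmw_execbuf_tie_context - Set up the DX context for a command batch
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID.
 *
 * Looks up the DX context, adds it to the validation list and makes it the
 * batch's current context by setting up @sw_context->dx_ctx_node and
 * @sw_context->man.
 */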
4049static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4050 struct vmw_sw_context *sw_context,
4051 uint32_t handle)
4052{
4053 struct vmw_resource *res;
4054 int ret;
4055 unsigned int size;
4056
4057 if (handle == SVGA3D_INVALID_ID)
4058 return 0;
4059
4060 size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
4061 ret = vmw_validation_preload_res(sw_context->ctx, size);
4062 if (ret)
4063 return ret;
4064
4065 ret = vmw_user_resource_lookup_handle
4066 (dev_priv, sw_context->fp->tfile, handle,
4067 user_context_converter, &res);
4068 if (ret != 0) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
4070 (unsigned int) handle);
4071 return ret;
4072 }
4073
4074 ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
4075 vmw_val_add_flag_none);
4076 if (unlikely(ret != 0)) {
4077 vmw_resource_unreference(&res);
4078 return ret;
4079 }
4080
4081 sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
4082 sw_context->man = vmw_context_res_man(res);
4083
4084 vmw_resource_unreference(&res);
4085 return 0;
4086}
4087
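/**
 * vmw_execbuf_process - Validate and submit a user-space command batch
 *
 * @file_priv: Pointer to the calling file.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space pointer to the command batch, or NULL.
 * @kernel_commands: Kernel-space pointer to the command batch, or NULL.
 * @command_size: Size of the command batch in bytes.
 * @throttle_us: Deprecated. Throttling is no longer supported.
 * @dx_context_handle: Handle of the DX context to tie the batch to, or
 * SVGA3D_INVALID_ID.
 * @user_fence_rep: User-space address to receive fence information, or NULL.
 * @out_fence: If non-NULL, the created fence is returned here instead of
 * being unreferenced.
 * @flags: DRM_VMW_EXECBUF_FLAG_* flags.
 *
 * Validates the command batch, applies relocations, submits it to the device
 * and fences the affected buffer objects.
 */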
4088int vmw_execbuf_process(struct drm_file *file_priv,
4089 struct vmw_private *dev_priv,
4090 void __user *user_commands, void *kernel_commands,
4091 uint32_t command_size, uint64_t throttle_us,
4092 uint32_t dx_context_handle,
4093 struct drm_vmw_fence_rep __user *user_fence_rep,
4094 struct vmw_fence_obj **out_fence, uint32_t flags)
4095{
4096 struct vmw_sw_context *sw_context = &dev_priv->ctx;
4097 struct vmw_fence_obj *fence = NULL;
4098 struct vmw_cmdbuf_header *header;
4099 uint32_t handle = 0;
4100 int ret;
4101 int32_t out_fence_fd = -1;
4102 struct sync_file *sync_file = NULL;
4103 DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);
4104
4105 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4106 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
4107 if (out_fence_fd < 0) {
4108 VMW_DEBUG_USER("Failed to get a fence fd.\n");
4109 return out_fence_fd;
4110 }
4111 }
4112
	if (throttle_us)
		VMW_DEBUG_USER("Throttling is no longer supported.\n");
4116
4117 kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4118 kernel_commands, command_size,
4119 &header);
4120 if (IS_ERR(kernel_commands)) {
4121 ret = PTR_ERR(kernel_commands);
4122 goto out_free_fence_fd;
4123 }
4124
4125 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4126 if (ret) {
4127 ret = -ERESTARTSYS;
4128 goto out_free_header;
4129 }
4130
4131 sw_context->kernel = false;
4132 if (kernel_commands == NULL) {
4133 ret = vmw_resize_cmd_bounce(sw_context, command_size);
4134 if (unlikely(ret != 0))
4135 goto out_unlock;
4136
4137 ret = copy_from_user(sw_context->cmd_bounce, user_commands,
4138 command_size);
4139 if (unlikely(ret != 0)) {
4140 ret = -EFAULT;
4141 VMW_DEBUG_USER("Failed copying commands.\n");
4142 goto out_unlock;
4143 }
4144
4145 kernel_commands = sw_context->cmd_bounce;
4146 } else if (!header) {
4147 sw_context->kernel = true;
4148 }
4149
4150 sw_context->filp = file_priv;
4151 sw_context->fp = vmw_fpriv(file_priv);
4152 INIT_LIST_HEAD(&sw_context->ctx_list);
4153 sw_context->cur_query_bo = dev_priv->pinned_bo;
4154 sw_context->last_query_ctx = NULL;
4155 sw_context->needs_post_query_barrier = false;
4156 sw_context->dx_ctx_node = NULL;
4157 sw_context->dx_query_mob = NULL;
4158 sw_context->dx_query_ctx = NULL;
4159 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4160 INIT_LIST_HEAD(&sw_context->res_relocations);
4161 INIT_LIST_HEAD(&sw_context->bo_relocations);
4162
4163 if (sw_context->staged_bindings)
4164 vmw_binding_state_reset(sw_context->staged_bindings);
4165
4166 INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4167 sw_context->ctx = &val_ctx;
4168 ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4169 if (unlikely(ret != 0))
4170 goto out_err_nores;
4171
4172 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4173 command_size);
4174 if (unlikely(ret != 0))
4175 goto out_err_nores;
4176
4177 ret = vmw_resources_reserve(sw_context);
4178 if (unlikely(ret != 0))
4179 goto out_err_nores;
4180
4181 ret = vmw_validation_bo_reserve(&val_ctx, true);
4182 if (unlikely(ret != 0))
4183 goto out_err_nores;
4184
4185 ret = vmw_validation_bo_validate(&val_ctx, true);
4186 if (unlikely(ret != 0))
4187 goto out_err;
4188
4189 ret = vmw_validation_res_validate(&val_ctx, true);
4190 if (unlikely(ret != 0))
4191 goto out_err;
4192
4193 vmw_validation_drop_ht(&val_ctx);
4194
4195 ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4196 if (unlikely(ret != 0)) {
4197 ret = -ERESTARTSYS;
4198 goto out_err;
4199 }
4200
4201 if (dev_priv->has_mob) {
4202 ret = vmw_rebind_contexts(sw_context);
4203 if (unlikely(ret != 0))
4204 goto out_unlock_binding;
4205 }
4206
4207 if (!header) {
4208 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4209 command_size, sw_context);
4210 } else {
4211 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4212 sw_context);
4213 header = NULL;
4214 }
4215 mutex_unlock(&dev_priv->binding_mutex);
4216 if (ret)
4217 goto out_err;
4218
4219 vmw_query_bo_switch_commit(dev_priv, sw_context);
4220 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
4221 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_execbuf_fence_commands() will sync. The error will be
	 * propagated to user-space in @user_fence_rep.
	 */
4227 if (ret != 0)
4228 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
4229
4230 vmw_execbuf_bindings_commit(sw_context, false);
4231 vmw_bind_dx_query_mob(sw_context);
4232 vmw_validation_res_unreserve(&val_ctx, false);
4233
4234 vmw_validation_bo_fence(sw_context->ctx, fence);
4235
4236 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4237 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
4238
4239 /*
4240 * If anything fails here, give up trying to export the fence and do a
4241 * sync since the user mode will not be able to sync the fence itself.
4242 * This ensures we are still functionally correct.
4243 */
4244 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4246 sync_file = sync_file_create(&fence->base);
4247 if (!sync_file) {
4248 VMW_DEBUG_USER("Sync file create failed for fence\n");
4249 put_unused_fd(out_fence_fd);
4250 out_fence_fd = -1;
4251
4252 (void) vmw_fence_obj_wait(fence, false, false,
4253 VMW_FENCE_WAIT_TIMEOUT);
4254 }
4255 }
4256
4257 ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4258 user_fence_rep, fence, handle, out_fence_fd);
4259
4260 if (sync_file) {
4261 if (ret) {
4262 /* usercopy of fence failed, put the file object */
4263 fput(sync_file->file);
4264 put_unused_fd(out_fence_fd);
4265 } else {
4266 /* Link the fence with the FD created earlier */
4267 fd_install(out_fence_fd, sync_file->file);
4268 }
4269 }
4270
4271 /* Don't unreference when handing fence out */
4272 if (unlikely(out_fence != NULL)) {
4273 *out_fence = fence;
4274 fence = NULL;
4275 } else if (likely(fence != NULL)) {
4276 vmw_fence_obj_unreference(&fence);
4277 }
4278
4279 vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4280 mutex_unlock(&dev_priv->cmdbuf_mutex);
4281
4282 /*
4283 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4284 * in resource destruction paths.
4285 */
4286 vmw_validation_unref_lists(&val_ctx);
4287
4288 return ret;
4289
4290out_unlock_binding:
4291 mutex_unlock(&dev_priv->binding_mutex);
4292out_err:
4293 vmw_validation_bo_backoff(&val_ctx);
4294out_err_nores:
4295 vmw_execbuf_bindings_commit(sw_context, true);
4296 vmw_validation_res_unreserve(&val_ctx, true);
4297 vmw_resource_relocations_free(&sw_context->res_relocations);
4298 vmw_free_relocations(sw_context);
4299 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4300 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4301out_unlock:
4302 vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4303 vmw_validation_drop_ht(&val_ctx);
4304 WARN_ON(!list_empty(&sw_context->ctx_list));
4305 mutex_unlock(&dev_priv->cmdbuf_mutex);
4306
4307 /*
4308 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4309 * in resource destruction paths.
4310 */
4311 vmw_validation_unref_lists(&val_ctx);
4312out_free_header:
4313 if (header)
4314 vmw_cmdbuf_header_free(header);
4315out_free_fence_fd:
4316 if (out_fence_fd >= 0)
4317 put_unused_fd(out_fence_fd);
4318
4319 return ret;
4320}
4321
4322/**
4323 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4324 *
4325 * @dev_priv: The device private structure.
4326 *
4327 * This function is called to idle the fifo and unpin the query buffer if the
4328 * normal way to do this hits an error, which should typically be extremely
4329 * rare.
4330 */
4331static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4332{
4333 VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
4334
4335 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4336 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4337 if (dev_priv->dummy_query_bo_pinned) {
4338 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4339 dev_priv->dummy_query_bo_pinned = false;
4340 }
4341}
4342
4344/**
4345 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
4346 * bo.
4347 *
4348 * @dev_priv: The device private structure.
 * @fence: If non-NULL, should point to a struct vmw_fence_obj issued _after_ a
4350 * query barrier that flushes all queries touching the current buffer pointed to
4351 * by @dev_priv->pinned_bo
4352 *
4353 * This function should be used to unpin the pinned query bo, or as a query
4354 * barrier when we need to make sure that all queries have finished before the
4355 * next fifo command. (For example on hardware context destructions where the
4356 * hardware may otherwise leak unfinished queries).
4357 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
4360 *
4361 * The function will synchronize on the previous query barrier, and will thus
4362 * not finish until that barrier has executed.
4363 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
 * calling this function.
4366 */
4367void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4368 struct vmw_fence_obj *fence)
4369{
4370 int ret = 0;
4371 struct vmw_fence_obj *lfence = NULL;
4372 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
4373
4374 if (dev_priv->pinned_bo == NULL)
4375 goto out_unlock;
4376
4377 vmw_bo_placement_set(dev_priv->pinned_bo,
4378 VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
4379 VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
4380 ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo);
4381 if (ret)
4382 goto out_no_reserve;
4383
4384 vmw_bo_placement_set(dev_priv->dummy_query_bo,
4385 VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
4386 VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
4387 ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo);
4388 if (ret)
4389 goto out_no_reserve;
4390
4391 ret = vmw_validation_bo_reserve(&val_ctx, false);
4392 if (ret)
4393 goto out_no_reserve;
4394
4395 if (dev_priv->query_cid_valid) {
4396 BUG_ON(fence != NULL);
4397 ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
4398 if (ret)
4399 goto out_no_emit;
4400 dev_priv->query_cid_valid = false;
4401 }
4402
4403 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4404 if (dev_priv->dummy_query_bo_pinned) {
4405 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4406 dev_priv->dummy_query_bo_pinned = false;
4407 }
4408 if (fence == NULL) {
4409 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4410 NULL);
4411 fence = lfence;
4412 }
4413 vmw_validation_bo_fence(&val_ctx, fence);
4414 if (lfence != NULL)
4415 vmw_fence_obj_unreference(&lfence);
4416
4417 vmw_validation_unref_lists(&val_ctx);
4418 vmw_bo_unreference(&dev_priv->pinned_bo);
4419
4420out_unlock:
4421 return;
4422out_no_emit:
4423 vmw_validation_bo_backoff(&val_ctx);
4424out_no_reserve:
4425 vmw_validation_unref_lists(&val_ctx);
4426 vmw_execbuf_unpin_panic(dev_priv);
4427 vmw_bo_unreference(&dev_priv->pinned_bo);
4428}
4429
4430/**
4431 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
4432 *
4433 * @dev_priv: The device private structure.
4434 *
4435 * This function should be used to unpin the pinned query bo, or as a query
4436 * barrier when we need to make sure that all queries have finished before the
4437 * next fifo command. (For example on hardware context destructions where the
4438 * hardware may otherwise leak unfinished queries).
4439 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
4442 *
4443 * The function will synchronize on the previous query barrier, and will thus
4444 * not finish until that barrier has executed.
4445 */
4446void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4447{
4448 mutex_lock(&dev_priv->cmdbuf_mutex);
4449 if (dev_priv->query_cid_valid)
4450 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4451 mutex_unlock(&dev_priv->cmdbuf_mutex);
4452}
4453
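/**
 * vmw_execbuf_ioctl - ioctl entry point for command submission
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_execbuf_arg.
 * @file_priv: Pointer to the calling file.
 *
 * Optionally waits on an imported fence fd, then hands the batch over to
 * vmw_execbuf_process().
 */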
4454int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
4455 struct drm_file *file_priv)
4456{
4457 struct vmw_private *dev_priv = vmw_priv(dev);
4458 struct drm_vmw_execbuf_arg *arg = data;
4459 int ret;
4460 struct dma_fence *in_fence = NULL;
4461
4462 MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
4463 MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);
4464
4465 /*
4466 * Extend the ioctl argument while maintaining backwards compatibility:
4467 * We take different code paths depending on the value of arg->version.
4468 *
4469 * Note: The ioctl argument is extended and zeropadded by core DRM.
4470 */
4471 if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
4472 arg->version == 0)) {
4473 VMW_DEBUG_USER("Incorrect execbuf version.\n");
4474 ret = -EINVAL;
4475 goto mksstats_out;
4476 }
4477
4478 switch (arg->version) {
4479 case 1:
		/* For v1, core DRM has extended + zeropadded the data. */
4481 arg->context_handle = (uint32_t) -1;
4482 break;
4483 case 2:
4484 default:
		/* For v2 and later, core DRM has correctly copied it. */
4486 break;
4487 }
4488
4489 /* If imported a fence FD from elsewhere, then wait on it */
4490 if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4491 in_fence = sync_file_get_fence(arg->imported_fence_fd);
4492
4493 if (!in_fence) {
4494 VMW_DEBUG_USER("Cannot get imported fence\n");
4495 ret = -EINVAL;
4496 goto mksstats_out;
4497 }
4498
4499 ret = dma_fence_wait(in_fence, true);
4500 if (ret)
4501 goto out;
4502 }
4503
4504 ret = vmw_execbuf_process(file_priv, dev_priv,
4505 (void __user *)(unsigned long)arg->commands,
4506 NULL, arg->command_size, arg->throttle_us,
4507 arg->context_handle,
4508 (void __user *)(unsigned long)arg->fence_rep,
4509 NULL, arg->flags);
4510
4511 if (unlikely(ret != 0))
4512 goto out;
4513
4514 vmw_kms_cursor_post_execbuf(dev_priv);
4515
4516out:
4517 if (in_fence)
4518 dma_fence_put(in_fence);
4519
4520mksstats_out:
4521 MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
4522 return ret;
4523}