/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? 0 : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

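/*
 * vmw_cmd_cid_check - Verify the context id embedded in a command header.
 *
 * Looks up the context id following the header and caches the result,
 * so repeated commands referring to the same context are only checked
 * once per submission.
 */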
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
		return 0;

	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use context %u\n",
			  (unsigned) cmd->cid);
		return ret;
	}

	sw_context->last_cid = cmd->cid;
	sw_context->cid_valid = true;

	return 0;
}

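/*
 * vmw_cmd_sid_check - Verify and translate a surface id in the command
 * stream.
 *
 * Replaces the user-space surface id pointed to by @sid with the device
 * surface id, caching the last translation to avoid repeated lookups
 * within one submission.
 */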
static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     uint32_t *sid)
{
	if (*sid == SVGA3D_INVALID_ID)
		return 0;

	if (unlikely((!sw_context->sid_valid ||
		      *sid != sw_context->last_sid))) {
		int real_id;
		int ret = vmw_surface_check(dev_priv, sw_context->tfile,
					    *sid, &real_id);

		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not find or use surface 0x%08x "
				  "address 0x%08lx\n",
				  (unsigned int) *sid,
				  (unsigned long) sid);
			return ret;
		}

		sw_context->last_sid = *sid;
		sw_context->sid_valid = true;
		*sid = real_id;
		sw_context->sid_translation = real_id;
	} else
		*sid = sw_context->sid_translation;

	return 0;
}


static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
	return ret;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}

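/*
 * vmw_translate_guest_ptr - Look up the GMR backing a guest pointer and
 * queue it for relocation and validation.
 *
 * On success, a relocation entry pointing at @ptr is recorded and the
 * backing buffer object is added to the validation list (once per
 * buffer). The returned buffer object holds a reference that the caller
 * must drop with vmw_dmabuf_unreference().
 */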
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	uint32_t cur_validate_node;
	struct ttm_validate_buffer *val_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number of relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
	if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
		DRM_ERROR("Max number of DMA buffers per submission"
			  " exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc->index = cur_validate_node;
	if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
		val_buf = &sw_context->val_bufs[cur_validate_node];
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->new_sync_obj_arg = (void *) dev_priv;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		++sw_context->cur_val_buf;
	}
	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}


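/*
 * vmw_cmd_dma - Validate a SVGA_3D_CMD_SURFACE_DMA command.
 *
 * Translates the guest pointer to the backing buffer and patches the
 * command stream with the device surface id before letting the kms code
 * snoop cursor updates.
 */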
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	bo = &vmw_bo->base;
	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
					     cmd->dma.host.sid, &srf);
	if (ret) {
		DRM_ERROR("could not find surface\n");
		goto out_no_reloc;
	}

	/*
	 * Patch command stream with device SID.
	 */
	cmd->dma.host.sid = srf->res.id;
	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
	/*
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */
	vmw_surface_unreference(&srf);

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

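/*
 * vmw_cmd_draw - Validate a SVGA_3D_CMD_DRAW_PRIMITIVES command.
 *
 * The command body is followed in the stream by numVertexDecls
 * SVGA3dVertexDecl entries and then numRanges SVGA3dPrimitiveRange
 * entries:
 *
 *   | header | body | decl 0 .. decl n-1 | range 0 .. range m-1 |
 *
 * Both counts are bounds-checked against the size announced in the
 * header, and every embedded surface id is checked and translated.
 */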
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&decl->array.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&range->indexArray.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}


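/*
 * vmw_cmd_tex_state - Validate a SVGA_3D_CMD_SETTEXTURESTATE command.
 *
 * Walks the variable-length array of SVGA3dTextureState entries that
 * follows the header and checks the surface id of every
 * SVGA3D_TS_BIND_TEXTURE entry.
 */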
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	};

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
		((unsigned long) header + header->size + sizeof(*header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&cur_state->value);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}


typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);

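/*
 * Dispatch table for SVGA3D command verifiers. Entries are indexed by
 * (command id - SVGA_3D_CMD_BASE), which is what the VMW_CMD_DEF
 * designated-initializer macro below encodes.
 */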
#define VMW_CMD_DEF(cmd, func) \
	[cmd - SVGA_3D_CMD_BASE] = func

static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check)
};

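/*
 * vmw_cmd_check - Verify a single command in the command stream.
 *
 * On entry, *size holds the number of bytes remaining in the stream; on
 * successful return it holds the size of the command just checked, so
 * the caller can advance to the next one. SVGA_CMD_UPDATE is the only
 * non-3D command accepted; everything else is dispatched through
 * vmw_cmd_funcs by its 3D command id.
 */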
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;

	cmd_id = ((uint32_t *)buf)[0];
	if (cmd_id == SVGA_CMD_UPDATE) {
		*size = 5 << 2;
		return 0;
	}

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_err;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_err;

	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_err;

	return 0;
out_err:
	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

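/*
 * vmw_cmd_check_all - Verify an entire command stream.
 *
 * Repeatedly calls vmw_cmd_check() until the stream is exhausted,
 * failing if any command is invalid or if the last command straddles
 * the end of the buffer.
 */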
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf, uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}

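/*
 * vmw_apply_relocations - Patch guest pointers with final placements.
 *
 * For every recorded relocation, writes either the VRAM framebuffer
 * offset or the GMR id of the validated buffer back into the command
 * stream location noted during parsing.
 */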
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index];
		bo = validate->bo;
		if (bo->mem.mem_type == TTM_PL_VRAM) {
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
		} else
			reloc->location->gmrId = bo->mem.start;
	}
	vmw_free_relocations(sw_context);
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry, *next;

	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 head) {
		list_del(&entry->head);
		vmw_dmabuf_validate_clear(entry->bo);
		ttm_bo_unref(&entry->bo);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);
}

static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo)
{
	int ret;

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
	return ret;
}


static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

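/*
 * vmw_execbuf_ioctl - Entry point for user-space command submission.
 *
 * Reserves fifo space, copies the user command stream into it, runs the
 * command verifier, reserves and validates all referenced buffers,
 * applies relocations, optionally throttles, then commits the stream
 * and fences the submission, reporting the fence back to user space
 * through the supplied drm_vmw_fence_rep pointer.
 */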
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct drm_vmw_fence_rep fence_rep;
	struct drm_vmw_fence_rep __user *user_fence_rep;
	int ret;
	void *user_cmd;
	void *cmd;
	uint32_t sequence;
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_master *vmaster = vmw_master(file_priv->master);

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_no_cmd_mutex;
	}

	cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_unlock;
	}

	user_cmd = (void __user *)(unsigned long)arg->commands;
	ret = copy_from_user(cmd, user_cmd, arg->command_size);

	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		DRM_ERROR("Failed copying commands.\n");
		goto out_commit;
	}

	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cid_valid = false;
	sw_context->sid_valid = false;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;

	INIT_LIST_HEAD(&sw_context->validate_nodes);

	ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
	if (unlikely(ret != 0))
		goto out_err;
	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_apply_relocations(sw_context);

	if (arg->throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
				   arg->throttle_us);

		if (unlikely(ret != 0))
			goto out_err;
	}

	vmw_fifo_commit(dev_priv, arg->command_size);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);

	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
				    (void *)(unsigned long) sequence);
	vmw_clear_validations(sw_context);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync.
	 */
	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	fence_rep.error = ret;
	fence_rep.fence_seq = (uint64_t) sequence;
	fence_rep.pad64 = 0;

	user_fence_rep = (struct drm_vmw_fence_rep __user *)
		(unsigned long)arg->fence_rep;

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));

	vmw_kms_cursor_post_execbuf(dev_priv);
	ttm_read_unlock(&vmaster->lock);
	return 0;
out_err:
	vmw_free_relocations(sw_context);
	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
	vmw_clear_validations(sw_context);
out_commit:
	vmw_fifo_commit(dev_priv, 0);
out_unlock:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
out_no_cmd_mutex:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2022 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/sync_file.h>
#include <linux/hashtable.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include "vmwgfx_mksstat.h"


/*
 * Helper macro to get dx_ctx_node if available, otherwise print an error
 * message. This is for use in command verifier functions where, if
 * dx_ctx_node is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context) \
({ \
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
		__sw_context->dx_ctx_node; \
	}); \
})

#define VMW_DECLARE_CMD_VAR(__var, __type) \
	struct { \
		SVGA3dCmdHeader header; \
		__type body; \
	} __var

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_buffer_object *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable), #_cmd}

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added
 * to the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - Calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node. Typically the binding manager
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

enum vmw_val_add_flags {
	vmw_val_add_flag_none  = 0,
	vmw_val_add_flag_noctx = 1 << 0,
};

/**
 * vmw_execbuf_res_val_add - Add a resource to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 * @flags: Specifies whether to use the context or not.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   u32 dirty,
				   u32 flags)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	if ((flags & vmw_val_add_flag_noctx) != 0) {
		ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
						  (void **)&ctx_info, NULL);
		if (ret)
			return ret;

	} else {
		priv_size = vmw_execbuf_res_size(dev_priv, res_type);
		ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
						  dirty, (void **)&ctx_info,
						  &first_usage);
		if (ret)
			return ret;

		if (priv_size && first_usage) {
			ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
						      ctx_info);
			if (ret) {
				VMW_DEBUG_USER("Failed first usage context setup.\n");
				return ret;
			}
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
				      vmw_view_dirtying(view), vmw_val_add_flag_noctx);
	if (ret)
		return ret;

	return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
				       vmw_val_add_flag_noctx);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's
 * pointing to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	/* Add all cotables to the validation list. */
	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < cotable_max; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_val_add(sw_context, res,
						      VMW_RES_DIRTY_SET,
						      vmw_val_add_flag_noctx);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_val_add(sw_context, entry->res,
						      vmw_binding_dirtying(entry->bt),
						      vmw_val_add_flag_noctx);
		if (unlikely(ret != 0))
			break;
	}

	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_buffer_object *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob)
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob, true, false);
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_buffer_object *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on
 * the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource validation node. Populated on
 * exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret = 0;
	bool needs_unref = false;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		ret = vmw_user_resource_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
		if (ret != 0) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return ret;
		}
		needs_unref = true;

		ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
		if (unlikely(ret != 0))
			goto res_check_done;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

res_check_done:
	if (needs_unref)
		vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_buffer_object *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.resource->start;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command
 * submission context's resource cache and hence the last resource of that
 * type to be processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

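/*
 * Checks a SVGA_3D_CMD_SETRENDERTARGET command: validates the render
 * target type, the context and the surface, and, when guest-backed
 * objects are in use, tracks the render-target binding in the context's
 * staged binding state.
 */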
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If
 * so, the function prepares the state of @sw_context for switching pinned
 * buffers after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(PFN_UP(new_query_bo->base.resource->size) > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo,
						    dev_priv->has_mob, false);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo,
					    dev_priv->has_mob, false);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the old
 * query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy
			 * queries in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
		return ret;
	}
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
	ttm_bo_put(&vmw_bo->base);
	drm_gem_object_put(&vmw_bo->base.base);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry a
 * non-reference-counted pointer to the DMA buffer identified by the user-space
 * handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_buffer_object **vmw_bo_p)
{
	struct vmw_buffer_object *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
		return ret;
	}
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
	ttm_bo_put(&vmw_bo->base);
	drm_gem_object_put(&vmw_bo->base.base);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_buffer_object *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by cmd->body.mobid and put it on the
	 * relocation list so its kernel mode MOB ID can be filled in later.
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

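/**
 * vmw_cmd_dma - Validate SVGA_3D_CMD_SURFACE_DMA command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */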
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_buffer_object *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.base.size;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);

	return 0;
}

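/**
 * vmw_cmd_draw - Validate SVGA_3D_CMD_DRAW_PRIMITIVES command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */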
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, typeof(*cmd), header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		VMW_DEBUG_USER("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

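/**
 * vmw_cmd_tex_state - Validate SVGA_3D_CMD_SETTEXTURESTATE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */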
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
				       (unsigned int) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}

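/**
 * vmw_cmd_check_define_gmrfb - Validate SVGA_CMD_DEFINE_GMRFB command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 */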
static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_buffer_object *vmw_bo;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
				       &vmw_bo);
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: Pointer to the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res, uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_buffer_object *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper around
 * vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter, uint32_t *res_id, uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
					 backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter, &cmd->body.sid,
				     &cmd->body.mobid, 0);
}

/**
 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
	int ret;
	size_t size;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1, cmd->body.type,
				    size, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
	int ret;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
				cmd->body.type, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *ctx, *res = NULL;
	struct vmw_ctx_validation_info *ctx_info;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		/*
		 * This is the compat shader path - the shaders are per-device
		 * guest-backed shaders, but user-space thinks they are
		 * per-context host-backed shaders.
		 */
		res = vmw_shader_lookup(vmw_context_res_man(ctx),
					cmd->body.shid, cmd->body.type);
		if (!IS_ERR(res)) {
			ret = vmw_execbuf_res_val_add(sw_context, res,
						      VMW_RES_DIRTY_NONE,
						      vmw_val_add_flag_noctx);
			if (unlikely(ret != 0))
				return ret;

			ret = vmw_resource_relocation_add
				(sw_context, res,
				 vmw_ptr_diff(sw_context->buf_start,
					      &cmd->body.shid),
				 vmw_res_rel_normal);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					VMW_RES_DIRTY_NONE,
					user_shader_converter, &cmd->body.shid,
					&res);
		if (unlikely(ret != 0))
			return ret;
	}

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;

	binding.bi.ctx = ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter, &cmd->body.shid,
				     &cmd->body.mobid, cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
	SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
		SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;

	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= max_shader_num ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
			       (unsigned int) cmd->body.type,
			       (unsigned int) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
			binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_constant_buffer_offset - Validate
 * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);

	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 shader_slot;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
			       (unsigned int) cmd->body.slot);
		return -EINVAL;
	}

	shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
	vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
				     cmd->body.slot, cmd->body.offsetInBytes);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
		container_of(header, typeof(*cmd), header);
	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;

	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= max_allowed) {
		VMW_DEBUG_USER("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}

/**
 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= max_allowed ||
	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_val_add(sw_context, res,
					      VMW_RES_DIRTY_NONE,
					      vmw_val_add_flag_noctx);
		if (ret)
			return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cmd->buf[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = res;
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_dx_set_index_buffer - Validate
 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
		container_of(header, typeof(*cmd), header);
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);
	int ret;

	if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) {
		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
				    0, &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
				     num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
				  cmd->body.renderTargetViewId);

	return PTR_ERR_OR_ZERO(ret);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
				  cmd->body.depthStencilViewId);

	return PTR_ERR_OR_ZERO(ret);
}

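/**
 * vmw_cmd_dx_view_define - Validate view define commands
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * All affected view define commands share the same initial command body
 * layout, so shader resource, render target and depth stencil view
 * definitions can all be validated here.
 */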
static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *srf;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have the
	 * same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (!ctx_node)
		return -EINVAL;

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
		VMW_DEBUG_USER("Invalid surface id.\n");
		return -EINVAL;
	}
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->sid, &srf);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
			    cmd->defined_id, header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_so_target binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		VMW_DEBUG_USER("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_SET,
					user_surface_converter,
					&cmd->targets[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_so_target;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

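/**
 * vmw_cmd_dx_so_define - Validate DX state-object define commands
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * All affected define commands share the same initial command body layout,
 * and the newly defined object is announced to the corresponding cotable.
 */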
static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
	if (IS_ERR(res))
		return PTR_ERR(res);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->sid, NULL);
}

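/**
 * vmw_cmd_dx_cid_check - Validate commands that only require a current DX
 * context to be set
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */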
static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);

	if (!ctx_node)
		return -EINVAL;

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the
 * view resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this command
 * batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res, &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * device errors.
	 */
	return vmw_resource_relocation_add(sw_context, view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}

/**
 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					VMW_RES_DIRTY_SET,
					user_context_converter, &cmd->body.cid,
					&ctx);
		if (ret)
			return ret;
	} else {
		struct vmw_ctx_validation_info *ctx_node =
			VMW_GET_CTX_NODE(sw_context);

		if (!ctx_node)
			return -EINVAL;

		ctx = ctx_node->ctx;
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
				      vmw_val_add_flag_noctx);
	if (ret) {
		VMW_DEBUG_USER("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *view;
	struct vmw_res_cache_entry *rcache;

	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
				   cmd->body.shaderResourceViewId);
	if (IS_ERR(view))
		return PTR_ERR(view);

	/*
	 * Normally the shader-resource view is not gpu-dirtying, but for
	 * this particular command it is. So mark the last looked-up surface,
	 * which is the surface the view points to, gpu-dirty.
	 */
	rcache = &sw_context->res_cache[vmw_res_surface];
	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
				     VMW_RES_DIRTY_SET);
	return 0;
}

/**
 * vmw_cmd_dx_transfer_from_buffer - Validate
 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
		container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.destSid, NULL);
}

/**
 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
		container_of(header, typeof(*cmd), header);

	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.surface.sid, NULL);
}

2860static int vmw_cmd_sm5(struct vmw_private *dev_priv,
2861 struct vmw_sw_context *sw_context,
2862 SVGA3dCmdHeader *header)
2863{
2864 if (!has_sm5_context(dev_priv))
2865 return -EINVAL;
2866
2867 return 0;
2868}
2869
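/*
 * Validate a command that is only legal on SM5-capable devices and needs no
 * further argument checking.
 */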
2870static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
2871 struct vmw_sw_context *sw_context,
2872 SVGA3dCmdHeader *header)
2873{
2874 if (!has_sm5_context(dev_priv))
2875 return -EINVAL;
2876
2877 return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
2878}
2879
2880static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
2881 struct vmw_sw_context *sw_context,
2882 SVGA3dCmdHeader *header)
2883{
2884 if (!has_sm5_context(dev_priv))
2885 return -EINVAL;
2886
2887 return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
2888}
2889
2890static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
2891 struct vmw_sw_context *sw_context,
2892 SVGA3dCmdHeader *header)
2893{
2894 struct {
2895 SVGA3dCmdHeader header;
2896 SVGA3dCmdDXClearUAViewUint body;
2897 } *cmd = container_of(header, typeof(*cmd), header);
2898 struct vmw_resource *ret;
2899
2900 if (!has_sm5_context(dev_priv))
2901 return -EINVAL;
2902
2903 ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2904 cmd->body.uaViewId);
2905
2906 return PTR_ERR_OR_ZERO(ret);
2907}
2908
2909static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
2910 struct vmw_sw_context *sw_context,
2911 SVGA3dCmdHeader *header)
2912{
2913 struct {
2914 SVGA3dCmdHeader header;
2915 SVGA3dCmdDXClearUAViewFloat body;
2916 } *cmd = container_of(header, typeof(*cmd), header);
2917 struct vmw_resource *ret;
2918
2919 if (!has_sm5_context(dev_priv))
2920 return -EINVAL;
2921
2922 ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2923 cmd->body.uaViewId);
2924
2925 return PTR_ERR_OR_ZERO(ret);
2926}
2927
2928static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
2929 struct vmw_sw_context *sw_context,
2930 SVGA3dCmdHeader *header)
2931{
2932 struct {
2933 SVGA3dCmdHeader header;
2934 SVGA3dCmdDXSetUAViews body;
2935 } *cmd = container_of(header, typeof(*cmd), header);
2936 u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2937 sizeof(SVGA3dUAViewId);
2938 int ret;
2939
2940 if (!has_sm5_context(dev_priv))
2941 return -EINVAL;
2942
2943 if (num_uav > vmw_max_num_uavs(dev_priv)) {
2944 VMW_DEBUG_USER("Invalid UAV binding.\n");
2945 return -EINVAL;
2946 }
2947
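	/* The SVGA3dUAViewId array immediately follows the command body. */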
2948 ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2949 vmw_ctx_binding_uav, 0, (void *)&cmd[1],
2950 num_uav, 0);
2951 if (ret)
2952 return ret;
2953
2954 vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
2955 cmd->body.uavSpliceIndex);
2956
2957 return ret;
2958}
2959
2960static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
2961 struct vmw_sw_context *sw_context,
2962 SVGA3dCmdHeader *header)
2963{
2964 struct {
2965 SVGA3dCmdHeader header;
2966 SVGA3dCmdDXSetCSUAViews body;
2967 } *cmd = container_of(header, typeof(*cmd), header);
2968 u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2969 sizeof(SVGA3dUAViewId);
2970 int ret;
2971
2972 if (!has_sm5_context(dev_priv))
2973 return -EINVAL;
2974
2975 if (num_uav > vmw_max_num_uavs(dev_priv)) {
2976 VMW_DEBUG_USER("Invalid UAV binding.\n");
2977 return -EINVAL;
2978 }
2979
2980 ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2981 vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
2982 num_uav, 0);
2983 if (ret)
2984 return ret;
2985
2986 vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
2987 cmd->body.startIndex);
2988
2989 return ret;
2990}
2991
2992static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
2993 struct vmw_sw_context *sw_context,
2994 SVGA3dCmdHeader *header)
2995{
2996 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2997 struct vmw_resource *res;
2998 struct {
2999 SVGA3dCmdHeader header;
3000 SVGA3dCmdDXDefineStreamOutputWithMob body;
3001 } *cmd = container_of(header, typeof(*cmd), header);
3002 int ret;
3003
3004 if (!has_sm5_context(dev_priv))
3005 return -EINVAL;
3006
3007 if (!ctx_node) {
3008 DRM_ERROR("DX Context not set.\n");
3009 return -EINVAL;
3010 }
3011
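	/*
	 * Notify the streamoutput cotable of the new id so the device-backed
	 * table can be resized to cover it if necessary.
	 */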
3012 res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
3013 ret = vmw_cotable_notify(res, cmd->body.soid);
3014 if (ret)
3015 return ret;
3016
3017 return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
3018 cmd->body.soid,
3019 &sw_context->staged_cmd_res);
3020}
3021
3022static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
3023 struct vmw_sw_context *sw_context,
3024 SVGA3dCmdHeader *header)
3025{
3026 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3027 struct vmw_resource *res;
3028 struct {
3029 SVGA3dCmdHeader header;
3030 SVGA3dCmdDXDestroyStreamOutput body;
3031 } *cmd = container_of(header, typeof(*cmd), header);
3032
3033 if (!ctx_node) {
3034 DRM_ERROR("DX Context not set.\n");
3035 return -EINVAL;
3036 }
3037
	/*
	 * When the device does not support SM5, the streamoutput-with-mob
	 * commands are not available to user-space. Simply return in this case.
	 */
3042 if (!has_sm5_context(dev_priv))
3043 return 0;
3044
	/*
	 * On an SM5-capable device, a failed lookup means user-space probably
	 * used the old streamoutput define command. Return without an error.
	 */
3049 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3050 cmd->body.soid);
3051 if (IS_ERR(res))
3052 return 0;
3053
3054 return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
3055 &sw_context->staged_cmd_res);
3056}
3057
3058static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
3059 struct vmw_sw_context *sw_context,
3060 SVGA3dCmdHeader *header)
3061{
3062 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3063 struct vmw_resource *res;
3064 struct {
3065 SVGA3dCmdHeader header;
3066 SVGA3dCmdDXBindStreamOutput body;
3067 } *cmd = container_of(header, typeof(*cmd), header);
3068 int ret;
3069
3070 if (!has_sm5_context(dev_priv))
3071 return -EINVAL;
3072
3073 if (!ctx_node) {
3074 DRM_ERROR("DX Context not set.\n");
3075 return -EINVAL;
3076 }
3077
3078 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3079 cmd->body.soid);
3080 if (IS_ERR(res)) {
3081 DRM_ERROR("Could not find streamoutput to bind.\n");
3082 return PTR_ERR(res);
3083 }
3084
3085 vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
3086
3087 ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3088 vmw_val_add_flag_noctx);
3089 if (ret) {
3090 DRM_ERROR("Error creating resource validation node.\n");
3091 return ret;
3092 }
3093
3094 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
3095 &cmd->body.mobid,
3096 cmd->body.offsetInBytes);
3097}
3098
3099static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
3100 struct vmw_sw_context *sw_context,
3101 SVGA3dCmdHeader *header)
3102{
3103 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3104 struct vmw_resource *res;
3105 struct vmw_ctx_bindinfo_so binding;
3106 struct {
3107 SVGA3dCmdHeader header;
3108 SVGA3dCmdDXSetStreamOutput body;
3109 } *cmd = container_of(header, typeof(*cmd), header);
3110 int ret;
3111
3112 if (!ctx_node) {
3113 DRM_ERROR("DX Context not set.\n");
3114 return -EINVAL;
3115 }
3116
3117 if (cmd->body.soid == SVGA3D_INVALID_ID)
3118 return 0;
3119
	/*
	 * When the device does not support SM5, the streamoutput-with-mob
	 * commands are not available to user-space. Simply return in this case.
	 */
3124 if (!has_sm5_context(dev_priv))
3125 return 0;
3126
	/*
	 * On an SM5-capable device, a failed lookup means user-space probably
	 * used the old streamoutput define command. Return without an error.
	 */
3131 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3132 cmd->body.soid);
	if (IS_ERR(res))
		return 0;
3136
3137 ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3138 vmw_val_add_flag_noctx);
3139 if (ret) {
3140 DRM_ERROR("Error creating resource validation node.\n");
3141 return ret;
3142 }
3143
3144 binding.bi.ctx = ctx_node->ctx;
3145 binding.bi.res = res;
3146 binding.bi.bt = vmw_ctx_binding_so;
	binding.slot = 0; /* Only one SO can be set on a context at a time. */
3148
3149 vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
3150 binding.slot);
3151
3152 return ret;
3153}
3154
3155static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
3156 struct vmw_sw_context *sw_context,
3157 SVGA3dCmdHeader *header)
3158{
3159 struct vmw_draw_indexed_instanced_indirect_cmd {
3160 SVGA3dCmdHeader header;
3161 SVGA3dCmdDXDrawIndexedInstancedIndirect body;
3162 } *cmd = container_of(header, typeof(*cmd), header);
3163
3164 if (!has_sm5_context(dev_priv))
3165 return -EINVAL;
3166
3167 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3168 VMW_RES_DIRTY_NONE, user_surface_converter,
3169 &cmd->body.argsBufferSid, NULL);
3170}
3171
3172static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
3173 struct vmw_sw_context *sw_context,
3174 SVGA3dCmdHeader *header)
3175{
3176 struct vmw_draw_instanced_indirect_cmd {
3177 SVGA3dCmdHeader header;
3178 SVGA3dCmdDXDrawInstancedIndirect body;
3179 } *cmd = container_of(header, typeof(*cmd), header);
3180
3181 if (!has_sm5_context(dev_priv))
3182 return -EINVAL;
3183
3184 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3185 VMW_RES_DIRTY_NONE, user_surface_converter,
3186 &cmd->body.argsBufferSid, NULL);
3187}
3188
3189static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
3190 struct vmw_sw_context *sw_context,
3191 SVGA3dCmdHeader *header)
3192{
3193 struct vmw_dispatch_indirect_cmd {
3194 SVGA3dCmdHeader header;
3195 SVGA3dCmdDXDispatchIndirect body;
3196 } *cmd = container_of(header, typeof(*cmd), header);
3197
3198 if (!has_sm5_context(dev_priv))
3199 return -EINVAL;
3200
3201 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3202 VMW_RES_DIRTY_NONE, user_surface_converter,
3203 &cmd->body.argsBufferSid, NULL);
3204}
3205
3206static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3207 struct vmw_sw_context *sw_context,
3208 void *buf, uint32_t *size)
3209{
3210 uint32_t size_remaining = *size;
3211 uint32_t cmd_id;
3212
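	/*
	 * Non-3D SVGA commands carry no size in a header, so the expected
	 * size is derived from the command id below.
	 */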
3213 cmd_id = ((uint32_t *)buf)[0];
3214 switch (cmd_id) {
3215 case SVGA_CMD_UPDATE:
3216 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3217 break;
3218 case SVGA_CMD_DEFINE_GMRFB:
3219 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3220 break;
3221 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3222 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3223 break;
3224 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3225 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3226 break;
3227 default:
3228 VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
3229 return -EINVAL;
3230 }
3231
3232 if (*size > size_remaining) {
3233 VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
3234 cmd_id);
3235 return -EINVAL;
3236 }
3237
3238 if (unlikely(!sw_context->kernel)) {
3239 VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
3240 return -EPERM;
3241 }
3242
3243 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3244 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3245
3246 return 0;
3247}
3248
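/*
 * Per-command dispatch table. Each entry holds the validator function and
 * three flags: whether user-space may issue the command, whether the command
 * is disallowed on guest-backed (GB) devices, and whether it requires a GB
 * device. vmw_cmd_check() enforces these flags before calling the validator.
 */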
3249static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
3250 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
3251 false, false, false),
3252 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
3253 false, false, false),
3254 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
3255 true, false, false),
3256 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
3257 true, false, false),
3258 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
3259 true, false, false),
3260 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
3261 false, false, false),
3262 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
3263 false, false, false),
3264 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
3265 true, false, false),
3266 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
3267 true, false, false),
3268 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
3269 true, false, false),
3270 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
3271 &vmw_cmd_set_render_target_check, true, false, false),
3272 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
3273 true, false, false),
3274 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
3275 true, false, false),
3276 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
3277 true, false, false),
3278 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
3279 true, false, false),
3280 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
3281 true, false, false),
3282 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
3283 true, false, false),
3284 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
3285 true, false, false),
3286 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
3287 false, false, false),
3288 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
3289 true, false, false),
3290 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
3291 true, false, false),
3292 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
3293 true, false, false),
3294 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
3295 true, false, false),
3296 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
3297 true, false, false),
3298 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
3299 true, false, false),
3300 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
3301 true, false, false),
3302 VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
3303 true, false, false),
3304 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
3305 true, false, false),
3306 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
3307 true, false, false),
3308 VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
3309 &vmw_cmd_blt_surf_screen_check, false, false, false),
3310 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
3311 false, false, false),
3312 VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
3313 false, false, false),
3314 VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
3315 false, false, false),
3316 VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
3317 false, false, false),
3318 VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
3319 false, false, false),
3320 VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
3321 false, false, false),
3322 VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
3323 false, false, false),
3324 VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
3325 VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
3326 VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
3327 VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
3328 VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
3329 VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
3330 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
3331 false, false, true),
3332 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
3333 false, false, true),
3334 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
3335 false, false, true),
3336 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
3337 false, false, true),
3338 VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
3339 false, false, true),
3340 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
3341 false, false, true),
3342 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
3343 false, false, true),
3344 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
3345 false, false, true),
3346 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
3347 true, false, true),
3348 VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
3349 false, false, true),
3350 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
3351 true, false, true),
3352 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
3353 &vmw_cmd_update_gb_surface, true, false, true),
3354 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
3355 &vmw_cmd_readback_gb_image, true, false, true),
3356 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
3357 &vmw_cmd_readback_gb_surface, true, false, true),
3358 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
3359 &vmw_cmd_invalidate_gb_image, true, false, true),
3360 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
3361 &vmw_cmd_invalidate_gb_surface, true, false, true),
3362 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
3363 false, false, true),
3364 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
3365 false, false, true),
3366 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
3367 false, false, true),
3368 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
3369 false, false, true),
3370 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
3371 false, false, true),
3372 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
3373 false, false, true),
3374 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
3375 true, false, true),
3376 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
3377 false, false, true),
3378 VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
3379 false, false, false),
3380 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
3381 true, false, true),
3382 VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
3383 true, false, true),
3384 VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
3385 true, false, true),
3386 VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
3387 true, false, true),
3388 VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
3389 true, false, true),
3390 VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
3391 false, false, true),
3392 VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
3393 false, false, true),
3394 VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
3395 false, false, true),
3396 VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
3397 false, false, true),
3398 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
3399 false, false, true),
3400 VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
3401 false, false, true),
3402 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
3403 false, false, true),
3404 VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
3405 false, false, true),
3406 VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3407 false, false, true),
3408 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
3409 false, false, true),
3410 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
3411 true, false, true),
3412 VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
3413 false, false, true),
3414 VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
3415 false, false, true),
3416 VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
3417 false, false, true),
3418 VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
3419 false, false, true),
3420
3421 /* SM commands */
3422 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
3423 false, false, true),
3424 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
3425 false, false, true),
3426 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
3427 false, false, true),
3428 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
3429 false, false, true),
3430 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
3431 false, false, true),
3432 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
3433 &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
3434 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
3435 &vmw_cmd_dx_set_shader_res, true, false, true),
3436 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
3437 true, false, true),
3438 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
3439 true, false, true),
3440 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
3441 true, false, true),
3442 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
3443 true, false, true),
3444 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
3445 true, false, true),
3446 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
3447 &vmw_cmd_dx_cid_check, true, false, true),
3448 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
3449 true, false, true),
3450 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
3451 &vmw_cmd_dx_set_vertex_buffers, true, false, true),
3452 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
3453 &vmw_cmd_dx_set_index_buffer, true, false, true),
3454 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
3455 &vmw_cmd_dx_set_rendertargets, true, false, true),
3456 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
3457 true, false, true),
3458 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
3459 &vmw_cmd_dx_cid_check, true, false, true),
3460 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
3461 &vmw_cmd_dx_cid_check, true, false, true),
3462 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
3463 true, false, true),
3464 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
3465 true, false, true),
3466 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
3467 true, false, true),
3468 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
3469 &vmw_cmd_dx_cid_check, true, false, true),
3470 VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
3471 true, false, true),
3472 VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
3473 true, false, true),
3474 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
3475 true, false, true),
3476 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
3477 true, false, true),
3478 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
3479 true, false, true),
3480 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
3481 true, false, true),
3482 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
3483 &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
3484 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
3485 &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
3486 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
3487 true, false, true),
3488 VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
3489 true, false, true),
3490 VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
3491 &vmw_cmd_dx_check_subresource, true, false, true),
3492 VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
3493 &vmw_cmd_dx_check_subresource, true, false, true),
3494 VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
3495 &vmw_cmd_dx_check_subresource, true, false, true),
3496 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
3497 &vmw_cmd_dx_view_define, true, false, true),
3498 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
3499 &vmw_cmd_dx_view_remove, true, false, true),
3500 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
3501 &vmw_cmd_dx_view_define, true, false, true),
3502 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
3503 &vmw_cmd_dx_view_remove, true, false, true),
3504 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
3505 &vmw_cmd_dx_view_define, true, false, true),
3506 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
3507 &vmw_cmd_dx_view_remove, true, false, true),
3508 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
3509 &vmw_cmd_dx_so_define, true, false, true),
3510 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
3511 &vmw_cmd_dx_cid_check, true, false, true),
3512 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
3513 &vmw_cmd_dx_so_define, true, false, true),
3514 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
3515 &vmw_cmd_dx_cid_check, true, false, true),
3516 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
3517 &vmw_cmd_dx_so_define, true, false, true),
3518 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
3519 &vmw_cmd_dx_cid_check, true, false, true),
3520 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
3521 &vmw_cmd_dx_so_define, true, false, true),
3522 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
3523 &vmw_cmd_dx_cid_check, true, false, true),
3524 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
3525 &vmw_cmd_dx_so_define, true, false, true),
3526 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
3527 &vmw_cmd_dx_cid_check, true, false, true),
3528 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
3529 &vmw_cmd_dx_define_shader, true, false, true),
3530 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
3531 &vmw_cmd_dx_destroy_shader, true, false, true),
3532 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
3533 &vmw_cmd_dx_bind_shader, true, false, true),
3534 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
3535 &vmw_cmd_dx_so_define, true, false, true),
3536 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
3537 &vmw_cmd_dx_destroy_streamoutput, true, false, true),
3538 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
3539 &vmw_cmd_dx_set_streamoutput, true, false, true),
3540 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
3541 &vmw_cmd_dx_set_so_targets, true, false, true),
3542 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
3543 &vmw_cmd_dx_cid_check, true, false, true),
3544 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
3545 &vmw_cmd_dx_cid_check, true, false, true),
3546 VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
3547 &vmw_cmd_buffer_copy_check, true, false, true),
3548 VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
3549 &vmw_cmd_pred_copy_check, true, false, true),
3550 VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
3551 &vmw_cmd_dx_transfer_from_buffer,
3552 true, false, true),
3553 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
3554 &vmw_cmd_dx_set_constant_buffer_offset,
3555 true, false, true),
3556 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
3557 &vmw_cmd_dx_set_constant_buffer_offset,
3558 true, false, true),
3559 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
3560 &vmw_cmd_dx_set_constant_buffer_offset,
3561 true, false, true),
3562 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
3563 &vmw_cmd_dx_set_constant_buffer_offset,
3564 true, false, true),
3565 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
3566 &vmw_cmd_dx_set_constant_buffer_offset,
3567 true, false, true),
3568 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
3569 &vmw_cmd_dx_set_constant_buffer_offset,
3570 true, false, true),
3571 VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
3572 true, false, true),
3573
3574 /*
3575 * SM5 commands
3576 */
3577 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
3578 true, false, true),
3579 VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
3580 true, false, true),
3581 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
3582 true, false, true),
3583 VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
3584 &vmw_cmd_clear_uav_float, true, false, true),
3585 VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
3586 false, true),
3587 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
3588 true),
3589 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
3590 &vmw_cmd_indexed_instanced_indirect, true, false, true),
3591 VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
3592 &vmw_cmd_instanced_indirect, true, false, true),
3593 VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
3594 VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
3595 &vmw_cmd_dispatch_indirect, true, false, true),
3596 VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
3597 false, true),
3598 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
3599 &vmw_cmd_sm5_view_define, true, false, true),
3600 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
3601 &vmw_cmd_dx_define_streamoutput, true, false, true),
3602 VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
3603 &vmw_cmd_dx_bind_streamoutput, true, false, true),
3604 VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
3605 &vmw_cmd_dx_so_define, true, false, true),
3606};
3607
3608bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
3609{
3610 u32 cmd_id = ((u32 *) buf)[0];
3611
3612 if (cmd_id >= SVGA_CMD_MAX) {
3613 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3614 const struct vmw_cmd_entry *entry;
3615
3616 *size = header->size + sizeof(SVGA3dCmdHeader);
3617 cmd_id = header->id;
3618 if (cmd_id >= SVGA_3D_CMD_MAX)
3619 return false;
3620
3621 cmd_id -= SVGA_3D_CMD_BASE;
3622 entry = &vmw_cmd_entries[cmd_id];
3623 *cmd = entry->cmd_name;
3624 return true;
3625 }
3626
3627 switch (cmd_id) {
3628 case SVGA_CMD_UPDATE:
3629 *cmd = "SVGA_CMD_UPDATE";
3630 *size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
3631 break;
3632 case SVGA_CMD_DEFINE_GMRFB:
3633 *cmd = "SVGA_CMD_DEFINE_GMRFB";
3634 *size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
3635 break;
3636 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3637 *cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
3638 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3639 break;
3640 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3641 *cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
3642 *size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3643 break;
3644 default:
3645 *cmd = "UNKNOWN";
3646 *size = 0;
3647 return false;
3648 }
3649
3650 return true;
3651}
3652
3653static int vmw_cmd_check(struct vmw_private *dev_priv,
3654 struct vmw_sw_context *sw_context, void *buf,
3655 uint32_t *size)
3656{
3657 uint32_t cmd_id;
3658 uint32_t size_remaining = *size;
3659 SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
3660 int ret;
3661 const struct vmw_cmd_entry *entry;
3662 bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
3663
3664 cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands. */
3666 if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
3671 *size = header->size + sizeof(SVGA3dCmdHeader);
3672
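	/* The id offset from SVGA_3D_CMD_BASE indexes vmw_cmd_entries[]. */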
3673 cmd_id -= SVGA_3D_CMD_BASE;
3674 if (unlikely(*size > size_remaining))
3675 goto out_invalid;
3676
3677 if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
3678 goto out_invalid;
3679
3680 entry = &vmw_cmd_entries[cmd_id];
3681 if (unlikely(!entry->func))
3682 goto out_invalid;
3683
3684 if (unlikely(!entry->user_allow && !sw_context->kernel))
3685 goto out_privileged;
3686
3687 if (unlikely(entry->gb_disable && gb))
3688 goto out_old;
3689
3690 if (unlikely(entry->gb_enable && !gb))
3691 goto out_new;
3692
3693 ret = entry->func(dev_priv, sw_context, header);
3694 if (unlikely(ret != 0)) {
3695 VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
3696 cmd_id + SVGA_3D_CMD_BASE, ret);
3697 return ret;
3698 }
3699
3700 return 0;
3701out_invalid:
3702 VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
3703 cmd_id + SVGA_3D_CMD_BASE);
3704 return -EINVAL;
3705out_privileged:
3706 VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
3707 cmd_id + SVGA_3D_CMD_BASE);
3708 return -EPERM;
3709out_old:
3710 VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
3711 cmd_id + SVGA_3D_CMD_BASE);
3712 return -EINVAL;
3713out_new:
3714 VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
3715 cmd_id + SVGA_3D_CMD_BASE);
3716 return -EINVAL;
3717}
3718
3719static int vmw_cmd_check_all(struct vmw_private *dev_priv,
3720 struct vmw_sw_context *sw_context, void *buf,
3721 uint32_t size)
3722{
3723 int32_t cur_size = size;
3724 int ret;
3725
3726 sw_context->buf_start = buf;
3727
3728 while (cur_size > 0) {
3729 size = cur_size;
3730 ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
3731 if (unlikely(ret != 0))
3732 return ret;
3733 buf = (void *)((unsigned long) buf + size);
3734 cur_size -= size;
3735 }
3736
3737 if (unlikely(cur_size != 0)) {
3738 VMW_DEBUG_USER("Command verifier out of sync.\n");
3739 return -EINVAL;
3740 }
3741
3742 return 0;
3743}
3744
3745static void vmw_free_relocations(struct vmw_sw_context *sw_context)
3746{
3747 /* Memory is validation context memory, so no need to free it */
3748 INIT_LIST_HEAD(&sw_context->bo_relocations);
3749}
3750
3751static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
3752{
3753 struct vmw_relocation *reloc;
3754 struct ttm_buffer_object *bo;
3755
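	/*
	 * With buffer placement now final, patch each relocation with the
	 * buffer's device address: a VRAM offset, a GMR id, or a MOB id.
	 */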
3756 list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
3757 bo = &reloc->vbo->base;
3758 switch (bo->resource->mem_type) {
3759 case TTM_PL_VRAM:
3760 reloc->location->offset += bo->resource->start << PAGE_SHIFT;
3761 reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
3762 break;
3763 case VMW_PL_GMR:
3764 reloc->location->gmrId = bo->resource->start;
3765 break;
3766 case VMW_PL_MOB:
3767 *reloc->mob_loc = bo->resource->start;
3768 break;
3769 default:
3770 BUG();
3771 }
3772 }
3773 vmw_free_relocations(sw_context);
3774}
3775
3776static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3777 uint32_t size)
3778{
3779 if (likely(sw_context->cmd_bounce_size >= size))
3780 return 0;
3781
3782 if (sw_context->cmd_bounce_size == 0)
3783 sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
3784
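	/* Grow by roughly 50% per iteration, keeping the size page-aligned. */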
3785 while (sw_context->cmd_bounce_size < size) {
3786 sw_context->cmd_bounce_size =
3787 PAGE_ALIGN(sw_context->cmd_bounce_size +
3788 (sw_context->cmd_bounce_size >> 1));
3789 }
3790
3791 vfree(sw_context->cmd_bounce);
3792 sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3793
3794 if (sw_context->cmd_bounce == NULL) {
3795 VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
3796 sw_context->cmd_bounce_size = 0;
3797 return -ENOMEM;
3798 }
3799
3800 return 0;
3801}
3802
3803/*
3804 * vmw_execbuf_fence_commands - create and submit a command stream fence
3805 *
3806 * Creates a fence object and submits a command stream marker.
3807 * If this fails for some reason, We sync the fifo and return NULL.
3808 * It is then safe to fence buffers with a NULL pointer.
3809 *
3810 * If @p_handle is not NULL @file_priv must also not be NULL. Creates a
3811 * userspace handle if @p_handle is not NULL, otherwise not.
3812 */
3813
3814int vmw_execbuf_fence_commands(struct drm_file *file_priv,
3815 struct vmw_private *dev_priv,
3816 struct vmw_fence_obj **p_fence,
3817 uint32_t *p_handle)
3818{
3819 uint32_t sequence;
3820 int ret;
3821 bool synced = false;
3822
3823 /* p_handle implies file_priv. */
3824 BUG_ON(p_handle != NULL && file_priv == NULL);
3825
3826 ret = vmw_cmd_send_fence(dev_priv, &sequence);
3827 if (unlikely(ret != 0)) {
3828 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
3829 synced = true;
3830 }
3831
3832 if (p_handle != NULL)
3833 ret = vmw_user_fence_create(file_priv, dev_priv->fman,
3834 sequence, p_fence, p_handle);
3835 else
3836 ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
3837
3838 if (unlikely(ret != 0 && !synced)) {
3839 (void) vmw_fallback_wait(dev_priv, false, false, sequence,
3840 false, VMW_FENCE_WAIT_TIMEOUT);
3841 *p_fence = NULL;
3842 }
3843
3844 return ret;
3845}
3846
3847/**
3848 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3849 *
3850 * @dev_priv: Pointer to a vmw_private struct.
3851 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3852 * @ret: Return value from fence object creation.
3853 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3854 * the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
 *
 * This function copies fence information to user-space. If copying fails, the
 * user-space struct drm_vmw_fence_rep::error member should be left untouched;
 * since user-space typically preloads it with -EFAULT, a copy failure can
 * then be detected.
3863 *
3864 * Also if copying fails, user-space will be unable to signal the fence object
3865 * so we wait for it immediately, and then unreference the user-space reference.
3866 */
3867int
3868vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
3869 struct vmw_fpriv *vmw_fp, int ret,
3870 struct drm_vmw_fence_rep __user *user_fence_rep,
3871 struct vmw_fence_obj *fence, uint32_t fence_handle,
3872 int32_t out_fence_fd)
3873{
3874 struct drm_vmw_fence_rep fence_rep;
3875
3876 if (user_fence_rep == NULL)
3877 return 0;
3878
3879 memset(&fence_rep, 0, sizeof(fence_rep));
3880
3881 fence_rep.error = ret;
3882 fence_rep.fd = out_fence_fd;
3883 if (ret == 0) {
3884 BUG_ON(fence == NULL);
3885
3886 fence_rep.handle = fence_handle;
3887 fence_rep.seqno = fence->base.seqno;
3888 vmw_update_seqno(dev_priv);
3889 fence_rep.passed_seqno = dev_priv->last_read_seqno;
3890 }
3891
3892 /*
3893 * copy_to_user errors will be detected by user space not seeing
3894 * fence_rep::error filled in. Typically user-space would have pre-set
3895 * that member to -EFAULT.
3896 */
3897 ret = copy_to_user(user_fence_rep, &fence_rep,
3898 sizeof(fence_rep));
3899
3900 /*
3901 * User-space lost the fence object. We need to sync and unreference the
3902 * handle.
3903 */
3904 if (unlikely(ret != 0) && (fence_rep.error == 0)) {
3905 ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
3906 VMW_DEBUG_USER("Fence copy error. Syncing.\n");
3907 (void) vmw_fence_obj_wait(fence, false, false,
3908 VMW_FENCE_WAIT_TIMEOUT);
3909 }
3910
3911 return ret ? -EFAULT : 0;
3912}
3913
3914/**
3915 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
3916 *
3917 * @dev_priv: Pointer to a device private structure.
3918 * @kernel_commands: Pointer to the unpatched command batch.
3919 * @command_size: Size of the unpatched command batch.
3920 * @sw_context: Structure holding the relocation lists.
3921 *
3922 * Side effects: If this function returns 0, then the command batch pointed to
3923 * by @kernel_commands will have been modified.
3924 */
3925static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
3926 void *kernel_commands, u32 command_size,
3927 struct vmw_sw_context *sw_context)
3928{
3929 void *cmd;
3930
3931 if (sw_context->dx_ctx_node)
3932 cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
3933 sw_context->dx_ctx_node->ctx->id);
3934 else
3935 cmd = VMW_CMD_RESERVE(dev_priv, command_size);
3936
3937 if (!cmd)
3938 return -ENOMEM;
3939
3940 vmw_apply_relocations(sw_context);
3941 memcpy(cmd, kernel_commands, command_size);
3942 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3943 vmw_resource_relocations_free(&sw_context->res_relocations);
3944 vmw_cmd_commit(dev_priv, command_size);
3945
3946 return 0;
3947}
3948
3949/**
3950 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
3951 * command buffer manager.
3952 *
3953 * @dev_priv: Pointer to a device private structure.
3954 * @header: Opaque handle to the command buffer allocation.
3955 * @command_size: Size of the unpatched command batch.
3956 * @sw_context: Structure holding the relocation lists.
3957 *
3958 * Side effects: If this function returns 0, then the command buffer represented
3959 * by @header will have been modified.
3960 */
3961static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
3962 struct vmw_cmdbuf_header *header,
3963 u32 command_size,
3964 struct vmw_sw_context *sw_context)
3965{
3966 u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
3967 SVGA3D_INVALID_ID);
3968 void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
3969 header);
3970
3971 vmw_apply_relocations(sw_context);
3972 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
3973 vmw_resource_relocations_free(&sw_context->res_relocations);
3974 vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);
3975
3976 return 0;
3977}
3978
3979/**
3980 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
3981 * submission using a command buffer.
3982 *
3983 * @dev_priv: Pointer to a device private structure.
3984 * @user_commands: User-space pointer to the commands to be submitted.
3985 * @command_size: Size of the unpatched command batch.
3986 * @header: Out parameter returning the opaque pointer to the command buffer.
3987 *
3988 * This function checks whether we can use the command buffer manager for
3989 * submission and if so, creates a command buffer of suitable size and copies
3990 * the user data into that buffer.
3991 *
3992 * On successful return, the function returns a pointer to the data in the
3993 * command buffer and *@header is set to non-NULL.
3994 *
3995 * @kernel_commands: If command buffers could not be used, the function will
3996 * return the value of @kernel_commands on function call. That value may be
3997 * NULL. In that case, the value of *@header will be set to NULL.
3998 *
3999 * If an error is encountered, the function will return a pointer error value.
4000 * If the function is interrupted by a signal while sleeping, it will return
4001 * -ERESTARTSYS casted to a pointer error value.
4002 */
4003static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
4004 void __user *user_commands,
4005 void *kernel_commands, u32 command_size,
4006 struct vmw_cmdbuf_header **header)
4007{
4008 size_t cmdbuf_size;
4009 int ret;
4010
4011 *header = NULL;
4012 if (command_size > SVGA_CB_MAX_SIZE) {
4013 VMW_DEBUG_USER("Command buffer is too large.\n");
4014 return ERR_PTR(-EINVAL);
4015 }
4016
4017 if (!dev_priv->cman || kernel_commands)
4018 return kernel_commands;
4019
4020 /* If possible, add a little space for fencing. */
4021 cmdbuf_size = command_size + 512;
4022 cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
4023 kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
4024 header);
4025 if (IS_ERR(kernel_commands))
4026 return kernel_commands;
4027
4028 ret = copy_from_user(kernel_commands, user_commands, command_size);
4029 if (ret) {
4030 VMW_DEBUG_USER("Failed copying commands.\n");
4031 vmw_cmdbuf_header_free(*header);
4032 *header = NULL;
4033 return ERR_PTR(-EFAULT);
4034 }
4035
4036 return kernel_commands;
4037}
4038
4039static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
4040 struct vmw_sw_context *sw_context,
4041 uint32_t handle)
4042{
4043 struct vmw_resource *res;
4044 int ret;
4045 unsigned int size;
4046
4047 if (handle == SVGA3D_INVALID_ID)
4048 return 0;
4049
4050 size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
4051 ret = vmw_validation_preload_res(sw_context->ctx, size);
4052 if (ret)
4053 return ret;
4054
4055 ret = vmw_user_resource_lookup_handle
4056 (dev_priv, sw_context->fp->tfile, handle,
4057 user_context_converter, &res);
4058 if (ret != 0) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
4060 (unsigned int) handle);
4061 return ret;
4062 }
4063
4064 ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
4065 vmw_val_add_flag_none);
4066 if (unlikely(ret != 0)) {
4067 vmw_resource_unreference(&res);
4068 return ret;
4069 }
4070
4071 sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
4072 sw_context->man = vmw_context_res_man(res);
4073
4074 vmw_resource_unreference(&res);
4075 return 0;
4076}
4077
4078int vmw_execbuf_process(struct drm_file *file_priv,
4079 struct vmw_private *dev_priv,
4080 void __user *user_commands, void *kernel_commands,
4081 uint32_t command_size, uint64_t throttle_us,
4082 uint32_t dx_context_handle,
4083 struct drm_vmw_fence_rep __user *user_fence_rep,
4084 struct vmw_fence_obj **out_fence, uint32_t flags)
4085{
4086 struct vmw_sw_context *sw_context = &dev_priv->ctx;
4087 struct vmw_fence_obj *fence = NULL;
4088 struct vmw_cmdbuf_header *header;
4089 uint32_t handle = 0;
4090 int ret;
4091 int32_t out_fence_fd = -1;
4092 struct sync_file *sync_file = NULL;
4093 DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);
4094
4095 if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
4096 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
4097 if (out_fence_fd < 0) {
4098 VMW_DEBUG_USER("Failed to get a fence fd.\n");
4099 return out_fence_fd;
4100 }
4101 }
4102
	if (throttle_us)
		VMW_DEBUG_USER("Throttling is no longer supported.\n");
4106
4107 kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4108 kernel_commands, command_size,
4109 &header);
4110 if (IS_ERR(kernel_commands)) {
4111 ret = PTR_ERR(kernel_commands);
4112 goto out_free_fence_fd;
4113 }
4114
4115 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4116 if (ret) {
4117 ret = -ERESTARTSYS;
4118 goto out_free_header;
4119 }
4120
4121 sw_context->kernel = false;
4122 if (kernel_commands == NULL) {
4123 ret = vmw_resize_cmd_bounce(sw_context, command_size);
4124 if (unlikely(ret != 0))
4125 goto out_unlock;
4126
4127 ret = copy_from_user(sw_context->cmd_bounce, user_commands,
4128 command_size);
4129 if (unlikely(ret != 0)) {
4130 ret = -EFAULT;
4131 VMW_DEBUG_USER("Failed copying commands.\n");
4132 goto out_unlock;
4133 }
4134
4135 kernel_commands = sw_context->cmd_bounce;
4136 } else if (!header) {
4137 sw_context->kernel = true;
4138 }
4139
4140 sw_context->filp = file_priv;
4141 sw_context->fp = vmw_fpriv(file_priv);
4142 INIT_LIST_HEAD(&sw_context->ctx_list);
4143 sw_context->cur_query_bo = dev_priv->pinned_bo;
4144 sw_context->last_query_ctx = NULL;
4145 sw_context->needs_post_query_barrier = false;
4146 sw_context->dx_ctx_node = NULL;
4147 sw_context->dx_query_mob = NULL;
4148 sw_context->dx_query_ctx = NULL;
4149 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
4150 INIT_LIST_HEAD(&sw_context->res_relocations);
4151 INIT_LIST_HEAD(&sw_context->bo_relocations);
4152
4153 if (sw_context->staged_bindings)
4154 vmw_binding_state_reset(sw_context->staged_bindings);
4155
4156 INIT_LIST_HEAD(&sw_context->staged_cmd_res);
4157 sw_context->ctx = &val_ctx;
4158 ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
4159 if (unlikely(ret != 0))
4160 goto out_err_nores;
4161
4162 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
4163 command_size);
4164 if (unlikely(ret != 0))
4165 goto out_err_nores;
4166
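	/*
	 * Reserve and validate all resources and buffer objects referenced by
	 * the now-verified command stream before it is submitted.
	 */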
4167 ret = vmw_resources_reserve(sw_context);
4168 if (unlikely(ret != 0))
4169 goto out_err_nores;
4170
4171 ret = vmw_validation_bo_reserve(&val_ctx, true);
4172 if (unlikely(ret != 0))
4173 goto out_err_nores;
4174
4175 ret = vmw_validation_bo_validate(&val_ctx, true);
4176 if (unlikely(ret != 0))
4177 goto out_err;
4178
4179 ret = vmw_validation_res_validate(&val_ctx, true);
4180 if (unlikely(ret != 0))
4181 goto out_err;
4182
4183 vmw_validation_drop_ht(&val_ctx);
4184
4185 ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
4186 if (unlikely(ret != 0)) {
4187 ret = -ERESTARTSYS;
4188 goto out_err;
4189 }
4190
4191 if (dev_priv->has_mob) {
4192 ret = vmw_rebind_contexts(sw_context);
4193 if (unlikely(ret != 0))
4194 goto out_unlock_binding;
4195 }
4196
4197 if (!header) {
4198 ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
4199 command_size, sw_context);
4200 } else {
4201 ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
4202 sw_context);
4203 header = NULL;
4204 }
4205 mutex_unlock(&dev_priv->binding_mutex);
4206 if (ret)
4207 goto out_err;
4208
4209 vmw_query_bo_switch_commit(dev_priv, sw_context);
4210 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
4211 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_execbuf_fence_commands() will sync. The error will be
	 * propagated to user-space in @user_fence_rep.
	 */
4217 if (ret != 0)
4218 VMW_DEBUG_USER("Fence submission error. Syncing.\n");
4219
4220 vmw_execbuf_bindings_commit(sw_context, false);
4221 vmw_bind_dx_query_mob(sw_context);
4222 vmw_validation_res_unreserve(&val_ctx, false);
4223
4224 vmw_validation_bo_fence(sw_context->ctx, fence);
4225
4226 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4227 __vmw_execbuf_release_pinned_bo(dev_priv, fence);
4228
4229 /*
4230 * If anything fails here, give up trying to export the fence and do a
4231 * sync since the user mode will not be able to sync the fence itself.
4232 * This ensures we are still functionally correct.
4233 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
4237 if (!sync_file) {
4238 VMW_DEBUG_USER("Sync file create failed for fence\n");
4239 put_unused_fd(out_fence_fd);
4240 out_fence_fd = -1;
4241
4242 (void) vmw_fence_obj_wait(fence, false, false,
4243 VMW_FENCE_WAIT_TIMEOUT);
4244 }
4245 }
4246
4247 ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
4248 user_fence_rep, fence, handle, out_fence_fd);
4249
4250 if (sync_file) {
4251 if (ret) {
4252 /* usercopy of fence failed, put the file object */
4253 fput(sync_file->file);
4254 put_unused_fd(out_fence_fd);
4255 } else {
4256 /* Link the fence with the FD created earlier */
4257 fd_install(out_fence_fd, sync_file->file);
4258 }
4259 }
4260
4261 /* Don't unreference when handing fence out */
4262 if (unlikely(out_fence != NULL)) {
4263 *out_fence = fence;
4264 fence = NULL;
4265 } else if (likely(fence != NULL)) {
4266 vmw_fence_obj_unreference(&fence);
4267 }
4268
4269 vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
4270 mutex_unlock(&dev_priv->cmdbuf_mutex);
4271
4272 /*
4273 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4274 * in resource destruction paths.
4275 */
4276 vmw_validation_unref_lists(&val_ctx);
4277
4278 return ret;
4279
4280out_unlock_binding:
4281 mutex_unlock(&dev_priv->binding_mutex);
4282out_err:
4283 vmw_validation_bo_backoff(&val_ctx);
4284out_err_nores:
4285 vmw_execbuf_bindings_commit(sw_context, true);
4286 vmw_validation_res_unreserve(&val_ctx, true);
4287 vmw_resource_relocations_free(&sw_context->res_relocations);
4288 vmw_free_relocations(sw_context);
4289 if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
4290 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4291out_unlock:
4292 vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
4293 vmw_validation_drop_ht(&val_ctx);
4294 WARN_ON(!list_empty(&sw_context->ctx_list));
4295 mutex_unlock(&dev_priv->cmdbuf_mutex);
4296
4297 /*
4298 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
4299 * in resource destruction paths.
4300 */
4301 vmw_validation_unref_lists(&val_ctx);
4302out_free_header:
4303 if (header)
4304 vmw_cmdbuf_header_free(header);
4305out_free_fence_fd:
4306 if (out_fence_fd >= 0)
4307 put_unused_fd(out_fence_fd);
4308
4309 return ret;
4310}
4311
4312/**
4313 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
4314 *
4315 * @dev_priv: The device private structure.
4316 *
4317 * This function is called to idle the fifo and unpin the query buffer if the
4318 * normal way to do this hits an error, which should typically be extremely
4319 * rare.
4320 */
4321static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
4322{
4323 VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
4324
4325 (void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
4326 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4327 if (dev_priv->dummy_query_bo_pinned) {
4328 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4329 dev_priv->dummy_query_bo_pinned = false;
4330 }
4331}
4332
4333
4334/**
4335 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
4336 * bo.
4337 *
4338 * @dev_priv: The device private structure.
4339 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
4340 * query barrier that flushes all queries touching the current buffer pointed to
4341 * by @dev_priv->pinned_bo
4342 *
4343 * This function should be used to unpin the pinned query bo, or as a query
4344 * barrier when we need to make sure that all queries have finished before the
4345 * next fifo command. (For example on hardware context destructions where the
4346 * hardware may otherwise leak unfinished queries).
4347 *
 * This function does not return any failure codes, but makes an attempt to do
 * safe unpinning in case of errors.
4350 *
4351 * The function will synchronize on the previous query barrier, and will thus
4352 * not finish until that barrier has executed.
4353 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
4355 * calling this function.
4356 */
4357void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
4358 struct vmw_fence_obj *fence)
4359{
4360 int ret = 0;
4361 struct vmw_fence_obj *lfence = NULL;
4362 DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
4363
4364 if (dev_priv->pinned_bo == NULL)
4365 goto out_unlock;
4366
4367 ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
4368 false);
4369 if (ret)
4370 goto out_no_reserve;
4371
4372 ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
4373 false);
4374 if (ret)
4375 goto out_no_reserve;
4376
4377 ret = vmw_validation_bo_reserve(&val_ctx, false);
4378 if (ret)
4379 goto out_no_reserve;
4380
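	/*
	 * Emit a dummy query as a query barrier, flushing any queries still
	 * touching the pinned bo before it is unpinned.
	 */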
4381 if (dev_priv->query_cid_valid) {
4382 BUG_ON(fence != NULL);
4383 ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
4384 if (ret)
4385 goto out_no_emit;
4386 dev_priv->query_cid_valid = false;
4387 }
4388
4389 vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
4390 if (dev_priv->dummy_query_bo_pinned) {
4391 vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
4392 dev_priv->dummy_query_bo_pinned = false;
4393 }
4394 if (fence == NULL) {
4395 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
4396 NULL);
4397 fence = lfence;
4398 }
4399 vmw_validation_bo_fence(&val_ctx, fence);
4400 if (lfence != NULL)
4401 vmw_fence_obj_unreference(&lfence);
4402
4403 vmw_validation_unref_lists(&val_ctx);
4404 vmw_bo_unreference(&dev_priv->pinned_bo);
4405
4406out_unlock:
4407 return;
4408out_no_emit:
4409 vmw_validation_bo_backoff(&val_ctx);
4410out_no_reserve:
4411 vmw_validation_unref_lists(&val_ctx);
4412 vmw_execbuf_unpin_panic(dev_priv);
4413 vmw_bo_unreference(&dev_priv->pinned_bo);
4414}
4415
4416/**
4417 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
4418 *
4419 * @dev_priv: The device private structure.
4420 *
4421 * This function should be used to unpin the pinned query bo, or as a query
4422 * barrier when we need to make sure that all queries have finished before the
4423 * next fifo command. (For example on hardware context destructions where the
4424 * hardware may otherwise leak unfinished queries).
4425 *
 * This function does not return any failure codes, but makes an attempt to do
 * safe unpinning in case of errors.
4428 *
4429 * The function will synchronize on the previous query barrier, and will thus
4430 * not finish until that barrier has executed.
4431 */
4432void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
4433{
4434 mutex_lock(&dev_priv->cmdbuf_mutex);
4435 if (dev_priv->query_cid_valid)
4436 __vmw_execbuf_release_pinned_bo(dev_priv, NULL);
4437 mutex_unlock(&dev_priv->cmdbuf_mutex);
4438}
4439
4440int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
4441 struct drm_file *file_priv)
4442{
4443 struct vmw_private *dev_priv = vmw_priv(dev);
4444 struct drm_vmw_execbuf_arg *arg = data;
4445 int ret;
4446 struct dma_fence *in_fence = NULL;
4447
4448 MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
4449 MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);
4450
4451 /*
4452 * Extend the ioctl argument while maintaining backwards compatibility:
4453 * We take different code paths depending on the value of arg->version.
4454 *
4455 * Note: The ioctl argument is extended and zeropadded by core DRM.
4456 */
4457 if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
4458 arg->version == 0)) {
4459 VMW_DEBUG_USER("Incorrect execbuf version.\n");
4460 ret = -EINVAL;
4461 goto mksstats_out;
4462 }
4463
4464 switch (arg->version) {
4465 case 1:
4466 /* For v1 core DRM have extended + zeropadded the data */
4467 arg->context_handle = (uint32_t) -1;
4468 break;
4469 case 2:
4470 default:
4471 /* For v2 and later core DRM would have correctly copied it */
4472 break;
4473 }
4474
4475 /* If imported a fence FD from elsewhere, then wait on it */
4476 if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
4477 in_fence = sync_file_get_fence(arg->imported_fence_fd);
4478
4479 if (!in_fence) {
4480 VMW_DEBUG_USER("Cannot get imported fence\n");
4481 ret = -EINVAL;
4482 goto mksstats_out;
4483 }
4484
4485 ret = dma_fence_wait(in_fence, true);
4486 if (ret)
4487 goto out;
4488 }
4489
4490 ret = vmw_execbuf_process(file_priv, dev_priv,
4491 (void __user *)(unsigned long)arg->commands,
4492 NULL, arg->command_size, arg->throttle_us,
4493 arg->context_handle,
4494 (void __user *)(unsigned long)arg->fence_rep,
4495 NULL, arg->flags);
4496
4497 if (unlikely(ret != 0))
4498 goto out;
4499
4500 vmw_kms_cursor_post_execbuf(dev_priv);
4501
4502out:
4503 if (in_fence)
4504 dma_fence_put(in_fence);
4505
4506mksstats_out:
4507 MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
4508 return ret;
4509}