/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
#include "ttm/ttm_placement.h"

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state *cbs;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
	spinlock_t cotable_lock;
	struct vmw_dma_buffer *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;


static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
	.res_type = vmw_res_dx_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "dx contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_dx_context_create,
	.destroy = vmw_dx_context_destroy,
	.bind = vmw_dx_context_bind,
	.unbind = vmw_dx_context_unbind
};

/**
 * Context management:
 */

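/**
 * vmw_context_cotables_unref - Drop a context's references to its cotables.
 *
 * @uctx: Pointer to the user context.
 *
 * Clears each cotable pointer under the cotable lock and drops the
 * corresponding reference outside of it.
 */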
static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
{
	struct vmw_resource *res;
	int i;

	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[i];
		uctx->cotables[i] = NULL;
		spin_unlock(&uctx->cotable_lock);

		if (res)
			vmw_resource_unreference(&res);
	}
}

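/**
 * vmw_hw_context_destroy - Destroy the device context backing a resource.
 *
 * @res: Pointer to the context resource.
 *
 * For guest-backed and DX contexts, tears down the command buffer resource
 * manager, kills all bindings and calls the resource destroy callback.
 * For legacy contexts, emits an SVGA_3D_CMD_CONTEXT_DESTROY command to the
 * FIFO.
 */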
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;


	if (res->func->destroy == vmw_gb_context_destroy ||
	    res->func->destroy == vmw_dx_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		vmw_cmdbuf_res_man_destroy(uctx->man);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_state_kill(uctx->cbs);
		(void) res->func->destroy(res);
		mutex_unlock(&dev_priv->binding_mutex);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		vmw_context_cotables_unref(uctx);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_dec(dev_priv);
}

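/**
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @dx: Whether to initialize a DX context rather than a GB context.
 * @res: Pointer to the context resource to initialize.
 * @res_free: Destructor for the resource, or NULL to use kfree().
 *
 * Sets up the backup size, the command buffer resource manager, the
 * binding state tracker and, for DX contexts, the cotables.
 */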
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       bool dx,
			       struct vmw_resource *res,
			       void (*res_free)(struct vmw_resource *res))
{
	int ret, i;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
			    SVGA3D_CONTEXT_DATA_SIZE);
	ret = vmw_resource_init(dev_priv, res, true,
				res_free,
				dx ? &vmw_dx_context_func :
				&vmw_gb_context_func);
	if (unlikely(ret != 0))
		goto out_err;

	if (dev_priv->has_mob) {
		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
		if (IS_ERR(uctx->man)) {
			ret = PTR_ERR(uctx->man);
			uctx->man = NULL;
			goto out_err;
		}
	}

	uctx->cbs = vmw_binding_state_alloc(dev_priv);
	if (IS_ERR(uctx->cbs)) {
		ret = PTR_ERR(uctx->cbs);
		goto out_err;
	}

	spin_lock_init(&uctx->cotable_lock);

	if (dx) {
		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
							      &uctx->res, i);
			if (unlikely(uctx->cotables[i] == NULL)) {
				ret = -ENOMEM;
				goto out_cotables;
			}
		}
	}

	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_cotables:
	vmw_context_cotables_unref(uctx);
out_err:
	if (res_free)
		res_free(res);
	else
		kfree(res);
	return ret;
}

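/**
 * vmw_context_init - Initialize a context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the context resource to initialize.
 * @res_free: Destructor for the resource, or NULL to use kfree().
 * @dx: Whether the context is a DX context.
 *
 * Dispatches to vmw_gb_context_init() on guest-backed devices; otherwise
 * a legacy context is defined directly in the FIFO.
 */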
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free)(struct vmw_resource *res),
			    bool dx)
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, dx, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}


/*
 * GB context.
 */

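/**
 * vmw_gb_context_create - Allocate a context id and define the guest-backed
 * context on the device.
 *
 * @res: Pointer to the context resource.
 */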
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}
	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

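/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its MOB.
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to issue a readback command before unbinding.
 * @val_buf: Validation buffer holding the backup buffer object.
 *
 * Scrubs all bindings, optionally reads the context state back into the
 * backup MOB, binds the context to SVGA3D_INVALID_ID and fences the
 * backup buffer.
 */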
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_binding_state_scrub(uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * DX context.
 */

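/**
 * vmw_dx_context_create - Allocate a context id and define the DX context
 * on the device.
 *
 * @res: Pointer to the context resource.
 */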
static int vmw_dx_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * COtables must be unbound before their context, but unbinding requires
 * the backup buffer being reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so scrub all bindings first so
 * that doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
				   bool readback)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	int i;

	vmw_binding_state_scrub(uctx->cbs);
	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
		struct vmw_resource *res;

		/* Avoid racing with ongoing cotable destruction. */
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[vmw_cotable_scrub_order[i]];
		if (res)
			res = vmw_resource_reference_unless_doomed(res);
		spin_unlock(&uctx->cotable_lock);
		if (!res)
			continue;

		WARN_ON(vmw_cotable_scrub(res, readback));
		vmw_resource_unreference(&res);
	}
}

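/**
 * vmw_dx_context_unbind - Unbind a DX context from its MOB.
 *
 * @res: Pointer to the context resource.
 * @readback: Whether to issue readback commands before unbinding.
 * @val_buf: Validation buffer holding the backup buffer object.
 *
 * Scrubs all bindings and cotables, optionally reads back pending query
 * and context state, binds the context to SVGA3D_INVALID_ID and fences
 * the backup buffer.
 */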
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_dx_context_scrub_cotables(res, readback);

	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
	    readback) {
		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
		if (vmw_query_readback_all(uctx->dx_query_mob))
			DRM_ERROR("Failed to read back query states\n");
	}

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

static int vmw_dx_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/**
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

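/**
 * vmw_user_context_free - Resource destructor for user-space contexts.
 *
 * @res: Pointer to the context resource.
 *
 * Frees the binding state tracker, drops any DX query MOB association,
 * frees the base object and releases the accounted device memory.
 */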
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	if (ctx->cbs)
		vmw_binding_state_free(ctx->cbs);

	(void) vmw_context_bind_dx_query(res, NULL);

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
		container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

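/**
 * vmw_context_define - Create a new context and a user-space handle for it.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_context_arg in which the new handle is
 * returned.
 * @file_priv: Identifies the calling file.
 * @dx: Whether to create a DX context.
 *
 * Helper backing the context define ioctls.
 */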
static int vmw_context_define(struct drm_device *dev, void *data,
			      struct drm_file *file_priv, bool dx)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (!dev_priv->has_dx && dx) {
		DRM_ERROR("DX contexts not supported by device.\n");
		return -EINVAL;
	}

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of contexts anyway.
	 */

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	return vmw_context_define(dev, data, file_priv, false);
}

int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
	struct drm_vmw_context_arg *rep = &arg->rep;

	switch (arg->req) {
	case drm_vmw_context_legacy:
		return vmw_context_define(dev, rep, file_priv, false);
	case drm_vmw_context_dx:
		return vmw_context_define(dev, rep, file_priv, true);
	default:
		break;
	}
	return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);

	return vmw_binding_state_list(uctx->cbs);
}

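/**
 * vmw_context_res_man - Return the command buffer resource manager of a
 * context.
 *
 * @ctx: The context resource
 */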
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->man;
}

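/**
 * vmw_context_cotable - Return a counted reference to one of a context's
 * cotables.
 *
 * @ctx: The context resource
 * @cotable_type: The cotable type
 *
 * Returns ERR_PTR(-EINVAL) if @cotable_type is out of range.
 */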
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
					 SVGACOTableType cotable_type)
{
	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
		return ERR_PTR(-EINVAL);

	return vmw_resource_reference
		(container_of(ctx, struct vmw_user_context, res)->
		 cotables[cotable_type]);
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets query MOB for the context. If @mob is NULL, then this function will
 * remove the association between the MOB and the context. This function
 * assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter. 0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
			      struct vmw_dma_buffer *mob)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	if (mob == NULL) {
		if (uctx->dx_query_mob) {
			uctx->dx_query_mob->dx_query_ctx = NULL;
			vmw_dmabuf_unreference(&uctx->dx_query_mob);
			uctx->dx_query_mob = NULL;
		}

		return 0;
	}

	/* Can only have one MOB per context for queries */
	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
		return -EINVAL;

	mob->dx_query_ctx = ctx_res;

	if (!uctx->dx_query_mob)
		uctx->dx_query_mob = vmw_dmabuf_reference(mob);

	return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_dma_buffer *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	return uctx->dx_query_mob;
}