// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"

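/*
 * struct vmw_user_context - User-space visible context resource.
 *
 * @base: TTM base object backing the user-space handle.
 * @res: The embedded struct vmw_resource.
 * @cbs: Context binding state tracker.
 * @man: Command buffer managed resource manager for this context.
 * @cotables: COTable resources of a DX context, indexed by SVGACOTableType.
 * @cotable_lock: Protects @cotables against concurrent lookup and teardown.
 * @dx_query_mob: Query MOB currently bound to a DX context, if any.
 */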
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state *cbs;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_resource *cotables[SVGA_COTABLE_MAX];
	spinlock_t cotable_lock;
	struct vmw_bo *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;


static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_guest_memory = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.domain = VMW_BO_DOMAIN_SYS,
	.busy_domain = VMW_BO_DOMAIN_SYS,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_guest_memory = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "guest backed contexts",
	.domain = VMW_BO_DOMAIN_MOB,
	.busy_domain = VMW_BO_DOMAIN_MOB,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
	.res_type = vmw_res_dx_context,
	.needs_guest_memory = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "dx contexts",
	.domain = VMW_BO_DOMAIN_MOB,
	.busy_domain = VMW_BO_DOMAIN_MOB,
	.create = vmw_dx_context_create,
	.destroy = vmw_dx_context_destroy,
	.bind = vmw_dx_context_bind,
	.unbind = vmw_dx_context_unbind
};

/*
 * Context management:
 */

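/*
 * vmw_context_cotables_unref - Drop the context's references to its cotables.
 *
 * Each cotable slot is cleared under @uctx->cotable_lock so that a
 * concurrent scrub cannot pick up a resource that is about to go away;
 * the actual unreference is done outside the lock.
 */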
static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
				       struct vmw_user_context *uctx)
{
	struct vmw_resource *res;
	int i;
	u32 cotable_max = has_sm5_context(dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	for (i = 0; i < cotable_max; ++i) {
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[i];
		uctx->cotables[i] = NULL;
		spin_unlock(&uctx->cotable_lock);

		if (res)
			vmw_resource_unreference(&res);
	}
}

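/*
 * vmw_hw_context_destroy - Destroy the device context backing @res.
 *
 * Guest-backed and DX contexts are torn down through their res_func
 * destroy callback after the command buffer resource manager and all
 * bindings have been destroyed. Legacy contexts are destroyed by
 * emitting an SVGA_3D_CMD_CONTEXT_DESTROY command.
 */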
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;


	if (res->func->destroy == vmw_gb_context_destroy ||
	    res->func->destroy == vmw_dx_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		vmw_cmdbuf_res_man_destroy(uctx->man);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_state_kill(uctx->cbs);
		(void) res->func->destroy(res);
		mutex_unlock(&dev_priv->binding_mutex);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		vmw_context_cotables_unref(dev_priv, uctx);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return;

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_dec(dev_priv);
}

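/*
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource.
 *
 * Sets the guest memory size according to the context type, initializes
 * the resource and allocates the command buffer resource manager, the
 * binding state tracker and, for DX contexts, the cotable resources.
 * On failure the resource is freed using @res_free (or kfree).
 */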
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       bool dx,
			       struct vmw_resource *res,
			       void (*res_free)(struct vmw_resource *res))
{
	int ret, i;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	res->guest_memory_size = (dx ? sizeof(SVGADXContextMobFormat) :
				  sizeof(SVGAGBContextData));
	ret = vmw_resource_init(dev_priv, res, true,
				res_free,
				dx ? &vmw_dx_context_func :
				&vmw_gb_context_func);
	if (unlikely(ret != 0))
		goto out_err;

	if (dev_priv->has_mob) {
		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
		if (IS_ERR(uctx->man)) {
			ret = PTR_ERR(uctx->man);
			uctx->man = NULL;
			goto out_err;
		}
	}

	uctx->cbs = vmw_binding_state_alloc(dev_priv);
	if (IS_ERR(uctx->cbs)) {
		ret = PTR_ERR(uctx->cbs);
		goto out_err;
	}

	spin_lock_init(&uctx->cotable_lock);

	if (dx) {
		u32 cotable_max = has_sm5_context(dev_priv) ?
			SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
		for (i = 0; i < cotable_max; ++i) {
			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
							      &uctx->res, i);
			if (IS_ERR(uctx->cotables[i])) {
				ret = PTR_ERR(uctx->cotables[i]);
				goto out_cotables;
			}
		}
	}

	res->hw_destroy = vmw_hw_context_destroy;
	return 0;

out_cotables:
	vmw_context_cotables_unref(dev_priv, uctx);
out_err:
	if (res_free)
		res_free(res);
	else
		kfree(res);
	return ret;
}

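/*
 * vmw_context_init - Initialize a context resource.
 *
 * Dispatches to vmw_gb_context_init() on devices with MOB support.
 * Otherwise a legacy context is set up and defined in the device by
 * emitting an SVGA_3D_CMD_CONTEXT_DEFINE command.
 */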
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free)(struct vmw_resource *res),
			    bool dx)
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, dx, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_HB_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);
	res->hw_destroy = vmw_hw_context_destroy;
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}


/*
 * GB context.
 */

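/*
 * vmw_gb_context_create - Create a guest-backed context in the device.
 *
 * Allocates a device context id and emits SVGA_3D_CMD_DEFINE_GB_CONTEXT.
 * Returns 0 immediately if the resource already has an id.
 */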
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

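/*
 * vmw_gb_context_bind - Bind a MOB to a guest-backed context.
 *
 * Emits SVGA_3D_CMD_BIND_GB_CONTEXT with the MOB id of the validated
 * backup buffer; validContents is taken from res->guest_memory_dirty,
 * telling the device whether the buffer already holds saved context state.
 */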
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->resource->start;
	cmd->body.validContents = res->guest_memory_dirty;
	res->guest_memory_dirty = false;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

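/*
 * vmw_gb_context_unbind - Unbind a guest-backed context from its MOB.
 *
 * Scrubs all context bindings, optionally emits a readback command to
 * save the context state to the backup buffer, binds the context to
 * SVGA3D_INVALID_ID and finally fences the backup buffer.
 */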
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_binding_state_scrub(uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_cmd_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

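/*
 * vmw_gb_context_destroy - Destroy a guest-backed context in the device.
 *
 * Emits SVGA_3D_CMD_DESTROY_GB_CONTEXT, invalidates the cached query
 * context id if it matches, and releases the context id.
 */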
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * DX context.
 */

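/*
 * vmw_dx_context_create - Create a DX context in the device.
 *
 * Allocates a device context id and emits SVGA_3D_CMD_DX_DEFINE_CONTEXT.
 * Returns 0 immediately if the resource already has an id.
 */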
static int vmw_dx_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

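/*
 * vmw_dx_context_bind - Bind a MOB to a DX context.
 *
 * Emits SVGA_3D_CMD_DX_BIND_CONTEXT with the MOB id of the validated
 * backup buffer.
 */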
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->resource->start;
	cmd->body.validContents = res->guest_memory_dirty;
	res->guest_memory_dirty = false;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * COtables must be unbound before their context, but unbinding requires
 * the backup buffer to be reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so all bindings are scrubbed first
 * so that this doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
				   bool readback)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
	int i;

	vmw_binding_state_scrub(uctx->cbs);
	for (i = 0; i < cotable_max; ++i) {
		struct vmw_resource *res;

		/* Avoid racing with ongoing cotable destruction. */
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[vmw_cotable_scrub_order[i]];
		if (res)
			res = vmw_resource_reference_unless_doomed(res);
		spin_unlock(&uctx->cotable_lock);
		if (!res)
			continue;

		WARN_ON(vmw_cotable_scrub(res, readback));
		vmw_resource_unreference(&res);
	}
}

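/*
 * vmw_dx_context_unbind - Unbind a DX context from its MOB.
 *
 * Scrubs bindings and cotables, reads back pending query state if
 * requested, emits the readback and bind-to-invalid commands, and
 * fences the backup buffer.
 */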
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_dx_context_scrub_cotables(res, readback);

	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
	    readback) {
		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
		if (vmw_query_readback_all(uctx->dx_query_mob))
			DRM_ERROR("Failed to read back query states\n");
	}

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_cmd_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

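/*
 * vmw_dx_context_destroy - Destroy a DX context in the device.
 *
 * Emits SVGA_3D_CMD_DX_DESTROY_CONTEXT, invalidates the cached query
 * context id if it matches, and releases the context id.
 */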
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);

	if (ctx->cbs)
		vmw_binding_state_free(ctx->cbs);

	(void) vmw_context_bind_dx_query(res, NULL);

	ttm_base_object_kfree(ctx, base);
}

/*
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
		container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid);
}

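/*
 * vmw_context_define - Back end for the context define ioctls.
 *
 * Allocates a user context, initializes the underlying resource as a
 * legacy, guest-backed or DX context depending on device capabilities
 * and the @dx argument, and sets up the user-space visible base object.
 * The resulting handle is returned in the ioctl argument's cid field.
 */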
static int vmw_context_define(struct drm_device *dev, void *data,
			      struct drm_file *file_priv, bool dx)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (!has_sm4_context(dev_priv) && dx) {
		VMW_DEBUG_USER("DX contexts not supported by device.\n");
		return -EINVAL;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		ret = -ENOMEM;
		goto out_ret;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
	if (unlikely(ret != 0))
		goto out_ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.handle;
out_err:
	vmw_resource_unreference(&res);
out_ret:
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	return vmw_context_define(dev, data, file_priv, false);
}

int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
	struct drm_vmw_context_arg *rep = &arg->rep;

	switch (arg->req) {
	case drm_vmw_context_legacy:
		return vmw_context_define(dev, rep, file_priv, false);
	case drm_vmw_context_dx:
		return vmw_context_define(dev, rep, file_priv, true);
	default:
		break;
	}
	return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);

	return vmw_binding_state_list(uctx->cbs);
}

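/*
 * vmw_context_res_man - Return the context's command buffer managed
 * resource manager, used to look up resources defined through the
 * command stream within this context.
 */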
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->man;
}

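/*
 * vmw_context_cotable - Return the cotable resource of the given type for
 * a DX context, or an ERR_PTR if @cotable_type is out of range for the
 * device's SM level.
 */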
struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
					 SVGACOTableType cotable_type)
{
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	if (cotable_type >= cotable_max)
		return ERR_PTR(-EINVAL);

	return container_of(ctx, struct vmw_user_context, res)->
		cotables[cotable_type];
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets query MOB for the context. If @mob is NULL, then this function will
 * remove the association between the MOB and the context. This function
 * assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter. 0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
			      struct vmw_bo *mob)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	if (mob == NULL) {
		if (uctx->dx_query_mob) {
			uctx->dx_query_mob->dx_query_ctx = NULL;
			vmw_bo_unreference(&uctx->dx_query_mob);
			uctx->dx_query_mob = NULL;
		}

		return 0;
	}

	/* Can only have one MOB per context for queries */
	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
		return -EINVAL;

	mob->dx_query_ctx = ctx_res;

	if (!uctx->dx_query_mob)
		uctx->dx_query_mob = vmw_bo_reference(mob);

	return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_bo *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	return uctx->dx_query_mob;
}