v4.6 (drivers/gpu/drm/vmwgfx/vmwgfx_context.c)
 
  1/**************************************************************************
  2 *
  3 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
  4 * All Rights Reserved.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27
 28#include "vmwgfx_drv.h"
 29#include "vmwgfx_resource_priv.h"
 30#include "vmwgfx_binding.h"
 31#include "ttm/ttm_placement.h"
 32
 33struct vmw_user_context {
 34	struct ttm_base_object base;
 35	struct vmw_resource res;
 36	struct vmw_ctx_binding_state *cbs;
 37	struct vmw_cmdbuf_res_manager *man;
 38	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
 39	spinlock_t cotable_lock;
 40	struct vmw_dma_buffer *dx_query_mob;
 41};
 42
 43static void vmw_user_context_free(struct vmw_resource *res);
 44static struct vmw_resource *
 45vmw_user_context_base_to_res(struct ttm_base_object *base);
 46
 47static int vmw_gb_context_create(struct vmw_resource *res);
 48static int vmw_gb_context_bind(struct vmw_resource *res,
 49			       struct ttm_validate_buffer *val_buf);
 50static int vmw_gb_context_unbind(struct vmw_resource *res,
 51				 bool readback,
 52				 struct ttm_validate_buffer *val_buf);
 53static int vmw_gb_context_destroy(struct vmw_resource *res);
 54static int vmw_dx_context_create(struct vmw_resource *res);
 55static int vmw_dx_context_bind(struct vmw_resource *res,
 56			       struct ttm_validate_buffer *val_buf);
 57static int vmw_dx_context_unbind(struct vmw_resource *res,
 58				 bool readback,
 59				 struct ttm_validate_buffer *val_buf);
 60static int vmw_dx_context_destroy(struct vmw_resource *res);
 61
 62static uint64_t vmw_user_context_size;
 63
 64static const struct vmw_user_resource_conv user_context_conv = {
 65	.object_type = VMW_RES_CONTEXT,
 66	.base_obj_to_res = vmw_user_context_base_to_res,
 67	.res_free = vmw_user_context_free
 68};
 69
 70const struct vmw_user_resource_conv *user_context_converter =
 71	&user_context_conv;
 72
 73
 74static const struct vmw_res_func vmw_legacy_context_func = {
 75	.res_type = vmw_res_context,
 76	.needs_backup = false,
 77	.may_evict = false,
 78	.type_name = "legacy contexts",
 79	.backup_placement = NULL,
 80	.create = NULL,
 81	.destroy = NULL,
 82	.bind = NULL,
 83	.unbind = NULL
 84};
 85
 86static const struct vmw_res_func vmw_gb_context_func = {
 87	.res_type = vmw_res_context,
 88	.needs_backup = true,
 89	.may_evict = true,
 90	.type_name = "guest backed contexts",
 91	.backup_placement = &vmw_mob_placement,
 92	.create = vmw_gb_context_create,
 93	.destroy = vmw_gb_context_destroy,
 94	.bind = vmw_gb_context_bind,
 95	.unbind = vmw_gb_context_unbind
 96};
 97
 98static const struct vmw_res_func vmw_dx_context_func = {
 99	.res_type = vmw_res_dx_context,
100	.needs_backup = true,
101	.may_evict = true,
102	.type_name = "dx contexts",
103	.backup_placement = &vmw_mob_placement,
104	.create = vmw_dx_context_create,
105	.destroy = vmw_dx_context_destroy,
106	.bind = vmw_dx_context_bind,
107	.unbind = vmw_dx_context_unbind
108};
109
110/**
111 * Context management:
112 */
113
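/*
 * vmw_context_cotables_unref - Drop the context's references to its cotables.
 *
 * Each cotable pointer is cleared under cotable_lock before the reference is
 * released, so vmw_dx_context_scrub_cotables(), which takes the same lock,
 * never sees a pointer to a cotable that is about to go away.
 */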
114static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
115{
116	struct vmw_resource *res;
117	int i;
118
119	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
120		spin_lock(&uctx->cotable_lock);
121		res = uctx->cotables[i];
122		uctx->cotables[i] = NULL;
123		spin_unlock(&uctx->cotable_lock);
124
125		if (res)
126			vmw_resource_unreference(&res);
127	}
128}
129
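/*
 * vmw_hw_context_destroy - Destroy the device context backing @res.
 *
 * For guest-backed and DX contexts this tears down the command buffer
 * resource manager, kills all bindings, calls the per-type destroy hook and
 * drops the cotable references.  Legacy contexts are destroyed by emitting
 * SVGA_3D_CMD_CONTEXT_DESTROY directly to the device FIFO.
 */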
130static void vmw_hw_context_destroy(struct vmw_resource *res)
131{
132	struct vmw_user_context *uctx =
133		container_of(res, struct vmw_user_context, res);
134	struct vmw_private *dev_priv = res->dev_priv;
135	struct {
136		SVGA3dCmdHeader header;
137		SVGA3dCmdDestroyContext body;
138	} *cmd;
139
140
141	if (res->func->destroy == vmw_gb_context_destroy ||
142	    res->func->destroy == vmw_dx_context_destroy) {
143		mutex_lock(&dev_priv->cmdbuf_mutex);
144		vmw_cmdbuf_res_man_destroy(uctx->man);
145		mutex_lock(&dev_priv->binding_mutex);
146		vmw_binding_state_kill(uctx->cbs);
147		(void) res->func->destroy(res);
148		mutex_unlock(&dev_priv->binding_mutex);
149		if (dev_priv->pinned_bo != NULL &&
150		    !dev_priv->query_cid_valid)
151			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
152		mutex_unlock(&dev_priv->cmdbuf_mutex);
153		vmw_context_cotables_unref(uctx);
154		return;
155	}
156
157	vmw_execbuf_release_pinned_bo(dev_priv);
158	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
159	if (unlikely(cmd == NULL)) {
160		DRM_ERROR("Failed reserving FIFO space for surface "
161			  "destruction.\n");
162		return;
163	}
164
165	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
166	cmd->header.size = sizeof(cmd->body);
167	cmd->body.cid = res->id;
168
169	vmw_fifo_commit(dev_priv, sizeof(*cmd));
170	vmw_fifo_resource_dec(dev_priv);
171}
172
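/*
 * vmw_gb_context_init - Common initialization for guest-backed and DX
 * contexts: pick the backup (MOB) size, create the command buffer resource
 * manager when the device supports MOBs, allocate the binding-state tracker
 * and, for DX contexts, one cotable resource per cotable type.
 */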
173static int vmw_gb_context_init(struct vmw_private *dev_priv,
174			       bool dx,
175			       struct vmw_resource *res,
176			       void (*res_free)(struct vmw_resource *res))
177{
178	int ret, i;
179	struct vmw_user_context *uctx =
180		container_of(res, struct vmw_user_context, res);
181
182	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
183			    SVGA3D_CONTEXT_DATA_SIZE);
184	ret = vmw_resource_init(dev_priv, res, true,
185				res_free,
186				dx ? &vmw_dx_context_func :
187				&vmw_gb_context_func);
188	if (unlikely(ret != 0))
189		goto out_err;
190
191	if (dev_priv->has_mob) {
192		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
193		if (IS_ERR(uctx->man)) {
194			ret = PTR_ERR(uctx->man);
195			uctx->man = NULL;
196			goto out_err;
197		}
198	}
199
200	uctx->cbs = vmw_binding_state_alloc(dev_priv);
201	if (IS_ERR(uctx->cbs)) {
202		ret = PTR_ERR(uctx->cbs);
203		goto out_err;
204	}
205
206	spin_lock_init(&uctx->cotable_lock);
207
208	if (dx) {
209		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
210			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
211							      &uctx->res, i);
212			if (unlikely(uctx->cotables[i] == NULL)) {
213				ret = -ENOMEM;
214				goto out_cotables;
215			}
216		}
217	}
218
219
220
221	vmw_resource_activate(res, vmw_hw_context_destroy);
222	return 0;
223
224out_cotables:
225	vmw_context_cotables_unref(uctx);
226out_err:
227	if (res_free)
228		res_free(res);
229	else
230		kfree(res);
231	return ret;
232}
233
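/*
 * vmw_context_init - Back end shared by the context define paths.  Devices
 * with MOB support get a guest-backed or DX context via vmw_gb_context_init();
 * otherwise a legacy context is defined directly in the FIFO with
 * SVGA_3D_CMD_CONTEXT_DEFINE.
 */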
234static int vmw_context_init(struct vmw_private *dev_priv,
235			    struct vmw_resource *res,
236			    void (*res_free)(struct vmw_resource *res),
237			    bool dx)
238{
239	int ret;
240
241	struct {
242		SVGA3dCmdHeader header;
243		SVGA3dCmdDefineContext body;
244	} *cmd;
245
246	if (dev_priv->has_mob)
247		return vmw_gb_context_init(dev_priv, dx, res, res_free);
248
249	ret = vmw_resource_init(dev_priv, res, false,
250				res_free, &vmw_legacy_context_func);
251
252	if (unlikely(ret != 0)) {
253		DRM_ERROR("Failed to allocate a resource id.\n");
254		goto out_early;
255	}
256
257	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
258		DRM_ERROR("Out of hw context ids.\n");
259		vmw_resource_unreference(&res);
260		return -ENOMEM;
261	}
262
263	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
264	if (unlikely(cmd == NULL)) {
265		DRM_ERROR("Fifo reserve failed.\n");
266		vmw_resource_unreference(&res);
267		return -ENOMEM;
268	}
269
270	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
271	cmd->header.size = sizeof(cmd->body);
272	cmd->body.cid = res->id;
273
274	vmw_fifo_commit(dev_priv, sizeof(*cmd));
275	vmw_fifo_resource_inc(dev_priv);
276	vmw_resource_activate(res, vmw_hw_context_destroy);
277	return 0;
278
279out_early:
280	if (res_free == NULL)
281		kfree(res);
282	else
283		res_free(res);
284	return ret;
285}
286
287
288/*
289 * GB context.
290 */
291
292static int vmw_gb_context_create(struct vmw_resource *res)
293{
294	struct vmw_private *dev_priv = res->dev_priv;
295	int ret;
296	struct {
297		SVGA3dCmdHeader header;
298		SVGA3dCmdDefineGBContext body;
299	} *cmd;
300
301	if (likely(res->id != -1))
302		return 0;
303
304	ret = vmw_resource_alloc_id(res);
305	if (unlikely(ret != 0)) {
306		DRM_ERROR("Failed to allocate a context id.\n");
307		goto out_no_id;
308	}
309
310	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
311		ret = -EBUSY;
312		goto out_no_fifo;
313	}
314
315	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
316	if (unlikely(cmd == NULL)) {
317		DRM_ERROR("Failed reserving FIFO space for context "
318			  "creation.\n");
319		ret = -ENOMEM;
320		goto out_no_fifo;
321	}
322
323	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
324	cmd->header.size = sizeof(cmd->body);
325	cmd->body.cid = res->id;
326	vmw_fifo_commit(dev_priv, sizeof(*cmd));
327	vmw_fifo_resource_inc(dev_priv);
328
329	return 0;
330
331out_no_fifo:
332	vmw_resource_release_id(res);
333out_no_id:
334	return ret;
335}
336
337static int vmw_gb_context_bind(struct vmw_resource *res,
338			       struct ttm_validate_buffer *val_buf)
339{
340	struct vmw_private *dev_priv = res->dev_priv;
341	struct {
342		SVGA3dCmdHeader header;
343		SVGA3dCmdBindGBContext body;
344	} *cmd;
345	struct ttm_buffer_object *bo = val_buf->bo;
346
347	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
348
349	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
350	if (unlikely(cmd == NULL)) {
351		DRM_ERROR("Failed reserving FIFO space for context "
352			  "binding.\n");
353		return -ENOMEM;
354	}
355	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
356	cmd->header.size = sizeof(cmd->body);
357	cmd->body.cid = res->id;
358	cmd->body.mobid = bo->mem.start;
359	cmd->body.validContents = res->backup_dirty;
360	res->backup_dirty = false;
361	vmw_fifo_commit(dev_priv, sizeof(*cmd));
362
363	return 0;
364}
365
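/*
 * vmw_gb_context_unbind - Detach the context from its backing MOB.
 *
 * Bindings are scrubbed first; an optional SVGA_3D_CMD_READBACK_GB_CONTEXT
 * saves the context state into the backup buffer, the context is then bound
 * to SVGA3D_INVALID_ID, and finally the backup buffer is fenced.
 */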
366static int vmw_gb_context_unbind(struct vmw_resource *res,
367				 bool readback,
368				 struct ttm_validate_buffer *val_buf)
369{
370	struct vmw_private *dev_priv = res->dev_priv;
371	struct ttm_buffer_object *bo = val_buf->bo;
372	struct vmw_fence_obj *fence;
373	struct vmw_user_context *uctx =
374		container_of(res, struct vmw_user_context, res);
375
376	struct {
377		SVGA3dCmdHeader header;
378		SVGA3dCmdReadbackGBContext body;
379	} *cmd1;
380	struct {
381		SVGA3dCmdHeader header;
382		SVGA3dCmdBindGBContext body;
383	} *cmd2;
384	uint32_t submit_size;
385	uint8_t *cmd;
386
387
388	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
389
390	mutex_lock(&dev_priv->binding_mutex);
391	vmw_binding_state_scrub(uctx->cbs);
392
393	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
394
395	cmd = vmw_fifo_reserve(dev_priv, submit_size);
396	if (unlikely(cmd == NULL)) {
397		DRM_ERROR("Failed reserving FIFO space for context "
398			  "unbinding.\n");
399		mutex_unlock(&dev_priv->binding_mutex);
400		return -ENOMEM;
401	}
402
403	cmd2 = (void *) cmd;
404	if (readback) {
405		cmd1 = (void *) cmd;
406		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
407		cmd1->header.size = sizeof(cmd1->body);
408		cmd1->body.cid = res->id;
409		cmd2 = (void *) (&cmd1[1]);
410	}
411	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
412	cmd2->header.size = sizeof(cmd2->body);
413	cmd2->body.cid = res->id;
414	cmd2->body.mobid = SVGA3D_INVALID_ID;
415
416	vmw_fifo_commit(dev_priv, submit_size);
417	mutex_unlock(&dev_priv->binding_mutex);
418
419	/*
420	 * Create a fence object and fence the backup buffer.
421	 */
422
423	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
424					  &fence, NULL);
425
426	vmw_fence_single_bo(bo, fence);
427
428	if (likely(fence != NULL))
429		vmw_fence_obj_unreference(&fence);
430
431	return 0;
432}
433
434static int vmw_gb_context_destroy(struct vmw_resource *res)
435{
436	struct vmw_private *dev_priv = res->dev_priv;
437	struct {
438		SVGA3dCmdHeader header;
439		SVGA3dCmdDestroyGBContext body;
440	} *cmd;
441
442	if (likely(res->id == -1))
443		return 0;
444
445	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
446	if (unlikely(cmd == NULL)) {
447		DRM_ERROR("Failed reserving FIFO space for context "
448			  "destruction.\n");
449		return -ENOMEM;
450	}
451
452	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
453	cmd->header.size = sizeof(cmd->body);
454	cmd->body.cid = res->id;
455	vmw_fifo_commit(dev_priv, sizeof(*cmd));
456	if (dev_priv->query_cid == res->id)
457		dev_priv->query_cid_valid = false;
458	vmw_resource_release_id(res);
459	vmw_fifo_resource_dec(dev_priv);
460
461	return 0;
462}
463
464/*
465 * DX context.
466 */
467
468static int vmw_dx_context_create(struct vmw_resource *res)
469{
470	struct vmw_private *dev_priv = res->dev_priv;
471	int ret;
472	struct {
473		SVGA3dCmdHeader header;
474		SVGA3dCmdDXDefineContext body;
475	} *cmd;
476
477	if (likely(res->id != -1))
478		return 0;
479
480	ret = vmw_resource_alloc_id(res);
481	if (unlikely(ret != 0)) {
482		DRM_ERROR("Failed to allocate a context id.\n");
483		goto out_no_id;
484	}
485
486	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
487		ret = -EBUSY;
488		goto out_no_fifo;
489	}
490
491	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
492	if (unlikely(cmd == NULL)) {
493		DRM_ERROR("Failed reserving FIFO space for context "
494			  "creation.\n");
495		ret = -ENOMEM;
496		goto out_no_fifo;
497	}
498
499	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
500	cmd->header.size = sizeof(cmd->body);
501	cmd->body.cid = res->id;
502	vmw_fifo_commit(dev_priv, sizeof(*cmd));
503	vmw_fifo_resource_inc(dev_priv);
504
505	return 0;
506
507out_no_fifo:
508	vmw_resource_release_id(res);
509out_no_id:
510	return ret;
511}
512
513static int vmw_dx_context_bind(struct vmw_resource *res,
514			       struct ttm_validate_buffer *val_buf)
515{
516	struct vmw_private *dev_priv = res->dev_priv;
517	struct {
518		SVGA3dCmdHeader header;
519		SVGA3dCmdDXBindContext body;
520	} *cmd;
521	struct ttm_buffer_object *bo = val_buf->bo;
522
523	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
524
525	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
526	if (unlikely(cmd == NULL)) {
527		DRM_ERROR("Failed reserving FIFO space for context "
528			  "binding.\n");
529		return -ENOMEM;
530	}
531
532	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
533	cmd->header.size = sizeof(cmd->body);
534	cmd->body.cid = res->id;
535	cmd->body.mobid = bo->mem.start;
536	cmd->body.validContents = res->backup_dirty;
537	res->backup_dirty = false;
538	vmw_fifo_commit(dev_priv, sizeof(*cmd));
539
540
541	return 0;
542}
543
544/**
545 * vmw_dx_context_scrub_cotables - Scrub all bindings and
546 * cotables from a context
547 *
548 * @ctx: Pointer to the context resource
 549 * @readback: Whether to save the cotable contents on scrubbing.
550 *
551 * COtables must be unbound before their context, but unbinding requires
552 * the backup buffer being reserved, whereas scrubbing does not.
553 * This function scrubs all cotables of a context, potentially reading back
554 * the contents into their backup buffers. However, scrubbing cotables
555 * also makes the device context invalid, so scrub all bindings first so
556 * that doesn't have to be done later with an invalid context.
557 */
558void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
559				   bool readback)
560{
561	struct vmw_user_context *uctx =
562		container_of(ctx, struct vmw_user_context, res);
563	int i;
564
565	vmw_binding_state_scrub(uctx->cbs);
566	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
567		struct vmw_resource *res;
568
569		/* Avoid racing with ongoing cotable destruction. */
570		spin_lock(&uctx->cotable_lock);
571		res = uctx->cotables[vmw_cotable_scrub_order[i]];
572		if (res)
573			res = vmw_resource_reference_unless_doomed(res);
574		spin_unlock(&uctx->cotable_lock);
575		if (!res)
576			continue;
577
578		WARN_ON(vmw_cotable_scrub(res, readback));
579		vmw_resource_unreference(&res);
580	}
581}
582
583static int vmw_dx_context_unbind(struct vmw_resource *res,
584				 bool readback,
585				 struct ttm_validate_buffer *val_buf)
586{
587	struct vmw_private *dev_priv = res->dev_priv;
588	struct ttm_buffer_object *bo = val_buf->bo;
589	struct vmw_fence_obj *fence;
590	struct vmw_user_context *uctx =
591		container_of(res, struct vmw_user_context, res);
592
593	struct {
594		SVGA3dCmdHeader header;
595		SVGA3dCmdDXReadbackContext body;
596	} *cmd1;
597	struct {
598		SVGA3dCmdHeader header;
599		SVGA3dCmdDXBindContext body;
600	} *cmd2;
601	uint32_t submit_size;
602	uint8_t *cmd;
603
604
605	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
606
607	mutex_lock(&dev_priv->binding_mutex);
608	vmw_dx_context_scrub_cotables(res, readback);
609
610	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
611	    readback) {
612		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
613		if (vmw_query_readback_all(uctx->dx_query_mob))
614			DRM_ERROR("Failed to read back query states\n");
615	}
616
617	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
618
619	cmd = vmw_fifo_reserve(dev_priv, submit_size);
620	if (unlikely(cmd == NULL)) {
621		DRM_ERROR("Failed reserving FIFO space for context "
622			  "unbinding.\n");
623		mutex_unlock(&dev_priv->binding_mutex);
624		return -ENOMEM;
625	}
626
627	cmd2 = (void *) cmd;
628	if (readback) {
629		cmd1 = (void *) cmd;
630		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
631		cmd1->header.size = sizeof(cmd1->body);
632		cmd1->body.cid = res->id;
633		cmd2 = (void *) (&cmd1[1]);
634	}
635	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
636	cmd2->header.size = sizeof(cmd2->body);
637	cmd2->body.cid = res->id;
638	cmd2->body.mobid = SVGA3D_INVALID_ID;
639
640	vmw_fifo_commit(dev_priv, submit_size);
641	mutex_unlock(&dev_priv->binding_mutex);
642
643	/*
644	 * Create a fence object and fence the backup buffer.
645	 */
646
647	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
648					  &fence, NULL);
649
650	vmw_fence_single_bo(bo, fence);
651
652	if (likely(fence != NULL))
653		vmw_fence_obj_unreference(&fence);
654
655	return 0;
656}
657
658static int vmw_dx_context_destroy(struct vmw_resource *res)
659{
660	struct vmw_private *dev_priv = res->dev_priv;
661	struct {
662		SVGA3dCmdHeader header;
663		SVGA3dCmdDXDestroyContext body;
664	} *cmd;
665
666	if (likely(res->id == -1))
667		return 0;
668
669	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
670	if (unlikely(cmd == NULL)) {
671		DRM_ERROR("Failed reserving FIFO space for context "
672			  "destruction.\n");
673		return -ENOMEM;
674	}
675
676	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
677	cmd->header.size = sizeof(cmd->body);
678	cmd->body.cid = res->id;
679	vmw_fifo_commit(dev_priv, sizeof(*cmd));
680	if (dev_priv->query_cid == res->id)
681		dev_priv->query_cid_valid = false;
682	vmw_resource_release_id(res);
683	vmw_fifo_resource_dec(dev_priv);
684
685	return 0;
686}
687
688/**
689 * User-space context management:
690 */
691
692static struct vmw_resource *
693vmw_user_context_base_to_res(struct ttm_base_object *base)
694{
695	return &(container_of(base, struct vmw_user_context, base)->res);
696}
697
698static void vmw_user_context_free(struct vmw_resource *res)
699{
700	struct vmw_user_context *ctx =
701	    container_of(res, struct vmw_user_context, res);
702	struct vmw_private *dev_priv = res->dev_priv;
703
704	if (ctx->cbs)
705		vmw_binding_state_free(ctx->cbs);
706
707	(void) vmw_context_bind_dx_query(res, NULL);
708
709	ttm_base_object_kfree(ctx, base);
710	ttm_mem_global_free(vmw_mem_glob(dev_priv),
711			    vmw_user_context_size);
712}
713
714/**
715 * This function is called when user space has no more references on the
716 * base object. It releases the base-object's reference on the resource object.
717 */
718
719static void vmw_user_context_base_release(struct ttm_base_object **p_base)
720{
721	struct ttm_base_object *base = *p_base;
722	struct vmw_user_context *ctx =
723	    container_of(base, struct vmw_user_context, base);
724	struct vmw_resource *res = &ctx->res;
725
726	*p_base = NULL;
727	vmw_resource_unreference(&res);
728}
729
730int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
731			      struct drm_file *file_priv)
732{
733	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
734	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
735
736	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
737}
738
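/*
 * vmw_context_define - Back end for the context define ioctls.
 *
 * Accounts the approximate per-context memory use against the global TTM
 * memory limit, allocates and initializes the user context, and registers it
 * as a base object; the resulting handle is returned in @arg->cid.
 */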
739static int vmw_context_define(struct drm_device *dev, void *data,
740			      struct drm_file *file_priv, bool dx)
741{
742	struct vmw_private *dev_priv = vmw_priv(dev);
743	struct vmw_user_context *ctx;
744	struct vmw_resource *res;
745	struct vmw_resource *tmp;
746	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
747	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
748	int ret;
749
750	if (!dev_priv->has_dx && dx) {
751		DRM_ERROR("DX contexts not supported by device.\n");
752		return -EINVAL;
753	}
754
755	/*
756	 * Approximate idr memory usage with 128 bytes. It will be limited
 757	 * by the maximum number of contexts anyway.
758	 */
759
760	if (unlikely(vmw_user_context_size == 0))
761		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
762		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
763
764	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
765	if (unlikely(ret != 0))
766		return ret;
767
768	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
769				   vmw_user_context_size,
770				   false, true);
771	if (unlikely(ret != 0)) {
772		if (ret != -ERESTARTSYS)
773			DRM_ERROR("Out of graphics memory for context"
774				  " creation.\n");
775		goto out_unlock;
776	}
777
778	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
779	if (unlikely(ctx == NULL)) {
780		ttm_mem_global_free(vmw_mem_glob(dev_priv),
781				    vmw_user_context_size);
782		ret = -ENOMEM;
783		goto out_unlock;
784	}
785
786	res = &ctx->res;
787	ctx->base.shareable = false;
788	ctx->base.tfile = NULL;
789
790	/*
791	 * From here on, the destructor takes over resource freeing.
792	 */
793
794	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
795	if (unlikely(ret != 0))
796		goto out_unlock;
797
798	tmp = vmw_resource_reference(&ctx->res);
799	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
800				   &vmw_user_context_base_release, NULL);
801
802	if (unlikely(ret != 0)) {
803		vmw_resource_unreference(&tmp);
804		goto out_err;
805	}
806
807	arg->cid = ctx->base.hash.key;
808out_err:
809	vmw_resource_unreference(&res);
810out_unlock:
811	ttm_read_unlock(&dev_priv->reservation_sem);
812	return ret;
813}
814
815int vmw_context_define_ioctl(struct drm_device *dev, void *data,
816			     struct drm_file *file_priv)
817{
818	return vmw_context_define(dev, data, file_priv, false);
819}
820
821int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
822				      struct drm_file *file_priv)
823{
824	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
825	struct drm_vmw_context_arg *rep = &arg->rep;
826
827	switch (arg->req) {
828	case drm_vmw_context_legacy:
829		return vmw_context_define(dev, rep, file_priv, false);
830	case drm_vmw_context_dx:
831		return vmw_context_define(dev, rep, file_priv, true);
832	default:
833		break;
834	}
835	return -EINVAL;
836}
837
838/**
839 * vmw_context_binding_list - Return a list of context bindings
840 *
841 * @ctx: The context resource
842 *
843 * Returns the current list of bindings of the given context. Note that
844 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
845 */
846struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
847{
848	struct vmw_user_context *uctx =
849		container_of(ctx, struct vmw_user_context, res);
850
851	return vmw_binding_state_list(uctx->cbs);
852}
853
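/*
 * vmw_context_res_man - Return the command buffer resource manager associated
 * with @ctx.  The manager is only created for devices with MOB support.
 */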
854struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
855{
856	return container_of(ctx, struct vmw_user_context, res)->man;
857}
858
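/*
 * vmw_context_cotable - Look up one of the context's cotables.
 *
 * Returns a new reference to the cotable resource of @cotable_type, or
 * ERR_PTR(-EINVAL) for an out-of-range type.  Callers must drop the
 * reference, along the lines of this illustrative sketch:
 *
 *	res = vmw_context_cotable(ctx, SVGA_COTABLE_RTVIEW);
 *	if (!IS_ERR(res)) {
 *		... use the cotable ...
 *		vmw_resource_unreference(&res);
 *	}
 */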
859struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
860					 SVGACOTableType cotable_type)
861{
862	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
863		return ERR_PTR(-EINVAL);
864
865	return vmw_resource_reference
866		(container_of(ctx, struct vmw_user_context, res)->
867		 cotables[cotable_type]);
868}
869
870/**
871 * vmw_context_binding_state -
872 * Return a pointer to a context binding state structure
873 *
874 * @ctx: The context resource
875 *
876 * Returns the current state of bindings of the given context. Note that
877 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
878 */
879struct vmw_ctx_binding_state *
880vmw_context_binding_state(struct vmw_resource *ctx)
881{
882	return container_of(ctx, struct vmw_user_context, res)->cbs;
883}
884
885/**
886 * vmw_context_bind_dx_query -
887 * Sets query MOB for the context.  If @mob is NULL, then this function will
888 * remove the association between the MOB and the context.  This function
889 * assumes the binding_mutex is held.
890 *
891 * @ctx_res: The context resource
892 * @mob: a reference to the query MOB
893 *
894 * Returns -EINVAL if a MOB has already been set and does not match the one
895 * specified in the parameter.  0 otherwise.
896 */
897int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
898			      struct vmw_dma_buffer *mob)
899{
900	struct vmw_user_context *uctx =
901		container_of(ctx_res, struct vmw_user_context, res);
902
903	if (mob == NULL) {
904		if (uctx->dx_query_mob) {
905			uctx->dx_query_mob->dx_query_ctx = NULL;
906			vmw_dmabuf_unreference(&uctx->dx_query_mob);
907			uctx->dx_query_mob = NULL;
908		}
909
910		return 0;
911	}
912
913	/* Can only have one MOB per context for queries */
914	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
915		return -EINVAL;
916
917	mob->dx_query_ctx  = ctx_res;
918
919	if (!uctx->dx_query_mob)
920		uctx->dx_query_mob = vmw_dmabuf_reference(mob);
921
922	return 0;
923}
924
925/**
926 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
927 *
928 * @ctx_res: The context resource
929 */
930struct vmw_dma_buffer *
931vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
932{
933	struct vmw_user_context *uctx =
934		container_of(ctx_res, struct vmw_user_context, res);
935
936	return uctx->dx_query_mob;
937}
v6.13.7 (drivers/gpu/drm/vmwgfx/vmwgfx_context.c)
  1// SPDX-License-Identifier: GPL-2.0 OR MIT
  2/**************************************************************************
  3 *
  4 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27
 28#include <drm/ttm/ttm_placement.h>
 29
 30#include "vmwgfx_binding.h"
 31#include "vmwgfx_bo.h"
 32#include "vmwgfx_drv.h"
 33#include "vmwgfx_resource_priv.h"
 34
 35struct vmw_user_context {
 36	struct ttm_base_object base;
 37	struct vmw_resource res;
 38	struct vmw_ctx_binding_state *cbs;
 39	struct vmw_cmdbuf_res_manager *man;
 40	struct vmw_resource *cotables[SVGA_COTABLE_MAX];
 41	spinlock_t cotable_lock;
 42	struct vmw_bo *dx_query_mob;
 43};
 44
 45static void vmw_user_context_free(struct vmw_resource *res);
 46static struct vmw_resource *
 47vmw_user_context_base_to_res(struct ttm_base_object *base);
 48
 49static int vmw_gb_context_create(struct vmw_resource *res);
 50static int vmw_gb_context_bind(struct vmw_resource *res,
 51			       struct ttm_validate_buffer *val_buf);
 52static int vmw_gb_context_unbind(struct vmw_resource *res,
 53				 bool readback,
 54				 struct ttm_validate_buffer *val_buf);
 55static int vmw_gb_context_destroy(struct vmw_resource *res);
 56static int vmw_dx_context_create(struct vmw_resource *res);
 57static int vmw_dx_context_bind(struct vmw_resource *res,
 58			       struct ttm_validate_buffer *val_buf);
 59static int vmw_dx_context_unbind(struct vmw_resource *res,
 60				 bool readback,
 61				 struct ttm_validate_buffer *val_buf);
 62static int vmw_dx_context_destroy(struct vmw_resource *res);
 63
 64static const struct vmw_user_resource_conv user_context_conv = {
 65	.object_type = VMW_RES_CONTEXT,
 66	.base_obj_to_res = vmw_user_context_base_to_res,
 67	.res_free = vmw_user_context_free
 68};
 69
 70const struct vmw_user_resource_conv *user_context_converter =
 71	&user_context_conv;
 72
 73
 74static const struct vmw_res_func vmw_legacy_context_func = {
 75	.res_type = vmw_res_context,
 76	.needs_guest_memory = false,
 77	.may_evict = false,
 78	.type_name = "legacy contexts",
 79	.domain = VMW_BO_DOMAIN_SYS,
 80	.busy_domain = VMW_BO_DOMAIN_SYS,
 81	.create = NULL,
 82	.destroy = NULL,
 83	.bind = NULL,
 84	.unbind = NULL
 85};
 86
 87static const struct vmw_res_func vmw_gb_context_func = {
 88	.res_type = vmw_res_context,
 89	.needs_guest_memory = true,
 90	.may_evict = true,
 91	.prio = 3,
 92	.dirty_prio = 3,
 93	.type_name = "guest backed contexts",
 94	.domain = VMW_BO_DOMAIN_MOB,
 95	.busy_domain = VMW_BO_DOMAIN_MOB,
 96	.create = vmw_gb_context_create,
 97	.destroy = vmw_gb_context_destroy,
 98	.bind = vmw_gb_context_bind,
 99	.unbind = vmw_gb_context_unbind
100};
101
102static const struct vmw_res_func vmw_dx_context_func = {
103	.res_type = vmw_res_dx_context,
104	.needs_guest_memory = true,
105	.may_evict = true,
106	.prio = 3,
107	.dirty_prio = 3,
108	.type_name = "dx contexts",
109	.domain = VMW_BO_DOMAIN_MOB,
110	.busy_domain = VMW_BO_DOMAIN_MOB,
111	.create = vmw_dx_context_create,
112	.destroy = vmw_dx_context_destroy,
113	.bind = vmw_dx_context_bind,
114	.unbind = vmw_dx_context_unbind
115};
116
117/*
118 * Context management:
119 */
120
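/*
 * vmw_context_cotables_unref - Drop the context's cotable references.
 *
 * The number of cotables depends on the device: SM5-capable devices carry the
 * full SVGA_COTABLE_MAX set, older DX devices only SVGA_COTABLE_DX10_MAX.
 */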
121static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
122				       struct vmw_user_context *uctx)
123{
124	struct vmw_resource *res;
125	int i;
126	u32 cotable_max = has_sm5_context(dev_priv) ?
127		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
128
129	for (i = 0; i < cotable_max; ++i) {
130		spin_lock(&uctx->cotable_lock);
131		res = uctx->cotables[i];
132		uctx->cotables[i] = NULL;
133		spin_unlock(&uctx->cotable_lock);
134
135		if (res)
136			vmw_resource_unreference(&res);
137	}
138}
139
140static void vmw_hw_context_destroy(struct vmw_resource *res)
141{
142	struct vmw_user_context *uctx =
143		container_of(res, struct vmw_user_context, res);
144	struct vmw_private *dev_priv = res->dev_priv;
145	struct {
146		SVGA3dCmdHeader header;
147		SVGA3dCmdDestroyContext body;
148	} *cmd;
149
150
151	if (res->func->destroy == vmw_gb_context_destroy ||
152	    res->func->destroy == vmw_dx_context_destroy) {
153		mutex_lock(&dev_priv->cmdbuf_mutex);
154		vmw_cmdbuf_res_man_destroy(uctx->man);
155		mutex_lock(&dev_priv->binding_mutex);
156		vmw_binding_state_kill(uctx->cbs);
157		(void) res->func->destroy(res);
158		mutex_unlock(&dev_priv->binding_mutex);
159		if (dev_priv->pinned_bo != NULL &&
160		    !dev_priv->query_cid_valid)
161			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
162		mutex_unlock(&dev_priv->cmdbuf_mutex);
163		vmw_context_cotables_unref(dev_priv, uctx);
164		return;
165	}
166
167	vmw_execbuf_release_pinned_bo(dev_priv);
168	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
169	if (unlikely(cmd == NULL))
170		return;
171
172	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
173	cmd->header.size = sizeof(cmd->body);
174	cmd->body.cid = res->id;
175
176	vmw_cmd_commit(dev_priv, sizeof(*cmd));
177	vmw_fifo_resource_dec(dev_priv);
178}
179
180static int vmw_gb_context_init(struct vmw_private *dev_priv,
181			       bool dx,
182			       struct vmw_resource *res,
183			       void (*res_free)(struct vmw_resource *res))
184{
185	int ret, i;
186	struct vmw_user_context *uctx =
187		container_of(res, struct vmw_user_context, res);
188
189	res->guest_memory_size = (dx ? sizeof(SVGADXContextMobFormat) :
190				 sizeof(SVGAGBContextData));
191	ret = vmw_resource_init(dev_priv, res, true,
192				res_free,
193				dx ? &vmw_dx_context_func :
194				&vmw_gb_context_func);
195	if (unlikely(ret != 0))
196		goto out_err;
197
198	if (dev_priv->has_mob) {
199		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
200		if (IS_ERR(uctx->man)) {
201			ret = PTR_ERR(uctx->man);
202			uctx->man = NULL;
203			goto out_err;
204		}
205	}
206
207	uctx->cbs = vmw_binding_state_alloc(dev_priv);
208	if (IS_ERR(uctx->cbs)) {
209		ret = PTR_ERR(uctx->cbs);
210		goto out_err;
211	}
212
213	spin_lock_init(&uctx->cotable_lock);
214
215	if (dx) {
216		u32 cotable_max = has_sm5_context(dev_priv) ?
217			SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
218		for (i = 0; i < cotable_max; ++i) {
219			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
220							      &uctx->res, i);
221			if (IS_ERR(uctx->cotables[i])) {
222				ret = PTR_ERR(uctx->cotables[i]);
223				goto out_cotables;
224			}
225		}
226	}
227
228	res->hw_destroy = vmw_hw_context_destroy;
229	return 0;
230
231out_cotables:
232	vmw_context_cotables_unref(dev_priv, uctx);
233out_err:
234	if (res_free)
235		res_free(res);
236	else
237		kfree(res);
238	return ret;
239}
240
241static int vmw_context_init(struct vmw_private *dev_priv,
242			    struct vmw_resource *res,
243			    void (*res_free)(struct vmw_resource *res),
244			    bool dx)
245{
246	int ret;
247
248	struct {
249		SVGA3dCmdHeader header;
250		SVGA3dCmdDefineContext body;
251	} *cmd;
252
253	if (dev_priv->has_mob)
254		return vmw_gb_context_init(dev_priv, dx, res, res_free);
255
256	ret = vmw_resource_init(dev_priv, res, false,
257				res_free, &vmw_legacy_context_func);
258
259	if (unlikely(ret != 0)) {
260		DRM_ERROR("Failed to allocate a resource id.\n");
261		goto out_early;
262	}
263
264	if (unlikely(res->id >= SVGA3D_HB_MAX_CONTEXT_IDS)) {
265		DRM_ERROR("Out of hw context ids.\n");
266		vmw_resource_unreference(&res);
267		return -ENOMEM;
268	}
269
270	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
271	if (unlikely(cmd == NULL)) {
272		vmw_resource_unreference(&res);
273		return -ENOMEM;
274	}
275
276	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
277	cmd->header.size = sizeof(cmd->body);
278	cmd->body.cid = res->id;
279
280	vmw_cmd_commit(dev_priv, sizeof(*cmd));
281	vmw_fifo_resource_inc(dev_priv);
282	res->hw_destroy = vmw_hw_context_destroy;
283	return 0;
284
285out_early:
286	if (res_free == NULL)
287		kfree(res);
288	else
289		res_free(res);
290	return ret;
291}
292
293
294/*
295 * GB context.
296 */
297
298static int vmw_gb_context_create(struct vmw_resource *res)
299{
300	struct vmw_private *dev_priv = res->dev_priv;
301	int ret;
302	struct {
303		SVGA3dCmdHeader header;
304		SVGA3dCmdDefineGBContext body;
305	} *cmd;
306
307	if (likely(res->id != -1))
308		return 0;
309
310	ret = vmw_resource_alloc_id(res);
311	if (unlikely(ret != 0)) {
312		DRM_ERROR("Failed to allocate a context id.\n");
313		goto out_no_id;
314	}
315
316	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
317		ret = -EBUSY;
318		goto out_no_fifo;
319	}
320
321	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
322	if (unlikely(cmd == NULL)) {
323		ret = -ENOMEM;
324		goto out_no_fifo;
325	}
326
327	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
328	cmd->header.size = sizeof(cmd->body);
329	cmd->body.cid = res->id;
330	vmw_cmd_commit(dev_priv, sizeof(*cmd));
331	vmw_fifo_resource_inc(dev_priv);
332
333	return 0;
334
335out_no_fifo:
336	vmw_resource_release_id(res);
337out_no_id:
338	return ret;
339}
340
341static int vmw_gb_context_bind(struct vmw_resource *res,
342			       struct ttm_validate_buffer *val_buf)
343{
344	struct vmw_private *dev_priv = res->dev_priv;
345	struct {
346		SVGA3dCmdHeader header;
347		SVGA3dCmdBindGBContext body;
348	} *cmd;
349	struct ttm_buffer_object *bo = val_buf->bo;
350
351	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
352
353	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
354	if (unlikely(cmd == NULL))
355		return -ENOMEM;
356
357	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
358	cmd->header.size = sizeof(cmd->body);
359	cmd->body.cid = res->id;
360	cmd->body.mobid = bo->resource->start;
361	cmd->body.validContents = res->guest_memory_dirty;
362	res->guest_memory_dirty = false;
363	vmw_cmd_commit(dev_priv, sizeof(*cmd));
364
365	return 0;
366}
367
368static int vmw_gb_context_unbind(struct vmw_resource *res,
369				 bool readback,
370				 struct ttm_validate_buffer *val_buf)
371{
372	struct vmw_private *dev_priv = res->dev_priv;
373	struct ttm_buffer_object *bo = val_buf->bo;
374	struct vmw_fence_obj *fence;
375	struct vmw_user_context *uctx =
376		container_of(res, struct vmw_user_context, res);
377
378	struct {
379		SVGA3dCmdHeader header;
380		SVGA3dCmdReadbackGBContext body;
381	} *cmd1;
382	struct {
383		SVGA3dCmdHeader header;
384		SVGA3dCmdBindGBContext body;
385	} *cmd2;
386	uint32_t submit_size;
387	uint8_t *cmd;
388
389
390	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
391
392	mutex_lock(&dev_priv->binding_mutex);
393	vmw_binding_state_scrub(uctx->cbs);
394
395	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
396
397	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
398	if (unlikely(cmd == NULL)) {
399		mutex_unlock(&dev_priv->binding_mutex);
400		return -ENOMEM;
401	}
402
403	cmd2 = (void *) cmd;
404	if (readback) {
405		cmd1 = (void *) cmd;
406		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
407		cmd1->header.size = sizeof(cmd1->body);
408		cmd1->body.cid = res->id;
409		cmd2 = (void *) (&cmd1[1]);
410	}
411	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
412	cmd2->header.size = sizeof(cmd2->body);
413	cmd2->body.cid = res->id;
414	cmd2->body.mobid = SVGA3D_INVALID_ID;
415
416	vmw_cmd_commit(dev_priv, submit_size);
417	mutex_unlock(&dev_priv->binding_mutex);
418
419	/*
420	 * Create a fence object and fence the backup buffer.
421	 */
422
423	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
424					  &fence, NULL);
425
426	vmw_bo_fence_single(bo, fence);
427
428	if (likely(fence != NULL))
429		vmw_fence_obj_unreference(&fence);
430
431	return 0;
432}
433
434static int vmw_gb_context_destroy(struct vmw_resource *res)
435{
436	struct vmw_private *dev_priv = res->dev_priv;
437	struct {
438		SVGA3dCmdHeader header;
439		SVGA3dCmdDestroyGBContext body;
440	} *cmd;
441
442	if (likely(res->id == -1))
443		return 0;
444
445	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
446	if (unlikely(cmd == NULL))
447		return -ENOMEM;
448
449	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
450	cmd->header.size = sizeof(cmd->body);
451	cmd->body.cid = res->id;
452	vmw_cmd_commit(dev_priv, sizeof(*cmd));
453	if (dev_priv->query_cid == res->id)
454		dev_priv->query_cid_valid = false;
455	vmw_resource_release_id(res);
456	vmw_fifo_resource_dec(dev_priv);
457
458	return 0;
459}
460
461/*
462 * DX context.
463 */
464
465static int vmw_dx_context_create(struct vmw_resource *res)
466{
467	struct vmw_private *dev_priv = res->dev_priv;
468	int ret;
469	struct {
470		SVGA3dCmdHeader header;
471		SVGA3dCmdDXDefineContext body;
472	} *cmd;
473
474	if (likely(res->id != -1))
475		return 0;
476
477	ret = vmw_resource_alloc_id(res);
478	if (unlikely(ret != 0)) {
479		DRM_ERROR("Failed to allocate a context id.\n");
480		goto out_no_id;
481	}
482
483	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
484		ret = -EBUSY;
485		goto out_no_fifo;
486	}
487
488	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
489	if (unlikely(cmd == NULL)) {
490		ret = -ENOMEM;
491		goto out_no_fifo;
492	}
493
494	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
495	cmd->header.size = sizeof(cmd->body);
496	cmd->body.cid = res->id;
497	vmw_cmd_commit(dev_priv, sizeof(*cmd));
498	vmw_fifo_resource_inc(dev_priv);
499
500	return 0;
501
502out_no_fifo:
503	vmw_resource_release_id(res);
504out_no_id:
505	return ret;
506}
507
508static int vmw_dx_context_bind(struct vmw_resource *res,
509			       struct ttm_validate_buffer *val_buf)
510{
511	struct vmw_private *dev_priv = res->dev_priv;
512	struct {
513		SVGA3dCmdHeader header;
514		SVGA3dCmdDXBindContext body;
515	} *cmd;
516	struct ttm_buffer_object *bo = val_buf->bo;
517
518	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
519
520	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
521	if (unlikely(cmd == NULL))
522		return -ENOMEM;
523
524	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
525	cmd->header.size = sizeof(cmd->body);
526	cmd->body.cid = res->id;
527	cmd->body.mobid = bo->resource->start;
528	cmd->body.validContents = res->guest_memory_dirty;
529	res->guest_memory_dirty = false;
530	vmw_cmd_commit(dev_priv, sizeof(*cmd));
531
532
533	return 0;
534}
535
536/**
537 * vmw_dx_context_scrub_cotables - Scrub all bindings and
538 * cotables from a context
539 *
540 * @ctx: Pointer to the context resource
 541 * @readback: Whether to save the cotable contents on scrubbing.
542 *
543 * COtables must be unbound before their context, but unbinding requires
544 * the backup buffer being reserved, whereas scrubbing does not.
545 * This function scrubs all cotables of a context, potentially reading back
546 * the contents into their backup buffers. However, scrubbing cotables
547 * also makes the device context invalid, so scrub all bindings first so
548 * that doesn't have to be done later with an invalid context.
549 */
550void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
551				   bool readback)
552{
553	struct vmw_user_context *uctx =
554		container_of(ctx, struct vmw_user_context, res);
555	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
556		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
557	int i;
558
559	vmw_binding_state_scrub(uctx->cbs);
560	for (i = 0; i < cotable_max; ++i) {
561		struct vmw_resource *res;
562
563		/* Avoid racing with ongoing cotable destruction. */
564		spin_lock(&uctx->cotable_lock);
565		res = uctx->cotables[vmw_cotable_scrub_order[i]];
566		if (res)
567			res = vmw_resource_reference_unless_doomed(res);
568		spin_unlock(&uctx->cotable_lock);
569		if (!res)
570			continue;
571
572		WARN_ON(vmw_cotable_scrub(res, readback));
573		vmw_resource_unreference(&res);
574	}
575}
576
577static int vmw_dx_context_unbind(struct vmw_resource *res,
578				 bool readback,
579				 struct ttm_validate_buffer *val_buf)
580{
581	struct vmw_private *dev_priv = res->dev_priv;
582	struct ttm_buffer_object *bo = val_buf->bo;
583	struct vmw_fence_obj *fence;
584	struct vmw_user_context *uctx =
585		container_of(res, struct vmw_user_context, res);
586
587	struct {
588		SVGA3dCmdHeader header;
589		SVGA3dCmdDXReadbackContext body;
590	} *cmd1;
591	struct {
592		SVGA3dCmdHeader header;
593		SVGA3dCmdDXBindContext body;
594	} *cmd2;
595	uint32_t submit_size;
596	uint8_t *cmd;
597
598
599	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
600
601	mutex_lock(&dev_priv->binding_mutex);
602	vmw_dx_context_scrub_cotables(res, readback);
603
604	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
605	    readback) {
606		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
607		if (vmw_query_readback_all(uctx->dx_query_mob))
608			DRM_ERROR("Failed to read back query states\n");
609	}
610
611	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
612
613	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
614	if (unlikely(cmd == NULL)) {
615		mutex_unlock(&dev_priv->binding_mutex);
616		return -ENOMEM;
617	}
618
619	cmd2 = (void *) cmd;
620	if (readback) {
621		cmd1 = (void *) cmd;
622		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
623		cmd1->header.size = sizeof(cmd1->body);
624		cmd1->body.cid = res->id;
625		cmd2 = (void *) (&cmd1[1]);
626	}
627	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
628	cmd2->header.size = sizeof(cmd2->body);
629	cmd2->body.cid = res->id;
630	cmd2->body.mobid = SVGA3D_INVALID_ID;
631
632	vmw_cmd_commit(dev_priv, submit_size);
633	mutex_unlock(&dev_priv->binding_mutex);
634
635	/*
636	 * Create a fence object and fence the backup buffer.
637	 */
638
639	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
640					  &fence, NULL);
641
642	vmw_bo_fence_single(bo, fence);
643
644	if (likely(fence != NULL))
645		vmw_fence_obj_unreference(&fence);
646
647	return 0;
648}
649
650static int vmw_dx_context_destroy(struct vmw_resource *res)
651{
652	struct vmw_private *dev_priv = res->dev_priv;
653	struct {
654		SVGA3dCmdHeader header;
655		SVGA3dCmdDXDestroyContext body;
656	} *cmd;
657
658	if (likely(res->id == -1))
659		return 0;
660
661	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
662	if (unlikely(cmd == NULL))
663		return -ENOMEM;
664
665	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
666	cmd->header.size = sizeof(cmd->body);
667	cmd->body.cid = res->id;
668	vmw_cmd_commit(dev_priv, sizeof(*cmd));
669	if (dev_priv->query_cid == res->id)
670		dev_priv->query_cid_valid = false;
671	vmw_resource_release_id(res);
672	vmw_fifo_resource_dec(dev_priv);
673
674	return 0;
675}
676
677/*
678 * User-space context management:
679 */
680
681static struct vmw_resource *
682vmw_user_context_base_to_res(struct ttm_base_object *base)
683{
684	return &(container_of(base, struct vmw_user_context, base)->res);
685}
686
687static void vmw_user_context_free(struct vmw_resource *res)
688{
689	struct vmw_user_context *ctx =
690	    container_of(res, struct vmw_user_context, res);
691
692	if (ctx->cbs)
693		vmw_binding_state_free(ctx->cbs);
694
695	(void) vmw_context_bind_dx_query(res, NULL);
696
697	ttm_base_object_kfree(ctx, base);
698}
699
700/*
701 * This function is called when user space has no more references on the
702 * base object. It releases the base-object's reference on the resource object.
703 */
704
705static void vmw_user_context_base_release(struct ttm_base_object **p_base)
706{
707	struct ttm_base_object *base = *p_base;
708	struct vmw_user_context *ctx =
709	    container_of(base, struct vmw_user_context, base);
710	struct vmw_resource *res = &ctx->res;
711
712	*p_base = NULL;
713	vmw_resource_unreference(&res);
714}
715
716int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
717			      struct drm_file *file_priv)
718{
719	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
720	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
721
722	return ttm_ref_object_base_unref(tfile, arg->cid);
723}
724
725static int vmw_context_define(struct drm_device *dev, void *data,
726			      struct drm_file *file_priv, bool dx)
727{
728	struct vmw_private *dev_priv = vmw_priv(dev);
729	struct vmw_user_context *ctx;
730	struct vmw_resource *res;
731	struct vmw_resource *tmp;
732	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
733	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
734	int ret;
735
736	if (!has_sm4_context(dev_priv) && dx) {
737		VMW_DEBUG_USER("DX contexts not supported by device.\n");
738		return -EINVAL;
739	}
740
741	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
742	if (unlikely(!ctx)) {
743		ret = -ENOMEM;
744		goto out_ret;
745	}
746
747	res = &ctx->res;
748	ctx->base.shareable = false;
749	ctx->base.tfile = NULL;
750
751	/*
752	 * From here on, the destructor takes over resource freeing.
753	 */
754
755	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
756	if (unlikely(ret != 0))
757		goto out_ret;
758
759	tmp = vmw_resource_reference(&ctx->res);
760	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
761				   &vmw_user_context_base_release);
762
763	if (unlikely(ret != 0)) {
764		vmw_resource_unreference(&tmp);
765		goto out_err;
766	}
767
768	arg->cid = ctx->base.handle;
769out_err:
770	vmw_resource_unreference(&res);
771out_ret:
772	return ret;
773}
774
775int vmw_context_define_ioctl(struct drm_device *dev, void *data,
776			     struct drm_file *file_priv)
777{
778	return vmw_context_define(dev, data, file_priv, false);
779}
780
781int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
782				      struct drm_file *file_priv)
783{
784	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
785	struct drm_vmw_context_arg *rep = &arg->rep;
786
787	switch (arg->req) {
788	case drm_vmw_context_legacy:
789		return vmw_context_define(dev, rep, file_priv, false);
790	case drm_vmw_context_dx:
791		return vmw_context_define(dev, rep, file_priv, true);
792	default:
793		break;
794	}
795	return -EINVAL;
796}
797
798/**
799 * vmw_context_binding_list - Return a list of context bindings
800 *
801 * @ctx: The context resource
802 *
803 * Returns the current list of bindings of the given context. Note that
804 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
805 */
806struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
807{
808	struct vmw_user_context *uctx =
809		container_of(ctx, struct vmw_user_context, res);
810
811	return vmw_binding_state_list(uctx->cbs);
812}
813
814struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
815{
816	return container_of(ctx, struct vmw_user_context, res)->man;
817}
818
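/*
 * vmw_context_cotable - Return the context's cotable of @cotable_type, or
 * ERR_PTR(-EINVAL) if the type is out of range for this device.  Unlike the
 * v4.6 variant above, the returned pointer is not reference-counted here.
 */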
819struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
820					 SVGACOTableType cotable_type)
821{
822	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
823		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
824
825	if (cotable_type >= cotable_max)
826		return ERR_PTR(-EINVAL);
827
828	return container_of(ctx, struct vmw_user_context, res)->
829		cotables[cotable_type];
830}
831
832/**
833 * vmw_context_binding_state -
834 * Return a pointer to a context binding state structure
835 *
836 * @ctx: The context resource
837 *
838 * Returns the current state of bindings of the given context. Note that
839 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
840 */
841struct vmw_ctx_binding_state *
842vmw_context_binding_state(struct vmw_resource *ctx)
843{
844	return container_of(ctx, struct vmw_user_context, res)->cbs;
845}
846
847/**
848 * vmw_context_bind_dx_query -
849 * Sets query MOB for the context.  If @mob is NULL, then this function will
850 * remove the association between the MOB and the context.  This function
851 * assumes the binding_mutex is held.
852 *
853 * @ctx_res: The context resource
854 * @mob: a reference to the query MOB
855 *
856 * Returns -EINVAL if a MOB has already been set and does not match the one
857 * specified in the parameter.  0 otherwise.
858 */
859int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
860			      struct vmw_bo *mob)
861{
862	struct vmw_user_context *uctx =
863		container_of(ctx_res, struct vmw_user_context, res);
864
865	if (mob == NULL) {
866		if (uctx->dx_query_mob) {
867			uctx->dx_query_mob->dx_query_ctx = NULL;
868			vmw_bo_unreference(&uctx->dx_query_mob);
869			uctx->dx_query_mob = NULL;
870		}
871
872		return 0;
873	}
874
875	/* Can only have one MOB per context for queries */
876	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
877		return -EINVAL;
878
879	mob->dx_query_ctx  = ctx_res;
880
881	if (!uctx->dx_query_mob)
882		uctx->dx_query_mob = vmw_bo_reference(mob);
883
884	return 0;
885}
886
887/**
888 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
889 *
890 * @ctx_res: The context resource
891 */
892struct vmw_bo *
893vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
894{
895	struct vmw_user_context *uctx =
896		container_of(ctx_res, struct vmw_user_context, res);
897
898	return uctx->dx_query_mob;
899}