drivers/gpu/drm/vmwgfx/vmwgfx_context.c (v5.4)
  1// SPDX-License-Identifier: GPL-2.0 OR MIT
  2/**************************************************************************
  3 *
  4 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27
 28#include <drm/ttm/ttm_placement.h>
 29
 30#include "vmwgfx_drv.h"
 31#include "vmwgfx_resource_priv.h"
 32#include "vmwgfx_binding.h"
 33
 34struct vmw_user_context {
 35	struct ttm_base_object base;
 36	struct vmw_resource res;
 37	struct vmw_ctx_binding_state *cbs;
 38	struct vmw_cmdbuf_res_manager *man;
 39	struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX];
 40	spinlock_t cotable_lock;
 41	struct vmw_buffer_object *dx_query_mob;
 42};
 43
 44static void vmw_user_context_free(struct vmw_resource *res);
 45static struct vmw_resource *
 46vmw_user_context_base_to_res(struct ttm_base_object *base);
 47
 48static int vmw_gb_context_create(struct vmw_resource *res);
 49static int vmw_gb_context_bind(struct vmw_resource *res,
 50			       struct ttm_validate_buffer *val_buf);
 51static int vmw_gb_context_unbind(struct vmw_resource *res,
 52				 bool readback,
 53				 struct ttm_validate_buffer *val_buf);
 54static int vmw_gb_context_destroy(struct vmw_resource *res);
 55static int vmw_dx_context_create(struct vmw_resource *res);
 56static int vmw_dx_context_bind(struct vmw_resource *res,
 57			       struct ttm_validate_buffer *val_buf);
 58static int vmw_dx_context_unbind(struct vmw_resource *res,
 59				 bool readback,
 60				 struct ttm_validate_buffer *val_buf);
 61static int vmw_dx_context_destroy(struct vmw_resource *res);
 62
 63static uint64_t vmw_user_context_size;
 64
 65static const struct vmw_user_resource_conv user_context_conv = {
 66	.object_type = VMW_RES_CONTEXT,
 67	.base_obj_to_res = vmw_user_context_base_to_res,
 68	.res_free = vmw_user_context_free
 69};
 70
 71const struct vmw_user_resource_conv *user_context_converter =
 72	&user_context_conv;
 73
 74
 75static const struct vmw_res_func vmw_legacy_context_func = {
 76	.res_type = vmw_res_context,
 77	.needs_backup = false,
 78	.may_evict = false,
 79	.type_name = "legacy contexts",
 80	.backup_placement = NULL,
 81	.create = NULL,
 82	.destroy = NULL,
 83	.bind = NULL,
 84	.unbind = NULL
 85};
 86
 87static const struct vmw_res_func vmw_gb_context_func = {
 88	.res_type = vmw_res_context,
 89	.needs_backup = true,
 90	.may_evict = true,
 91	.prio = 3,
 92	.dirty_prio = 3,
 93	.type_name = "guest backed contexts",
 94	.backup_placement = &vmw_mob_placement,
 95	.create = vmw_gb_context_create,
 96	.destroy = vmw_gb_context_destroy,
 97	.bind = vmw_gb_context_bind,
 98	.unbind = vmw_gb_context_unbind
 99};
100
101static const struct vmw_res_func vmw_dx_context_func = {
102	.res_type = vmw_res_dx_context,
103	.needs_backup = true,
104	.may_evict = true,
105	.prio = 3,
106	.dirty_prio = 3,
107	.type_name = "dx contexts",
108	.backup_placement = &vmw_mob_placement,
109	.create = vmw_dx_context_create,
110	.destroy = vmw_dx_context_destroy,
111	.bind = vmw_dx_context_bind,
112	.unbind = vmw_dx_context_unbind
113};
114
115/**
116 * Context management:
117 */
118
119static void vmw_context_cotables_unref(struct vmw_user_context *uctx)
120{
121	struct vmw_resource *res;
122	int i;
123
124	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
125		spin_lock(&uctx->cotable_lock);
126		res = uctx->cotables[i];
127		uctx->cotables[i] = NULL;
128		spin_unlock(&uctx->cotable_lock);
129
130		if (res)
131			vmw_resource_unreference(&res);
132	}
133}
134
135static void vmw_hw_context_destroy(struct vmw_resource *res)
136{
137	struct vmw_user_context *uctx =
138		container_of(res, struct vmw_user_context, res);
139	struct vmw_private *dev_priv = res->dev_priv;
140	struct {
141		SVGA3dCmdHeader header;
142		SVGA3dCmdDestroyContext body;
143	} *cmd;
144
145
146	if (res->func->destroy == vmw_gb_context_destroy ||
147	    res->func->destroy == vmw_dx_context_destroy) {
148		mutex_lock(&dev_priv->cmdbuf_mutex);
149		vmw_cmdbuf_res_man_destroy(uctx->man);
150		mutex_lock(&dev_priv->binding_mutex);
151		vmw_binding_state_kill(uctx->cbs);
152		(void) res->func->destroy(res);
153		mutex_unlock(&dev_priv->binding_mutex);
154		if (dev_priv->pinned_bo != NULL &&
155		    !dev_priv->query_cid_valid)
156			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
157		mutex_unlock(&dev_priv->cmdbuf_mutex);
158		vmw_context_cotables_unref(uctx);
159		return;
160	}
161
162	vmw_execbuf_release_pinned_bo(dev_priv);
163	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
164	if (unlikely(cmd == NULL))
165		return;
166
167	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
168	cmd->header.size = sizeof(cmd->body);
169	cmd->body.cid = res->id;
170
171	vmw_fifo_commit(dev_priv, sizeof(*cmd));
172	vmw_fifo_resource_dec(dev_priv);
173}
174
175static int vmw_gb_context_init(struct vmw_private *dev_priv,
176			       bool dx,
177			       struct vmw_resource *res,
178			       void (*res_free)(struct vmw_resource *res))
179{
180	int ret, i;
181	struct vmw_user_context *uctx =
182		container_of(res, struct vmw_user_context, res);
183
184	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
185			    SVGA3D_CONTEXT_DATA_SIZE);
186	ret = vmw_resource_init(dev_priv, res, true,
187				res_free,
188				dx ? &vmw_dx_context_func :
189				&vmw_gb_context_func);
190	if (unlikely(ret != 0))
191		goto out_err;
192
193	if (dev_priv->has_mob) {
194		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
195		if (IS_ERR(uctx->man)) {
196			ret = PTR_ERR(uctx->man);
197			uctx->man = NULL;
198			goto out_err;
199		}
200	}
201
202	uctx->cbs = vmw_binding_state_alloc(dev_priv);
203	if (IS_ERR(uctx->cbs)) {
204		ret = PTR_ERR(uctx->cbs);
205		goto out_err;
206	}
207
208	spin_lock_init(&uctx->cotable_lock);
209
210	if (dx) {
211		for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
212			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
213							      &uctx->res, i);
214			if (IS_ERR(uctx->cotables[i])) {
215				ret = PTR_ERR(uctx->cotables[i]);
216				goto out_cotables;
217			}
218		}
219	}
220
221	res->hw_destroy = vmw_hw_context_destroy;
222	return 0;
223
224out_cotables:
225	vmw_context_cotables_unref(uctx);
226out_err:
227	if (res_free)
228		res_free(res);
229	else
230		kfree(res);
231	return ret;
232}
233
234static int vmw_context_init(struct vmw_private *dev_priv,
235			    struct vmw_resource *res,
236			    void (*res_free)(struct vmw_resource *res),
237			    bool dx)
238{
239	int ret;
240
241	struct {
242		SVGA3dCmdHeader header;
243		SVGA3dCmdDefineContext body;
244	} *cmd;
245
246	if (dev_priv->has_mob)
247		return vmw_gb_context_init(dev_priv, dx, res, res_free);
248
249	ret = vmw_resource_init(dev_priv, res, false,
250				res_free, &vmw_legacy_context_func);
251
252	if (unlikely(ret != 0)) {
253		DRM_ERROR("Failed to allocate a resource id.\n");
254		goto out_early;
255	}
256
257	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
258		DRM_ERROR("Out of hw context ids.\n");
259		vmw_resource_unreference(&res);
260		return -ENOMEM;
261	}
262
263	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
264	if (unlikely(cmd == NULL)) {
265		vmw_resource_unreference(&res);
266		return -ENOMEM;
267	}
268
269	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
270	cmd->header.size = sizeof(cmd->body);
271	cmd->body.cid = res->id;
272
273	vmw_fifo_commit(dev_priv, sizeof(*cmd));
274	vmw_fifo_resource_inc(dev_priv);
275	res->hw_destroy = vmw_hw_context_destroy;
276	return 0;
277
278out_early:
279	if (res_free == NULL)
280		kfree(res);
281	else
282		res_free(res);
283	return ret;
284}
285
286
287/*
288 * GB context.
289 */
290
291static int vmw_gb_context_create(struct vmw_resource *res)
292{
293	struct vmw_private *dev_priv = res->dev_priv;
294	int ret;
295	struct {
296		SVGA3dCmdHeader header;
297		SVGA3dCmdDefineGBContext body;
298	} *cmd;
299
300	if (likely(res->id != -1))
301		return 0;
302
303	ret = vmw_resource_alloc_id(res);
304	if (unlikely(ret != 0)) {
305		DRM_ERROR("Failed to allocate a context id.\n");
306		goto out_no_id;
307	}
308
309	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
310		ret = -EBUSY;
311		goto out_no_fifo;
312	}
313
314	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
315	if (unlikely(cmd == NULL)) {
316		ret = -ENOMEM;
317		goto out_no_fifo;
318	}
319
320	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
321	cmd->header.size = sizeof(cmd->body);
322	cmd->body.cid = res->id;
323	vmw_fifo_commit(dev_priv, sizeof(*cmd));
324	vmw_fifo_resource_inc(dev_priv);
325
326	return 0;
327
328out_no_fifo:
329	vmw_resource_release_id(res);
330out_no_id:
331	return ret;
332}
333
334static int vmw_gb_context_bind(struct vmw_resource *res,
335			       struct ttm_validate_buffer *val_buf)
336{
337	struct vmw_private *dev_priv = res->dev_priv;
338	struct {
339		SVGA3dCmdHeader header;
340		SVGA3dCmdBindGBContext body;
341	} *cmd;
342	struct ttm_buffer_object *bo = val_buf->bo;
343
344	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
345
346	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
347	if (unlikely(cmd == NULL))
348		return -ENOMEM;
349
350	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
351	cmd->header.size = sizeof(cmd->body);
352	cmd->body.cid = res->id;
353	cmd->body.mobid = bo->mem.start;
354	cmd->body.validContents = res->backup_dirty;
355	res->backup_dirty = false;
356	vmw_fifo_commit(dev_priv, sizeof(*cmd));
357
358	return 0;
359}
360
361static int vmw_gb_context_unbind(struct vmw_resource *res,
362				 bool readback,
363				 struct ttm_validate_buffer *val_buf)
364{
365	struct vmw_private *dev_priv = res->dev_priv;
366	struct ttm_buffer_object *bo = val_buf->bo;
367	struct vmw_fence_obj *fence;
368	struct vmw_user_context *uctx =
369		container_of(res, struct vmw_user_context, res);
370
371	struct {
372		SVGA3dCmdHeader header;
373		SVGA3dCmdReadbackGBContext body;
374	} *cmd1;
375	struct {
376		SVGA3dCmdHeader header;
377		SVGA3dCmdBindGBContext body;
378	} *cmd2;
379	uint32_t submit_size;
380	uint8_t *cmd;
381
382
383	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
384
385	mutex_lock(&dev_priv->binding_mutex);
386	vmw_binding_state_scrub(uctx->cbs);
387
388	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
389
390	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
391	if (unlikely(cmd == NULL)) {
392		mutex_unlock(&dev_priv->binding_mutex);
393		return -ENOMEM;
394	}
395
396	cmd2 = (void *) cmd;
397	if (readback) {
398		cmd1 = (void *) cmd;
399		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
400		cmd1->header.size = sizeof(cmd1->body);
401		cmd1->body.cid = res->id;
402		cmd2 = (void *) (&cmd1[1]);
403	}
404	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
405	cmd2->header.size = sizeof(cmd2->body);
406	cmd2->body.cid = res->id;
407	cmd2->body.mobid = SVGA3D_INVALID_ID;
408
409	vmw_fifo_commit(dev_priv, submit_size);
410	mutex_unlock(&dev_priv->binding_mutex);
411
412	/*
413	 * Create a fence object and fence the backup buffer.
414	 */
415
416	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
417					  &fence, NULL);
418
419	vmw_bo_fence_single(bo, fence);
420
421	if (likely(fence != NULL))
422		vmw_fence_obj_unreference(&fence);
423
424	return 0;
425}
426
427static int vmw_gb_context_destroy(struct vmw_resource *res)
428{
429	struct vmw_private *dev_priv = res->dev_priv;
430	struct {
431		SVGA3dCmdHeader header;
432		SVGA3dCmdDestroyGBContext body;
433	} *cmd;
434
435	if (likely(res->id == -1))
436		return 0;
437
438	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
439	if (unlikely(cmd == NULL))
440		return -ENOMEM;
441
442	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
443	cmd->header.size = sizeof(cmd->body);
444	cmd->body.cid = res->id;
445	vmw_fifo_commit(dev_priv, sizeof(*cmd));
446	if (dev_priv->query_cid == res->id)
447		dev_priv->query_cid_valid = false;
448	vmw_resource_release_id(res);
449	vmw_fifo_resource_dec(dev_priv);
450
451	return 0;
452}
453
454/*
455 * DX context.
456 */
457
458static int vmw_dx_context_create(struct vmw_resource *res)
459{
460	struct vmw_private *dev_priv = res->dev_priv;
461	int ret;
462	struct {
463		SVGA3dCmdHeader header;
464		SVGA3dCmdDXDefineContext body;
465	} *cmd;
466
467	if (likely(res->id != -1))
468		return 0;
469
470	ret = vmw_resource_alloc_id(res);
471	if (unlikely(ret != 0)) {
472		DRM_ERROR("Failed to allocate a context id.\n");
473		goto out_no_id;
474	}
475
476	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
477		ret = -EBUSY;
478		goto out_no_fifo;
479	}
480
481	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
482	if (unlikely(cmd == NULL)) {
483		ret = -ENOMEM;
484		goto out_no_fifo;
485	}
486
487	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
488	cmd->header.size = sizeof(cmd->body);
489	cmd->body.cid = res->id;
490	vmw_fifo_commit(dev_priv, sizeof(*cmd));
491	vmw_fifo_resource_inc(dev_priv);
492
493	return 0;
494
495out_no_fifo:
496	vmw_resource_release_id(res);
497out_no_id:
498	return ret;
499}
500
501static int vmw_dx_context_bind(struct vmw_resource *res,
502			       struct ttm_validate_buffer *val_buf)
503{
504	struct vmw_private *dev_priv = res->dev_priv;
505	struct {
506		SVGA3dCmdHeader header;
507		SVGA3dCmdDXBindContext body;
508	} *cmd;
509	struct ttm_buffer_object *bo = val_buf->bo;
510
511	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
512
513	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
514	if (unlikely(cmd == NULL))
515		return -ENOMEM;
516
517	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
518	cmd->header.size = sizeof(cmd->body);
519	cmd->body.cid = res->id;
520	cmd->body.mobid = bo->mem.start;
521	cmd->body.validContents = res->backup_dirty;
522	res->backup_dirty = false;
523	vmw_fifo_commit(dev_priv, sizeof(*cmd));
524
525
526	return 0;
527}
528
529/**
530 * vmw_dx_context_scrub_cotables - Scrub all bindings and
531 * cotables from a context
532 *
533 * @ctx: Pointer to the context resource
534 * @readback: Whether to save the cotable contents on scrubbing.
535 *
536 * COtables must be unbound before their context, but unbinding requires
537 * the backup buffer being reserved, whereas scrubbing does not.
538 * This function scrubs all cotables of a context, potentially reading back
539 * the contents into their backup buffers. However, scrubbing cotables
540 * also makes the device context invalid, so scrub all bindings first so
541 * that doesn't have to be done later with an invalid context.
542 */
543void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
544				   bool readback)
545{
546	struct vmw_user_context *uctx =
547		container_of(ctx, struct vmw_user_context, res);
548	int i;
549
550	vmw_binding_state_scrub(uctx->cbs);
551	for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
552		struct vmw_resource *res;
553
554		/* Avoid racing with ongoing cotable destruction. */
555		spin_lock(&uctx->cotable_lock);
556		res = uctx->cotables[vmw_cotable_scrub_order[i]];
557		if (res)
558			res = vmw_resource_reference_unless_doomed(res);
559		spin_unlock(&uctx->cotable_lock);
560		if (!res)
561			continue;
562
563		WARN_ON(vmw_cotable_scrub(res, readback));
564		vmw_resource_unreference(&res);
565	}
566}
567
568static int vmw_dx_context_unbind(struct vmw_resource *res,
569				 bool readback,
570				 struct ttm_validate_buffer *val_buf)
571{
572	struct vmw_private *dev_priv = res->dev_priv;
573	struct ttm_buffer_object *bo = val_buf->bo;
574	struct vmw_fence_obj *fence;
575	struct vmw_user_context *uctx =
576		container_of(res, struct vmw_user_context, res);
577
578	struct {
579		SVGA3dCmdHeader header;
580		SVGA3dCmdDXReadbackContext body;
581	} *cmd1;
582	struct {
583		SVGA3dCmdHeader header;
584		SVGA3dCmdDXBindContext body;
585	} *cmd2;
586	uint32_t submit_size;
587	uint8_t *cmd;
588
589
590	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
591
592	mutex_lock(&dev_priv->binding_mutex);
593	vmw_dx_context_scrub_cotables(res, readback);
594
595	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
596	    readback) {
597		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
598		if (vmw_query_readback_all(uctx->dx_query_mob))
599			DRM_ERROR("Failed to read back query states\n");
600	}
601
602	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
603
604	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
605	if (unlikely(cmd == NULL)) {
606		mutex_unlock(&dev_priv->binding_mutex);
607		return -ENOMEM;
608	}
609
610	cmd2 = (void *) cmd;
611	if (readback) {
612		cmd1 = (void *) cmd;
613		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
614		cmd1->header.size = sizeof(cmd1->body);
615		cmd1->body.cid = res->id;
616		cmd2 = (void *) (&cmd1[1]);
617	}
618	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
619	cmd2->header.size = sizeof(cmd2->body);
620	cmd2->body.cid = res->id;
621	cmd2->body.mobid = SVGA3D_INVALID_ID;
622
623	vmw_fifo_commit(dev_priv, submit_size);
624	mutex_unlock(&dev_priv->binding_mutex);
625
626	/*
627	 * Create a fence object and fence the backup buffer.
628	 */
629
630	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
631					  &fence, NULL);
632
633	vmw_bo_fence_single(bo, fence);
634
635	if (likely(fence != NULL))
636		vmw_fence_obj_unreference(&fence);
637
638	return 0;
639}
640
641static int vmw_dx_context_destroy(struct vmw_resource *res)
642{
643	struct vmw_private *dev_priv = res->dev_priv;
644	struct {
645		SVGA3dCmdHeader header;
646		SVGA3dCmdDXDestroyContext body;
647	} *cmd;
648
649	if (likely(res->id == -1))
650		return 0;
651
652	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
653	if (unlikely(cmd == NULL))
654		return -ENOMEM;
655
656	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
657	cmd->header.size = sizeof(cmd->body);
658	cmd->body.cid = res->id;
659	vmw_fifo_commit(dev_priv, sizeof(*cmd));
660	if (dev_priv->query_cid == res->id)
661		dev_priv->query_cid_valid = false;
662	vmw_resource_release_id(res);
663	vmw_fifo_resource_dec(dev_priv);
664
665	return 0;
666}
667
668/**
669 * User-space context management:
670 */
671
672static struct vmw_resource *
673vmw_user_context_base_to_res(struct ttm_base_object *base)
674{
675	return &(container_of(base, struct vmw_user_context, base)->res);
676}
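
vmw_user_context_base_to_res() above, like the many container_of() calls throughout this file, relies on struct ttm_base_object and struct vmw_resource being embedded by value in struct vmw_user_context: a pointer to the member is converted back to the containing object by subtracting the member's offset. A minimal userspace sketch of that pattern follows, with stand-in types (only container_of() itself mirrors the kernel macro):

#include <stdio.h>
#include <stddef.h>

/* Userspace rendition of the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_object { int handle; };

struct user_context {
	struct base_object base;	/* embedded, as in vmw_user_context */
	int payload;
};

int main(void)
{
	struct user_context ctx = { .base = { .handle = 7 }, .payload = 42 };
	struct base_object *b = &ctx.base;

	/* Recover the containing object from the embedded member. */
	struct user_context *uctx = container_of(b, struct user_context, base);
	printf("handle=%d payload=%d\n", uctx->base.handle, uctx->payload);
	return 0;
}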
677
678static void vmw_user_context_free(struct vmw_resource *res)
679{
680	struct vmw_user_context *ctx =
681	    container_of(res, struct vmw_user_context, res);
682	struct vmw_private *dev_priv = res->dev_priv;
683
684	if (ctx->cbs)
685		vmw_binding_state_free(ctx->cbs);
686
687	(void) vmw_context_bind_dx_query(res, NULL);
688
689	ttm_base_object_kfree(ctx, base);
690	ttm_mem_global_free(vmw_mem_glob(dev_priv),
691			    vmw_user_context_size);
692}
693
694/**
695 * This function is called when user space has no more references on the
696 * base object. It releases the base-object's reference on the resource object.
697 */
698
699static void vmw_user_context_base_release(struct ttm_base_object **p_base)
700{
701	struct ttm_base_object *base = *p_base;
702	struct vmw_user_context *ctx =
703	    container_of(base, struct vmw_user_context, base);
704	struct vmw_resource *res = &ctx->res;
705
706	*p_base = NULL;
707	vmw_resource_unreference(&res);
708}
709
710int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
711			      struct drm_file *file_priv)
712{
713	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
714	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
715
716	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
717}
718
719static int vmw_context_define(struct drm_device *dev, void *data,
720			      struct drm_file *file_priv, bool dx)
721{
722	struct vmw_private *dev_priv = vmw_priv(dev);
723	struct vmw_user_context *ctx;
724	struct vmw_resource *res;
725	struct vmw_resource *tmp;
726	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
727	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
728	struct ttm_operation_ctx ttm_opt_ctx = {
729		.interruptible = true,
730		.no_wait_gpu = false
731	};
732	int ret;
733
734	if (!dev_priv->has_dx && dx) {
735		VMW_DEBUG_USER("DX contexts not supported by device.\n");
736		return -EINVAL;
737	}
738
739	if (unlikely(vmw_user_context_size == 0))
740		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
741		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
742		  VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
743
744	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
745	if (unlikely(ret != 0))
746		return ret;
747
748	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
749				   vmw_user_context_size,
750				   &ttm_opt_ctx);
751	if (unlikely(ret != 0)) {
752		if (ret != -ERESTARTSYS)
753			DRM_ERROR("Out of graphics memory for context"
754				  " creation.\n");
755		goto out_unlock;
756	}
757
758	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
759	if (unlikely(!ctx)) {
760		ttm_mem_global_free(vmw_mem_glob(dev_priv),
761				    vmw_user_context_size);
762		ret = -ENOMEM;
763		goto out_unlock;
764	}
765
766	res = &ctx->res;
767	ctx->base.shareable = false;
768	ctx->base.tfile = NULL;
769
770	/*
771	 * From here on, the destructor takes over resource freeing.
772	 */
773
774	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
775	if (unlikely(ret != 0))
776		goto out_unlock;
777
778	tmp = vmw_resource_reference(&ctx->res);
779	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
780				   &vmw_user_context_base_release, NULL);
781
782	if (unlikely(ret != 0)) {
783		vmw_resource_unreference(&tmp);
784		goto out_err;
785	}
786
787	arg->cid = ctx->base.handle;
788out_err:
789	vmw_resource_unreference(&res);
790out_unlock:
791	ttm_read_unlock(&dev_priv->reservation_sem);
792	return ret;
793}
794
795int vmw_context_define_ioctl(struct drm_device *dev, void *data,
796			     struct drm_file *file_priv)
797{
798	return vmw_context_define(dev, data, file_priv, false);
799}
800
801int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
802				      struct drm_file *file_priv)
803{
804	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
805	struct drm_vmw_context_arg *rep = &arg->rep;
806
807	switch (arg->req) {
808	case drm_vmw_context_legacy:
809		return vmw_context_define(dev, rep, file_priv, false);
810	case drm_vmw_context_dx:
811		return vmw_context_define(dev, rep, file_priv, true);
812	default:
813		break;
814	}
815	return -EINVAL;
816}
817
818/**
819 * vmw_context_binding_list - Return a list of context bindings
820 *
821 * @ctx: The context resource
822 *
823 * Returns the current list of bindings of the given context. Note that
824 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
825 */
826struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
827{
828	struct vmw_user_context *uctx =
829		container_of(ctx, struct vmw_user_context, res);
830
831	return vmw_binding_state_list(uctx->cbs);
832}
833
834struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
835{
836	return container_of(ctx, struct vmw_user_context, res)->man;
837}
838
839struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
840					 SVGACOTableType cotable_type)
841{
842	if (cotable_type >= SVGA_COTABLE_DX10_MAX)
843		return ERR_PTR(-EINVAL);
844
845	return container_of(ctx, struct vmw_user_context, res)->
846		cotables[cotable_type];
847}
848
849/**
850 * vmw_context_binding_state -
851 * Return a pointer to a context binding state structure
852 *
853 * @ctx: The context resource
854 *
855 * Returns the current state of bindings of the given context. Note that
856 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
857 */
858struct vmw_ctx_binding_state *
859vmw_context_binding_state(struct vmw_resource *ctx)
860{
861	return container_of(ctx, struct vmw_user_context, res)->cbs;
862}
863
864/**
865 * vmw_context_bind_dx_query -
866 * Sets query MOB for the context.  If @mob is NULL, then this function will
867 * remove the association between the MOB and the context.  This function
868 * assumes the binding_mutex is held.
869 *
870 * @ctx_res: The context resource
871 * @mob: a reference to the query MOB
872 *
873 * Returns -EINVAL if a MOB has already been set and does not match the one
874 * specified in the parameter.  0 otherwise.
875 */
876int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
877			      struct vmw_buffer_object *mob)
878{
879	struct vmw_user_context *uctx =
880		container_of(ctx_res, struct vmw_user_context, res);
881
882	if (mob == NULL) {
883		if (uctx->dx_query_mob) {
884			uctx->dx_query_mob->dx_query_ctx = NULL;
885			vmw_bo_unreference(&uctx->dx_query_mob);
886			uctx->dx_query_mob = NULL;
887		}
888
889		return 0;
890	}
891
892	/* Can only have one MOB per context for queries */
893	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
894		return -EINVAL;
895
896	mob->dx_query_ctx  = ctx_res;
897
898	if (!uctx->dx_query_mob)
899		uctx->dx_query_mob = vmw_bo_reference(mob);
900
901	return 0;
902}
903
904/**
905 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
906 *
907 * @ctx_res: The context resource
908 */
909struct vmw_buffer_object *
910vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
911{
912	struct vmw_user_context *uctx =
913		container_of(ctx_res, struct vmw_user_context, res);
914
915	return uctx->dx_query_mob;
916}
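
The v5.9 copy of the same file follows. Relative to v5.4, the visible differences are the SM5 additions: struct vmw_user_context sizes its cotables[] array with SVGA_COTABLE_MAX rather than SVGA_COTABLE_DX10_MAX, vmw_context_cotables_unref() gains a dev_priv parameter, and the cotable loops in vmw_context_cotables_unref(), vmw_gb_context_init(), vmw_dx_context_scrub_cotables() and vmw_context_cotable() now run up to has_sm5_context(dev_priv) ? SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX. vmw_context_define() also tests has_sm4_context(dev_priv) instead of dev_priv->has_dx when rejecting DX context creation.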
drivers/gpu/drm/vmwgfx/vmwgfx_context.c (v5.9)
  1// SPDX-License-Identifier: GPL-2.0 OR MIT
  2/**************************************************************************
  3 *
  4 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27
 28#include <drm/ttm/ttm_placement.h>
 29
 30#include "vmwgfx_drv.h"
 31#include "vmwgfx_resource_priv.h"
 32#include "vmwgfx_binding.h"
 33
 34struct vmw_user_context {
 35	struct ttm_base_object base;
 36	struct vmw_resource res;
 37	struct vmw_ctx_binding_state *cbs;
 38	struct vmw_cmdbuf_res_manager *man;
 39	struct vmw_resource *cotables[SVGA_COTABLE_MAX];
 40	spinlock_t cotable_lock;
 41	struct vmw_buffer_object *dx_query_mob;
 42};
 43
 44static void vmw_user_context_free(struct vmw_resource *res);
 45static struct vmw_resource *
 46vmw_user_context_base_to_res(struct ttm_base_object *base);
 47
 48static int vmw_gb_context_create(struct vmw_resource *res);
 49static int vmw_gb_context_bind(struct vmw_resource *res,
 50			       struct ttm_validate_buffer *val_buf);
 51static int vmw_gb_context_unbind(struct vmw_resource *res,
 52				 bool readback,
 53				 struct ttm_validate_buffer *val_buf);
 54static int vmw_gb_context_destroy(struct vmw_resource *res);
 55static int vmw_dx_context_create(struct vmw_resource *res);
 56static int vmw_dx_context_bind(struct vmw_resource *res,
 57			       struct ttm_validate_buffer *val_buf);
 58static int vmw_dx_context_unbind(struct vmw_resource *res,
 59				 bool readback,
 60				 struct ttm_validate_buffer *val_buf);
 61static int vmw_dx_context_destroy(struct vmw_resource *res);
 62
 63static uint64_t vmw_user_context_size;
 64
 65static const struct vmw_user_resource_conv user_context_conv = {
 66	.object_type = VMW_RES_CONTEXT,
 67	.base_obj_to_res = vmw_user_context_base_to_res,
 68	.res_free = vmw_user_context_free
 69};
 70
 71const struct vmw_user_resource_conv *user_context_converter =
 72	&user_context_conv;
 73
 74
 75static const struct vmw_res_func vmw_legacy_context_func = {
 76	.res_type = vmw_res_context,
 77	.needs_backup = false,
 78	.may_evict = false,
 79	.type_name = "legacy contexts",
 80	.backup_placement = NULL,
 81	.create = NULL,
 82	.destroy = NULL,
 83	.bind = NULL,
 84	.unbind = NULL
 85};
 86
 87static const struct vmw_res_func vmw_gb_context_func = {
 88	.res_type = vmw_res_context,
 89	.needs_backup = true,
 90	.may_evict = true,
 91	.prio = 3,
 92	.dirty_prio = 3,
 93	.type_name = "guest backed contexts",
 94	.backup_placement = &vmw_mob_placement,
 95	.create = vmw_gb_context_create,
 96	.destroy = vmw_gb_context_destroy,
 97	.bind = vmw_gb_context_bind,
 98	.unbind = vmw_gb_context_unbind
 99};
100
101static const struct vmw_res_func vmw_dx_context_func = {
102	.res_type = vmw_res_dx_context,
103	.needs_backup = true,
104	.may_evict = true,
105	.prio = 3,
106	.dirty_prio = 3,
107	.type_name = "dx contexts",
108	.backup_placement = &vmw_mob_placement,
109	.create = vmw_dx_context_create,
110	.destroy = vmw_dx_context_destroy,
111	.bind = vmw_dx_context_bind,
112	.unbind = vmw_dx_context_unbind
113};
114
115/**
116 * Context management:
117 */
118
119static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
120				       struct vmw_user_context *uctx)
121{
122	struct vmw_resource *res;
123	int i;
124	u32 cotable_max = has_sm5_context(dev_priv) ?
125		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
126
127	for (i = 0; i < cotable_max; ++i) {
128		spin_lock(&uctx->cotable_lock);
129		res = uctx->cotables[i];
130		uctx->cotables[i] = NULL;
131		spin_unlock(&uctx->cotable_lock);
132
133		if (res)
134			vmw_resource_unreference(&res);
135	}
136}
137
138static void vmw_hw_context_destroy(struct vmw_resource *res)
139{
140	struct vmw_user_context *uctx =
141		container_of(res, struct vmw_user_context, res);
142	struct vmw_private *dev_priv = res->dev_priv;
143	struct {
144		SVGA3dCmdHeader header;
145		SVGA3dCmdDestroyContext body;
146	} *cmd;
147
148
149	if (res->func->destroy == vmw_gb_context_destroy ||
150	    res->func->destroy == vmw_dx_context_destroy) {
151		mutex_lock(&dev_priv->cmdbuf_mutex);
152		vmw_cmdbuf_res_man_destroy(uctx->man);
153		mutex_lock(&dev_priv->binding_mutex);
154		vmw_binding_state_kill(uctx->cbs);
155		(void) res->func->destroy(res);
156		mutex_unlock(&dev_priv->binding_mutex);
157		if (dev_priv->pinned_bo != NULL &&
158		    !dev_priv->query_cid_valid)
159			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
160		mutex_unlock(&dev_priv->cmdbuf_mutex);
161		vmw_context_cotables_unref(dev_priv, uctx);
162		return;
163	}
164
165	vmw_execbuf_release_pinned_bo(dev_priv);
166	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
167	if (unlikely(cmd == NULL))
168		return;
169
170	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
171	cmd->header.size = sizeof(cmd->body);
172	cmd->body.cid = res->id;
173
174	vmw_fifo_commit(dev_priv, sizeof(*cmd));
175	vmw_fifo_resource_dec(dev_priv);
176}
177
178static int vmw_gb_context_init(struct vmw_private *dev_priv,
179			       bool dx,
180			       struct vmw_resource *res,
181			       void (*res_free)(struct vmw_resource *res))
182{
183	int ret, i;
184	struct vmw_user_context *uctx =
185		container_of(res, struct vmw_user_context, res);
186
187	res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
188			    SVGA3D_CONTEXT_DATA_SIZE);
189	ret = vmw_resource_init(dev_priv, res, true,
190				res_free,
191				dx ? &vmw_dx_context_func :
192				&vmw_gb_context_func);
193	if (unlikely(ret != 0))
194		goto out_err;
195
196	if (dev_priv->has_mob) {
197		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
198		if (IS_ERR(uctx->man)) {
199			ret = PTR_ERR(uctx->man);
200			uctx->man = NULL;
201			goto out_err;
202		}
203	}
204
205	uctx->cbs = vmw_binding_state_alloc(dev_priv);
206	if (IS_ERR(uctx->cbs)) {
207		ret = PTR_ERR(uctx->cbs);
208		goto out_err;
209	}
210
211	spin_lock_init(&uctx->cotable_lock);
212
213	if (dx) {
214		u32 cotable_max = has_sm5_context(dev_priv) ?
215			SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
216		for (i = 0; i < cotable_max; ++i) {
217			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
218							      &uctx->res, i);
219			if (IS_ERR(uctx->cotables[i])) {
220				ret = PTR_ERR(uctx->cotables[i]);
221				goto out_cotables;
222			}
223		}
224	}
225
226	res->hw_destroy = vmw_hw_context_destroy;
227	return 0;
228
229out_cotables:
230	vmw_context_cotables_unref(dev_priv, uctx);
231out_err:
232	if (res_free)
233		res_free(res);
234	else
235		kfree(res);
236	return ret;
237}
238
239static int vmw_context_init(struct vmw_private *dev_priv,
240			    struct vmw_resource *res,
241			    void (*res_free)(struct vmw_resource *res),
242			    bool dx)
243{
244	int ret;
245
246	struct {
247		SVGA3dCmdHeader header;
248		SVGA3dCmdDefineContext body;
249	} *cmd;
250
251	if (dev_priv->has_mob)
252		return vmw_gb_context_init(dev_priv, dx, res, res_free);
253
254	ret = vmw_resource_init(dev_priv, res, false,
255				res_free, &vmw_legacy_context_func);
256
257	if (unlikely(ret != 0)) {
258		DRM_ERROR("Failed to allocate a resource id.\n");
259		goto out_early;
260	}
261
262	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
263		DRM_ERROR("Out of hw context ids.\n");
264		vmw_resource_unreference(&res);
265		return -ENOMEM;
266	}
267
268	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
269	if (unlikely(cmd == NULL)) {
270		vmw_resource_unreference(&res);
271		return -ENOMEM;
272	}
273
274	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
275	cmd->header.size = sizeof(cmd->body);
276	cmd->body.cid = res->id;
277
278	vmw_fifo_commit(dev_priv, sizeof(*cmd));
279	vmw_fifo_resource_inc(dev_priv);
280	res->hw_destroy = vmw_hw_context_destroy;
281	return 0;
282
283out_early:
284	if (res_free == NULL)
285		kfree(res);
286	else
287		res_free(res);
288	return ret;
289}
290
291
292/*
293 * GB context.
294 */
295
296static int vmw_gb_context_create(struct vmw_resource *res)
297{
298	struct vmw_private *dev_priv = res->dev_priv;
299	int ret;
300	struct {
301		SVGA3dCmdHeader header;
302		SVGA3dCmdDefineGBContext body;
303	} *cmd;
304
305	if (likely(res->id != -1))
306		return 0;
307
308	ret = vmw_resource_alloc_id(res);
309	if (unlikely(ret != 0)) {
310		DRM_ERROR("Failed to allocate a context id.\n");
311		goto out_no_id;
312	}
313
314	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
315		ret = -EBUSY;
316		goto out_no_fifo;
317	}
318
319	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
320	if (unlikely(cmd == NULL)) {
321		ret = -ENOMEM;
322		goto out_no_fifo;
323	}
324
325	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
326	cmd->header.size = sizeof(cmd->body);
327	cmd->body.cid = res->id;
328	vmw_fifo_commit(dev_priv, sizeof(*cmd));
329	vmw_fifo_resource_inc(dev_priv);
330
331	return 0;
332
333out_no_fifo:
334	vmw_resource_release_id(res);
335out_no_id:
336	return ret;
337}
338
339static int vmw_gb_context_bind(struct vmw_resource *res,
340			       struct ttm_validate_buffer *val_buf)
341{
342	struct vmw_private *dev_priv = res->dev_priv;
343	struct {
344		SVGA3dCmdHeader header;
345		SVGA3dCmdBindGBContext body;
346	} *cmd;
347	struct ttm_buffer_object *bo = val_buf->bo;
348
349	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
350
351	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
352	if (unlikely(cmd == NULL))
353		return -ENOMEM;
354
355	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
356	cmd->header.size = sizeof(cmd->body);
357	cmd->body.cid = res->id;
358	cmd->body.mobid = bo->mem.start;
359	cmd->body.validContents = res->backup_dirty;
360	res->backup_dirty = false;
361	vmw_fifo_commit(dev_priv, sizeof(*cmd));
362
363	return 0;
364}
365
366static int vmw_gb_context_unbind(struct vmw_resource *res,
367				 bool readback,
368				 struct ttm_validate_buffer *val_buf)
369{
370	struct vmw_private *dev_priv = res->dev_priv;
371	struct ttm_buffer_object *bo = val_buf->bo;
372	struct vmw_fence_obj *fence;
373	struct vmw_user_context *uctx =
374		container_of(res, struct vmw_user_context, res);
375
376	struct {
377		SVGA3dCmdHeader header;
378		SVGA3dCmdReadbackGBContext body;
379	} *cmd1;
380	struct {
381		SVGA3dCmdHeader header;
382		SVGA3dCmdBindGBContext body;
383	} *cmd2;
384	uint32_t submit_size;
385	uint8_t *cmd;
386
387
388	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
389
390	mutex_lock(&dev_priv->binding_mutex);
391	vmw_binding_state_scrub(uctx->cbs);
392
393	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
394
395	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
396	if (unlikely(cmd == NULL)) {
397		mutex_unlock(&dev_priv->binding_mutex);
398		return -ENOMEM;
399	}
400
401	cmd2 = (void *) cmd;
402	if (readback) {
403		cmd1 = (void *) cmd;
404		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
405		cmd1->header.size = sizeof(cmd1->body);
406		cmd1->body.cid = res->id;
407		cmd2 = (void *) (&cmd1[1]);
408	}
409	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
410	cmd2->header.size = sizeof(cmd2->body);
411	cmd2->body.cid = res->id;
412	cmd2->body.mobid = SVGA3D_INVALID_ID;
413
414	vmw_fifo_commit(dev_priv, submit_size);
415	mutex_unlock(&dev_priv->binding_mutex);
416
417	/*
418	 * Create a fence object and fence the backup buffer.
419	 */
420
421	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
422					  &fence, NULL);
423
424	vmw_bo_fence_single(bo, fence);
425
426	if (likely(fence != NULL))
427		vmw_fence_obj_unreference(&fence);
428
429	return 0;
430}
431
432static int vmw_gb_context_destroy(struct vmw_resource *res)
433{
434	struct vmw_private *dev_priv = res->dev_priv;
435	struct {
436		SVGA3dCmdHeader header;
437		SVGA3dCmdDestroyGBContext body;
438	} *cmd;
439
440	if (likely(res->id == -1))
441		return 0;
442
443	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
444	if (unlikely(cmd == NULL))
445		return -ENOMEM;
446
447	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
448	cmd->header.size = sizeof(cmd->body);
449	cmd->body.cid = res->id;
450	vmw_fifo_commit(dev_priv, sizeof(*cmd));
451	if (dev_priv->query_cid == res->id)
452		dev_priv->query_cid_valid = false;
453	vmw_resource_release_id(res);
454	vmw_fifo_resource_dec(dev_priv);
455
456	return 0;
457}
458
459/*
460 * DX context.
461 */
462
463static int vmw_dx_context_create(struct vmw_resource *res)
464{
465	struct vmw_private *dev_priv = res->dev_priv;
466	int ret;
467	struct {
468		SVGA3dCmdHeader header;
469		SVGA3dCmdDXDefineContext body;
470	} *cmd;
471
472	if (likely(res->id != -1))
473		return 0;
474
475	ret = vmw_resource_alloc_id(res);
476	if (unlikely(ret != 0)) {
477		DRM_ERROR("Failed to allocate a context id.\n");
478		goto out_no_id;
479	}
480
481	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
482		ret = -EBUSY;
483		goto out_no_fifo;
484	}
485
486	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
487	if (unlikely(cmd == NULL)) {
488		ret = -ENOMEM;
489		goto out_no_fifo;
490	}
491
492	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
493	cmd->header.size = sizeof(cmd->body);
494	cmd->body.cid = res->id;
495	vmw_fifo_commit(dev_priv, sizeof(*cmd));
496	vmw_fifo_resource_inc(dev_priv);
497
498	return 0;
499
500out_no_fifo:
501	vmw_resource_release_id(res);
502out_no_id:
503	return ret;
504}
505
506static int vmw_dx_context_bind(struct vmw_resource *res,
507			       struct ttm_validate_buffer *val_buf)
508{
509	struct vmw_private *dev_priv = res->dev_priv;
510	struct {
511		SVGA3dCmdHeader header;
512		SVGA3dCmdDXBindContext body;
513	} *cmd;
514	struct ttm_buffer_object *bo = val_buf->bo;
515
516	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
517
518	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
519	if (unlikely(cmd == NULL))
520		return -ENOMEM;
521
522	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
523	cmd->header.size = sizeof(cmd->body);
524	cmd->body.cid = res->id;
525	cmd->body.mobid = bo->mem.start;
526	cmd->body.validContents = res->backup_dirty;
527	res->backup_dirty = false;
528	vmw_fifo_commit(dev_priv, sizeof(*cmd));
529
530
531	return 0;
532}
533
534/**
535 * vmw_dx_context_scrub_cotables - Scrub all bindings and
536 * cotables from a context
537 *
538 * @ctx: Pointer to the context resource
539 * @readback: Whether to save the cotable contents on scrubbing.
540 *
541 * COtables must be unbound before their context, but unbinding requires
542 * the backup buffer being reserved, whereas scrubbing does not.
543 * This function scrubs all cotables of a context, potentially reading back
544 * the contents into their backup buffers. However, scrubbing cotables
545 * also makes the device context invalid, so scrub all bindings first so
546 * that doesn't have to be done later with an invalid context.
547 */
548void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
549				   bool readback)
550{
551	struct vmw_user_context *uctx =
552		container_of(ctx, struct vmw_user_context, res);
553	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
554		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
555	int i;
556
557	vmw_binding_state_scrub(uctx->cbs);
558	for (i = 0; i < cotable_max; ++i) {
559		struct vmw_resource *res;
560
561		/* Avoid racing with ongoing cotable destruction. */
562		spin_lock(&uctx->cotable_lock);
563		res = uctx->cotables[vmw_cotable_scrub_order[i]];
564		if (res)
565			res = vmw_resource_reference_unless_doomed(res);
566		spin_unlock(&uctx->cotable_lock);
567		if (!res)
568			continue;
569
570		WARN_ON(vmw_cotable_scrub(res, readback));
571		vmw_resource_unreference(&res);
572	}
573}
574
575static int vmw_dx_context_unbind(struct vmw_resource *res,
576				 bool readback,
577				 struct ttm_validate_buffer *val_buf)
578{
579	struct vmw_private *dev_priv = res->dev_priv;
580	struct ttm_buffer_object *bo = val_buf->bo;
581	struct vmw_fence_obj *fence;
582	struct vmw_user_context *uctx =
583		container_of(res, struct vmw_user_context, res);
584
585	struct {
586		SVGA3dCmdHeader header;
587		SVGA3dCmdDXReadbackContext body;
588	} *cmd1;
589	struct {
590		SVGA3dCmdHeader header;
591		SVGA3dCmdDXBindContext body;
592	} *cmd2;
593	uint32_t submit_size;
594	uint8_t *cmd;
595
596
597	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
598
599	mutex_lock(&dev_priv->binding_mutex);
600	vmw_dx_context_scrub_cotables(res, readback);
601
602	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
603	    readback) {
604		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
605		if (vmw_query_readback_all(uctx->dx_query_mob))
606			DRM_ERROR("Failed to read back query states\n");
607	}
608
609	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
610
611	cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
612	if (unlikely(cmd == NULL)) {
613		mutex_unlock(&dev_priv->binding_mutex);
614		return -ENOMEM;
615	}
616
617	cmd2 = (void *) cmd;
618	if (readback) {
619		cmd1 = (void *) cmd;
620		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
621		cmd1->header.size = sizeof(cmd1->body);
622		cmd1->body.cid = res->id;
623		cmd2 = (void *) (&cmd1[1]);
624	}
625	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
626	cmd2->header.size = sizeof(cmd2->body);
627	cmd2->body.cid = res->id;
628	cmd2->body.mobid = SVGA3D_INVALID_ID;
629
630	vmw_fifo_commit(dev_priv, submit_size);
631	mutex_unlock(&dev_priv->binding_mutex);
632
633	/*
634	 * Create a fence object and fence the backup buffer.
635	 */
636
637	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
638					  &fence, NULL);
639
640	vmw_bo_fence_single(bo, fence);
641
642	if (likely(fence != NULL))
643		vmw_fence_obj_unreference(&fence);
644
645	return 0;
646}
647
648static int vmw_dx_context_destroy(struct vmw_resource *res)
649{
650	struct vmw_private *dev_priv = res->dev_priv;
651	struct {
652		SVGA3dCmdHeader header;
653		SVGA3dCmdDXDestroyContext body;
654	} *cmd;
655
656	if (likely(res->id == -1))
657		return 0;
658
659	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
660	if (unlikely(cmd == NULL))
661		return -ENOMEM;
662
663	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
664	cmd->header.size = sizeof(cmd->body);
665	cmd->body.cid = res->id;
666	vmw_fifo_commit(dev_priv, sizeof(*cmd));
667	if (dev_priv->query_cid == res->id)
668		dev_priv->query_cid_valid = false;
669	vmw_resource_release_id(res);
670	vmw_fifo_resource_dec(dev_priv);
671
672	return 0;
673}
674
675/**
676 * User-space context management:
677 */
678
679static struct vmw_resource *
680vmw_user_context_base_to_res(struct ttm_base_object *base)
681{
682	return &(container_of(base, struct vmw_user_context, base)->res);
683}
684
685static void vmw_user_context_free(struct vmw_resource *res)
686{
687	struct vmw_user_context *ctx =
688	    container_of(res, struct vmw_user_context, res);
689	struct vmw_private *dev_priv = res->dev_priv;
690
691	if (ctx->cbs)
692		vmw_binding_state_free(ctx->cbs);
693
694	(void) vmw_context_bind_dx_query(res, NULL);
695
696	ttm_base_object_kfree(ctx, base);
697	ttm_mem_global_free(vmw_mem_glob(dev_priv),
698			    vmw_user_context_size);
699}
700
701/**
702 * This function is called when user space has no more references on the
703 * base object. It releases the base-object's reference on the resource object.
704 */
705
706static void vmw_user_context_base_release(struct ttm_base_object **p_base)
707{
708	struct ttm_base_object *base = *p_base;
709	struct vmw_user_context *ctx =
710	    container_of(base, struct vmw_user_context, base);
711	struct vmw_resource *res = &ctx->res;
712
713	*p_base = NULL;
714	vmw_resource_unreference(&res);
715}
716
717int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
718			      struct drm_file *file_priv)
719{
720	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
721	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
722
723	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
724}
725
726static int vmw_context_define(struct drm_device *dev, void *data,
727			      struct drm_file *file_priv, bool dx)
728{
729	struct vmw_private *dev_priv = vmw_priv(dev);
730	struct vmw_user_context *ctx;
731	struct vmw_resource *res;
732	struct vmw_resource *tmp;
733	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
734	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
735	struct ttm_operation_ctx ttm_opt_ctx = {
736		.interruptible = true,
737		.no_wait_gpu = false
738	};
739	int ret;
740
741	if (!has_sm4_context(dev_priv) && dx) {
742		VMW_DEBUG_USER("DX contexts not supported by device.\n");
743		return -EINVAL;
744	}
745
746	if (unlikely(vmw_user_context_size == 0))
747		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) +
748		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
749		  VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
750
751	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
752	if (unlikely(ret != 0))
753		return ret;
754
755	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
756				   vmw_user_context_size,
757				   &ttm_opt_ctx);
758	if (unlikely(ret != 0)) {
759		if (ret != -ERESTARTSYS)
760			DRM_ERROR("Out of graphics memory for context"
761				  " creation.\n");
762		goto out_unlock;
763	}
764
765	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
766	if (unlikely(!ctx)) {
767		ttm_mem_global_free(vmw_mem_glob(dev_priv),
768				    vmw_user_context_size);
769		ret = -ENOMEM;
770		goto out_unlock;
771	}
772
773	res = &ctx->res;
774	ctx->base.shareable = false;
775	ctx->base.tfile = NULL;
776
777	/*
778	 * From here on, the destructor takes over resource freeing.
779	 */
780
781	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
782	if (unlikely(ret != 0))
783		goto out_unlock;
784
785	tmp = vmw_resource_reference(&ctx->res);
786	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
787				   &vmw_user_context_base_release, NULL);
788
789	if (unlikely(ret != 0)) {
790		vmw_resource_unreference(&tmp);
791		goto out_err;
792	}
793
794	arg->cid = ctx->base.handle;
795out_err:
796	vmw_resource_unreference(&res);
797out_unlock:
798	ttm_read_unlock(&dev_priv->reservation_sem);
799	return ret;
800}
801
802int vmw_context_define_ioctl(struct drm_device *dev, void *data,
803			     struct drm_file *file_priv)
804{
805	return vmw_context_define(dev, data, file_priv, false);
806}
807
808int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
809				      struct drm_file *file_priv)
810{
811	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
812	struct drm_vmw_context_arg *rep = &arg->rep;
813
814	switch (arg->req) {
815	case drm_vmw_context_legacy:
816		return vmw_context_define(dev, rep, file_priv, false);
817	case drm_vmw_context_dx:
818		return vmw_context_define(dev, rep, file_priv, true);
819	default:
820		break;
821	}
822	return -EINVAL;
823}
824
825/**
826 * vmw_context_binding_list - Return a list of context bindings
827 *
828 * @ctx: The context resource
829 *
830 * Returns the current list of bindings of the given context. Note that
831 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
832 */
833struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
834{
835	struct vmw_user_context *uctx =
836		container_of(ctx, struct vmw_user_context, res);
837
838	return vmw_binding_state_list(uctx->cbs);
839}
840
841struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
842{
843	return container_of(ctx, struct vmw_user_context, res)->man;
844}
845
846struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
847					 SVGACOTableType cotable_type)
848{
849	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
850		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
851
852	if (cotable_type >= cotable_max)
853		return ERR_PTR(-EINVAL);
854
855	return container_of(ctx, struct vmw_user_context, res)->
856		cotables[cotable_type];
857}
858
859/**
860 * vmw_context_binding_state -
861 * Return a pointer to a context binding state structure
862 *
863 * @ctx: The context resource
864 *
865 * Returns the current state of bindings of the given context. Note that
866 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
867 */
868struct vmw_ctx_binding_state *
869vmw_context_binding_state(struct vmw_resource *ctx)
870{
871	return container_of(ctx, struct vmw_user_context, res)->cbs;
872}
873
874/**
875 * vmw_context_bind_dx_query -
876 * Sets query MOB for the context.  If @mob is NULL, then this function will
877 * remove the association between the MOB and the context.  This function
878 * assumes the binding_mutex is held.
879 *
880 * @ctx_res: The context resource
881 * @mob: a reference to the query MOB
882 *
883 * Returns -EINVAL if a MOB has already been set and does not match the one
884 * specified in the parameter.  0 otherwise.
885 */
886int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
887			      struct vmw_buffer_object *mob)
888{
889	struct vmw_user_context *uctx =
890		container_of(ctx_res, struct vmw_user_context, res);
891
892	if (mob == NULL) {
893		if (uctx->dx_query_mob) {
894			uctx->dx_query_mob->dx_query_ctx = NULL;
895			vmw_bo_unreference(&uctx->dx_query_mob);
896			uctx->dx_query_mob = NULL;
897		}
898
899		return 0;
900	}
901
902	/* Can only have one MOB per context for queries */
903	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
904		return -EINVAL;
905
906	mob->dx_query_ctx  = ctx_res;
907
908	if (!uctx->dx_query_mob)
909		uctx->dx_query_mob = vmw_bo_reference(mob);
910
911	return 0;
912}
913
914/**
915 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
916 *
917 * @ctx_res: The context resource
918 */
919struct vmw_buffer_object *
920vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
921{
922	struct vmw_user_context *uctx =
923		container_of(ctx_res, struct vmw_user_context, res);
924
925	return uctx->dx_query_mob;
926}
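
For orientation, here is a minimal user-space sketch of how the two context ioctls above are reached. It assumes libdrm (drmCommandWrite()/drmCommandWriteRead()), the uapi header vmwgfx_drm.h, and an already opened vmwgfx DRM file descriptor; the helper name is hypothetical and error handling is reduced to the essentials. DRM_VMW_CREATE_CONTEXT is dispatched to vmw_context_define_ioctl() and DRM_VMW_UNREF_CONTEXT to vmw_context_destroy_ioctl().

#include <stdio.h>
#include <xf86drm.h>		/* libdrm: drmCommandWrite(), drmCommandWriteRead() */
#include <drm/vmwgfx_drm.h>	/* DRM_VMW_* command indices, struct drm_vmw_context_arg */

/* Hypothetical helper: create one legacy context, print its id, drop it. */
static int vmw_context_roundtrip(int fd)
{
	struct drm_vmw_context_arg arg = { 0 };
	int ret;

	/* Served by vmw_context_define_ioctl(); fills in arg.cid on success. */
	ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_CONTEXT, &arg, sizeof(arg));
	if (ret)
		return ret;
	printf("context id %d\n", arg.cid);

	/* Served by vmw_context_destroy_ioctl(); unrefs the handle in arg.cid. */
	return drmCommandWrite(fd, DRM_VMW_UNREF_CONTEXT, &arg, sizeof(arg));
}

A DX context would instead go through DRM_VMW_CREATE_EXTENDED_CONTEXT with a union drm_vmw_extended_context_arg whose req field is drm_vmw_context_dx, which vmw_extended_context_define_ioctl() translates into vmw_context_define(dev, rep, file_priv, true). On the last unref, vmw_user_context_base_release() drops the base object's reference on the resource, and vmw_user_context_free() eventually releases the memory.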