v5.4 — drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Treat context OTables as resources to make use of the resource
 * backing MOB eviction mechanism, which is used to read back the COTable
 * whenever the backing MOB is evicted.
 */

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"

/**
 * struct vmw_cotable - Context Object Table resource
 *
 * @res: struct vmw_resource we are deriving from.
 * @ctx: non-refcounted pointer to the owning context.
 * @size_read_back: Size of data read back during eviction.
 * @seen_entries: Seen entries in command stream for this cotable.
 * @type: The cotable type.
 * @scrubbed: Whether the cotable has been scrubbed.
 * @resource_list: List of resources in the cotable.
 */
struct vmw_cotable {
	struct vmw_resource res;
	struct vmw_resource *ctx;
	size_t size_read_back;
	int seen_entries;
	u32 type;
	bool scrubbed;
	struct list_head resource_list;
};

/**
 * struct vmw_cotable_info - Static info about cotable types
 *
 * @min_initial_entries: Min number of initial entries at cotable allocation
 * for this cotable type.
 * @size: Size of each entry.
 * @unbind_func: Unbind call-back function.
 */
struct vmw_cotable_info {
	u32 min_initial_entries;
	u32 size;
	void (*unbind_func)(struct vmw_private *, struct list_head *,
			    bool);
};

static const struct vmw_cotable_info co_info[] = {
	{1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
	{1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
	{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
	{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
	{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
	{1, sizeof(SVGACOTableDXStreamOutputEntry), NULL},
	{1, sizeof(SVGACOTableDXQueryEntry), NULL},
	{1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub}
};

/*
 * Cotables with bindings that we remove must be scrubbed first;
 * otherwise the device will swap in an invalid context when we remove
 * bindings before scrubbing a cotable...
 */
const SVGACOTableType vmw_cotable_scrub_order[] = {
	SVGA_COTABLE_RTVIEW,
	SVGA_COTABLE_DSVIEW,
	SVGA_COTABLE_SRVIEW,
	SVGA_COTABLE_DXSHADER,
	SVGA_COTABLE_ELEMENTLAYOUT,
	SVGA_COTABLE_BLENDSTATE,
	SVGA_COTABLE_DEPTHSTENCIL,
	SVGA_COTABLE_RASTERIZERSTATE,
	SVGA_COTABLE_SAMPLER,
	SVGA_COTABLE_STREAMOUTPUT,
	SVGA_COTABLE_DXQUERY,
};
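
/*
 * Editor's sketch (hypothetical, not part of this file): one way a
 * context teardown path might consume vmw_cotable_scrub_order. The real
 * consumer is vmw_dx_context_scrub_cotables() in vmwgfx_context.c; the
 * helper ctx_cotable() below is invented and stands in for however the
 * context looks up its cotable resource of a given type.
 */
#if 0
static void example_scrub_all_cotables(struct vmw_resource *ctx,
				       bool readback)
{
	u32 i;

	/* Scrub in the documented order: view/shader tables first. */
	for (i = 0; i < ARRAY_SIZE(vmw_cotable_scrub_order); ++i) {
		struct vmw_resource *res =
			ctx_cotable(ctx, vmw_cotable_scrub_order[i]);

		if (res)
			WARN_ON(vmw_cotable_scrub(res, readback));
	}
}
#endif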

static int vmw_cotable_bind(struct vmw_resource *res,
			    struct ttm_validate_buffer *val_buf);
static int vmw_cotable_unbind(struct vmw_resource *res,
			      bool readback,
			      struct ttm_validate_buffer *val_buf);
static int vmw_cotable_create(struct vmw_resource *res);
static int vmw_cotable_destroy(struct vmw_resource *res);

static const struct vmw_res_func vmw_cotable_func = {
	.res_type = vmw_res_cotable,
	.needs_backup = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "context guest backed object tables",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_cotable_create,
	.destroy = vmw_cotable_destroy,
	.bind = vmw_cotable_bind,
	.unbind = vmw_cotable_unbind,
};

/**
 * vmw_cotable - Convert a struct vmw_resource pointer to a struct
 * vmw_cotable pointer
 *
 * @res: Pointer to the resource.
 */
static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
{
	return container_of(res, struct vmw_cotable, res);
}

/**
 * vmw_cotable_destroy - Cotable resource destroy callback
 *
 * @res: Pointer to the cotable resource.
 *
 * There is no device cotable destroy command, so this function only
 * makes sure that the resource id is set to invalid.
 */
static int vmw_cotable_destroy(struct vmw_resource *res)
{
	res->id = -1;
	return 0;
}

/**
 * vmw_cotable_unscrub - Undo a cotable scrub operation
 *
 * @res: Pointer to the cotable resource
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 * This is identical to bind() except the function interface looks different.
 */
static int vmw_cotable_unscrub(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = &res->backup->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCOTable body;
	} *cmd;

	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
	dma_resv_assert_held(bo->base.resv);

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;

	WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
	WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
	cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = vcotbl->ctx->id;
	cmd->body.type = vcotbl->type;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validSizeInBytes = vcotbl->size_read_back;

	vmw_fifo_commit_flush(dev_priv, sizeof(*cmd));
	vcotbl->scrubbed = false;

	return 0;
}

/**
 * vmw_cotable_bind - Cotable resource bind callback
 *
 * @res: Pointer to the cotable resource
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 */
static int vmw_cotable_bind(struct vmw_resource *res,
			    struct ttm_validate_buffer *val_buf)
{
	/*
	 * The create() callback may have changed @res->backup without
	 * the caller noticing, and with val_buf->bo still pointing to
	 * the old backup buffer. Although hackish, and not used currently,
	 * take the opportunity to correct the value here so that it's not
	 * misused in the future.
	 */
	val_buf->bo = &res->backup->base;

	return vmw_cotable_unscrub(res);
}

/**
 * vmw_cotable_scrub - Scrub the cotable from the device.
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to initiate a readback of the cotable data to the backup
 * buffer.
 *
 * In some situations (context swapouts) it might be desirable to make the
 * device forget about the cotable without performing a full unbind. A full
 * unbind requires reserved backup buffers and it might not be possible to
 * reserve them due to locking order violation issues. The vmw_cotable_scrub
 * function implements a partial unbind() without that requirement but with the
 * following restrictions.
 * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
 *    be called.
 * 2) Before the cotable backing buffer is used by the CPU, or during the
 *    resource destruction, vmw_cotable_unbind() must be called.
 * A usage sketch follows the function body below.
 */
int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	size_t submit_size;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackCOTable body;
	} *cmd0;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCOTable body;
	} *cmd1;

	if (vcotbl->scrubbed)
		return 0;

	if (co_info[vcotbl->type].unbind_func)
		co_info[vcotbl->type].unbind_func(dev_priv,
						  &vcotbl->resource_list,
						  readback);
	submit_size = sizeof(*cmd1);
	if (readback)
		submit_size += sizeof(*cmd0);

	cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
	if (!cmd1)
		return -ENOMEM;

	vcotbl->size_read_back = 0;
	if (readback) {
		cmd0 = (void *) cmd1;
		cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
		cmd0->header.size = sizeof(cmd0->body);
		cmd0->body.cid = vcotbl->ctx->id;
		cmd0->body.type = vcotbl->type;
		cmd1 = (void *) &cmd0[1];
		vcotbl->size_read_back = res->backup_size;
	}
	cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.cid = vcotbl->ctx->id;
	cmd1->body.type = vcotbl->type;
	cmd1->body.mobid = SVGA3D_INVALID_ID;
	cmd1->body.validSizeInBytes = 0;
	vmw_fifo_commit_flush(dev_priv, submit_size);
	vcotbl->scrubbed = true;

	/* Trigger a create() on next validate. */
	res->id = -1;

	return 0;
}
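
/*
 * Editor's usage sketch (hypothetical, not part of this file): the
 * scrub/unscrub pairing described above, as seen from a context swapout
 * path. Error handling and locking are elided, and in the real driver
 * the unscrub step happens via the create()/bind() callbacks on the next
 * validation rather than by a direct call.
 */
#if 0
	/* Device must forget the cotable; read its contents back first. */
	ret = vmw_cotable_scrub(cotable_res, true);
	if (ret)
		return ret;

	/* ... the backing MOB may now be evicted / context swapped out ... */

	/* Before the GPU touches the cotable again: rebind it. */
	ret = vmw_cotable_unscrub(cotable_res);
#endif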

/**
 * vmw_cotable_unbind - Cotable resource unbind callback
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to read back cotable data to the backup buffer.
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * Unbinds the cotable from the device and fences the backup buffer.
 */
static int vmw_cotable_unbind(struct vmw_resource *res,
			      bool readback,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	if (!vmw_resource_mob_attached(res))
		return 0;

	WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
	dma_resv_assert_held(bo->base.resv);

	mutex_lock(&dev_priv->binding_mutex);
	if (!vcotbl->scrubbed)
		vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
	mutex_unlock(&dev_priv->binding_mutex);
	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_bo_fence_single(bo, fence);
	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_cotable_readback - Read back a cotable without unbinding.
 *
 * @res: The cotable resource.
 *
 * Reads back a cotable to its backing mob without scrubbing the MOB from
 * the cotable. The MOB is fenced for subsequent CPU access.
 */
static int vmw_cotable_readback(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackCOTable body;
	} *cmd;
	struct vmw_fence_obj *fence;

	if (!vcotbl->scrubbed) {
		cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
		if (!cmd)
			return -ENOMEM;

		cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.cid = vcotbl->ctx->id;
		cmd->body.type = vcotbl->type;
		vcotbl->size_read_back = res->backup_size;
		vmw_fifo_commit(dev_priv, sizeof(*cmd));
	}

	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_bo_fence_single(&res->backup->base, fence);
	vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_cotable_resize - Resize a cotable.
 *
 * @res: The cotable resource.
 * @new_size: The new size.
 *
 * Resizes a cotable and binds the new backup buffer.
 * On failure the cotable is left intact.
 * Important! This function may not fail once the MOB switch has been
 * committed to hardware. That would put the device context in an
 * invalid state which we can't currently recover from.
 */
static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_buffer_object *buf, *old_buf = res->backup;
	struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
	size_t old_size = res->backup_size;
	size_t old_size_read_back = vcotbl->size_read_back;
	size_t cur_size_read_back;
	struct ttm_bo_kmap_obj old_map, new_map;
	int ret;
	size_t i;

	ret = vmw_cotable_readback(res);
	if (ret)
		return ret;

	cur_size_read_back = vcotbl->size_read_back;
	vcotbl->size_read_back = old_size_read_back;

	/*
	 * While the device is processing, allocate and reserve a buffer object
	 * for the new COTable. Initially pin the buffer object to make sure
	 * we can use tryreserve without failure.
	 */
	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
			  true, vmw_bo_bo_free);
	if (ret) {
		DRM_ERROR("Failed initializing new cotable MOB.\n");
		return ret;
	}

	bo = &buf->base;
	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));

	ret = ttm_bo_wait(old_bo, false, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed waiting for cotable unbind.\n");
		goto out_wait;
	}

	/*
	 * Do a page by page copy of COTables. This eliminates slow vmap()s.
	 * This should really be a TTM utility.
	 */
	for (i = 0; i < old_bo->num_pages; ++i) {
		bool dummy;

		ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed mapping old COTable on resize.\n");
			goto out_wait;
		}
		ret = ttm_bo_kmap(bo, i, 1, &new_map);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed mapping new COTable on resize.\n");
			goto out_map_new;
		}
		memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
		       ttm_kmap_obj_virtual(&old_map, &dummy),
		       PAGE_SIZE);
		ttm_bo_kunmap(&new_map);
		ttm_bo_kunmap(&old_map);
	}

	/* Unpin new buffer, and switch backup buffers. */
	ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed validating new COTable backup buffer.\n");
		goto out_wait;
	}

	vmw_resource_mob_detach(res);
	res->backup = buf;
	res->backup_size = new_size;
	vcotbl->size_read_back = cur_size_read_back;

	/*
	 * Now tell the device to switch. If this fails, then we need to
	 * revert the full resize.
	 */
	ret = vmw_cotable_unscrub(res);
	if (ret) {
		DRM_ERROR("Failed switching COTable backup buffer.\n");
		res->backup = old_buf;
		res->backup_size = old_size;
		vcotbl->size_read_back = old_size_read_back;
		vmw_resource_mob_attach(res);
		goto out_wait;
	}

	vmw_resource_mob_attach(res);
	/* Let go of the old mob. */
	vmw_bo_unreference(&old_buf);
	res->id = vcotbl->type;

	return 0;

out_map_new:
	ttm_bo_kunmap(&old_map);
out_wait:
	ttm_bo_unreserve(bo);
	vmw_bo_unreference(&buf);

	return ret;
}
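
/*
 * Editor's sketch (hypothetical, not part of this file): the page-by-page
 * copy above factored into the generic TTM utility the in-line comment
 * asks for. Both objects are assumed reserved, populated and at least
 * num_pages long; ttm_bo_kmap()/ttm_kmap_obj_virtual()/ttm_bo_kunmap()
 * are the real v5.4-era TTM interfaces, the helper name is invented.
 */
#if 0
static int example_ttm_bo_copy_pages(struct ttm_buffer_object *dst,
				     struct ttm_buffer_object *src,
				     unsigned long num_pages)
{
	struct ttm_bo_kmap_obj src_map, dst_map;
	unsigned long i;
	bool is_iomem;
	int ret;

	for (i = 0; i < num_pages; ++i) {
		/* Map one page of each object at a time; avoids vmap(). */
		ret = ttm_bo_kmap(src, i, 1, &src_map);
		if (ret)
			return ret;
		ret = ttm_bo_kmap(dst, i, 1, &dst_map);
		if (ret) {
			ttm_bo_kunmap(&src_map);
			return ret;
		}
		memcpy(ttm_kmap_obj_virtual(&dst_map, &is_iomem),
		       ttm_kmap_obj_virtual(&src_map, &is_iomem),
		       PAGE_SIZE);
		ttm_bo_kunmap(&dst_map);
		ttm_bo_kunmap(&src_map);
	}

	return 0;
}
#endif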

/**
 * vmw_cotable_create - Cotable resource create callback
 *
 * @res: Pointer to a cotable resource.
 *
 * There is no separate create command for cotables, so this callback, which
 * is called before bind() in the validation sequence, is instead used for two
 * things.
 * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
 *    buffer.
 * 2) Resize the cotable if needed.
 */
static int vmw_cotable_create(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	size_t new_size = res->backup_size;
	size_t needed_size;
	int ret;

	/* Check whether we need to resize the cotable */
	needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
	while (needed_size > new_size)
		new_size *= 2;

	if (likely(new_size <= res->backup_size)) {
		if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
			ret = vmw_cotable_unscrub(res);
			if (ret)
				return ret;
		}
		res->id = vcotbl->type;
		return 0;
	}

	return vmw_cotable_resize(res, new_size);
}
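
/*
 * Editor's note: a worked example of the doubling above, with an assumed
 * entry size of 64 bytes and a one-page (4096-byte) backup buffer. If the
 * highest entry id seen is 100, needed_size = 101 * 64 = 6464 bytes, so
 * new_size doubles once from 4096 to 8192 and the cotable is resized.
 * For ids up to 63 (needed_size <= 4096) no resize is triggered.
 */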

/**
 * vmw_hw_cotable_destroy - Cotable hw_destroy callback
 *
 * @res: Pointer to a cotable resource.
 *
 * The final (part of resource destruction) destroy callback.
 */
static void vmw_hw_cotable_destroy(struct vmw_resource *res)
{
	(void) vmw_cotable_destroy(res);
}

static size_t cotable_acc_size;

/**
 * vmw_cotable_free - Cotable resource destructor
 *
 * @res: Pointer to a cotable resource.
 */
static void vmw_cotable_free(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;

	kfree(res);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
}

/**
 * vmw_cotable_alloc - Create a cotable resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @ctx: Pointer to the context resource.
 * The cotable resource will not add a refcount.
 * @type: The cotable type.
 */
struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
				       struct vmw_resource *ctx,
				       u32 type)
{
	struct vmw_cotable *vcotbl;
	struct ttm_operation_ctx ttm_opt_ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};
	int ret;
	u32 num_entries;

	if (unlikely(cotable_acc_size == 0))
		cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable));

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   cotable_acc_size, &ttm_opt_ctx);
	if (unlikely(ret))
		return ERR_PTR(ret);

	vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
	if (unlikely(!vcotbl)) {
		ret = -ENOMEM;
		goto out_no_alloc;
	}

	ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
				vmw_cotable_free, &vmw_cotable_func);
	if (unlikely(ret != 0))
		goto out_no_init;

	INIT_LIST_HEAD(&vcotbl->resource_list);
	vcotbl->res.id = type;
	vcotbl->res.backup_size = PAGE_SIZE;
	num_entries = PAGE_SIZE / co_info[type].size;
	if (num_entries < co_info[type].min_initial_entries) {
		vcotbl->res.backup_size = co_info[type].min_initial_entries *
			co_info[type].size;
		vcotbl->res.backup_size =
			(vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	}

	vcotbl->scrubbed = true;
	vcotbl->seen_entries = -1;
	vcotbl->type = type;
	vcotbl->ctx = ctx;

	vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;

	return &vcotbl->res;

out_no_init:
	kfree(vcotbl);
out_no_alloc:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size);
	return ERR_PTR(ret);
}
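
/*
 * Editor's note: a worked example of the initial sizing above, with an
 * assumed entry size of 1024 bytes and min_initial_entries of 8. Then
 * num_entries = 4096 / 1024 = 4 < 8, so backup_size becomes
 * 8 * 1024 = 8192 bytes, and the page-align step
 * (x + PAGE_SIZE - 1) & PAGE_MASK leaves it at 8192 (two pages).
 */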

/**
 * vmw_cotable_notify - Notify the cotable about an item creation
 *
 * @res: Pointer to a cotable resource.
 * @id: Item id.
 */
int vmw_cotable_notify(struct vmw_resource *res, int id)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);

	if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
		DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
			  (unsigned) vcotbl->type, id);
		return -EINVAL;
	}

	if (vcotbl->seen_entries < id) {
		/* Trigger a call to create() on next validate */
		res->id = -1;
		vcotbl->seen_entries = id;
	}

	return 0;
}
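
/*
 * Editor's sketch (hypothetical, not part of this file): how a command
 * parser path might use vmw_cotable_notify() when userspace defines a new
 * view. The helpers ctx_cotable() and emit_define_view() are invented;
 * in the real driver this happens in the vmwgfx command verifier.
 */
#if 0
	ret = vmw_cotable_notify(ctx_cotable(ctx, SVGA_COTABLE_RTVIEW),
				 view_id);
	if (ret)
		return ret;
	/*
	 * If the id was new, res->id was set to -1 above, so the next
	 * validation calls create() and grows the table before the
	 * define command reaches the device.
	 */
	emit_define_view(ctx, view_id);
#endif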

/**
 * vmw_cotable_add_resource - add a view to the cotable's list of active views.
 *
 * @res: pointer to a struct vmw_resource representing the cotable.
 * @head: pointer to the struct list_head member of the resource, dedicated
 * to the cotable active resource list.
 */
void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
{
	struct vmw_cotable *vcotbl =
		container_of(res, struct vmw_cotable, res);

	list_add_tail(head, &vcotbl->resource_list);
}
v6.13.7 — drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2014-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Treat context OTables as resources to make use of the resource
 * backing MOB eviction mechanism, which is used to read back the COTable
 * whenever the backing MOB is evicted.
 */

#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_mksstat.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"

#include <drm/ttm/ttm_placement.h>

/**
 * struct vmw_cotable - Context Object Table resource
 *
 * @res: struct vmw_resource we are deriving from.
 * @ctx: non-refcounted pointer to the owning context.
 * @size_read_back: Size of data read back during eviction.
 * @seen_entries: Seen entries in command stream for this cotable.
 * @type: The cotable type.
 * @scrubbed: Whether the cotable has been scrubbed.
 * @resource_list: List of resources in the cotable.
 */
struct vmw_cotable {
	struct vmw_resource res;
	struct vmw_resource *ctx;
	size_t size_read_back;
	int seen_entries;
	u32 type;
	bool scrubbed;
	struct list_head resource_list;
};

/**
 * struct vmw_cotable_info - Static info about cotable types
 *
 * @min_initial_entries: Min number of initial entries at cotable allocation
 * for this cotable type.
 * @size: Size of each entry.
 * @unbind_func: Unbind call-back function.
 */
struct vmw_cotable_info {
	u32 min_initial_entries;
	u32 size;
	void (*unbind_func)(struct vmw_private *, struct list_head *,
			    bool);
};


/*
 * Getting the initial size right is difficult because it all depends
 * on what the userspace is doing. The sizes will be aligned up to
 * a PAGE_SIZE so we just want to make sure that for the majority of apps
 * the initial number of entries doesn't require an immediate resize.
 * For all cotables except SVGACOTableDXElementLayoutEntry and
 * SVGACOTableDXBlendStateEntry the initial number of entries fits
 * within the PAGE_SIZE. For SVGACOTableDXElementLayoutEntry and
 * SVGACOTableDXBlendStateEntry we want to reserve two pages,
 * because that's what all apps will require initially.
 */
static const struct vmw_cotable_info co_info[] = {
	{1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy},
	{1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy},
	{PAGE_SIZE/sizeof(SVGACOTableDXElementLayoutEntry) + 1, sizeof(SVGACOTableDXElementLayoutEntry), NULL},
	{PAGE_SIZE/sizeof(SVGACOTableDXBlendStateEntry) + 1, sizeof(SVGACOTableDXBlendStateEntry), NULL},
	{1, sizeof(SVGACOTableDXDepthStencilEntry), NULL},
	{1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL},
	{1, sizeof(SVGACOTableDXSamplerEntry), NULL},
	{1, sizeof(SVGACOTableDXStreamOutputEntry), &vmw_dx_streamoutput_cotable_list_scrub},
	{1, sizeof(SVGACOTableDXQueryEntry), NULL},
	{1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub},
	{1, sizeof(SVGACOTableDXUAViewEntry), &vmw_view_cotable_list_destroy}
};
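
/*
 * Editor's note: why "PAGE_SIZE/sizeof(entry) + 1" reserves two pages.
 * With that min_initial_entries value, min_initial_entries * entry size
 * is strictly greater than PAGE_SIZE but at most PAGE_SIZE + entry size,
 * so the PFN_ALIGN() in vmw_cotable_alloc() rounds the initial
 * guest_memory_size up to exactly two pages (assuming the entry size is
 * no larger than a page).
 */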

/*
 * Cotables with bindings that we remove must be scrubbed first;
 * otherwise the device will swap in an invalid context when we remove
 * bindings before scrubbing a cotable...
 */
const SVGACOTableType vmw_cotable_scrub_order[] = {
	SVGA_COTABLE_RTVIEW,
	SVGA_COTABLE_DSVIEW,
	SVGA_COTABLE_SRVIEW,
	SVGA_COTABLE_DXSHADER,
	SVGA_COTABLE_ELEMENTLAYOUT,
	SVGA_COTABLE_BLENDSTATE,
	SVGA_COTABLE_DEPTHSTENCIL,
	SVGA_COTABLE_RASTERIZERSTATE,
	SVGA_COTABLE_SAMPLER,
	SVGA_COTABLE_STREAMOUTPUT,
	SVGA_COTABLE_DXQUERY,
	SVGA_COTABLE_UAVIEW,
};

static int vmw_cotable_bind(struct vmw_resource *res,
			    struct ttm_validate_buffer *val_buf);
static int vmw_cotable_unbind(struct vmw_resource *res,
			      bool readback,
			      struct ttm_validate_buffer *val_buf);
static int vmw_cotable_create(struct vmw_resource *res);
static int vmw_cotable_destroy(struct vmw_resource *res);

static const struct vmw_res_func vmw_cotable_func = {
	.res_type = vmw_res_cotable,
	.needs_guest_memory = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "context guest backed object tables",
	.domain = VMW_BO_DOMAIN_MOB,
	.busy_domain = VMW_BO_DOMAIN_MOB,
	.create = vmw_cotable_create,
	.destroy = vmw_cotable_destroy,
	.bind = vmw_cotable_bind,
	.unbind = vmw_cotable_unbind,
};

/**
 * vmw_cotable - Convert a struct vmw_resource pointer to a struct
 * vmw_cotable pointer
 *
 * @res: Pointer to the resource.
 */
static struct vmw_cotable *vmw_cotable(struct vmw_resource *res)
{
	return container_of(res, struct vmw_cotable, res);
}

/**
 * vmw_cotable_destroy - Cotable resource destroy callback
 *
 * @res: Pointer to the cotable resource.
 *
 * There is no device cotable destroy command, so this function only
 * makes sure that the resource id is set to invalid.
 */
static int vmw_cotable_destroy(struct vmw_resource *res)
{
	res->id = -1;
	return 0;
}

/**
 * vmw_cotable_unscrub - Undo a cotable scrub operation
 *
 * @res: Pointer to the cotable resource
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 * This is identical to bind() except the function interface looks different.
 */
static int vmw_cotable_unscrub(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCOTable body;
	} *cmd;

	WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
	dma_resv_assert_held(bo->base.resv);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (!cmd)
		return -ENOMEM;

	WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
	WARN_ON(bo->resource->mem_type != VMW_PL_MOB);
	cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = vcotbl->ctx->id;
	cmd->body.type = vcotbl->type;
	cmd->body.mobid = bo->resource->start;
	cmd->body.validSizeInBytes = vcotbl->size_read_back;

	vmw_cmd_commit_flush(dev_priv, sizeof(*cmd));
	vcotbl->scrubbed = false;

	return 0;
}

/**
 * vmw_cotable_bind - Cotable resource bind callback
 *
 * @res: Pointer to the cotable resource
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * This function issues commands to (re)bind the cotable to
 * its backing mob, which needs to be validated and reserved at this point.
 */
static int vmw_cotable_bind(struct vmw_resource *res,
			    struct ttm_validate_buffer *val_buf)
{
	/*
	 * The create() callback may have changed @res->guest_memory_bo without
	 * the caller noticing, and with val_buf->bo still pointing to
	 * the old backup buffer. Although hackish, and not used currently,
	 * take the opportunity to correct the value here so that it's not
	 * misused in the future.
	 */
	val_buf->bo = &res->guest_memory_bo->tbo;

	return vmw_cotable_unscrub(res);
}

/**
 * vmw_cotable_scrub - Scrub the cotable from the device.
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to initiate a readback of the cotable data to the backup
 * buffer.
 *
 * In some situations (context swapouts) it might be desirable to make the
 * device forget about the cotable without performing a full unbind. A full
 * unbind requires reserved backup buffers and it might not be possible to
 * reserve them due to locking order violation issues. The vmw_cotable_scrub
 * function implements a partial unbind() without that requirement but with the
 * following restrictions.
 * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must
 *    be called.
 * 2) Before the cotable backing buffer is used by the CPU, or during the
 *    resource destruction, vmw_cotable_unbind() must be called.
 */
int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	size_t submit_size;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackCOTable body;
	} *cmd0;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCOTable body;
	} *cmd1;

	if (vcotbl->scrubbed)
		return 0;

	if (co_info[vcotbl->type].unbind_func)
		co_info[vcotbl->type].unbind_func(dev_priv,
						  &vcotbl->resource_list,
						  readback);
	submit_size = sizeof(*cmd1);
	if (readback)
		submit_size += sizeof(*cmd0);

	cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (!cmd1)
		return -ENOMEM;

	vcotbl->size_read_back = 0;
	if (readback) {
		cmd0 = (void *) cmd1;
		cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
		cmd0->header.size = sizeof(cmd0->body);
		cmd0->body.cid = vcotbl->ctx->id;
		cmd0->body.type = vcotbl->type;
		cmd1 = (void *) &cmd0[1];
		vcotbl->size_read_back = res->guest_memory_size;
	}
	cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.cid = vcotbl->ctx->id;
	cmd1->body.type = vcotbl->type;
	cmd1->body.mobid = SVGA3D_INVALID_ID;
	cmd1->body.validSizeInBytes = 0;
	vmw_cmd_commit_flush(dev_priv, submit_size);
	vcotbl->scrubbed = true;

	/* Trigger a create() on next validate. */
	res->id = -1;

	return 0;
}

/**
 * vmw_cotable_unbind - Cotable resource unbind callback
 *
 * @res: Pointer to the cotable resource.
 * @readback: Whether to read back cotable data to the backup buffer.
 * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
 * for convenience / fencing.
 *
 * Unbinds the cotable from the device and fences the backup buffer.
 */
static int vmw_cotable_unbind(struct vmw_resource *res,
			      bool readback,
			      struct ttm_validate_buffer *val_buf)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	if (!vmw_resource_mob_attached(res))
		return 0;

	WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
	dma_resv_assert_held(bo->base.resv);

	mutex_lock(&dev_priv->binding_mutex);
	if (!vcotbl->scrubbed)
		vmw_dx_context_scrub_cotables(vcotbl->ctx, readback);
	mutex_unlock(&dev_priv->binding_mutex);
	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_bo_fence_single(bo, fence);
	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_cotable_readback - Read back a cotable without unbinding.
 *
 * @res: The cotable resource.
 *
 * Reads back a cotable to its backing mob without scrubbing the MOB from
 * the cotable. The MOB is fenced for subsequent CPU access.
 */
static int vmw_cotable_readback(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_private *dev_priv = res->dev_priv;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackCOTable body;
	} *cmd;
	struct vmw_fence_obj *fence;

	if (!vcotbl->scrubbed) {
		cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
		if (!cmd)
			return -ENOMEM;

		cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.cid = vcotbl->ctx->id;
		cmd->body.type = vcotbl->type;
		vcotbl->size_read_back = res->guest_memory_size;
		vmw_cmd_commit(dev_priv, sizeof(*cmd));
	}

	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
	vmw_bo_fence_single(&res->guest_memory_bo->tbo, fence);
	vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_cotable_resize - Resize a cotable.
 *
 * @res: The cotable resource.
 * @new_size: The new size.
 *
 * Resizes a cotable and binds the new backup buffer.
 * On failure the cotable is left intact.
 * Important! This function may not fail once the MOB switch has been
 * committed to hardware. That would put the device context in an
 * invalid state which we can't currently recover from.
 */
static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	struct vmw_bo *buf, *old_buf = res->guest_memory_bo;
	struct ttm_buffer_object *bo, *old_bo = &res->guest_memory_bo->tbo;
	size_t old_size = res->guest_memory_size;
	size_t old_size_read_back = vcotbl->size_read_back;
	size_t cur_size_read_back;
	struct ttm_bo_kmap_obj old_map, new_map;
	int ret;
	size_t i;
	struct vmw_bo_params bo_params = {
		.domain = VMW_BO_DOMAIN_MOB,
		.busy_domain = VMW_BO_DOMAIN_MOB,
		.bo_type = ttm_bo_type_device,
		.size = new_size,
		.pin = true
	};

	MKS_STAT_TIME_DECL(MKSSTAT_KERN_COTABLE_RESIZE);
	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_COTABLE_RESIZE);

	ret = vmw_cotable_readback(res);
	if (ret)
		goto out_done;

	cur_size_read_back = vcotbl->size_read_back;
	vcotbl->size_read_back = old_size_read_back;

	/*
	 * While the device is processing, allocate and reserve a buffer object
	 * for the new COTable. Initially pin the buffer object to make sure
	 * we can use tryreserve without failure.
	 */
	ret = vmw_gem_object_create(dev_priv, &bo_params, &buf);
	if (ret) {
		DRM_ERROR("Failed initializing new cotable MOB.\n");
		goto out_done;
	}

	bo = &buf->tbo;
	WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));

	ret = ttm_bo_wait(old_bo, false, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed waiting for cotable unbind.\n");
		goto out_wait;
	}

	/*
	 * Do a page by page copy of COTables. This eliminates slow vmap()s.
	 * This should really be a TTM utility.
	 */
	for (i = 0; i < PFN_UP(old_bo->resource->size); ++i) {
		bool dummy;

		ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed mapping old COTable on resize.\n");
			goto out_wait;
		}
		ret = ttm_bo_kmap(bo, i, 1, &new_map);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed mapping new COTable on resize.\n");
			goto out_map_new;
		}
		memcpy(ttm_kmap_obj_virtual(&new_map, &dummy),
		       ttm_kmap_obj_virtual(&old_map, &dummy),
		       PAGE_SIZE);
		ttm_bo_kunmap(&new_map);
		ttm_bo_kunmap(&old_map);
	}

	/* Unpin new buffer, and switch backup buffers. */
	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_MOB,
			     VMW_BO_DOMAIN_MOB);
	ret = ttm_bo_validate(bo, &buf->placement, &ctx);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed validating new COTable backup buffer.\n");
		goto out_wait;
	}

	vmw_resource_mob_detach(res);
	res->guest_memory_bo = buf;
	res->guest_memory_size = new_size;
	vcotbl->size_read_back = cur_size_read_back;

	/*
	 * Now tell the device to switch. If this fails, then we need to
	 * revert the full resize.
	 */
	ret = vmw_cotable_unscrub(res);
	if (ret) {
		DRM_ERROR("Failed switching COTable backup buffer.\n");
		res->guest_memory_bo = old_buf;
		res->guest_memory_size = old_size;
		vcotbl->size_read_back = old_size_read_back;
		vmw_resource_mob_attach(res);
		goto out_wait;
	}

	vmw_resource_mob_attach(res);
	/* Let go of the old mob. */
	vmw_user_bo_unref(&old_buf);
	res->id = vcotbl->type;

	ret = dma_resv_reserve_fences(bo->base.resv, 1);
	if (unlikely(ret))
		goto out_wait;

	/* Release the pin acquired in vmw_bo_create */
	ttm_bo_unpin(bo);

	MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);

	return 0;

out_map_new:
	ttm_bo_kunmap(&old_map);
out_wait:
	ttm_bo_unpin(bo);
	ttm_bo_unreserve(bo);
	vmw_user_bo_unref(&buf);

out_done:
	MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);

	return ret;
}

/**
 * vmw_cotable_create - Cotable resource create callback
 *
 * @res: Pointer to a cotable resource.
 *
 * There is no separate create command for cotables, so this callback, which
 * is called before bind() in the validation sequence, is instead used for two
 * things.
 * 1) Unscrub the cotable if it is scrubbed and still attached to a backup
 *    buffer.
 * 2) Resize the cotable if needed.
 */
static int vmw_cotable_create(struct vmw_resource *res)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);
	size_t new_size = res->guest_memory_size;
	size_t needed_size;
	int ret;

	/* Check whether we need to resize the cotable */
	needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size;
	while (needed_size > new_size)
		new_size *= 2;

	if (likely(new_size <= res->guest_memory_size)) {
		if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
			ret = vmw_cotable_unscrub(res);
			if (ret)
				return ret;
		}
		res->id = vcotbl->type;
		return 0;
	}

	return vmw_cotable_resize(res, new_size);
}

/**
 * vmw_hw_cotable_destroy - Cotable hw_destroy callback
 *
 * @res: Pointer to a cotable resource.
 *
 * The final (part of resource destruction) destroy callback.
 */
static void vmw_hw_cotable_destroy(struct vmw_resource *res)
{
	(void) vmw_cotable_destroy(res);
}

/**
 * vmw_cotable_free - Cotable resource destructor
 *
 * @res: Pointer to a cotable resource.
 */
static void vmw_cotable_free(struct vmw_resource *res)
{
	kfree(res);
}

/**
 * vmw_cotable_alloc - Create a cotable resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @ctx: Pointer to the context resource.
 * The cotable resource will not add a refcount.
 * @type: The cotable type.
 */
struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
				       struct vmw_resource *ctx,
				       u32 type)
{
	struct vmw_cotable *vcotbl;
	int ret;
	u32 num_entries;

	vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL);
	if (unlikely(!vcotbl)) {
		ret = -ENOMEM;
		goto out_no_alloc;
	}

	ret = vmw_resource_init(dev_priv, &vcotbl->res, true,
				vmw_cotable_free, &vmw_cotable_func);
	if (unlikely(ret != 0))
		goto out_no_init;

	INIT_LIST_HEAD(&vcotbl->resource_list);
	vcotbl->res.id = type;
	vcotbl->res.guest_memory_size = PAGE_SIZE;
	num_entries = PAGE_SIZE / co_info[type].size;
	if (num_entries < co_info[type].min_initial_entries) {
		vcotbl->res.guest_memory_size = co_info[type].min_initial_entries *
			co_info[type].size;
		vcotbl->res.guest_memory_size = PFN_ALIGN(vcotbl->res.guest_memory_size);
	}

	vcotbl->scrubbed = true;
	vcotbl->seen_entries = -1;
	vcotbl->type = type;
	vcotbl->ctx = ctx;

	vcotbl->res.hw_destroy = vmw_hw_cotable_destroy;

	return &vcotbl->res;

out_no_init:
	kfree(vcotbl);
out_no_alloc:
	return ERR_PTR(ret);
}

/**
 * vmw_cotable_notify - Notify the cotable about an item creation
 *
 * @res: Pointer to a cotable resource.
 * @id: Item id.
 */
int vmw_cotable_notify(struct vmw_resource *res, int id)
{
	struct vmw_cotable *vcotbl = vmw_cotable(res);

	if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) {
		DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n",
			  (unsigned) vcotbl->type, id);
		return -EINVAL;
	}

	if (vcotbl->seen_entries < id) {
		/* Trigger a call to create() on next validate */
		res->id = -1;
		vcotbl->seen_entries = id;
	}

	return 0;
}

/**
 * vmw_cotable_add_resource - add a view to the cotable's list of active views.
 *
 * @res: pointer to a struct vmw_resource representing the cotable.
 * @head: pointer to the struct list_head member of the resource, dedicated
 * to the cotable active resource list.
 */
void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head)
{
	struct vmw_cotable *vcotbl =
		container_of(res, struct vmw_cotable, res);

	list_add_tail(head, &vcotbl->resource_list);
}