v6.13.7
  1// SPDX-License-Identifier: GPL-2.0 OR MIT
  2/**************************************************************************
  3 *
  4 * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term
  5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
  6 *
  7 * Permission is hereby granted, free of charge, to any person obtaining a
  8 * copy of this software and associated documentation files (the
  9 * "Software"), to deal in the Software without restriction, including
 10 * without limitation the rights to use, copy, modify, merge, publish,
 11 * distribute, sub license, and/or sell copies of the Software, and to
 12 * permit persons to whom the Software is furnished to do so, subject to
 13 * the following conditions:
 14 *
 15 * The above copyright notice and this permission notice (including the
 16 * next paragraph) shall be included in all copies or substantial portions
 17 * of the Software.
 18 *
 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 26 *
 27 **************************************************************************/
 28
 29#include "vmwgfx_bo.h"
 30#include "vmwgfx_drv.h"
 31#include "vmwgfx_resource_priv.h"
 32
 33#include <drm/ttm/ttm_placement.h>
 34
 35static void vmw_bo_release(struct vmw_bo *vbo)
 36{
 37	struct vmw_resource *res;
 38
 39	WARN_ON(vbo->tbo.base.funcs &&
 40		kref_read(&vbo->tbo.base.refcount) != 0);
 41	vmw_bo_unmap(vbo);
 42
 43	xa_destroy(&vbo->detached_resources);
 44	WARN_ON(vbo->is_dumb && !vbo->dumb_surface);
 45	if (vbo->is_dumb && vbo->dumb_surface) {
 46		res = &vbo->dumb_surface->res;
 47		WARN_ON(vbo != res->guest_memory_bo);
 48		WARN_ON(!res->guest_memory_bo);
 49		if (res->guest_memory_bo) {
 50			/* Reserve and switch the backing mob. */
 51			mutex_lock(&res->dev_priv->cmdbuf_mutex);
 52			(void)vmw_resource_reserve(res, false, true);
 53			vmw_resource_mob_detach(res);
 54			if (res->coherent)
 55				vmw_bo_dirty_release(res->guest_memory_bo);
 56			res->guest_memory_bo = NULL;
 57			res->guest_memory_offset = 0;
 58			vmw_resource_unreserve(res, false, false, false, NULL,
 59					       0);
 60			mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 61		}
 62		vmw_surface_unreference(&vbo->dumb_surface);
 63	}
 64	drm_gem_object_release(&vbo->tbo.base);
 65}
 66
 67/**
 68 * vmw_bo_free - vmw_bo destructor
 69 *
 70 * @bo: Pointer to the embedded struct ttm_buffer_object
 71 */
 72static void vmw_bo_free(struct ttm_buffer_object *bo)
 73{
 74	struct vmw_bo *vbo = to_vmw_bo(&bo->base);
 75
 76	WARN_ON(vbo->dirty);
 77	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
 78	vmw_bo_release(vbo);
 79	kfree(vbo);
 80}
 81
 82/**
 83 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 84 *
 85 * @dev_priv:  Driver private.
 86 * @buf:  DMA buffer to move.
 87 * @placement:  The placement to pin it.
 88 * @interruptible:  Use interruptible wait.
 89 * Return: Zero on success, Negative error code on failure. In particular
 90 * -ERESTARTSYS if interrupted by a signal
 91 */
 92static int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
 93				   struct vmw_bo *buf,
 94				   struct ttm_placement *placement,
 95				   bool interruptible)
 96{
 97	struct ttm_operation_ctx ctx = {interruptible, false };
 98	struct ttm_buffer_object *bo = &buf->tbo;
 99	int ret;
100
101	vmw_execbuf_release_pinned_bo(dev_priv);
102
103	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
104	if (unlikely(ret != 0))
105		goto err;
106
107	ret = ttm_bo_validate(bo, placement, &ctx);
108	if (!ret)
109		vmw_bo_pin_reserved(buf, true);
110
111	ttm_bo_unreserve(bo);
112err:
113	return ret;
114}
115
116
117/**
118 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
119 *
120 * This function takes the reservation_sem in write mode.
121 * Flushes and unpins the query bo to avoid failures.
122 *
123 * @dev_priv:  Driver private.
124 * @buf:  DMA buffer to move.
125 * @interruptible:  Use interruptible wait.
126 * Return: Zero on success, Negative error code on failure. In particular
127 * -ERESTARTSYS if interrupted by a signal
128 */
129int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
130			      struct vmw_bo *buf,
131			      bool interruptible)
132{
133	struct ttm_operation_ctx ctx = {interruptible, false };
134	struct ttm_buffer_object *bo = &buf->tbo;
135	int ret;
136
137	vmw_execbuf_release_pinned_bo(dev_priv);
138
139	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
140	if (unlikely(ret != 0))
141		goto err;
142
143	vmw_bo_placement_set(buf,
144			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
145			     VMW_BO_DOMAIN_GMR);
146	ret = ttm_bo_validate(bo, &buf->placement, &ctx);
147	if (likely(ret == 0) || ret == -ERESTARTSYS)
148		goto out_unreserve;
149
150	vmw_bo_placement_set(buf,
151			     VMW_BO_DOMAIN_VRAM,
152			     VMW_BO_DOMAIN_VRAM);
153	ret = ttm_bo_validate(bo, &buf->placement, &ctx);
154
155out_unreserve:
156	if (!ret)
157		vmw_bo_pin_reserved(buf, true);
158
159	ttm_bo_unreserve(bo);
160err:
161	return ret;
162}
163
164
165/**
166 * vmw_bo_pin_in_vram - Move a buffer to vram.
167 *
168 * This function takes the reservation_sem in write mode.
169 * Flushes and unpins the query bo to avoid failures.
170 *
171 * @dev_priv:  Driver private.
172 * @buf:  DMA buffer to move.
173 * @interruptible:  Use interruptible wait.
174 * Return: Zero on success, Negative error code on failure. In particular
175 * -ERESTARTSYS if interrupted by a signal
176 */
177int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
178		       struct vmw_bo *buf,
179		       bool interruptible)
180{
181	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
182				       interruptible);
183}
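/*
 * Usage sketch (illustrative; assumes an already created struct vmw_bo *vbo
 * and a valid struct vmw_private *dev_priv, error handling abbreviated):
 * pinning a buffer into VRAM around an access that must not race with a
 * move, then dropping the pin again.
 *
 *	ret = vmw_bo_pin_in_vram(dev_priv, vbo, true);
 *	if (ret)
 *		return ret;
 *	... the buffer cannot be moved or evicted while pinned ...
 *	ret = vmw_bo_unpin(dev_priv, vbo, false);
 */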
184
185
186/**
187 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
188 *
189 * This function takes the reservation_sem in write mode.
190 * Flushes and unpins the query bo to avoid failures.
191 *
192 * @dev_priv:  Driver private.
193 * @buf:  DMA buffer to pin.
194 * @interruptible:  Use interruptible wait.
195 * Return: Zero on success, Negative error code on failure. In particular
196 * -ERESTARTSYS if interrupted by a signal
197 */
198int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
199				struct vmw_bo *buf,
200				bool interruptible)
201{
202	struct ttm_operation_ctx ctx = {interruptible, false };
203	struct ttm_buffer_object *bo = &buf->tbo;
204	int ret = 0;
205
206	vmw_execbuf_release_pinned_bo(dev_priv);
207	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
208	if (unlikely(ret != 0))
209		goto err_unlock;
210
211	/*
212	 * Is this buffer already in vram but not at the start of it?
213	 * In that case, evict it first because TTM isn't good at handling
214	 * that situation.
215	 */
216	if (bo->resource->mem_type == TTM_PL_VRAM &&
217	    bo->resource->start < PFN_UP(bo->resource->size) &&
218	    bo->resource->start > 0 &&
219	    buf->tbo.pin_count == 0) {
220		ctx.interruptible = false;
221		vmw_bo_placement_set(buf,
222				     VMW_BO_DOMAIN_SYS,
223				     VMW_BO_DOMAIN_SYS);
224		(void)ttm_bo_validate(bo, &buf->placement, &ctx);
225	}
226
227	vmw_bo_placement_set(buf,
228			     VMW_BO_DOMAIN_VRAM,
229			     VMW_BO_DOMAIN_VRAM);
230	buf->places[0].lpfn = PFN_UP(bo->resource->size);
231	ret = ttm_bo_validate(bo, &buf->placement, &ctx);
232
233	/* For some reason we didn't end up at the start of vram */
234	WARN_ON(ret == 0 && bo->resource->start != 0);
235	if (!ret)
236		vmw_bo_pin_reserved(buf, true);
237
238	ttm_bo_unreserve(bo);
239err_unlock:
240
241	return ret;
242}
243
244
245/**
246 * vmw_bo_unpin - Unpin the given buffer, does not move the buffer.
247 *
248 * This function takes the reservation_sem in write mode.
249 *
250 * @dev_priv:  Driver private.
251 * @buf:  DMA buffer to unpin.
252 * @interruptible:  Use interruptible wait.
253 * Return: Zero on success, Negative error code on failure. In particular
254 * -ERESTARTSYS if interrupted by a signal
255 */
256int vmw_bo_unpin(struct vmw_private *dev_priv,
257		 struct vmw_bo *buf,
258		 bool interruptible)
259{
260	struct ttm_buffer_object *bo = &buf->tbo;
261	int ret;
262
263	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
264	if (unlikely(ret != 0))
265		goto err;
266
267	vmw_bo_pin_reserved(buf, false);
268
269	ttm_bo_unreserve(bo);
270
271err:
272	return ret;
273}
274
275/**
276 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
277 * of a buffer.
278 *
279 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
280 * @ptr: SVGAGuestPtr returning the result.
281 */
282void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
283			  SVGAGuestPtr *ptr)
284{
285	if (bo->resource->mem_type == TTM_PL_VRAM) {
286		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
287		ptr->offset = bo->resource->start << PAGE_SHIFT;
288	} else {
289		ptr->gmrId = bo->resource->start;
290		ptr->offset = 0;
291	}
292}
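/*
 * Usage sketch (illustrative; "vbo" is an assumed pinned or reserved
 * struct vmw_bo): translating a buffer's current placement into an
 * SVGAGuestPtr for a device command. For VRAM placements the result is a
 * byte offset into the framebuffer GMR, otherwise the GMR id itself is
 * returned with a zero offset.
 *
 *	SVGAGuestPtr ptr;
 *
 *	vmw_bo_get_guest_ptr(&vbo->tbo, &ptr);
 *	... copy ptr.gmrId and ptr.offset into the command body ...
 */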
293
294
295/**
296 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
297 *
298 * @vbo: The buffer object. Must be reserved.
299 * @pin: Whether to pin or unpin.
300 *
301 */
302void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
303{
304	struct ttm_operation_ctx ctx = { false, true };
305	struct ttm_place pl;
306	struct ttm_placement placement;
307	struct ttm_buffer_object *bo = &vbo->tbo;
308	uint32_t old_mem_type = bo->resource->mem_type;
309	int ret;
310
311	dma_resv_assert_held(bo->base.resv);
312
313	if (pin == !!bo->pin_count)
314		return;
315
316	pl.fpfn = 0;
317	pl.lpfn = 0;
318	pl.mem_type = bo->resource->mem_type;
319	pl.flags = bo->resource->placement;
320
321	memset(&placement, 0, sizeof(placement));
322	placement.num_placement = 1;
323	placement.placement = &pl;
324
325	ret = ttm_bo_validate(bo, &placement, &ctx);
326
327	BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);
328
329	if (pin)
330		ttm_bo_pin(bo);
331	else
332		ttm_bo_unpin(bo);
333}
334
335/**
336 * vmw_bo_map_and_cache - Map a buffer object and cache the map
337 *
338 * @vbo: The buffer object to map
339 * Return: A kernel virtual address or NULL if mapping failed.
340 *
341 * This function maps a buffer object into the kernel address space, or
342 * returns the virtual kernel address of an already existing map. The virtual
343 * address remains valid as long as the buffer object is pinned or reserved.
344 * The cached map is torn down on either
345 * 1) Buffer object move
346 * 2) Buffer object swapout
347 * 3) Buffer object destruction
348 *
349 */
350void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
351{
352	return vmw_bo_map_and_cache_size(vbo, vbo->tbo.base.size);
353}
354
355void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size)
356{
357	struct ttm_buffer_object *bo = &vbo->tbo;
358	bool not_used;
359	void *virtual;
360	int ret;
361
362	atomic_inc(&vbo->map_count);
363
364	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
365	if (virtual)
366		return virtual;
367
368	ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vbo->map);
369	if (ret)
370		DRM_ERROR("Buffer object map failed: %d (size: bo = %zu, map = %zu).\n",
371			  ret, bo->base.size, size);
372
373	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
374}
375
376
377/**
378 * vmw_bo_unmap - Tear down a cached buffer object map.
379 *
380 * @vbo: The buffer object whose map we are tearing down.
381 *
382 * This function tears down a cached map set up using
383 * vmw_bo_map_and_cache().
384 */
385void vmw_bo_unmap(struct vmw_bo *vbo)
386{
387	int map_count;
388
389	if (vbo->map.bo == NULL)
390		return;
391
392	map_count = atomic_dec_return(&vbo->map_count);
393
394	if (!map_count) {
395		ttm_bo_kunmap(&vbo->map);
396		vbo->map.bo = NULL;
397	}
398}
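/*
 * Usage sketch (illustrative; "vbo" is an assumed, already created buffer
 * object): pairing vmw_bo_map_and_cache() with vmw_bo_unmap(). The mapping
 * is reference counted through map_count, and the returned kernel virtual
 * address stays valid only while the buffer object is pinned or reserved.
 *
 *	void *vaddr = vmw_bo_map_and_cache(vbo);
 *
 *	if (!vaddr)
 *		return -ENOMEM;
 *	... CPU reads/writes through vaddr ...
 *	vmw_bo_unmap(vbo);
 */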
399
400
401/**
402 * vmw_bo_init - Initialize a vmw buffer object
403 *
404 * @dev_priv: Pointer to the device private struct
405 * @vmw_bo: Buffer object to initialize
406 * @params: Parameters used to initialize the buffer object
407 * @destroy: The function used to delete the buffer object
408 * Returns: Zero on success, negative error code on error.
409 *
410 */
411static int vmw_bo_init(struct vmw_private *dev_priv,
412		       struct vmw_bo *vmw_bo,
413		       struct vmw_bo_params *params,
414		       void (*destroy)(struct ttm_buffer_object *))
415{
416	struct ttm_operation_ctx ctx = {
417		.interruptible = params->bo_type != ttm_bo_type_kernel,
418		.no_wait_gpu = false,
419		.resv = params->resv,
420	};
421	struct ttm_device *bdev = &dev_priv->bdev;
422	struct drm_device *vdev = &dev_priv->drm;
423	int ret;
424
425	memset(vmw_bo, 0, sizeof(*vmw_bo));
426
427	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
428	vmw_bo->tbo.priority = 3;
429	vmw_bo->res_tree = RB_ROOT;
430	xa_init(&vmw_bo->detached_resources);
431	atomic_set(&vmw_bo->map_count, 0);
432
433	params->size = ALIGN(params->size, PAGE_SIZE);
434	drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);
435
436	vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
437	ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
438				   &vmw_bo->placement, 0, &ctx,
439				   params->sg, params->resv, destroy);
440	if (unlikely(ret))
441		return ret;
442
443	if (params->pin)
444		ttm_bo_pin(&vmw_bo->tbo);
445	if (!params->keep_resv)
446		ttm_bo_unreserve(&vmw_bo->tbo);
447
448	return 0;
449}
450
451int vmw_bo_create(struct vmw_private *vmw,
452		  struct vmw_bo_params *params,
453		  struct vmw_bo **p_bo)
454{
455	int ret;
456
457	*p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
458	if (unlikely(!*p_bo)) {
459		DRM_ERROR("Failed to allocate a buffer.\n");
460		return -ENOMEM;
461	}
462
463	/*
464	 * vmw_bo_init will delete the *p_bo object if it fails
465	 */
466	ret = vmw_bo_init(vmw, *p_bo, params, vmw_bo_free);
467	if (unlikely(ret != 0))
468		goto out_error;
469
470	return ret;
471out_error:
472	*p_bo = NULL;
473	return ret;
474}
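/*
 * Usage sketch (illustrative): creating a kernel-internal, pinned system
 * memory buffer with vmw_bo_create(). The field names follow the
 * struct vmw_bo_params usage in vmw_bo_init() above; the particular values
 * are just one plausible combination, and "size" is an assumed caller
 * variable.
 *
 *	struct vmw_bo_params params = {
 *		.domain = VMW_BO_DOMAIN_SYS,
 *		.busy_domain = VMW_BO_DOMAIN_SYS,
 *		.bo_type = ttm_bo_type_kernel,
 *		.size = size,
 *		.pin = true,
 *	};
 *	struct vmw_bo *vbo;
 *
 *	ret = vmw_bo_create(dev_priv, &params, &vbo);
 *	if (ret)
 *		return ret;
 */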
475
476/**
477 * vmw_user_bo_synccpu_grab - Grab a struct vmw_bo for cpu
478 * access, idling previous GPU operations on the buffer and optionally
479 * blocking it for further command submissions.
480 *
481 * @vmw_bo: Pointer to the buffer object being grabbed for CPU access
482 * @flags: Flags indicating how the grab should be performed.
483 * Return: Zero on success, Negative error code on error. In particular,
484 * -EBUSY will be returned if a dontblock operation is requested and the
485 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
486 * interrupted by a signal.
487 *
488 * A blocking grab will be automatically released when @tfile is closed.
489 */
490static int vmw_user_bo_synccpu_grab(struct vmw_bo *vmw_bo,
491				    uint32_t flags)
492{
493	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
494	struct ttm_buffer_object *bo = &vmw_bo->tbo;
495	int ret;
496
497	if (flags & drm_vmw_synccpu_allow_cs) {
498		long lret;
499
500		lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
501					     true, nonblock ? 0 :
502					     MAX_SCHEDULE_TIMEOUT);
503		if (!lret)
504			return -EBUSY;
505		else if (lret < 0)
506			return lret;
507		return 0;
508	}
509
510	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
511	if (unlikely(ret != 0))
512		return ret;
513
514	ret = ttm_bo_wait(bo, true, nonblock);
515	if (likely(ret == 0))
516		atomic_inc(&vmw_bo->cpu_writers);
517
518	ttm_bo_unreserve(bo);
519	if (unlikely(ret != 0))
520		return ret;
521
522	return ret;
523}
524
525/**
526 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
527 * and unblock command submission on the buffer if blocked.
528 *
529 * @filp: Identifying the caller.
530 * @handle: Handle identifying the buffer object.
531 * @flags: Flags indicating the type of release.
532 */
533static int vmw_user_bo_synccpu_release(struct drm_file *filp,
534				       uint32_t handle,
535				       uint32_t flags)
536{
537	struct vmw_bo *vmw_bo;
538	int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);
539
540	if (!ret) {
541		if (!(flags & drm_vmw_synccpu_allow_cs)) {
542			atomic_dec(&vmw_bo->cpu_writers);
543		}
544		vmw_user_bo_unref(&vmw_bo);
545	}
546
547	return ret;
548}
549
550
551/**
552 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
553 * functionality.
554 *
555 * @dev: Identifies the drm device.
556 * @data: Pointer to the ioctl argument.
557 * @file_priv: Identifies the caller.
558 * Return: Zero on success, negative error code on error.
559 *
560 * This function checks the ioctl arguments for validity and calls the
561 * relevant synccpu functions.
562 */
563int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
564			      struct drm_file *file_priv)
565{
566	struct drm_vmw_synccpu_arg *arg =
567		(struct drm_vmw_synccpu_arg *) data;
568	struct vmw_bo *vbo;
569	int ret;
570
571	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
572	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
573			       drm_vmw_synccpu_dontblock |
574			       drm_vmw_synccpu_allow_cs)) != 0) {
575		DRM_ERROR("Illegal synccpu flags.\n");
576		return -EINVAL;
577	}
578
579	switch (arg->op) {
580	case drm_vmw_synccpu_grab:
581		ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
582		if (unlikely(ret != 0))
583			return ret;
584
585		ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
586		vmw_user_bo_unref(&vbo);
587		if (unlikely(ret != 0)) {
588			if (ret == -ERESTARTSYS || ret == -EBUSY)
589				return -EBUSY;
590			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
591				  (unsigned int) arg->handle);
592			return ret;
593		}
594		break;
595	case drm_vmw_synccpu_release:
596		ret = vmw_user_bo_synccpu_release(file_priv,
597						  arg->handle,
598						  arg->flags);
599		if (unlikely(ret != 0)) {
600			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
601				  (unsigned int) arg->handle);
602			return ret;
603		}
604		break;
605	default:
606		DRM_ERROR("Invalid synccpu operation.\n");
607		return -EINVAL;
608	}
609
610	return 0;
611}
612
613/**
614 * vmw_bo_unref_ioctl - Generic handle close ioctl.
615 *
616 * @dev: Identifies the drm device.
617 * @data: Pointer to the ioctl argument.
618 * @file_priv: Identifies the caller.
619 * Return: Zero on success, negative error code on error.
620 *
621 * This function checks the ioctl arguments for validity and closes a
622 * handle to a GEM buffer object, optionally freeing the object.
623 */
624int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
625		       struct drm_file *file_priv)
626{
627	struct drm_vmw_unref_dmabuf_arg *arg =
628	    (struct drm_vmw_unref_dmabuf_arg *)data;
629
630	return drm_gem_handle_delete(file_priv, arg->handle);
631}
632
633
634/**
635 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
636 *
637 * @filp: The file the handle is registered with.
638 * @handle: The user buffer object handle
639 * @out: Pointer to where a pointer to the embedded
640 * struct vmw_bo should be placed.
641 * Return: Zero on success, Negative error code on error.
642 *
643 * The vmw buffer object pointer will be refcounted (both ttm and gem)
644 */
645int vmw_user_bo_lookup(struct drm_file *filp,
646		       u32 handle,
647		       struct vmw_bo **out)
648{
649	struct drm_gem_object *gobj;
650
651	gobj = drm_gem_object_lookup(filp, handle);
652	if (!gobj) {
653		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
654			  (unsigned long)handle);
655		return -ESRCH;
656	}
657
658	*out = to_vmw_bo(gobj);
659
660	return 0;
661}
662
663/**
664 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
665 *                       object without unreserving it.
666 *
667 * @bo:             Pointer to the struct ttm_buffer_object to fence.
668 * @fence:          Pointer to the fence. If NULL, this function will
669 *                  insert a fence into the command stream.
670 *
671 * Contrary to the ttm_eu version of this function, it takes only
672 * a single buffer object instead of a list, and it also doesn't
673 * unreserve the buffer object, which needs to be done separately.
674 */
675void vmw_bo_fence_single(struct ttm_buffer_object *bo,
676			 struct vmw_fence_obj *fence)
677{
678	struct ttm_device *bdev = bo->bdev;
679	struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
680	int ret;
681
682	if (fence == NULL)
683		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
684	else
685		dma_fence_get(&fence->base);
686
687	ret = dma_resv_reserve_fences(bo->base.resv, 1);
688	if (!ret)
689		dma_resv_add_fence(bo->base.resv, &fence->base,
690				   DMA_RESV_USAGE_KERNEL);
691	else
692		/* Last resort fallback when we are OOM */
693		dma_fence_wait(&fence->base, false);
694	dma_fence_put(&fence->base);
695}
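/*
 * Usage sketch (illustrative; "vbo" and "fence" are assumed caller
 * variables): fencing a single, still reserved buffer after command
 * submission. A NULL fence asks vmw_bo_fence_single() to insert one into
 * the command stream itself; unreserving is left to the caller.
 *
 *	vmw_bo_fence_single(&vbo->tbo, fence);
 *	ttm_bo_unreserve(&vbo->tbo);
 */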
696
697/**
698 * vmw_bo_swap_notify - swapout notify callback.
699 *
700 * @bo: The buffer object to be swapped out.
701 */
702void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
703{
704	/* Kill any cached kernel maps before swapout */
705	vmw_bo_unmap(to_vmw_bo(&bo->base));
706}
707
708
709/**
710 * vmw_bo_move_notify - TTM move_notify_callback
711 *
712 * @bo: The TTM buffer object about to move.
713 * @mem: The struct ttm_resource indicating to what memory
714 *       region the move is taking place.
715 *
716 * Detaches cached maps and device bindings that require that the
717 * buffer doesn't move.
718 */
719void vmw_bo_move_notify(struct ttm_buffer_object *bo,
720			struct ttm_resource *mem)
721{
722	struct vmw_bo *vbo = to_vmw_bo(&bo->base);
723
724	/*
725	 * Kill any cached kernel maps before move to or from VRAM.
726	 * With other types of moves, the underlying pages stay the same,
727	 * and the map can be kept.
728	 */
729	if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
730		vmw_bo_unmap(vbo);
731
732	/*
733	 * If we're moving a backup MOB out of MOB placement, then make sure we
734	 * read back all resource content first, and unbind the MOB from
735	 * the resource.
736	 */
737	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
738		vmw_resource_unbind_list(vbo);
739}
740
741static u32 placement_flags(u32 domain, u32 desired, u32 fallback)
742{
743	if (desired & fallback & domain)
744		return 0;
745
746	if (desired & domain)
747		return TTM_PL_FLAG_DESIRED;
748
749	return TTM_PL_FLAG_FALLBACK;
750}
751
752static u32
753set_placement_list(struct ttm_place *pl, u32 desired, u32 fallback)
754{
755	u32 domain = desired | fallback;
756	u32 n = 0;
757
758	/*
759	 * The placements are ordered according to our preferences
760	 */
761	if (domain & VMW_BO_DOMAIN_MOB) {
762		pl[n].mem_type = VMW_PL_MOB;
763		pl[n].flags = placement_flags(VMW_BO_DOMAIN_MOB, desired,
764					      fallback);
765		pl[n].fpfn = 0;
766		pl[n].lpfn = 0;
767		n++;
768	}
769	if (domain & VMW_BO_DOMAIN_GMR) {
770		pl[n].mem_type = VMW_PL_GMR;
771		pl[n].flags = placement_flags(VMW_BO_DOMAIN_GMR, desired,
772					      fallback);
773		pl[n].fpfn = 0;
774		pl[n].lpfn = 0;
775		n++;
776	}
777	if (domain & VMW_BO_DOMAIN_VRAM) {
778		pl[n].mem_type = TTM_PL_VRAM;
779		pl[n].flags = placement_flags(VMW_BO_DOMAIN_VRAM, desired,
780					      fallback);
781		pl[n].fpfn = 0;
782		pl[n].lpfn = 0;
783		n++;
784	}
785	if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
786		pl[n].mem_type = VMW_PL_SYSTEM;
787		pl[n].flags = placement_flags(VMW_BO_DOMAIN_WAITABLE_SYS,
788					      desired, fallback);
789		pl[n].fpfn = 0;
790		pl[n].lpfn = 0;
791		n++;
792	}
793	if (domain & VMW_BO_DOMAIN_SYS) {
794		pl[n].mem_type = TTM_PL_SYSTEM;
795		pl[n].flags = placement_flags(VMW_BO_DOMAIN_SYS, desired,
796					      fallback);
797		pl[n].fpfn = 0;
798		pl[n].lpfn = 0;
799		n++;
800	}
801
802	WARN_ON(!n);
803	if (!n) {
804		pl[n].mem_type = TTM_PL_SYSTEM;
805		pl[n].flags = 0;
806		pl[n].fpfn = 0;
807		pl[n].lpfn = 0;
808		n++;
809	}
810	return n;
811}
812
813void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
814{
815	struct ttm_device *bdev = bo->tbo.bdev;
816	struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
817	struct ttm_placement *pl = &bo->placement;
818	bool mem_compatible = false;
819	u32 i;
820
821	pl->placement = bo->places;
822	pl->num_placement = set_placement_list(bo->places, domain, busy_domain);
823
824	if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) {
825		for (i = 0; i < pl->num_placement; ++i) {
826			if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM ||
827			    bo->tbo.resource->mem_type == pl->placement[i].mem_type)
828				mem_compatible = true;
829		}
830		if (!mem_compatible)
831			drm_warn(&vmw->drm,
832				 "%s: Incompatible transition from "
833				 "bo->base.resource->mem_type = %u to domain = %u\n",
834				 __func__, bo->tbo.resource->mem_type, domain);
835	}
836
837}
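/*
 * Usage sketch (illustrative): choosing a desired/fallback domain pair and
 * validating against it, mirroring vmw_bo_pin_in_vram_or_gmr() above. "ctx"
 * is an assumed struct ttm_operation_ctx prepared by the caller.
 *
 *	vmw_bo_placement_set(vbo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
 *			     VMW_BO_DOMAIN_GMR);
 *	ret = ttm_bo_validate(&vbo->tbo, &vbo->placement, &ctx);
 */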
838
839void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
840{
841	struct ttm_device *bdev = bo->tbo.bdev;
842	struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
843	u32 domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM;
844
845	if (vmw->has_mob)
846		domain = VMW_BO_DOMAIN_MOB;
847
848	vmw_bo_placement_set(bo, domain, domain);
849}
850
851void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
852{
853	xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL);
854}
855
856void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
857{
858	xa_erase(&vbo->detached_resources, (unsigned long)res);
859}
860
861struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo)
862{
863	unsigned long index;
864	struct vmw_resource *res = NULL;
865	struct vmw_surface *surf = NULL;
866	struct rb_node *rb_itr = vbo->res_tree.rb_node;
867
868	if (vbo->is_dumb && vbo->dumb_surface) {
869		res = &vbo->dumb_surface->res;
870		goto out;
871	}
872
873	xa_for_each(&vbo->detached_resources, index, res) {
874		if (res->func->res_type == vmw_res_surface)
875			goto out;
876	}
877
878	for (rb_itr = rb_first(&vbo->res_tree); rb_itr;
879	     rb_itr = rb_next(rb_itr)) {
880		res = rb_entry(rb_itr, struct vmw_resource, mob_node);
881		if (res->func->res_type == vmw_res_surface)
882			goto out;
883	}
884
885out:
886	if (res)
887		surf = vmw_res_to_srf(res);
888	return surf;
889}
v5.9
   1// SPDX-License-Identifier: GPL-2.0 OR MIT
   2/**************************************************************************
   3 *
   4 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
   5 * All Rights Reserved.
   6 *
   7 * Permission is hereby granted, free of charge, to any person obtaining a
   8 * copy of this software and associated documentation files (the
   9 * "Software"), to deal in the Software without restriction, including
  10 * without limitation the rights to use, copy, modify, merge, publish,
  11 * distribute, sub license, and/or sell copies of the Software, and to
  12 * permit persons to whom the Software is furnished to do so, subject to
  13 * the following conditions:
  14 *
  15 * The above copyright notice and this permission notice (including the
  16 * next paragraph) shall be included in all copies or substantial portions
  17 * of the Software.
  18 *
  19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
  26 *
  27 **************************************************************************/
  28
  29#include <drm/ttm/ttm_placement.h>
  30
  31#include "vmwgfx_drv.h"
  32#include "ttm_object.h"
  33
 
  34
  35/**
  36 * struct vmw_user_buffer_object - User-space-visible buffer object
  37 *
  38 * @prime: The prime object providing user visibility.
  39 * @vbo: The struct vmw_buffer_object
  40 */
  41struct vmw_user_buffer_object {
  42	struct ttm_prime_object prime;
  43	struct vmw_buffer_object vbo;
  44};
  45
 
 
 
  46
  47/**
  48 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
  49 * vmw_buffer_object.
  50 *
  51 * @bo: Pointer to the TTM buffer object.
  52 * Return: Pointer to the struct vmw_buffer_object embedding the
  53 * TTM buffer object.
  54 */
  55static struct vmw_buffer_object *
  56vmw_buffer_object(struct ttm_buffer_object *bo)
  57{
  58	return container_of(bo, struct vmw_buffer_object, base);
 
 
 
 
 
 
 
 
 
 
  59}
  60
  61
  62/**
  63 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
  64 * vmw_user_buffer_object.
  65 *
  66 * @bo: Pointer to the TTM buffer object.
  67 * Return: Pointer to the struct vmw_buffer_object embedding the TTM buffer
  68 * object.
  69 */
  70static struct vmw_user_buffer_object *
  71vmw_user_buffer_object(struct ttm_buffer_object *bo)
  72{
  73	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
  74
  75	return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
 
 
 
  76}
  77
  78
  79/**
  80 * vmw_bo_pin_in_placement - Validate a buffer to placement.
  81 *
  82 * @dev_priv:  Driver private.
  83 * @buf:  DMA buffer to move.
  84 * @placement:  The placement to pin it.
  85 * @interruptible:  Use interruptible wait.
  86 * Return: Zero on success, Negative error code on failure. In particular
  87 * -ERESTARTSYS if interrupted by a signal
  88 */
  89int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
  90			    struct vmw_buffer_object *buf,
  91			    struct ttm_placement *placement,
  92			    bool interruptible)
  93{
  94	struct ttm_operation_ctx ctx = {interruptible, false };
  95	struct ttm_buffer_object *bo = &buf->base;
  96	int ret;
  97	uint32_t new_flags;
  98
  99	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
 100	if (unlikely(ret != 0))
 101		return ret;
 102
 103	vmw_execbuf_release_pinned_bo(dev_priv);
 104
 105	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
 106	if (unlikely(ret != 0))
 107		goto err;
 108
 109	if (buf->pin_count > 0)
 110		ret = ttm_bo_mem_compat(placement, &bo->mem,
 111					&new_flags) == true ? 0 : -EINVAL;
 112	else
 113		ret = ttm_bo_validate(bo, placement, &ctx);
 114
 115	if (!ret)
 116		vmw_bo_pin_reserved(buf, true);
 117
 118	ttm_bo_unreserve(bo);
 119
 120err:
 121	ttm_write_unlock(&dev_priv->reservation_sem);
 122	return ret;
 123}
 124
 125
 126/**
 127 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 128 *
 129 * This function takes the reservation_sem in write mode.
 130 * Flushes and unpins the query bo to avoid failures.
 131 *
 132 * @dev_priv:  Driver private.
 133 * @buf:  DMA buffer to move.
 134 * @pin:  Pin buffer if true.
 135 * @interruptible:  Use interruptible wait.
 136 * Return: Zero on success, Negative error code on failure. In particular
 137 * -ERESTARTSYS if interrupted by a signal
 138 */
 139int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
 140			      struct vmw_buffer_object *buf,
 141			      bool interruptible)
 142{
 143	struct ttm_operation_ctx ctx = {interruptible, false };
 144	struct ttm_buffer_object *bo = &buf->base;
 145	int ret;
 146	uint32_t new_flags;
 147
 148	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
 149	if (unlikely(ret != 0))
 150		return ret;
 151
 152	vmw_execbuf_release_pinned_bo(dev_priv);
 153
 154	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
 155	if (unlikely(ret != 0))
 156		goto err;
 157
 158	if (buf->pin_count > 0) {
 159		ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
 160					&new_flags) == true ? 0 : -EINVAL;
 161		goto out_unreserve;
 162	}
 163
 164	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
 165	if (likely(ret == 0) || ret == -ERESTARTSYS)
 166		goto out_unreserve;
 167
 168	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
 
 
 
 169
 170out_unreserve:
 171	if (!ret)
 172		vmw_bo_pin_reserved(buf, true);
 173
 174	ttm_bo_unreserve(bo);
 175err:
 176	ttm_write_unlock(&dev_priv->reservation_sem);
 177	return ret;
 178}
 179
 180
 181/**
 182 * vmw_bo_pin_in_vram - Move a buffer to vram.
 183 *
 184 * This function takes the reservation_sem in write mode.
 185 * Flushes and unpins the query bo to avoid failures.
 186 *
 187 * @dev_priv:  Driver private.
 188 * @buf:  DMA buffer to move.
 189 * @interruptible:  Use interruptible wait.
 190 * Return: Zero on success, Negative error code on failure. In particular
 191 * -ERESTARTSYS if interrupted by a signal
 192 */
 193int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
 194		       struct vmw_buffer_object *buf,
 195		       bool interruptible)
 196{
 197	return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
 198				       interruptible);
 199}
 200
 201
 202/**
 203 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 204 *
 205 * This function takes the reservation_sem in write mode.
 206 * Flushes and unpins the query bo to avoid failures.
 207 *
 208 * @dev_priv:  Driver private.
 209 * @buf:  DMA buffer to pin.
 210 * @interruptible:  Use interruptible wait.
 211 * Return: Zero on success, Negative error code on failure. In particular
 212 * -ERESTARTSYS if interrupted by a signal
 213 */
 214int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
 215				struct vmw_buffer_object *buf,
 216				bool interruptible)
 217{
 218	struct ttm_operation_ctx ctx = {interruptible, false };
 219	struct ttm_buffer_object *bo = &buf->base;
 220	struct ttm_placement placement;
 221	struct ttm_place place;
 222	int ret = 0;
 223	uint32_t new_flags;
 224
 225	place = vmw_vram_placement.placement[0];
 226	place.lpfn = bo->num_pages;
 227	placement.num_placement = 1;
 228	placement.placement = &place;
 229	placement.num_busy_placement = 1;
 230	placement.busy_placement = &place;
 231
 232	ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
 233	if (unlikely(ret != 0))
 234		return ret;
 235
 236	vmw_execbuf_release_pinned_bo(dev_priv);
 237	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
 238	if (unlikely(ret != 0))
 239		goto err_unlock;
 240
 241	/*
 242	 * Is this buffer already in vram but not at the start of it?
 243	 * In that case, evict it first because TTM isn't good at handling
 244	 * that situation.
 245	 */
 246	if (bo->mem.mem_type == TTM_PL_VRAM &&
 247	    bo->mem.start < bo->num_pages &&
 248	    bo->mem.start > 0 &&
 249	    buf->pin_count == 0) {
 250		ctx.interruptible = false;
 251		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
 
 
 
 252	}
 253
 254	if (buf->pin_count > 0)
 255		ret = ttm_bo_mem_compat(&placement, &bo->mem,
 256					&new_flags) == true ? 0 : -EINVAL;
 257	else
 258		ret = ttm_bo_validate(bo, &placement, &ctx);
 259
 260	/* For some reason we didn't end up at the start of vram */
 261	WARN_ON(ret == 0 && bo->mem.start != 0);
 262	if (!ret)
 263		vmw_bo_pin_reserved(buf, true);
 264
 265	ttm_bo_unreserve(bo);
 266err_unlock:
 267	ttm_write_unlock(&dev_priv->reservation_sem);
 268
 269	return ret;
 270}
 271
 272
 273/**
 274 * vmw_bo_unpin - Unpin the buffer given buffer, does not move the buffer.
 275 *
 276 * This function takes the reservation_sem in write mode.
 277 *
 278 * @dev_priv:  Driver private.
 279 * @buf:  DMA buffer to unpin.
 280 * @interruptible:  Use interruptible wait.
 281 * Return: Zero on success, Negative error code on failure. In particular
 282 * -ERESTARTSYS if interrupted by a signal
 283 */
 284int vmw_bo_unpin(struct vmw_private *dev_priv,
 285		 struct vmw_buffer_object *buf,
 286		 bool interruptible)
 287{
 288	struct ttm_buffer_object *bo = &buf->base;
 289	int ret;
 290
 291	ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
 292	if (unlikely(ret != 0))
 293		return ret;
 294
 295	ret = ttm_bo_reserve(bo, interruptible, false, NULL);
 296	if (unlikely(ret != 0))
 297		goto err;
 298
 299	vmw_bo_pin_reserved(buf, false);
 300
 301	ttm_bo_unreserve(bo);
 302
 303err:
 304	ttm_read_unlock(&dev_priv->reservation_sem);
 305	return ret;
 306}
 307
 308/**
 309 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 310 * of a buffer.
 311 *
 312 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 313 * @ptr: SVGAGuestPtr returning the result.
 314 */
 315void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
 316			  SVGAGuestPtr *ptr)
 317{
 318	if (bo->mem.mem_type == TTM_PL_VRAM) {
 319		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
 320		ptr->offset = bo->mem.start << PAGE_SHIFT;
 321	} else {
 322		ptr->gmrId = bo->mem.start;
 323		ptr->offset = 0;
 324	}
 325}
 326
 327
 328/**
 329 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 330 *
 331 * @vbo: The buffer object. Must be reserved.
 332 * @pin: Whether to pin or unpin.
 333 *
 334 */
 335void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
 336{
 337	struct ttm_operation_ctx ctx = { false, true };
 338	struct ttm_place pl;
 339	struct ttm_placement placement;
 340	struct ttm_buffer_object *bo = &vbo->base;
 341	uint32_t old_mem_type = bo->mem.mem_type;
 342	int ret;
 343
 344	dma_resv_assert_held(bo->base.resv);
 345
 346	if (pin) {
 347		if (vbo->pin_count++ > 0)
 348			return;
 349	} else {
 350		WARN_ON(vbo->pin_count <= 0);
 351		if (--vbo->pin_count > 0)
 352			return;
 353	}
 354
 355	pl.fpfn = 0;
 356	pl.lpfn = 0;
 357	pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
 358		| TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
 359	if (pin)
 360		pl.flags |= TTM_PL_FLAG_NO_EVICT;
 361
 362	memset(&placement, 0, sizeof(placement));
 363	placement.num_placement = 1;
 364	placement.placement = &pl;
 365
 366	ret = ttm_bo_validate(bo, &placement, &ctx);
 367
 368	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
 
 
 
 
 
 369}
 370
 371
 372/**
 373 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 374 *
 375 * @vbo: The buffer object to map
 376 * Return: A kernel virtual address or NULL if mapping failed.
 377 *
 378 * This function maps a buffer object into the kernel address space, or
 379 * returns the virtual kernel address of an already existing map. The virtual
 380 * address remains valid as long as the buffer object is pinned or reserved.
 381 * The cached map is torn down on either
 382 * 1) Buffer object move
 383 * 2) Buffer object swapout
 384 * 3) Buffer object destruction
 385 *
 386 */
 387void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
 388{
 389	struct ttm_buffer_object *bo = &vbo->base;
 
 
 
 
 
 390	bool not_used;
 391	void *virtual;
 392	int ret;
 393
 
 
 394	virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
 395	if (virtual)
 396		return virtual;
 397
 398	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
 399	if (ret)
 400		DRM_ERROR("Buffer object map failed: %d.\n", ret);
 
 401
 402	return ttm_kmap_obj_virtual(&vbo->map, &not_used);
 403}
 404
 405
 406/**
 407 * vmw_bo_unmap - Tear down a cached buffer object map.
 408 *
 409 * @vbo: The buffer object whose map we are tearing down.
 410 *
 411 * This function tears down a cached map set up using
 412 * vmw_buffer_object_map_and_cache().
 413 */
 414void vmw_bo_unmap(struct vmw_buffer_object *vbo)
 415{
 
 
 416	if (vbo->map.bo == NULL)
 417		return;
 418
 419	ttm_bo_kunmap(&vbo->map);
 420}
 421
 422
 423/**
 424 * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
 425 *
 426 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 427 * @size: The requested buffer size.
 428 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 429 */
 430static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
 431			      bool user)
 432{
 433	static size_t struct_size, user_struct_size;
 434	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 435	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
 436
 437	if (unlikely(struct_size == 0)) {
 438		size_t backend_size = ttm_round_pot(vmw_tt_size);
 439
 440		struct_size = backend_size +
 441			ttm_round_pot(sizeof(struct vmw_buffer_object));
 442		user_struct_size = backend_size +
 443		  ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
 444				      TTM_OBJ_EXTRA_SIZE;
 445	}
 446
 447	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
 448		page_array_size +=
 449			ttm_round_pot(num_pages * sizeof(dma_addr_t));
 450
 451	return ((user) ? user_struct_size : struct_size) +
 452		page_array_size;
 453}
 454
 455
 456/**
 457 * vmw_bo_bo_free - vmw buffer object destructor
 458 *
 459 * @bo: Pointer to the embedded struct ttm_buffer_object
 460 */
 461void vmw_bo_bo_free(struct ttm_buffer_object *bo)
 462{
 463	struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
 464
 465	WARN_ON(vmw_bo->dirty);
 466	WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
 467	vmw_bo_unmap(vmw_bo);
 468	kfree(vmw_bo);
 469}
 470
 471
 472/**
 473 * vmw_user_bo_destroy - vmw buffer object destructor
 474 *
 475 * @bo: Pointer to the embedded struct ttm_buffer_object
 476 */
 477static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
 478{
 479	struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
 480	struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;
 481
 482	WARN_ON(vbo->dirty);
 483	WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
 484	vmw_bo_unmap(vbo);
 485	ttm_prime_object_kfree(vmw_user_bo, prime);
 486}
 487
 488
 489/**
 490 * vmw_bo_init - Initialize a vmw buffer object
 491 *
 492 * @dev_priv: Pointer to the device private struct
 493 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 494 * @size: Buffer object size in bytes.
 495 * @placement: Initial placement.
 496 * @interruptible: Whether waits should be performed interruptible.
 497 * @bo_free: The buffer object destructor.
 498 * Returns: Zero on success, negative error code on error.
 499 *
 500 * Note that on error, the code will free the buffer object.
 501 */
 502int vmw_bo_init(struct vmw_private *dev_priv,
 503		struct vmw_buffer_object *vmw_bo,
 504		size_t size, struct ttm_placement *placement,
 505		bool interruptible,
 506		void (*bo_free)(struct ttm_buffer_object *bo))
 507{
 508	struct ttm_bo_device *bdev = &dev_priv->bdev;
 509	size_t acc_size;
 
 
 
 
 510	int ret;
 511	bool user = (bo_free == &vmw_user_bo_destroy);
 512
 513	WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
 514
 515	acc_size = vmw_bo_acc_size(dev_priv, size, user);
 516	memset(vmw_bo, 0, sizeof(*vmw_bo));
 517	BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
 518	vmw_bo->base.priority = 3;
 519	vmw_bo->res_tree = RB_ROOT;
 
 
 520
 521	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
 522			  ttm_bo_type_device, placement,
 523			  0, interruptible, acc_size,
 524			  NULL, NULL, bo_free);
 525	return ret;
 526}
 527
 
 
 
 
 
 
 528
 529/**
 530 * vmw_user_bo_release - TTM reference base object release callback for
 531 * vmw user buffer objects
 532 *
 533 * @p_base: The TTM base object pointer about to be unreferenced.
 534 *
 535 * Clears the TTM base object pointer and drops the reference the
 536 * base object has on the underlying struct vmw_buffer_object.
 537 */
 538static void vmw_user_bo_release(struct ttm_base_object **p_base)
 539{
 540	struct vmw_user_buffer_object *vmw_user_bo;
 541	struct ttm_base_object *base = *p_base;
 542
 543	*p_base = NULL;
 544
 545	if (unlikely(base == NULL))
 546		return;
 547
 548	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
 549				   prime.base);
 550	ttm_bo_put(&vmw_user_bo->vbo.base);
 551}
 552
 553
 554/**
 555 * vmw_user_bo_ref_obj-release - TTM synccpu reference object release callback
 556 * for vmw user buffer objects
 557 *
 558 * @base: Pointer to the TTM base object
 559 * @ref_type: Reference type of the reference reaching zero.
 560 *
 561 * Called when user-space drops its last synccpu reference on the buffer
 562 * object, Either explicitly or as part of a cleanup file close.
 563 */
 564static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
 565					enum ttm_ref_type ref_type)
 566{
 567	struct vmw_user_buffer_object *user_bo;
 568
 569	user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
 570
 571	switch (ref_type) {
 572	case TTM_REF_SYNCCPU_WRITE:
 573		atomic_dec(&user_bo->vbo.cpu_writers);
 574		break;
 575	default:
 576		WARN_ONCE(true, "Undefined buffer object reference release.\n");
 577	}
 578}
 579
 580
 581/**
 582 * vmw_user_bo_alloc - Allocate a user buffer object
 583 *
 584 * @dev_priv: Pointer to a struct device private.
 585 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 586 * object.
 587 * @size: Size of the buffer object.
 588 * @shareable: Boolean whether the buffer is shareable with other open files.
 589 * @handle: Pointer to where the handle value should be assigned.
 590 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 591 * should be assigned.
 592 * Return: Zero on success, negative error code on error.
 593 */
 594int vmw_user_bo_alloc(struct vmw_private *dev_priv,
 595		      struct ttm_object_file *tfile,
 596		      uint32_t size,
 597		      bool shareable,
 598		      uint32_t *handle,
 599		      struct vmw_buffer_object **p_vbo,
 600		      struct ttm_base_object **p_base)
 601{
 602	struct vmw_user_buffer_object *user_bo;
 603	int ret;
 604
 605	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
 606	if (unlikely(!user_bo)) {
 607		DRM_ERROR("Failed to allocate a buffer.\n");
 608		return -ENOMEM;
 609	}
 610
 611	ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
 612			  (dev_priv->has_mob) ?
 613			  &vmw_sys_placement :
 614			  &vmw_vram_sys_placement, true,
 615			  &vmw_user_bo_destroy);
 616	if (unlikely(ret != 0))
 617		return ret;
 618
 619	ttm_bo_get(&user_bo->vbo.base);
 620	ret = ttm_prime_object_init(tfile,
 621				    size,
 622				    &user_bo->prime,
 623				    shareable,
 624				    ttm_buffer_type,
 625				    &vmw_user_bo_release,
 626				    &vmw_user_bo_ref_obj_release);
 627	if (unlikely(ret != 0)) {
 628		ttm_bo_put(&user_bo->vbo.base);
 629		goto out_no_base_object;
 630	}
 631
 632	*p_vbo = &user_bo->vbo;
 633	if (p_base) {
 634		*p_base = &user_bo->prime.base;
 635		kref_get(&(*p_base)->refcount);
 636	}
 637	*handle = user_bo->prime.base.handle;
 638
 639out_no_base_object:
 640	return ret;
 641}
 642
 643
 644/**
 645 * vmw_user_bo_verify_access - verify access permissions on this
 646 * buffer object.
 647 *
 648 * @bo: Pointer to the buffer object being accessed
 649 * @tfile: Identifying the caller.
 650 */
 651int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
 652			      struct ttm_object_file *tfile)
 653{
 654	struct vmw_user_buffer_object *vmw_user_bo;
 655
 656	if (unlikely(bo->destroy != vmw_user_bo_destroy))
 657		return -EPERM;
 658
 659	vmw_user_bo = vmw_user_buffer_object(bo);
 660
 661	/* Check that the caller has opened the object. */
 662	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
 663		return 0;
 664
 665	DRM_ERROR("Could not grant buffer access.\n");
 666	return -EPERM;
 667}
 668
 669
 670/**
 671 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 672 * access, idling previous GPU operations on the buffer and optionally
 673 * blocking it for further command submissions.
 674 *
 675 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 676 * @tfile: Identifying the caller.
 677 * @flags: Flags indicating how the grab should be performed.
 678 * Return: Zero on success, Negative error code on error. In particular,
 679 * -EBUSY will be returned if a dontblock operation is requested and the
 680 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 681 * interrupted by a signal.
 682 *
 683 * A blocking grab will be automatically released when @tfile is closed.
 684 */
 685static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
 686				    struct ttm_object_file *tfile,
 687				    uint32_t flags)
 688{
 689	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
 690	struct ttm_buffer_object *bo = &user_bo->vbo.base;
 691	bool existed;
 692	int ret;
 693
 694	if (flags & drm_vmw_synccpu_allow_cs) {
 695		long lret;
 696
 697		lret = dma_resv_wait_timeout_rcu
 698			(bo->base.resv, true, true,
 699			 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
 700		if (!lret)
 701			return -EBUSY;
 702		else if (lret < 0)
 703			return lret;
 704		return 0;
 705	}
 706
 707	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
 708	if (unlikely(ret != 0))
 709		return ret;
 710
 711	ret = ttm_bo_wait(bo, true, nonblock);
 712	if (likely(ret == 0))
 713		atomic_inc(&user_bo->vbo.cpu_writers);
 714
 715	ttm_bo_unreserve(bo);
 716	if (unlikely(ret != 0))
 717		return ret;
 718
 719	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
 720				 TTM_REF_SYNCCPU_WRITE, &existed, false);
 721	if (ret != 0 || existed)
 722		atomic_dec(&user_bo->vbo.cpu_writers);
 723
 724	return ret;
 725}
 726
 727/**
 728 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 729 * and unblock command submission on the buffer if blocked.
 730 *
 
 731 * @handle: Handle identifying the buffer object.
 732 * @tfile: Identifying the caller.
 733 * @flags: Flags indicating the type of release.
 734 */
 735static int vmw_user_bo_synccpu_release(uint32_t handle,
 736					   struct ttm_object_file *tfile,
 737					   uint32_t flags)
 738{
 739	if (!(flags & drm_vmw_synccpu_allow_cs))
 740		return ttm_ref_object_base_unref(tfile, handle,
 741						 TTM_REF_SYNCCPU_WRITE);
 
 
 
 
 
 
 742
 743	return 0;
 744}
 745
 746
 747/**
 748 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 749 * functionality.
 750 *
 751 * @dev: Identifies the drm device.
 752 * @data: Pointer to the ioctl argument.
 753 * @file_priv: Identifies the caller.
 754 * Return: Zero on success, negative error code on error.
 755 *
 756 * This function checks the ioctl arguments for validity and calls the
 757 * relevant synccpu functions.
 758 */
 759int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 760			      struct drm_file *file_priv)
 761{
 762	struct drm_vmw_synccpu_arg *arg =
 763		(struct drm_vmw_synccpu_arg *) data;
 764	struct vmw_buffer_object *vbo;
 765	struct vmw_user_buffer_object *user_bo;
 766	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
 767	struct ttm_base_object *buffer_base;
 768	int ret;
 769
 770	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
 771	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
 772			       drm_vmw_synccpu_dontblock |
 773			       drm_vmw_synccpu_allow_cs)) != 0) {
 774		DRM_ERROR("Illegal synccpu flags.\n");
 775		return -EINVAL;
 776	}
 777
 778	switch (arg->op) {
 779	case drm_vmw_synccpu_grab:
 780		ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
 781					     &buffer_base);
 782		if (unlikely(ret != 0))
 783			return ret;
 784
 785		user_bo = container_of(vbo, struct vmw_user_buffer_object,
 786				       vbo);
 787		ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
 788		vmw_bo_unreference(&vbo);
 789		ttm_base_object_unref(&buffer_base);
 790		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
 791			     ret != -EBUSY)) {
 792			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
 793				  (unsigned int) arg->handle);
 794			return ret;
 795		}
 796		break;
 797	case drm_vmw_synccpu_release:
 798		ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
 
 799						  arg->flags);
 800		if (unlikely(ret != 0)) {
 801			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
 802				  (unsigned int) arg->handle);
 803			return ret;
 804		}
 805		break;
 806	default:
 807		DRM_ERROR("Invalid synccpu operation.\n");
 808		return -EINVAL;
 809	}
 810
 811	return 0;
 812}
 813
 814
 815/**
 816 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 817 * allocation functionality.
 818 *
 819 * @dev: Identifies the drm device.
 820 * @data: Pointer to the ioctl argument.
 821 * @file_priv: Identifies the caller.
 822 * Return: Zero on success, negative error code on error.
 823 *
 824 * This function checks the ioctl arguments for validity and allocates a
 825 * struct vmw_user_buffer_object bo.
 826 */
 827int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
 828		       struct drm_file *file_priv)
 829{
 830	struct vmw_private *dev_priv = vmw_priv(dev);
 831	union drm_vmw_alloc_dmabuf_arg *arg =
 832	    (union drm_vmw_alloc_dmabuf_arg *)data;
 833	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
 834	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
 835	struct vmw_buffer_object *vbo;
 836	uint32_t handle;
 837	int ret;
 838
 839	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 840	if (unlikely(ret != 0))
 841		return ret;
 842
 843	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
 844				req->size, false, &handle, &vbo,
 845				NULL);
 846	if (unlikely(ret != 0))
 847		goto out_no_bo;
 848
 849	rep->handle = handle;
 850	rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
 851	rep->cur_gmr_id = handle;
 852	rep->cur_gmr_offset = 0;
 853
 854	vmw_bo_unreference(&vbo);
 855
 856out_no_bo:
 857	ttm_read_unlock(&dev_priv->reservation_sem);
 858
 859	return ret;
 860}
 861
 862
 863/**
 864 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 865 *
 866 * @dev: Identifies the drm device.
 867 * @data: Pointer to the ioctl argument.
 868 * @file_priv: Identifies the caller.
 869 * Return: Zero on success, negative error code on error.
 870 *
 871 * This function checks the ioctl arguments for validity and closes a
 872 * handle to a TTM base object, optionally freeing the object.
 873 */
 874int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
 875		       struct drm_file *file_priv)
 876{
 877	struct drm_vmw_unref_dmabuf_arg *arg =
 878	    (struct drm_vmw_unref_dmabuf_arg *)data;
 879
 880	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
 881					 arg->handle,
 882					 TTM_REF_USAGE);
 883}
 884
 885
 886/**
 887 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 888 *
 889 * @tfile: The TTM object file the handle is registered with.
 890 * @handle: The user buffer object handle
 891 * @out: Pointer to a where a pointer to the embedded
 892 * struct vmw_buffer_object should be placed.
 893 * @p_base: Pointer to where a pointer to the TTM base object should be
 894 * placed, or NULL if no such pointer is required.
 895 * Return: Zero on success, Negative error code on error.
 896 *
 897 * Both the output base object pointer and the vmw buffer object pointer
 898 * will be refcounted.
 899 */
 900int vmw_user_bo_lookup(struct ttm_object_file *tfile,
 901		       uint32_t handle, struct vmw_buffer_object **out,
 902		       struct ttm_base_object **p_base)
 903{
 904	struct vmw_user_buffer_object *vmw_user_bo;
 905	struct ttm_base_object *base;
 906
 907	base = ttm_base_object_lookup(tfile, handle);
 908	if (unlikely(base == NULL)) {
 909		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
 910			  (unsigned long)handle);
 911		return -ESRCH;
 912	}
 913
 914	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
 915		ttm_base_object_unref(&base);
 916		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
 917			  (unsigned long)handle);
 918		return -EINVAL;
 919	}
 920
 921	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
 922				   prime.base);
 923	ttm_bo_get(&vmw_user_bo->vbo.base);
 924	if (p_base)
 925		*p_base = base;
 926	else
 927		ttm_base_object_unref(&base);
 928	*out = &vmw_user_bo->vbo;
 929
 930	return 0;
 931}
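/*
 * Example (illustrative sketch, not part of the original file): the usual
 * pattern around vmw_user_bo_lookup() is look up, use, then drop the
 * reference with vmw_bo_unreference(), exactly as vmw_dumb_map_offset()
 * does below.  The helper name is hypothetical.
 */
static int __maybe_unused example_user_bo_lookup(struct ttm_object_file *tfile,
						 uint32_t handle)
{
	struct vmw_buffer_object *vbo;
	int ret;

	ret = vmw_user_bo_lookup(tfile, handle, &vbo, NULL);
	if (unlikely(ret != 0))
		return ret;

	/* Use @vbo here; a reference is held until the unreference below. */

	vmw_bo_unreference(&vbo);
	return 0;
}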
 932
 933/**
 934 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
 935 * @tfile: The TTM object file the handle is registered with.
 936 * @handle: The user buffer object handle.
 937 *
 938 * This function looks up a struct vmw_user_buffer_object and returns a pointer to the
 939 * struct vmw_buffer_object it derives from without refcounting the pointer.
 940 * The returned pointer is only valid until vmw_user_bo_noref_release() is
 941 * called, and the object pointed to by the returned pointer may be doomed.
 942 * Any persistent usage of the object requires a refcount to be taken using
 943 * ttm_bo_reference_unless_doomed(). If and only if this function returns
 944 * successfully, it needs to be paired with vmw_user_bo_noref_release(), and no
 945 * sleeping or scheduling functions may be called in between these two calls.
 946 *
 947 * Return: A struct vmw_buffer_object pointer if successful or negative
 948 * error pointer on failure.
 949 */
 950struct vmw_buffer_object *
 951vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
 952{
 953	struct vmw_user_buffer_object *vmw_user_bo;
 954	struct ttm_base_object *base;
 955
 956	base = ttm_base_object_noref_lookup(tfile, handle);
 957	if (!base) {
 958		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
 959			  (unsigned long)handle);
 960		return ERR_PTR(-ESRCH);
 961	}
 962
 963	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
 964		ttm_base_object_noref_release();
 965		DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
 966			  (unsigned long)handle);
 967		return ERR_PTR(-EINVAL);
 968	}
 969
 970	vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
 971				   prime.base);
 972	return &vmw_user_bo->vbo;
 973}
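/*
 * Example (illustrative sketch, not part of the original file): a noref
 * lookup must be paired with vmw_user_bo_noref_release(), with no sleeping
 * or scheduling in between, as documented above.  The helper name is
 * hypothetical.
 */
static int __maybe_unused example_noref_lookup(struct ttm_object_file *tfile,
					       u32 handle)
{
	struct vmw_buffer_object *vbo;

	vbo = vmw_user_bo_noref_lookup(tfile, handle);
	if (IS_ERR(vbo))
		return PTR_ERR(vbo);

	/* Non-sleeping use of @vbo only; no refcount is held here. */

	vmw_user_bo_noref_release();
	return 0;
}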
 974
 975/**
 976 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
 977 *
 978 * @tfile: The TTM object file to register the handle with.
 979 * @vbo: The embedded vmw buffer object.
 980 * @handle: Pointer to where the new handle should be placed.
 981 * Return: Zero on success, Negative error code on error.
 982 */
 983int vmw_user_bo_reference(struct ttm_object_file *tfile,
 984			  struct vmw_buffer_object *vbo,
 985			  uint32_t *handle)
 986{
 987	struct vmw_user_buffer_object *user_bo;
 988
 989	if (vbo->base.destroy != vmw_user_bo_destroy)
 990		return -EINVAL;
 991
 992	user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
 993
 994	*handle = user_bo->prime.base.handle;
 995	return ttm_ref_object_add(tfile, &user_bo->prime.base,
 996				  TTM_REF_USAGE, NULL, false);
 997}
 998
 999
1000/**
1001 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
1002 *                       object without unreserving it.
1003 *
1004 * @bo:             Pointer to the struct ttm_buffer_object to fence.
1005 * @fence:          Pointer to the fence. If NULL, this function will
1006 *                  insert a fence into the command stream.
1007 *
1008 * Contrary to the ttm_eu version of this function, it takes only
1009 * a single buffer object instead of a list, and it also doesn't
1010 * unreserve the buffer object, which needs to be done separately.
1011 */
1012void vmw_bo_fence_single(struct ttm_buffer_object *bo,
1013			 struct vmw_fence_obj *fence)
1014{
1015	struct ttm_bo_device *bdev = bo->bdev;
1016
1017	struct vmw_private *dev_priv =
1018		container_of(bdev, struct vmw_private, bdev);
1019
1020	if (fence == NULL) {
1021		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1022		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
1023		dma_fence_put(&fence->base);
1024	} else
1025		dma_resv_add_excl_fence(bo->base.resv, &fence->base);
1026}
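/*
 * Example (illustrative sketch, not part of the original file): the caller
 * reserves the buffer itself, fences it, and unreserves it afterwards,
 * since vmw_bo_fence_single() deliberately leaves the reservation alone.
 * Passing a NULL fence asks the function to insert a new one.
 */
static void __maybe_unused example_fence_bo(struct ttm_buffer_object *bo)
{
	if (ttm_bo_reserve(bo, false, false, NULL))
		return;

	/* ... emit device commands that touch @bo ... */

	vmw_bo_fence_single(bo, NULL);
	ttm_bo_unreserve(bo);
}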
1027
1028
1029/**
1030 * vmw_dumb_create - Create a dumb kms buffer
1031 *
1032 * @file_priv: Pointer to a struct drm_file identifying the caller.
1033 * @dev: Pointer to the drm device.
1034 * @args: Pointer to a struct drm_mode_create_dumb structure
1035 * Return: Zero on success, negative error code on failure.
1036 *
1037 * This is a driver callback for the core drm create_dumb functionality.
1038 * Note that this is very similar to the vmw_bo_alloc ioctl, except
1039 * that the arguments have a different format.
1040 */
1041int vmw_dumb_create(struct drm_file *file_priv,
1042		    struct drm_device *dev,
1043		    struct drm_mode_create_dumb *args)
1044{
1045	struct vmw_private *dev_priv = vmw_priv(dev);
1046	struct vmw_buffer_object *vbo;
1047	int ret;
1048
1049	args->pitch = args->width * ((args->bpp + 7) / 8);
1050	args->size = args->pitch * args->height;
1051
1052	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1053	if (unlikely(ret != 0))
1054		return ret;
1055
1056	ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
1057				    args->size, false, &args->handle,
1058				    &vbo, NULL);
1059	if (unlikely(ret != 0))
1060		goto out_no_bo;
1061
1062	vmw_bo_unreference(&vbo);
1063out_no_bo:
1064	ttm_read_unlock(&dev_priv->reservation_sem);
1065	return ret;
1066}
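/*
 * Worked example of the size computation above: a 1024x768, 32 bpp dumb
 * buffer gets pitch = 1024 * ((32 + 7) / 8) = 4096 bytes and
 * size = 4096 * 768 = 3145728 bytes, which is the allocation requested
 * from vmw_user_bo_alloc().
 */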
1067
1068
1069/**
1070 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
1071 *
1072 * @file_priv: Pointer to a struct drm_file identifying the caller.
1073 * @dev: Pointer to the drm device.
1074 * @handle: Handle identifying the dumb buffer.
1075 * @offset: The address space offset returned.
1076 * Return: Zero on success, negative error code on failure.
1077 *
1078 * This is a driver callback for the core drm dumb_map_offset functionality.
1079 */
1080int vmw_dumb_map_offset(struct drm_file *file_priv,
1081			struct drm_device *dev, uint32_t handle,
1082			uint64_t *offset)
1083{
1084	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1085	struct vmw_buffer_object *out_buf;
1086	int ret;
1087
1088	ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
1089	if (ret != 0)
1090		return -EINVAL;
1091
1092	*offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
1093	vmw_bo_unreference(&out_buf);
1094	return 0;
1095}
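/*
 * Example (illustrative sketch, not part of the original file): userspace
 * consumes the offset returned here through the standard dumb-buffer
 * mapping flow, i.e. an mmap() on the DRM file descriptor:
 *
 *	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			 drm_fd, offset);
 */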
1096
1097
1098/**
1099 * vmw_dumb_destroy - Destroy a dumb buffer
1100 *
1101 * @file_priv: Pointer to a struct drm_file identifying the caller.
1102 * @dev: Pointer to the drm device.
1103 * @handle: Handle identifying the dumb buffer.
1104 * Return: Zero on success, negative error code on failure.
1105 *
1106 * This is a driver callback for the core drm dumb_destroy functionality.
1107 */
1108int vmw_dumb_destroy(struct drm_file *file_priv,
1109		     struct drm_device *dev,
1110		     uint32_t handle)
1111{
1112	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1113					 handle, TTM_REF_USAGE);
1114}
1115
1116
1117/**
1118 * vmw_bo_swap_notify - swapout notify callback.
1119 *
1120 * @bo: The buffer object to be swapped out.
1121 */
1122void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
1123{
1124	/* Is @bo embedded in a struct vmw_buffer_object? */
1125	if (bo->destroy != vmw_bo_bo_free &&
1126	    bo->destroy != vmw_user_bo_destroy)
1127		return;
1128
1129	/* Kill any cached kernel maps before swapout */
1130	vmw_bo_unmap(vmw_buffer_object(bo));
1131}
1132
1133
1134/**
1135 * vmw_bo_move_notify - TTM move_notify_callback
1136 *
1137 * @bo: The TTM buffer object about to move.
1138 * @mem: The struct ttm_mem_reg indicating to what memory
1139 *       region the move is taking place.
1140 *
1141 * Detaches cached maps and device bindings that require that the
1142 * buffer doesn't move.
1143 */
1144void vmw_bo_move_notify(struct ttm_buffer_object *bo,
1145			struct ttm_mem_reg *mem)
1146{
1147	struct vmw_buffer_object *vbo;
1148
1149	if (mem == NULL)
1150		return;
1151
1152	/* Make sure @bo is embedded in a struct vmw_buffer_object. */
1153	if (bo->destroy != vmw_bo_bo_free &&
1154	    bo->destroy != vmw_user_bo_destroy)
1155		return;
1156
1157	vbo = container_of(bo, struct vmw_buffer_object, base);
1158
1159	/*
1160	 * Kill any cached kernel maps before move to or from VRAM.
1161	 * With other types of moves, the underlying pages stay the same,
1162	 * and the map can be kept.
1163	 */
1164	if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
1165		vmw_bo_unmap(vbo);
1166
1167	/*
1168	 * If we're moving a backup MOB out of MOB placement, then make sure we
1169	 * read back all resource content first, and unbind the MOB from
1170	 * the resource.
1171	 */
1172	if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
1173		vmw_resource_unbind_list(vbo);
1174}