1// SPDX-License-Identifier: GPL-2.0 OR MIT
2/**************************************************************************
3 *
4 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28
29#include <drm/ttm/ttm_placement.h>
30
31#include "vmwgfx_drv.h"
32#include "ttm_object.h"
33
34
35/**
36 * struct vmw_user_buffer_object - User-space-visible buffer object
37 *
38 * @prime: The prime object providing user visibility.
39 * @vbo: The struct vmw_buffer_object
40 */
41struct vmw_user_buffer_object {
42 struct ttm_prime_object prime;
43 struct vmw_buffer_object vbo;
44};
45
46
47/**
48 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
49 * vmw_buffer_object.
50 *
51 * @bo: Pointer to the TTM buffer object.
52 * Return: Pointer to the struct vmw_buffer_object embedding the
53 * TTM buffer object.
54 */
55static struct vmw_buffer_object *
56vmw_buffer_object(struct ttm_buffer_object *bo)
57{
58 return container_of(bo, struct vmw_buffer_object, base);
59}
60
61
62/**
63 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
64 * vmw_user_buffer_object.
65 *
66 * @bo: Pointer to the TTM buffer object.
67 * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
68 * buffer object.
69 */
70static struct vmw_user_buffer_object *
71vmw_user_buffer_object(struct ttm_buffer_object *bo)
72{
73 struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
74
75 return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
76}
77
78
79/**
80 * vmw_bo_pin_in_placement - Validate a buffer to placement.
81 *
82 * @dev_priv: Driver private.
83 * @buf: DMA buffer to move.
84 * @placement: The placement to pin it.
85 * @interruptible: Use interruptible wait.
86 * Return: Zero on success, Negative error code on failure. In particular
87 * -ERESTARTSYS if interrupted by a signal
88 */
89int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
90 struct vmw_buffer_object *buf,
91 struct ttm_placement *placement,
92 bool interruptible)
93{
94 struct ttm_operation_ctx ctx = {interruptible, false };
95 struct ttm_buffer_object *bo = &buf->base;
96 int ret;
97 uint32_t new_flags;
98
99 ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
100 if (unlikely(ret != 0))
101 return ret;
102
103 vmw_execbuf_release_pinned_bo(dev_priv);
104
105 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
106 if (unlikely(ret != 0))
107 goto err;
108
109 if (buf->pin_count > 0)
110 ret = ttm_bo_mem_compat(placement, &bo->mem,
111 &new_flags) == true ? 0 : -EINVAL;
112 else
113 ret = ttm_bo_validate(bo, placement, &ctx);
114
115 if (!ret)
116 vmw_bo_pin_reserved(buf, true);
117
118 ttm_bo_unreserve(bo);
119
120err:
121 ttm_write_unlock(&dev_priv->reservation_sem);
122 return ret;
123}
124
125
126/**
127 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
128 *
129 * This function takes the reservation_sem in write mode.
130 * Flushes and unpins the query bo to avoid failures.
131 *
132 * @dev_priv: Driver private.
133 * @buf: DMA buffer to move.
134 * @pin: Pin buffer if true.
135 * @interruptible: Use interruptible wait.
136 * Return: Zero on success, Negative error code on failure. In particular
137 * -ERESTARTSYS if interrupted by a signal
138 */
139int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
140 struct vmw_buffer_object *buf,
141 bool interruptible)
142{
143 struct ttm_operation_ctx ctx = {interruptible, false };
144 struct ttm_buffer_object *bo = &buf->base;
145 int ret;
146 uint32_t new_flags;
147
148 ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
149 if (unlikely(ret != 0))
150 return ret;
151
152 vmw_execbuf_release_pinned_bo(dev_priv);
153
154 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
155 if (unlikely(ret != 0))
156 goto err;
157
158 if (buf->pin_count > 0) {
159 ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
160 &new_flags) == true ? 0 : -EINVAL;
161 goto out_unreserve;
162 }
163
164 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
165 if (likely(ret == 0) || ret == -ERESTARTSYS)
166 goto out_unreserve;
167
168 ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
169
170out_unreserve:
171 if (!ret)
172 vmw_bo_pin_reserved(buf, true);
173
174 ttm_bo_unreserve(bo);
175err:
176 ttm_write_unlock(&dev_priv->reservation_sem);
177 return ret;
178}
179
180
181/**
182 * vmw_bo_pin_in_vram - Move a buffer to vram.
183 *
184 * This function takes the reservation_sem in write mode.
185 * Flushes and unpins the query bo to avoid failures.
186 *
187 * @dev_priv: Driver private.
188 * @buf: DMA buffer to move.
189 * @interruptible: Use interruptible wait.
190 * Return: Zero on success, Negative error code on failure. In particular
191 * -ERESTARTSYS if interrupted by a signal
192 */
193int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
194 struct vmw_buffer_object *buf,
195 bool interruptible)
196{
197 return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
198 interruptible);
199}
200
201
202/**
203 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
204 *
205 * This function takes the reservation_sem in write mode.
206 * Flushes and unpins the query bo to avoid failures.
207 *
208 * @dev_priv: Driver private.
209 * @buf: DMA buffer to pin.
210 * @interruptible: Use interruptible wait.
211 * Return: Zero on success, Negative error code on failure. In particular
212 * -ERESTARTSYS if interrupted by a signal
213 */
214int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
215 struct vmw_buffer_object *buf,
216 bool interruptible)
217{
218 struct ttm_operation_ctx ctx = {interruptible, false };
219 struct ttm_buffer_object *bo = &buf->base;
220 struct ttm_placement placement;
221 struct ttm_place place;
222 int ret = 0;
223 uint32_t new_flags;
224
225 place = vmw_vram_placement.placement[0];
226 place.lpfn = bo->num_pages;
227 placement.num_placement = 1;
228 placement.placement = &place;
229 placement.num_busy_placement = 1;
230 placement.busy_placement = &place;
231
232 ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
233 if (unlikely(ret != 0))
234 return ret;
235
236 vmw_execbuf_release_pinned_bo(dev_priv);
237 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
238 if (unlikely(ret != 0))
239 goto err_unlock;
240
241 /*
242 * Is this buffer already in vram but not at the start of it?
243 * In that case, evict it first because TTM isn't good at handling
244 * that situation.
245 */
246 if (bo->mem.mem_type == TTM_PL_VRAM &&
247 bo->mem.start < bo->num_pages &&
248 bo->mem.start > 0 &&
249 buf->pin_count == 0) {
250 ctx.interruptible = false;
251 (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
252 }
253
254 if (buf->pin_count > 0)
255 ret = ttm_bo_mem_compat(&placement, &bo->mem,
256 &new_flags) == true ? 0 : -EINVAL;
257 else
258 ret = ttm_bo_validate(bo, &placement, &ctx);
259
260 /* For some reason we didn't end up at the start of vram */
261 WARN_ON(ret == 0 && bo->offset != 0);
262 if (!ret)
263 vmw_bo_pin_reserved(buf, true);
264
265 ttm_bo_unreserve(bo);
266err_unlock:
267 ttm_write_unlock(&dev_priv->reservation_sem);
268
269 return ret;
270}
271
272
273/**
274 * vmw_bo_unpin - Unpin the given buffer; does not move the buffer.
275 *
276 * This function takes the reservation_sem in write mode.
277 *
278 * @dev_priv: Driver private.
279 * @buf: DMA buffer to unpin.
280 * @interruptible: Use interruptible wait.
281 * Return: Zero on success, Negative error code on failure. In particular
282 * -ERESTARTSYS if interrupted by a signal
283 */
284int vmw_bo_unpin(struct vmw_private *dev_priv,
285 struct vmw_buffer_object *buf,
286 bool interruptible)
287{
288 struct ttm_buffer_object *bo = &buf->base;
289 int ret;
290
291 ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
292 if (unlikely(ret != 0))
293 return ret;
294
295 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
296 if (unlikely(ret != 0))
297 goto err;
298
299 vmw_bo_pin_reserved(buf, false);
300
301 ttm_bo_unreserve(bo);
302
303err:
304 ttm_read_unlock(&dev_priv->reservation_sem);
305 return ret;
306}
307
308/**
309 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
310 * of a buffer.
311 *
312 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
313 * @ptr: SVGAGuestPtr returning the result.
314 */
315void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
316 SVGAGuestPtr *ptr)
317{
318 if (bo->mem.mem_type == TTM_PL_VRAM) {
319 ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
320 ptr->offset = bo->offset;
321 } else {
322 ptr->gmrId = bo->mem.start;
323 ptr->offset = 0;
324 }
325}
326
327
328/**
329 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
330 *
331 * @vbo: The buffer object. Must be reserved.
332 * @pin: Whether to pin or unpin.
333 *
334 */
335void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
336{
337 struct ttm_operation_ctx ctx = { false, true };
338 struct ttm_place pl;
339 struct ttm_placement placement;
340 struct ttm_buffer_object *bo = &vbo->base;
341 uint32_t old_mem_type = bo->mem.mem_type;
342 int ret;
343
344 dma_resv_assert_held(bo->base.resv);
345
346 if (pin) {
347 if (vbo->pin_count++ > 0)
348 return;
349 } else {
350 WARN_ON(vbo->pin_count <= 0);
351 if (--vbo->pin_count > 0)
352 return;
353 }
354
355 pl.fpfn = 0;
356 pl.lpfn = 0;
357 pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
358 | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
359 if (pin)
360 pl.flags |= TTM_PL_FLAG_NO_EVICT;
361
362 memset(&placement, 0, sizeof(placement));
363 placement.num_placement = 1;
364 placement.placement = &pl;
365
366 ret = ttm_bo_validate(bo, &placement, &ctx);
367
368 BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
369}
370
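/*
 * Illustrative usage sketch for vmw_bo_pin_reserved() (not part of the
 * original source; error handling is abbreviated and the surrounding
 * context is hypothetical). The buffer must be reserved across the
 * pin/unpin calls:
 *
 *	ret = ttm_bo_reserve(&vbo->base, true, false, NULL);
 *	if (ret == 0) {
 *		vmw_bo_pin_reserved(vbo, true);
 *		... access the buffer knowing it will not move ...
 *		vmw_bo_pin_reserved(vbo, false);
 *		ttm_bo_unreserve(&vbo->base);
 *	}
 */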
371
372/**
373 * vmw_bo_map_and_cache - Map a buffer object and cache the map
374 *
375 * @vbo: The buffer object to map
376 * Return: A kernel virtual address or NULL if mapping failed.
377 *
378 * This function maps a buffer object into the kernel address space, or
379 * returns the virtual kernel address of an already existing map. The virtual
380 * address remains valid as long as the buffer object is pinned or reserved.
381 * The cached map is torn down on either
382 * 1) Buffer object move
383 * 2) Buffer object swapout
384 * 3) Buffer object destruction
385 *
386 */
387void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
388{
389 struct ttm_buffer_object *bo = &vbo->base;
390 bool not_used;
391 void *virtual;
392 int ret;
393
394 virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
395 if (virtual)
396 return virtual;
397
398 ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
399 if (ret)
400 DRM_ERROR("Buffer object map failed: %d.\n", ret);
401
402 return ttm_kmap_obj_virtual(&vbo->map, &not_used);
403}
404
405
406/**
407 * vmw_bo_unmap - Tear down a cached buffer object map.
408 *
409 * @vbo: The buffer object whose map we are tearing down.
410 *
411 * This function tears down a cached map set up using
412 * vmw_bo_map_and_cache().
413 */
414void vmw_bo_unmap(struct vmw_buffer_object *vbo)
415{
416 if (vbo->map.bo == NULL)
417 return;
418
419 ttm_bo_kunmap(&vbo->map);
420}
421
422
423/**
424 * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
425 *
426 * @dev_priv: Pointer to a struct vmw_private identifying the device.
427 * @size: The requested buffer size.
428 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
429 */
430static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
431 bool user)
432{
433 static size_t struct_size, user_struct_size;
434 size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
435 size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
436
437 if (unlikely(struct_size == 0)) {
438 size_t backend_size = ttm_round_pot(vmw_tt_size);
439
440 struct_size = backend_size +
441 ttm_round_pot(sizeof(struct vmw_buffer_object));
442 user_struct_size = backend_size +
443 ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
444 TTM_OBJ_EXTRA_SIZE;
445 }
446
447 if (dev_priv->map_mode == vmw_dma_alloc_coherent)
448 page_array_size +=
449 ttm_round_pot(num_pages * sizeof(dma_addr_t));
450
451 return ((user) ? user_struct_size : struct_size) +
452 page_array_size;
453}
454
455
456/**
457 * vmw_bo_bo_free - vmw buffer object destructor
458 *
459 * @bo: Pointer to the embedded struct ttm_buffer_object
460 */
461void vmw_bo_bo_free(struct ttm_buffer_object *bo)
462{
463 struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
464
465 vmw_bo_unmap(vmw_bo);
466 kfree(vmw_bo);
467}
468
469
470/**
471 * vmw_user_bo_destroy - vmw buffer object destructor
472 *
473 * @bo: Pointer to the embedded struct ttm_buffer_object
474 */
475static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
476{
477 struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
478
479 vmw_bo_unmap(&vmw_user_bo->vbo);
480 ttm_prime_object_kfree(vmw_user_bo, prime);
481}
482
483
484/**
485 * vmw_bo_init - Initialize a vmw buffer object
486 *
487 * @dev_priv: Pointer to the device private struct
488 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
489 * @size: Buffer object size in bytes.
490 * @placement: Initial placement.
491 * @interruptible: Whether waits should be performed interruptible.
492 * @bo_free: The buffer object destructor.
493 * Returns: Zero on success, negative error code on error.
494 *
495 * Note that on error, the code will free the buffer object.
496 */
497int vmw_bo_init(struct vmw_private *dev_priv,
498 struct vmw_buffer_object *vmw_bo,
499 size_t size, struct ttm_placement *placement,
500 bool interruptible,
501 void (*bo_free)(struct ttm_buffer_object *bo))
502{
503 struct ttm_bo_device *bdev = &dev_priv->bdev;
504 size_t acc_size;
505 int ret;
506 bool user = (bo_free == &vmw_user_bo_destroy);
507
508 WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));
509
510 acc_size = vmw_bo_acc_size(dev_priv, size, user);
511 memset(vmw_bo, 0, sizeof(*vmw_bo));
512 BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
513 vmw_bo->base.priority = 3;
514
515 INIT_LIST_HEAD(&vmw_bo->res_list);
516
517 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
518 ttm_bo_type_device, placement,
519 0, interruptible, acc_size,
520 NULL, NULL, bo_free);
521 return ret;
522}
523
524
525/**
526 * vmw_user_bo_release - TTM reference base object release callback for
527 * vmw user buffer objects
528 *
529 * @p_base: The TTM base object pointer about to be unreferenced.
530 *
531 * Clears the TTM base object pointer and drops the reference the
532 * base object has on the underlying struct vmw_buffer_object.
533 */
534static void vmw_user_bo_release(struct ttm_base_object **p_base)
535{
536 struct vmw_user_buffer_object *vmw_user_bo;
537 struct ttm_base_object *base = *p_base;
538
539 *p_base = NULL;
540
541 if (unlikely(base == NULL))
542 return;
543
544 vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
545 prime.base);
546 ttm_bo_put(&vmw_user_bo->vbo.base);
547}
548
549
550/**
551 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
552 * for vmw user buffer objects
553 *
554 * @base: Pointer to the TTM base object
555 * @ref_type: Reference type of the reference reaching zero.
556 *
557 * Called when user-space drops its last synccpu reference on the buffer
558 * object, either explicitly or as part of a cleanup on file close.
559 */
560static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
561 enum ttm_ref_type ref_type)
562{
563 struct vmw_user_buffer_object *user_bo;
564
565 user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);
566
567 switch (ref_type) {
568 case TTM_REF_SYNCCPU_WRITE:
569 ttm_bo_synccpu_write_release(&user_bo->vbo.base);
570 break;
571 default:
572 WARN_ONCE(true, "Undefined buffer object reference release.\n");
573 }
574}
575
576
577/**
578 * vmw_user_bo_alloc - Allocate a user buffer object
579 *
580 * @dev_priv: Pointer to a struct device private.
581 * @tfile: Pointer to a struct ttm_object_file on which to register the user
582 * object.
583 * @size: Size of the buffer object.
584 * @shareable: Boolean whether the buffer is shareable with other open files.
585 * @handle: Pointer to where the handle value should be assigned.
586 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
587 * should be assigned.
 * @p_base: Pointer to where a pointer to the refcounted TTM base object should
 * be placed, or NULL if it is not needed.
588 * Return: Zero on success, negative error code on error.
589 */
590int vmw_user_bo_alloc(struct vmw_private *dev_priv,
591 struct ttm_object_file *tfile,
592 uint32_t size,
593 bool shareable,
594 uint32_t *handle,
595 struct vmw_buffer_object **p_vbo,
596 struct ttm_base_object **p_base)
597{
598 struct vmw_user_buffer_object *user_bo;
599 int ret;
600
601 user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
602 if (unlikely(!user_bo)) {
603 DRM_ERROR("Failed to allocate a buffer.\n");
604 return -ENOMEM;
605 }
606
607 ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
608 (dev_priv->has_mob) ?
609 &vmw_sys_placement :
610 &vmw_vram_sys_placement, true,
611 &vmw_user_bo_destroy);
612 if (unlikely(ret != 0))
613 return ret;
614
615 ttm_bo_get(&user_bo->vbo.base);
616 ret = ttm_prime_object_init(tfile,
617 size,
618 &user_bo->prime,
619 shareable,
620 ttm_buffer_type,
621 &vmw_user_bo_release,
622 &vmw_user_bo_ref_obj_release);
623 if (unlikely(ret != 0)) {
624 ttm_bo_put(&user_bo->vbo.base);
625 goto out_no_base_object;
626 }
627
628 *p_vbo = &user_bo->vbo;
629 if (p_base) {
630 *p_base = &user_bo->prime.base;
631 kref_get(&(*p_base)->refcount);
632 }
633 *handle = user_bo->prime.base.handle;
634
635out_no_base_object:
636 return ret;
637}
638
639
640/**
641 * vmw_user_bo_verify_access - verify access permissions on this
642 * buffer object.
643 *
644 * @bo: Pointer to the buffer object being accessed
645 * @tfile: Identifying the caller.
646 */
647int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
648 struct ttm_object_file *tfile)
649{
650 struct vmw_user_buffer_object *vmw_user_bo;
651
652 if (unlikely(bo->destroy != vmw_user_bo_destroy))
653 return -EPERM;
654
655 vmw_user_bo = vmw_user_buffer_object(bo);
656
657 /* Check that the caller has opened the object. */
658 if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
659 return 0;
660
661 DRM_ERROR("Could not grant buffer access.\n");
662 return -EPERM;
663}
664
665
666/**
667 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
668 * access, idling previous GPU operations on the buffer and optionally
669 * blocking it for further command submissions.
670 *
671 * @user_bo: Pointer to the buffer object being grabbed for CPU access
672 * @tfile: Identifying the caller.
673 * @flags: Flags indicating how the grab should be performed.
674 * Return: Zero on success, Negative error code on error. In particular,
675 * -EBUSY will be returned if a dontblock operation is requested and the
676 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
677 * interrupted by a signal.
678 *
679 * A blocking grab will be automatically released when @tfile is closed.
680 */
681static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
682 struct ttm_object_file *tfile,
683 uint32_t flags)
684{
685 struct ttm_buffer_object *bo = &user_bo->vbo.base;
686 bool existed;
687 int ret;
688
689 if (flags & drm_vmw_synccpu_allow_cs) {
690 bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
691 long lret;
692
693 lret = dma_resv_wait_timeout_rcu
694 (bo->base.resv, true, true,
695 nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
696 if (!lret)
697 return -EBUSY;
698 else if (lret < 0)
699 return lret;
700 return 0;
701 }
702
703 ret = ttm_bo_synccpu_write_grab
704 (bo, !!(flags & drm_vmw_synccpu_dontblock));
705 if (unlikely(ret != 0))
706 return ret;
707
708 ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
709 TTM_REF_SYNCCPU_WRITE, &existed, false);
710 if (ret != 0 || existed)
711 ttm_bo_synccpu_write_release(&user_bo->vbo.base);
712
713 return ret;
714}
715
716/**
717 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
718 * and unblock command submission on the buffer if blocked.
719 *
720 * @handle: Handle identifying the buffer object.
721 * @tfile: Identifying the caller.
722 * @flags: Flags indicating the type of release.
723 */
724static int vmw_user_bo_synccpu_release(uint32_t handle,
725 struct ttm_object_file *tfile,
726 uint32_t flags)
727{
728 if (!(flags & drm_vmw_synccpu_allow_cs))
729 return ttm_ref_object_base_unref(tfile, handle,
730 TTM_REF_SYNCCPU_WRITE);
731
732 return 0;
733}
734
735
736/**
737 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
738 * functionality.
739 *
740 * @dev: Identifies the drm device.
741 * @data: Pointer to the ioctl argument.
742 * @file_priv: Identifies the caller.
743 * Return: Zero on success, negative error code on error.
744 *
745 * This function checks the ioctl arguments for validity and calls the
746 * relevant synccpu functions.
747 */
748int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
749 struct drm_file *file_priv)
750{
751 struct drm_vmw_synccpu_arg *arg =
752 (struct drm_vmw_synccpu_arg *) data;
753 struct vmw_buffer_object *vbo;
754 struct vmw_user_buffer_object *user_bo;
755 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
756 struct ttm_base_object *buffer_base;
757 int ret;
758
759 if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
760 || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
761 drm_vmw_synccpu_dontblock |
762 drm_vmw_synccpu_allow_cs)) != 0) {
763 DRM_ERROR("Illegal synccpu flags.\n");
764 return -EINVAL;
765 }
766
767 switch (arg->op) {
768 case drm_vmw_synccpu_grab:
769 ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
770 &buffer_base);
771 if (unlikely(ret != 0))
772 return ret;
773
774 user_bo = container_of(vbo, struct vmw_user_buffer_object,
775 vbo);
776 ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
777 vmw_bo_unreference(&vbo);
778 ttm_base_object_unref(&buffer_base);
779 if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
780 ret != -EBUSY)) {
781 DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
782 (unsigned int) arg->handle);
783 return ret;
784 }
785 break;
786 case drm_vmw_synccpu_release:
787 ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
788 arg->flags);
789 if (unlikely(ret != 0)) {
790 DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
791 (unsigned int) arg->handle);
792 return ret;
793 }
794 break;
795 default:
796 DRM_ERROR("Invalid synccpu operation.\n");
797 return -EINVAL;
798 }
799
800 return 0;
801}
802
803
804/**
805 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
806 * allocation functionality.
807 *
808 * @dev: Identifies the drm device.
809 * @data: Pointer to the ioctl argument.
810 * @file_priv: Identifies the caller.
811 * Return: Zero on success, negative error code on error.
812 *
813 * This function checks the ioctl arguments for validity and allocates a
814 * struct vmw_user_buffer_object bo.
815 */
816int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
817 struct drm_file *file_priv)
818{
819 struct vmw_private *dev_priv = vmw_priv(dev);
820 union drm_vmw_alloc_dmabuf_arg *arg =
821 (union drm_vmw_alloc_dmabuf_arg *)data;
822 struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
823 struct drm_vmw_dmabuf_rep *rep = &arg->rep;
824 struct vmw_buffer_object *vbo;
825 uint32_t handle;
826 int ret;
827
828 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
829 if (unlikely(ret != 0))
830 return ret;
831
832 ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
833 req->size, false, &handle, &vbo,
834 NULL);
835 if (unlikely(ret != 0))
836 goto out_no_bo;
837
838 rep->handle = handle;
839 rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
840 rep->cur_gmr_id = handle;
841 rep->cur_gmr_offset = 0;
842
843 vmw_bo_unreference(&vbo);
844
845out_no_bo:
846 ttm_read_unlock(&dev_priv->reservation_sem);
847
848 return ret;
849}
850
851
852/**
853 * vmw_bo_unref_ioctl - Generic handle close ioctl.
854 *
855 * @dev: Identifies the drm device.
856 * @data: Pointer to the ioctl argument.
857 * @file_priv: Identifies the caller.
858 * Return: Zero on success, negative error code on error.
859 *
860 * This function checks the ioctl arguments for validity and closes a
861 * handle to a TTM base object, optionally freeing the object.
862 */
863int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
864 struct drm_file *file_priv)
865{
866 struct drm_vmw_unref_dmabuf_arg *arg =
867 (struct drm_vmw_unref_dmabuf_arg *)data;
868
869 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
870 arg->handle,
871 TTM_REF_USAGE);
872}
873
874
875/**
876 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
877 *
878 * @tfile: The TTM object file the handle is registered with.
879 * @handle: The user buffer object handle
880 * @out: Pointer to where a pointer to the embedded
881 * struct vmw_buffer_object should be placed.
882 * @p_base: Pointer to where a pointer to the TTM base object should be
883 * placed, or NULL if no such pointer is required.
884 * Return: Zero on success, Negative error code on error.
885 *
886 * Both the output base object pointer and the vmw buffer object pointer
887 * will be refcounted.
888 */
889int vmw_user_bo_lookup(struct ttm_object_file *tfile,
890 uint32_t handle, struct vmw_buffer_object **out,
891 struct ttm_base_object **p_base)
892{
893 struct vmw_user_buffer_object *vmw_user_bo;
894 struct ttm_base_object *base;
895
896 base = ttm_base_object_lookup(tfile, handle);
897 if (unlikely(base == NULL)) {
898 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
899 (unsigned long)handle);
900 return -ESRCH;
901 }
902
903 if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
904 ttm_base_object_unref(&base);
905 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
906 (unsigned long)handle);
907 return -EINVAL;
908 }
909
910 vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
911 prime.base);
912 ttm_bo_get(&vmw_user_bo->vbo.base);
913 if (p_base)
914 *p_base = base;
915 else
916 ttm_base_object_unref(&base);
917 *out = &vmw_user_bo->vbo;
918
919 return 0;
920}
921
922/**
923 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
924 * @tfile: The TTM object file the handle is registered with.
925 * @handle: The user buffer object handle.
926 *
927 * This function looks up a struct vmw_user_buffer_object and returns a pointer
928 * to the struct vmw_buffer_object it derives from without refcounting the pointer.
929 * The returned pointer is only valid until vmw_user_bo_noref_release() is
930 * called, and the object pointed to by the returned pointer may be doomed.
931 * Any persistent usage of the object requires a refcount to be taken using
932 * ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
933 * needs to be paired with vmw_user_bo_noref_release() and no sleeping
934 * or scheduling functions may be called in between these function calls.
935 *
936 * Return: A struct vmw_buffer_object pointer if successful or negative
937 * error pointer on failure.
938 */
939struct vmw_buffer_object *
940vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
941{
942 struct vmw_user_buffer_object *vmw_user_bo;
943 struct ttm_base_object *base;
944
945 base = ttm_base_object_noref_lookup(tfile, handle);
946 if (!base) {
947 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
948 (unsigned long)handle);
949 return ERR_PTR(-ESRCH);
950 }
951
952 if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
953 ttm_base_object_noref_release();
954 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
955 (unsigned long)handle);
956 return ERR_PTR(-EINVAL);
957 }
958
959 vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
960 prime.base);
961 return &vmw_user_bo->vbo;
962}
963
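/*
 * Illustrative pairing sketch for vmw_user_bo_noref_lookup() (not part of
 * the original source; error handling is abbreviated). The lookup must be
 * paired with vmw_user_bo_noref_release(), with no sleeping or scheduling
 * calls in between:
 *
 *	vbo = vmw_user_bo_noref_lookup(tfile, handle);
 *	if (!IS_ERR(vbo)) {
 *		... short, non-sleeping use of vbo ...
 *		vmw_user_bo_noref_release();
 *	}
 */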
964/**
965 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
966 *
967 * @tfile: The TTM object file to register the handle with.
968 * @vbo: The embedded vmw buffer object.
969 * @handle: Pointer to where the new handle should be placed.
970 * Return: Zero on success, Negative error code on error.
971 */
972int vmw_user_bo_reference(struct ttm_object_file *tfile,
973 struct vmw_buffer_object *vbo,
974 uint32_t *handle)
975{
976 struct vmw_user_buffer_object *user_bo;
977
978 if (vbo->base.destroy != vmw_user_bo_destroy)
979 return -EINVAL;
980
981 user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);
982
983 *handle = user_bo->prime.base.handle;
984 return ttm_ref_object_add(tfile, &user_bo->prime.base,
985 TTM_REF_USAGE, NULL, false);
986}
987
988
989/**
990 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
991 * object without unreserving it.
992 *
993 * @bo: Pointer to the struct ttm_buffer_object to fence.
994 * @fence: Pointer to the fence. If NULL, this function will
995 * insert a fence into the command stream.
996 *
997 * Contrary to the ttm_eu version of this function, it takes only
998 * a single buffer object instead of a list, and it also doesn't
999 * unreserve the buffer object, which needs to be done separately.
1000 */
1001void vmw_bo_fence_single(struct ttm_buffer_object *bo,
1002 struct vmw_fence_obj *fence)
1003{
1004 struct ttm_bo_device *bdev = bo->bdev;
1005
1006 struct vmw_private *dev_priv =
1007 container_of(bdev, struct vmw_private, bdev);
1008
1009 if (fence == NULL) {
1010 vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1011 dma_resv_add_excl_fence(bo->base.resv, &fence->base);
1012 dma_fence_put(&fence->base);
1013 } else
1014 dma_resv_add_excl_fence(bo->base.resv, &fence->base);
1015}
1016
1017
1018/**
1019 * vmw_dumb_create - Create a dumb kms buffer
1020 *
1021 * @file_priv: Pointer to a struct drm_file identifying the caller.
1022 * @dev: Pointer to the drm device.
1023 * @args: Pointer to a struct drm_mode_create_dumb structure
1024 * Return: Zero on success, negative error code on failure.
1025 *
1026 * This is a driver callback for the core drm create_dumb functionality.
1027 * Note that this is very similar to the vmw_bo_alloc ioctl, except
1028 * that the arguments have a different format.
1029 */
1030int vmw_dumb_create(struct drm_file *file_priv,
1031 struct drm_device *dev,
1032 struct drm_mode_create_dumb *args)
1033{
1034 struct vmw_private *dev_priv = vmw_priv(dev);
1035 struct vmw_buffer_object *vbo;
1036 int ret;
1037
1038 args->pitch = args->width * ((args->bpp + 7) / 8);
1039 args->size = args->pitch * args->height;
1040
1041 ret = ttm_read_lock(&dev_priv->reservation_sem, true);
1042 if (unlikely(ret != 0))
1043 return ret;
1044
1045 ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
1046 args->size, false, &args->handle,
1047 &vbo, NULL);
1048 if (unlikely(ret != 0))
1049 goto out_no_bo;
1050
1051 vmw_bo_unreference(&vbo);
1052out_no_bo:
1053 ttm_read_unlock(&dev_priv->reservation_sem);
1054 return ret;
1055}
1056
1057
1058/**
1059 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
1060 *
1061 * @file_priv: Pointer to a struct drm_file identifying the caller.
1062 * @dev: Pointer to the drm device.
1063 * @handle: Handle identifying the dumb buffer.
1064 * @offset: The address space offset returned.
1065 * Return: Zero on success, negative error code on failure.
1066 *
1067 * This is a driver callback for the core drm dumb_map_offset functionality.
1068 */
1069int vmw_dumb_map_offset(struct drm_file *file_priv,
1070 struct drm_device *dev, uint32_t handle,
1071 uint64_t *offset)
1072{
1073 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1074 struct vmw_buffer_object *out_buf;
1075 int ret;
1076
1077 ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
1078 if (ret != 0)
1079 return -EINVAL;
1080
1081 *offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
1082 vmw_bo_unreference(&out_buf);
1083 return 0;
1084}
1085
1086
1087/**
1088 * vmw_dumb_destroy - Destroy a dumb buffer
1089 *
1090 * @file_priv: Pointer to a struct drm_file identifying the caller.
1091 * @dev: Pointer to the drm device.
1092 * @handle: Handle identifying the dumb buffer.
1093 * Return: Zero on success, negative error code on failure.
1094 *
1095 * This is a driver callback for the core drm dumb_destroy functionality.
1096 */
1097int vmw_dumb_destroy(struct drm_file *file_priv,
1098 struct drm_device *dev,
1099 uint32_t handle)
1100{
1101 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1102 handle, TTM_REF_USAGE);
1103}
1104
1105
1106/**
1107 * vmw_bo_swap_notify - swapout notify callback.
1108 *
1109 * @bo: The buffer object to be swapped out.
1110 */
1111void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
1112{
1113 /* Is @bo embedded in a struct vmw_buffer_object? */
1114 if (bo->destroy != vmw_bo_bo_free &&
1115 bo->destroy != vmw_user_bo_destroy)
1116 return;
1117
1118 /* Kill any cached kernel maps before swapout */
1119 vmw_bo_unmap(vmw_buffer_object(bo));
1120}
1121
1122
1123/**
1124 * vmw_bo_move_notify - TTM move_notify_callback
1125 *
1126 * @bo: The TTM buffer object about to move.
1127 * @mem: The struct ttm_mem_reg indicating to what memory
1128 * region the move is taking place.
1129 *
1130 * Detaches cached maps and device bindings that require that the
1131 * buffer doesn't move.
1132 */
1133void vmw_bo_move_notify(struct ttm_buffer_object *bo,
1134 struct ttm_mem_reg *mem)
1135{
1136 struct vmw_buffer_object *vbo;
1137
1138 if (mem == NULL)
1139 return;
1140
1141 /* Make sure @bo is embedded in a struct vmw_buffer_object. */
1142 if (bo->destroy != vmw_bo_bo_free &&
1143 bo->destroy != vmw_user_bo_destroy)
1144 return;
1145
1146 vbo = container_of(bo, struct vmw_buffer_object, base);
1147
1148 /*
1149 * Kill any cached kernel maps before move to or from VRAM.
1150 * With other types of moves, the underlying pages stay the same,
1151 * and the map can be kept.
1152 */
1153 if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
1154 vmw_bo_unmap(vbo);
1155
1156 /*
1157 * If we're moving a backup MOB out of MOB placement, then make sure we
1158 * read back all resource content first, and unbind the MOB from
1159 * the resource.
1160 */
1161 if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
1162 vmw_resource_unbind_list(vbo);
1163}
1// SPDX-License-Identifier: GPL-2.0 OR MIT
2/**************************************************************************
3 *
4 * Copyright (c) 2011-2024 Broadcom. All Rights Reserved. The term
5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 **************************************************************************/
28
29#include "vmwgfx_bo.h"
30#include "vmwgfx_drv.h"
31#include "vmwgfx_resource_priv.h"
32
33#include <drm/ttm/ttm_placement.h>
34
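/**
 * vmw_bo_release - Release the resources held by a vmw buffer object
 *
 * @vbo: The buffer object being released.
 *
 * Tears down any cached kernel map, destroys the detached-resources xarray,
 * detaches the backing mob from an associated dumb surface (if any) and
 * releases the embedded GEM object.
 */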
35static void vmw_bo_release(struct vmw_bo *vbo)
36{
37 struct vmw_resource *res;
38
39 WARN_ON(vbo->tbo.base.funcs &&
40 kref_read(&vbo->tbo.base.refcount) != 0);
41 vmw_bo_unmap(vbo);
42
43 xa_destroy(&vbo->detached_resources);
44 WARN_ON(vbo->is_dumb && !vbo->dumb_surface);
45 if (vbo->is_dumb && vbo->dumb_surface) {
46 res = &vbo->dumb_surface->res;
47 WARN_ON(vbo != res->guest_memory_bo);
48 WARN_ON(!res->guest_memory_bo);
49 if (res->guest_memory_bo) {
50 /* Reserve and switch the backing mob. */
51 mutex_lock(&res->dev_priv->cmdbuf_mutex);
52 (void)vmw_resource_reserve(res, false, true);
53 vmw_resource_mob_detach(res);
54 if (res->coherent)
55 vmw_bo_dirty_release(res->guest_memory_bo);
56 res->guest_memory_bo = NULL;
57 res->guest_memory_offset = 0;
58 vmw_resource_unreserve(res, false, false, false, NULL,
59 0);
60 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
61 }
62 vmw_surface_unreference(&vbo->dumb_surface);
63 }
64 drm_gem_object_release(&vbo->tbo.base);
65}
66
67/**
68 * vmw_bo_free - vmw_bo destructor
69 *
70 * @bo: Pointer to the embedded struct ttm_buffer_object
71 */
72static void vmw_bo_free(struct ttm_buffer_object *bo)
73{
74 struct vmw_bo *vbo = to_vmw_bo(&bo->base);
75
76 WARN_ON(vbo->dirty);
77 WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
78 vmw_bo_release(vbo);
79 kfree(vbo);
80}
81
82/**
83 * vmw_bo_pin_in_placement - Validate a buffer to placement.
84 *
85 * @dev_priv: Driver private.
86 * @buf: DMA buffer to move.
87 * @placement: The placement to pin it.
88 * @interruptible: Use interruptible wait.
89 * Return: Zero on success, Negative error code on failure. In particular
90 * -ERESTARTSYS if interrupted by a signal
91 */
92static int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
93 struct vmw_bo *buf,
94 struct ttm_placement *placement,
95 bool interruptible)
96{
97 struct ttm_operation_ctx ctx = {interruptible, false };
98 struct ttm_buffer_object *bo = &buf->tbo;
99 int ret;
100
101 vmw_execbuf_release_pinned_bo(dev_priv);
102
103 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
104 if (unlikely(ret != 0))
105 goto err;
106
107 ret = ttm_bo_validate(bo, placement, &ctx);
108 if (!ret)
109 vmw_bo_pin_reserved(buf, true);
110
111 ttm_bo_unreserve(bo);
112err:
113 return ret;
114}
115
116
117/**
118 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
119 *
120 * This function takes the reservation_sem in write mode.
121 * Flushes and unpins the query bo to avoid failures.
122 *
123 * @dev_priv: Driver private.
124 * @buf: DMA buffer to move.
125 * @interruptible: Use interruptible wait.
126 * Return: Zero on success, Negative error code on failure. In particular
127 * -ERESTARTSYS if interrupted by a signal
128 */
129int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
130 struct vmw_bo *buf,
131 bool interruptible)
132{
133 struct ttm_operation_ctx ctx = {interruptible, false };
134 struct ttm_buffer_object *bo = &buf->tbo;
135 int ret;
136
137 vmw_execbuf_release_pinned_bo(dev_priv);
138
139 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
140 if (unlikely(ret != 0))
141 goto err;
142
143 vmw_bo_placement_set(buf,
144 VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
145 VMW_BO_DOMAIN_GMR);
146 ret = ttm_bo_validate(bo, &buf->placement, &ctx);
147 if (likely(ret == 0) || ret == -ERESTARTSYS)
148 goto out_unreserve;
149
150 vmw_bo_placement_set(buf,
151 VMW_BO_DOMAIN_VRAM,
152 VMW_BO_DOMAIN_VRAM);
153 ret = ttm_bo_validate(bo, &buf->placement, &ctx);
154
155out_unreserve:
156 if (!ret)
157 vmw_bo_pin_reserved(buf, true);
158
159 ttm_bo_unreserve(bo);
160err:
161 return ret;
162}
163
164
165/**
166 * vmw_bo_pin_in_vram - Move a buffer to vram.
167 *
168 * This function takes the reservation_sem in write mode.
169 * Flushes and unpins the query bo to avoid failures.
170 *
171 * @dev_priv: Driver private.
172 * @buf: DMA buffer to move.
173 * @interruptible: Use interruptible wait.
174 * Return: Zero on success, Negative error code on failure. In particular
175 * -ERESTARTSYS if interrupted by a signal
176 */
177int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
178 struct vmw_bo *buf,
179 bool interruptible)
180{
181 return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
182 interruptible);
183}
184
185
186/**
187 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
188 *
189 * This function takes the reservation_sem in write mode.
190 * Flushes and unpins the query bo to avoid failures.
191 *
192 * @dev_priv: Driver private.
193 * @buf: DMA buffer to pin.
194 * @interruptible: Use interruptible wait.
195 * Return: Zero on success, Negative error code on failure. In particular
196 * -ERESTARTSYS if interrupted by a signal
197 */
198int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
199 struct vmw_bo *buf,
200 bool interruptible)
201{
202 struct ttm_operation_ctx ctx = {interruptible, false };
203 struct ttm_buffer_object *bo = &buf->tbo;
204 int ret = 0;
205
206 vmw_execbuf_release_pinned_bo(dev_priv);
207 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
208 if (unlikely(ret != 0))
209 goto err_unlock;
210
211 /*
212 * Is this buffer already in vram but not at the start of it?
213 * In that case, evict it first because TTM isn't good at handling
214 * that situation.
215 */
216 if (bo->resource->mem_type == TTM_PL_VRAM &&
217 bo->resource->start < PFN_UP(bo->resource->size) &&
218 bo->resource->start > 0 &&
219 buf->tbo.pin_count == 0) {
220 ctx.interruptible = false;
221 vmw_bo_placement_set(buf,
222 VMW_BO_DOMAIN_SYS,
223 VMW_BO_DOMAIN_SYS);
224 (void)ttm_bo_validate(bo, &buf->placement, &ctx);
225 }
226
227 vmw_bo_placement_set(buf,
228 VMW_BO_DOMAIN_VRAM,
229 VMW_BO_DOMAIN_VRAM);
230 buf->places[0].lpfn = PFN_UP(bo->resource->size);
231 ret = ttm_bo_validate(bo, &buf->placement, &ctx);
232
233 /* For some reason we didn't end up at the start of vram */
234 WARN_ON(ret == 0 && bo->resource->start != 0);
235 if (!ret)
236 vmw_bo_pin_reserved(buf, true);
237
238 ttm_bo_unreserve(bo);
239err_unlock:
240
241 return ret;
242}
243
244
245/**
246 * vmw_bo_unpin - Unpin the given buffer; does not move the buffer.
247 *
248 * This function takes the reservation_sem in write mode.
249 *
250 * @dev_priv: Driver private.
251 * @buf: DMA buffer to unpin.
252 * @interruptible: Use interruptible wait.
253 * Return: Zero on success, Negative error code on failure. In particular
254 * -ERESTARTSYS if interrupted by a signal
255 */
256int vmw_bo_unpin(struct vmw_private *dev_priv,
257 struct vmw_bo *buf,
258 bool interruptible)
259{
260 struct ttm_buffer_object *bo = &buf->tbo;
261 int ret;
262
263 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
264 if (unlikely(ret != 0))
265 goto err;
266
267 vmw_bo_pin_reserved(buf, false);
268
269 ttm_bo_unreserve(bo);
270
271err:
272 return ret;
273}
274
275/**
276 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
277 * of a buffer.
278 *
279 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
280 * @ptr: SVGAGuestPtr returning the result.
281 */
282void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
283 SVGAGuestPtr *ptr)
284{
285 if (bo->resource->mem_type == TTM_PL_VRAM) {
286 ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
287 ptr->offset = bo->resource->start << PAGE_SHIFT;
288 } else {
289 ptr->gmrId = bo->resource->start;
290 ptr->offset = 0;
291 }
292}
293
294
295/**
296 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
297 *
298 * @vbo: The buffer object. Must be reserved.
299 * @pin: Whether to pin or unpin.
300 *
301 */
302void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
303{
304 struct ttm_operation_ctx ctx = { false, true };
305 struct ttm_place pl;
306 struct ttm_placement placement;
307 struct ttm_buffer_object *bo = &vbo->tbo;
308 uint32_t old_mem_type = bo->resource->mem_type;
309 int ret;
310
311 dma_resv_assert_held(bo->base.resv);
312
313 if (pin == !!bo->pin_count)
314 return;
315
316 pl.fpfn = 0;
317 pl.lpfn = 0;
318 pl.mem_type = bo->resource->mem_type;
319 pl.flags = bo->resource->placement;
320
321 memset(&placement, 0, sizeof(placement));
322 placement.num_placement = 1;
323 placement.placement = &pl;
324
325 ret = ttm_bo_validate(bo, &placement, &ctx);
326
327 BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);
328
329 if (pin)
330 ttm_bo_pin(bo);
331 else
332 ttm_bo_unpin(bo);
333}
334
335/**
336 * vmw_bo_map_and_cache - Map a buffer object and cache the map
337 *
338 * @vbo: The buffer object to map
339 * Return: A kernel virtual address or NULL if mapping failed.
340 *
341 * This function maps a buffer object into the kernel address space, or
342 * returns the virtual kernel address of an already existing map. The virtual
343 * address remains valid as long as the buffer object is pinned or reserved.
344 * The cached map is torn down on either
345 * 1) Buffer object move
346 * 2) Buffer object swapout
347 * 3) Buffer object destruction
348 *
349 */
350void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
351{
352 return vmw_bo_map_and_cache_size(vbo, vbo->tbo.base.size);
353}
354
355void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size)
356{
357 struct ttm_buffer_object *bo = &vbo->tbo;
358 bool not_used;
359 void *virtual;
360 int ret;
361
362 atomic_inc(&vbo->map_count);
363
364 virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
365 if (virtual)
366 return virtual;
367
368 ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vbo->map);
369 if (ret)
370 DRM_ERROR("Buffer object map failed: %d (size: bo = %zu, map = %zu).\n",
371 ret, bo->base.size, size);
372
373 return ttm_kmap_obj_virtual(&vbo->map, &not_used);
374}
375
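/*
 * Illustrative usage sketch for the cached map (not part of the original
 * source; the data and len variables are hypothetical). Maps are counted,
 * so each successful vmw_bo_map_and_cache() should be balanced by a
 * vmw_bo_unmap() once the mapping is no longer needed:
 *
 *	void *virtual = vmw_bo_map_and_cache(vbo);
 *	if (virtual) {
 *		memcpy(virtual, data, len);
 *		vmw_bo_unmap(vbo);
 *	}
 */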
376
377/**
378 * vmw_bo_unmap - Tear down a cached buffer object map.
379 *
380 * @vbo: The buffer object whose map we are tearing down.
381 *
382 * This function tears down a cached map set up using
383 * vmw_bo_map_and_cache().
384 */
385void vmw_bo_unmap(struct vmw_bo *vbo)
386{
387 int map_count;
388
389 if (vbo->map.bo == NULL)
390 return;
391
392 map_count = atomic_dec_return(&vbo->map_count);
393
394 if (!map_count) {
395 ttm_bo_kunmap(&vbo->map);
396 vbo->map.bo = NULL;
397 }
398}
399
400
401/**
402 * vmw_bo_init - Initialize a vmw buffer object
403 *
404 * @dev_priv: Pointer to the device private struct
405 * @vmw_bo: Buffer object to initialize
406 * @params: Parameters used to initialize the buffer object
407 * @destroy: The function used to delete the buffer object
408 * Returns: Zero on success, negative error code on error.
409 *
410 */
411static int vmw_bo_init(struct vmw_private *dev_priv,
412 struct vmw_bo *vmw_bo,
413 struct vmw_bo_params *params,
414 void (*destroy)(struct ttm_buffer_object *))
415{
416 struct ttm_operation_ctx ctx = {
417 .interruptible = params->bo_type != ttm_bo_type_kernel,
418 .no_wait_gpu = false,
419 .resv = params->resv,
420 };
421 struct ttm_device *bdev = &dev_priv->bdev;
422 struct drm_device *vdev = &dev_priv->drm;
423 int ret;
424
425 memset(vmw_bo, 0, sizeof(*vmw_bo));
426
427 BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
428 vmw_bo->tbo.priority = 3;
429 vmw_bo->res_tree = RB_ROOT;
430 xa_init(&vmw_bo->detached_resources);
431 atomic_set(&vmw_bo->map_count, 0);
432
433 params->size = ALIGN(params->size, PAGE_SIZE);
434 drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);
435
436 vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
437 ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
438 &vmw_bo->placement, 0, &ctx,
439 params->sg, params->resv, destroy);
440 if (unlikely(ret))
441 return ret;
442
443 if (params->pin)
444 ttm_bo_pin(&vmw_bo->tbo);
445 if (!params->keep_resv)
446 ttm_bo_unreserve(&vmw_bo->tbo);
447
448 return 0;
449}
450
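/**
 * vmw_bo_create - Allocate and initialize a vmw buffer object
 *
 * @vmw: Pointer to the device private struct
 * @params: Parameters used to initialize the buffer object
 * @p_bo: Pointer to where the resulting buffer object should be placed
 * Returns: Zero on success, negative error code on error. On failure
 * *p_bo is set to NULL.
 */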
451int vmw_bo_create(struct vmw_private *vmw,
452 struct vmw_bo_params *params,
453 struct vmw_bo **p_bo)
454{
455 int ret;
456
457 *p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
458 if (unlikely(!*p_bo)) {
459 DRM_ERROR("Failed to allocate a buffer.\n");
460 return -ENOMEM;
461 }
462
463 /*
464 * vmw_bo_init will delete the *p_bo object if it fails
465 */
466 ret = vmw_bo_init(vmw, *p_bo, params, vmw_bo_free);
467 if (unlikely(ret != 0))
468 goto out_error;
469
470 return ret;
471out_error:
472 *p_bo = NULL;
473 return ret;
474}
475
476/**
477 * vmw_user_bo_synccpu_grab - Grab a struct vmw_bo for cpu
478 * access, idling previous GPU operations on the buffer and optionally
479 * blocking it for further command submissions.
480 *
481 * @vmw_bo: Pointer to the buffer object being grabbed for CPU access
482 * @flags: Flags indicating how the grab should be performed.
483 * Return: Zero on success, Negative error code on error. In particular,
484 * -EBUSY will be returned if a dontblock operation is requested and the
485 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
486 * interrupted by a signal.
487 *
488 * A blocking grab will be automatically released when @tfile is closed.
489 */
490static int vmw_user_bo_synccpu_grab(struct vmw_bo *vmw_bo,
491 uint32_t flags)
492{
493 bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
494 struct ttm_buffer_object *bo = &vmw_bo->tbo;
495 int ret;
496
497 if (flags & drm_vmw_synccpu_allow_cs) {
498 long lret;
499
500 lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
501 true, nonblock ? 0 :
502 MAX_SCHEDULE_TIMEOUT);
503 if (!lret)
504 return -EBUSY;
505 else if (lret < 0)
506 return lret;
507 return 0;
508 }
509
510 ret = ttm_bo_reserve(bo, true, nonblock, NULL);
511 if (unlikely(ret != 0))
512 return ret;
513
514 ret = ttm_bo_wait(bo, true, nonblock);
515 if (likely(ret == 0))
516 atomic_inc(&vmw_bo->cpu_writers);
517
518 ttm_bo_unreserve(bo);
519 if (unlikely(ret != 0))
520 return ret;
521
522 return ret;
523}
524
525/**
526 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
527 * and unblock command submission on the buffer if blocked.
528 *
529 * @filp: Identifying the caller.
530 * @handle: Handle identifying the buffer object.
531 * @flags: Flags indicating the type of release.
532 */
533static int vmw_user_bo_synccpu_release(struct drm_file *filp,
534 uint32_t handle,
535 uint32_t flags)
536{
537 struct vmw_bo *vmw_bo;
538 int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);
539
540 if (!ret) {
541 if (!(flags & drm_vmw_synccpu_allow_cs)) {
542 atomic_dec(&vmw_bo->cpu_writers);
543 }
544 vmw_user_bo_unref(&vmw_bo);
545 }
546
547 return ret;
548}
549
550
551/**
552 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
553 * functionality.
554 *
555 * @dev: Identifies the drm device.
556 * @data: Pointer to the ioctl argument.
557 * @file_priv: Identifies the caller.
558 * Return: Zero on success, negative error code on error.
559 *
560 * This function checks the ioctl arguments for validity and calls the
561 * relevant synccpu functions.
562 */
563int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
564 struct drm_file *file_priv)
565{
566 struct drm_vmw_synccpu_arg *arg =
567 (struct drm_vmw_synccpu_arg *) data;
568 struct vmw_bo *vbo;
569 int ret;
570
571 if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
572 || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
573 drm_vmw_synccpu_dontblock |
574 drm_vmw_synccpu_allow_cs)) != 0) {
575 DRM_ERROR("Illegal synccpu flags.\n");
576 return -EINVAL;
577 }
578
579 switch (arg->op) {
580 case drm_vmw_synccpu_grab:
581 ret = vmw_user_bo_lookup(file_priv, arg->handle, &vbo);
582 if (unlikely(ret != 0))
583 return ret;
584
585 ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
586 vmw_user_bo_unref(&vbo);
587 if (unlikely(ret != 0)) {
588 if (ret == -ERESTARTSYS || ret == -EBUSY)
589 return -EBUSY;
590 DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
591 (unsigned int) arg->handle);
592 return ret;
593 }
594 break;
595 case drm_vmw_synccpu_release:
596 ret = vmw_user_bo_synccpu_release(file_priv,
597 arg->handle,
598 arg->flags);
599 if (unlikely(ret != 0)) {
600 DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
601 (unsigned int) arg->handle);
602 return ret;
603 }
604 break;
605 default:
606 DRM_ERROR("Invalid synccpu operation.\n");
607 return -EINVAL;
608 }
609
610 return 0;
611}
612
613/**
614 * vmw_bo_unref_ioctl - Generic handle close ioctl.
615 *
616 * @dev: Identifies the drm device.
617 * @data: Pointer to the ioctl argument.
618 * @file_priv: Identifies the caller.
619 * Return: Zero on success, negative error code on error.
620 *
621 * This function checks the ioctl arguments for validity and closes a
622 * handle to a TTM base object, optionally freeing the object.
623 */
624int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
625 struct drm_file *file_priv)
626{
627 struct drm_vmw_unref_dmabuf_arg *arg =
628 (struct drm_vmw_unref_dmabuf_arg *)data;
629
630 return drm_gem_handle_delete(file_priv, arg->handle);
631}
632
633
634/**
635 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
636 *
637 * @filp: The file the handle is registered with.
638 * @handle: The user buffer object handle
639 * @out: Pointer to where a pointer to the embedded
640 * struct vmw_bo should be placed.
641 * Return: Zero on success, Negative error code on error.
642 *
643 * The vmw buffer object pointer will be refcounted (both ttm and gem)
644 */
645int vmw_user_bo_lookup(struct drm_file *filp,
646 u32 handle,
647 struct vmw_bo **out)
648{
649 struct drm_gem_object *gobj;
650
651 gobj = drm_gem_object_lookup(filp, handle);
652 if (!gobj) {
653 DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
654 (unsigned long)handle);
655 return -ESRCH;
656 }
657
658 *out = to_vmw_bo(gobj);
659
660 return 0;
661}
662
663/**
664 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
665 * object without unreserving it.
666 *
667 * @bo: Pointer to the struct ttm_buffer_object to fence.
668 * @fence: Pointer to the fence. If NULL, this function will
669 * insert a fence into the command stream.
670 *
671 * Contrary to the ttm_eu version of this function, it takes only
672 * a single buffer object instead of a list, and it also doesn't
673 * unreserve the buffer object, which needs to be done separately.
674 */
675void vmw_bo_fence_single(struct ttm_buffer_object *bo,
676 struct vmw_fence_obj *fence)
677{
678 struct ttm_device *bdev = bo->bdev;
679 struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
680 int ret;
681
682 if (fence == NULL)
683 vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
684 else
685 dma_fence_get(&fence->base);
686
687 ret = dma_resv_reserve_fences(bo->base.resv, 1);
688 if (!ret)
689 dma_resv_add_fence(bo->base.resv, &fence->base,
690 DMA_RESV_USAGE_KERNEL);
691 else
692 /* Last resort fallback when we are OOM */
693 dma_fence_wait(&fence->base, false);
694 dma_fence_put(&fence->base);
695}
696
697/**
698 * vmw_bo_swap_notify - swapout notify callback.
699 *
700 * @bo: The buffer object to be swapped out.
701 */
702void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
703{
704 /* Kill any cached kernel maps before swapout */
705 vmw_bo_unmap(to_vmw_bo(&bo->base));
706}
707
708
709/**
710 * vmw_bo_move_notify - TTM move_notify_callback
711 *
712 * @bo: The TTM buffer object about to move.
713 * @mem: The struct ttm_resource indicating to what memory
714 * region the move is taking place.
715 *
716 * Detaches cached maps and device bindings that require that the
717 * buffer doesn't move.
718 */
719void vmw_bo_move_notify(struct ttm_buffer_object *bo,
720 struct ttm_resource *mem)
721{
722 struct vmw_bo *vbo = to_vmw_bo(&bo->base);
723
724 /*
725 * Kill any cached kernel maps before move to or from VRAM.
726 * With other types of moves, the underlying pages stay the same,
727 * and the map can be kept.
728 */
729 if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
730 vmw_bo_unmap(vbo);
731
732 /*
733 * If we're moving a backup MOB out of MOB placement, then make sure we
734 * read back all resource content first, and unbind the MOB from
735 * the resource.
736 */
737 if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
738 vmw_resource_unbind_list(vbo);
739}
740
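/*
 * placement_flags - Map a VMW_BO_DOMAIN_* bit to TTM placement flags.
 *
 * Returns 0 when @domain is set in both the desired and fallback masks,
 * TTM_PL_FLAG_DESIRED when it is only desired, and TTM_PL_FLAG_FALLBACK
 * otherwise.
 */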
741static u32 placement_flags(u32 domain, u32 desired, u32 fallback)
742{
743 if (desired & fallback & domain)
744 return 0;
745
746 if (desired & domain)
747 return TTM_PL_FLAG_DESIRED;
748
749 return TTM_PL_FLAG_FALLBACK;
750}
751
752static u32
753set_placement_list(struct ttm_place *pl, u32 desired, u32 fallback)
754{
755 u32 domain = desired | fallback;
756 u32 n = 0;
757
758 /*
759 * The placements are ordered according to our preferences
760 */
761 if (domain & VMW_BO_DOMAIN_MOB) {
762 pl[n].mem_type = VMW_PL_MOB;
763 pl[n].flags = placement_flags(VMW_BO_DOMAIN_MOB, desired,
764 fallback);
765 pl[n].fpfn = 0;
766 pl[n].lpfn = 0;
767 n++;
768 }
769 if (domain & VMW_BO_DOMAIN_GMR) {
770 pl[n].mem_type = VMW_PL_GMR;
771 pl[n].flags = placement_flags(VMW_BO_DOMAIN_GMR, desired,
772 fallback);
773 pl[n].fpfn = 0;
774 pl[n].lpfn = 0;
775 n++;
776 }
777 if (domain & VMW_BO_DOMAIN_VRAM) {
778 pl[n].mem_type = TTM_PL_VRAM;
779 pl[n].flags = placement_flags(VMW_BO_DOMAIN_VRAM, desired,
780 fallback);
781 pl[n].fpfn = 0;
782 pl[n].lpfn = 0;
783 n++;
784 }
785 if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
786 pl[n].mem_type = VMW_PL_SYSTEM;
787 pl[n].flags = placement_flags(VMW_BO_DOMAIN_WAITABLE_SYS,
788 desired, fallback);
789 pl[n].fpfn = 0;
790 pl[n].lpfn = 0;
791 n++;
792 }
793 if (domain & VMW_BO_DOMAIN_SYS) {
794 pl[n].mem_type = TTM_PL_SYSTEM;
795 pl[n].flags = placement_flags(VMW_BO_DOMAIN_SYS, desired,
796 fallback);
797 pl[n].fpfn = 0;
798 pl[n].lpfn = 0;
799 n++;
800 }
801
802 WARN_ON(!n);
803 if (!n) {
804 pl[n].mem_type = TTM_PL_SYSTEM;
805 pl[n].flags = 0;
806 pl[n].fpfn = 0;
807 pl[n].lpfn = 0;
808 n++;
809 }
810 return n;
811}
812
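/**
 * vmw_bo_placement_set - Set the TTM placements of a buffer object
 *
 * @bo: The buffer object.
 * @domain: Bitmask of VMW_BO_DOMAIN_* values describing the desired placements.
 * @busy_domain: Bitmask of VMW_BO_DOMAIN_* values to fall back to when busy.
 *
 * Rebuilds @bo's placement list from the given domain masks and, when driver
 * debugging is enabled, warns about transitions that are incompatible with
 * the buffer's current memory type.
 */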
813void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
814{
815 struct ttm_device *bdev = bo->tbo.bdev;
816 struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
817 struct ttm_placement *pl = &bo->placement;
818 bool mem_compatible = false;
819 u32 i;
820
821 pl->placement = bo->places;
822 pl->num_placement = set_placement_list(bo->places, domain, busy_domain);
823
824 if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) {
825 for (i = 0; i < pl->num_placement; ++i) {
826 if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM ||
827 bo->tbo.resource->mem_type == pl->placement[i].mem_type)
828 mem_compatible = true;
829 }
830 if (!mem_compatible)
831 drm_warn(&vmw->drm,
832 "%s: Incompatible transition from "
833 "bo->base.resource->mem_type = %u to domain = %u\n",
834 __func__, bo->tbo.resource->mem_type, domain);
835 }
836
837}
838
839void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
840{
841 struct ttm_device *bdev = bo->tbo.bdev;
842 struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
843 u32 domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM;
844
845 if (vmw->has_mob)
846 domain = VMW_BO_DOMAIN_MOB;
847
848 vmw_bo_placement_set(bo, domain, domain);
849}
850
851void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
852{
853 xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL);
854}
855
856void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
857{
858 xa_erase(&vbo->detached_resources, (unsigned long)res);
859}
860
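/**
 * vmw_bo_surface - Return a surface associated with a buffer object
 *
 * @vbo: The buffer object.
 *
 * Looks for a surface backed by @vbo, checking the dumb surface first, then
 * the detached resources and finally the resource tree. Returns the surface
 * without taking a reference, or NULL if none is found.
 */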
861struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo)
862{
863 unsigned long index;
864 struct vmw_resource *res = NULL;
865 struct vmw_surface *surf = NULL;
866 struct rb_node *rb_itr = vbo->res_tree.rb_node;
867
868 if (vbo->is_dumb && vbo->dumb_surface) {
869 res = &vbo->dumb_surface->res;
870 goto out;
871 }
872
873 xa_for_each(&vbo->detached_resources, index, res) {
874 if (res->func->res_type == vmw_res_surface)
875 goto out;
876 }
877
878 for (rb_itr = rb_first(&vbo->res_tree); rb_itr;
879 rb_itr = rb_next(rb_itr)) {
880 res = rb_entry(rb_itr, struct vmw_resource, mob_node);
881 if (res->func->res_type == vmw_res_surface)
882 goto out;
883 }
884
885out:
886 if (res)
887 surf = vmw_res_to_srf(res);
888 return surf;
889}