v5.4: drivers/gpu/drm/ttm/ttm_execbuf_util.c
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}

static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		ttm_bo_del_from_lru(bo);
	}
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->bdev->glob;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (list_empty(&bo->lru))
			ttm_bo_add_to_lru(bo);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&glob->lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups, bool del_lru)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->bdev->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			dma_resv_unlock(bo->base.resv);

			ret = -EBUSY;

		} else if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->num_shared)
				continue;

			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK) {
			if (intr) {
				ret = dma_resv_lock_slow_interruptible(bo->base.resv,
								       ticket);
			} else {
				dma_resv_lock_slow(bo->base.resv, ticket);
				ret = 0;
			}
		}

		if (!ret && entry->num_shared)
			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	if (del_lru) {
		spin_lock(&glob->lru_lock);
		ttm_eu_del_from_lru_locked(list);
		spin_unlock(&glob->lru_lock);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	glob = bo->bdev->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		if (entry->num_shared)
			dma_resv_add_shared_fence(bo->base.resv, fence);
		else
			dma_resv_add_excl_fence(bo->base.resv, fence);
		if (list_empty(&bo->lru))
			ttm_bo_add_to_lru(bo);
		else
			ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
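
For context, here is a minimal usage sketch (not from the kernel tree) of how a driver's command-submission path might drive the three helpers exported above, written against the v5.4 signatures. The my_validate() and my_submit() calls are hypothetical stand-ins for driver-specific buffer validation and command submission; everything else uses only the APIs shown in the listing.

#include <drm/ttm/ttm_execbuf_util.h>
#include <linux/dma-fence.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical driver hooks, assumed for this sketch only. */
extern int my_validate(struct list_head *list);
extern struct dma_fence *my_submit(struct ttm_buffer_object **bos,
				   unsigned int n);

static int my_execbuf(struct ttm_buffer_object **bos, unsigned int n)
{
	struct ttm_validate_buffer *vbufs;
	struct ww_acquire_ctx ticket;
	struct dma_fence *fence;
	LIST_HEAD(list);
	unsigned int i;
	int ret;

	vbufs = kcalloc(n, sizeof(*vbufs), GFP_KERNEL);
	if (!vbufs)
		return -ENOMEM;

	/* Build the validation list; num_shared = 1 requests one
	 * shared-fence slot per BO (read access is enough here). */
	for (i = 0; i < n; i++) {
		vbufs[i].bo = bos[i];
		vbufs[i].num_shared = 1;
		list_add_tail(&vbufs[i].head, &list);
	}

	/* Lock every BO; the -EDEADLK backoff and retry loop happens
	 * inside the helper. intr = true, no dups list, del_lru = true. */
	ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
	if (ret)
		goto out_free;

	ret = my_validate(&list);
	if (ret)
		goto out_backoff;

	fence = my_submit(bos, n);
	if (IS_ERR(fence)) {
		ret = PTR_ERR(fence);
		goto out_backoff;
	}

	/* Publish the fence on every reservation and drop all locks. */
	ttm_eu_fence_buffer_objects(&ticket, &list, fence);
	dma_fence_put(fence);
	kfree(vbufs);
	return 0;

out_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);
out_free:
	kfree(vbufs);
	return ret;
}

Note that on the success path ttm_eu_fence_buffer_objects() both attaches the fence and unlocks every buffer (and finalizes the ticket), so no explicit backoff is needed there; ttm_eu_backoff_reservation() is only for abandoning a reserved list.
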
v5.14.15: drivers/gpu/drm/ttm/ttm_execbuf_util.c
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->num_shared)
				continue;

			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK) {
			ret = ttm_bo_reserve_slowpath(bo, intr, ticket);
		}

		if (!ret && entry->num_shared)
			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);

		if (unlikely(ret != 0)) {
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->num_shared)
			dma_resv_add_shared_fence(bo->base.resv, fence);
		else
			dma_resv_add_excl_fence(bo->base.resv, fence);
		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
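
Comparing the two versions: v5.14.15 drops the struct ttm_bo_global bookkeeping and the glob->lru_lock critical sections, and removes ttm_eu_del_from_lru_locked() together with the del_lru parameter; the helpers now always move buffers to the LRU tail via ttm_bo_move_to_lru_tail_unlocked(). The __ttm_bo_reserve() call and the cpu_writers -EBUSY check are replaced by a plain ttm_bo_reserve(), and the open-coded dma_resv_lock_slow{,_interruptible}() deadlock slowpath is folded into ttm_bo_reserve_slowpath(). The -EINTR to -ERESTARTSYS translation also disappears from this file (by this point it is performed inside ttm_bo_reserve() itself). For a caller, the visible change is one parameter, as in this sketch (variable names as in the earlier example):

	/* v5.4: the caller decides whether reserved BOs leave the LRU. */
	ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);

	/* v5.14.15: del_lru is gone; LRU position is managed internally. */
	ret = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);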