drivers/gpu/drm/ttm/ttm_execbuf_util.c at v3.1:

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

/*
 * Undo the reservations taken so far: put removed buffers back on the
 * LRU, clear the reserved flag and wake up any waiters. Caller must
 * hold glob->lru_lock.
 */
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                if (!entry->reserved)
                        continue;

                if (entry->removed) {
                        ttm_bo_add_to_lru(bo);
                        entry->removed = false;
                }
                entry->reserved = false;
                atomic_set(&bo->reserved, 0);
                wake_up_all(&bo->event_queue);
        }
}

/*
 * Take every reserved buffer off its LRU list, recording the number of
 * dropped LRU references in put_count. Caller must hold glob->lru_lock.
 */
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                if (!entry->reserved)
                        continue;

                if (!entry->removed) {
                        entry->put_count = ttm_bo_del_from_lru(bo);
                        entry->removed = true;
                }
        }
}

/*
 * Drop the LRU list references recorded in put_count by
 * ttm_eu_del_from_lru_locked().
 */
static void ttm_eu_list_ref_sub(struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                if (entry->put_count) {
                        ttm_bo_list_ref_sub(bo, entry->put_count, true);
                        entry->put_count = 0;
                }
        }
}

/*
 * Drop the LRU lock while waiting for @bo to become unreserved; if the
 * wait fails, back off the whole list before returning.
 */
static int ttm_eu_wait_unreserved_locked(struct list_head *list,
                                         struct ttm_buffer_object *bo)
{
        struct ttm_bo_global *glob = bo->glob;
        int ret;

        ttm_eu_del_from_lru_locked(list);
        spin_unlock(&glob->lru_lock);
        ret = ttm_bo_wait_unreserved(bo, true);
        spin_lock(&glob->lru_lock);
        if (unlikely(ret != 0))
                ttm_eu_backoff_reservation_locked(list);
        return ret;
}

/*
 * Release all reservations taken via ttm_eu_reserve_buffers().
 */
void ttm_eu_backoff_reservation(struct list_head *list)
{
        struct ttm_validate_buffer *entry;
        struct ttm_bo_global *glob;

        if (list_empty(list))
                return;

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;
        spin_lock(&glob->lru_lock);
        ttm_eu_backoff_reservation_locked(list);
        spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct list_head *list)
{
        struct ttm_bo_global *glob;
        struct ttm_validate_buffer *entry;
        int ret;
        uint32_t val_seq;

        if (list_empty(list))
                return 0;

        list_for_each_entry(entry, list, head) {
                entry->reserved = false;
                entry->put_count = 0;
                entry->removed = false;
        }

        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;

retry:
        spin_lock(&glob->lru_lock);
        val_seq = entry->bo->bdev->val_seq++;

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

retry_this_bo:
                ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
                switch (ret) {
                case 0:
                        break;
                case -EBUSY:
                        /* Buffer is reserved by someone else: wait for it
                         * to become unreserved, then retry this buffer. */
                        ret = ttm_eu_wait_unreserved_locked(list, bo);
                        if (unlikely(ret != 0)) {
                                spin_unlock(&glob->lru_lock);
                                ttm_eu_list_ref_sub(list);
                                return ret;
                        }
                        goto retry_this_bo;
                case -EAGAIN:
                        /* Deadlock risk: the other validator has precedence.
                         * Release everything, wait for the contended buffer
                         * and restart with a new validation sequence. */
                        ttm_eu_backoff_reservation_locked(list);
                        spin_unlock(&glob->lru_lock);
                        ttm_eu_list_ref_sub(list);
                        ret = ttm_bo_wait_unreserved(bo, true);
                        if (unlikely(ret != 0))
                                return ret;
                        goto retry;
                default:
                        ttm_eu_backoff_reservation_locked(list);
                        spin_unlock(&glob->lru_lock);
                        ttm_eu_list_ref_sub(list);
                        return ret;
                }

                entry->reserved = true;
                if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
                        /* Buffer is marked for CPU access: back off and
                         * wait for it to become free for the GPU. */
                        ttm_eu_backoff_reservation_locked(list);
                        spin_unlock(&glob->lru_lock);
                        ttm_eu_list_ref_sub(list);
                        ret = ttm_bo_wait_cpu(bo, false);
                        if (ret)
                                return ret;
                        goto retry;
                }
        }

        ttm_eu_del_from_lru_locked(list);
        spin_unlock(&glob->lru_lock);
        ttm_eu_list_ref_sub(list);

        return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
        struct ttm_validate_buffer *entry;
        struct ttm_buffer_object *bo;
        struct ttm_bo_global *glob;
        struct ttm_bo_device *bdev;
        struct ttm_bo_driver *driver;

        if (list_empty(list))
                return;

        bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
        bdev = bo->bdev;
        driver = bdev->driver;
        glob = bo->glob;

        spin_lock(&bdev->fence_lock);
        spin_lock(&glob->lru_lock);

        /* Attach the new sync object to every buffer and unreserve it. */
        list_for_each_entry(entry, list, head) {
                bo = entry->bo;
                entry->old_sync_obj = bo->sync_obj;
                bo->sync_obj = driver->sync_obj_ref(sync_obj);
                bo->sync_obj_arg = entry->new_sync_obj_arg;
                ttm_bo_unreserve_locked(bo);
                entry->reserved = false;
        }
        spin_unlock(&glob->lru_lock);
        spin_unlock(&bdev->fence_lock);

        /* Drop references to the replaced sync objects outside the locks. */
        list_for_each_entry(entry, list, head) {
                if (entry->old_sync_obj)
                        driver->sync_obj_unref(&entry->old_sync_obj);
        }
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
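
For reference, a driver's execbuf path would drive this v3.1 interface roughly as follows. This is a minimal sketch, not code from any real driver: my_submit() and my_validate_and_submit() are hypothetical names, and the sync_obj handling is entirely driver-specific.

/*
 * Hedged usage sketch against the v3.1 API above. my_submit() and
 * my_validate_and_submit() are hypothetical driver code; the sync_obj
 * is whatever the driver's fencing backend produces.
 */
#include <linux/slab.h>
#include "ttm/ttm_execbuf_util.h"

static int my_validate_and_submit(struct list_head *list); /* hypothetical */

static int my_submit(struct ttm_buffer_object **bos, unsigned n,
                     void *sync_obj)
{
        struct ttm_validate_buffer *bufs;
        struct list_head list;
        unsigned i;
        int ret;

        bufs = kcalloc(n, sizeof(*bufs), GFP_KERNEL);
        if (!bufs)
                return -ENOMEM;

        INIT_LIST_HEAD(&list);
        for (i = 0; i < n; ++i) {
                bufs[i].bo = bos[i];
                bufs[i].new_sync_obj_arg = NULL; /* driver-specific */
                list_add_tail(&bufs[i].head, &list);
        }

        /* Reserve the whole list; contention and retries are handled
         * internally by ttm_eu_reserve_buffers(). */
        ret = ttm_eu_reserve_buffers(&list);
        if (ret)
                goto out;

        ret = my_validate_and_submit(&list);
        if (ret) {
                /* On error, drop every reservation we took. */
                ttm_eu_backoff_reservation(&list);
                goto out;
        }

        /* Attach the fence to all buffers and unreserve them. */
        ttm_eu_fence_buffer_objects(&list, sync_obj);
out:
        kfree(bufs);
        return ret;
}
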
drivers/gpu/drm/ttm/ttm_execbuf_util.c at v5.9:
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

/* Unlock, in reverse order, every buffer reserved before @entry. */
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
                                               struct ttm_validate_buffer *entry)
{
        list_for_each_entry_continue_reverse(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                dma_resv_unlock(bo->base.resv);
        }
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
                                struct list_head *list)
{
        struct ttm_validate_buffer *entry;

        if (list_empty(list))
                return;

        spin_lock(&ttm_bo_glob.lru_lock);
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                ttm_bo_move_to_lru_tail(bo, NULL);
                dma_resv_unlock(bo->base.resv);
        }
        spin_unlock(&ttm_bo_glob.lru_lock);

        if (ticket)
                ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * Each buffer's dma_resv lock is taken under a common ww_acquire_ctx
 * ticket. If a lock is contended and -EDEADLK is returned, all
 * reservations taken so far are dropped, the contended lock is acquired
 * via the ww_mutex slow path, and the loop starts over. This prevents
 * deadlocks when multiple validators lock the same buffers in different
 * orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                           struct list_head *list, bool intr,
                           struct list_head *dups)
{
        struct ttm_validate_buffer *entry;
        int ret;

        if (list_empty(list))
                return 0;

        if (ticket)
                ww_acquire_init(ticket, &reservation_ww_class);

        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
                if (ret == -EALREADY && dups) {
                        /* Already reserved under this ticket: park the
                         * duplicate on @dups and keep going. */
                        struct ttm_validate_buffer *safe = entry;

                        entry = list_prev_entry(entry, head);
                        list_del(&safe->head);
                        list_add(&safe->head, dups);
                        continue;
                }

                if (!ret) {
                        if (!entry->num_shared)
                                continue;

                        ret = dma_resv_reserve_shared(bo->base.resv,
                                                      entry->num_shared);
                        if (!ret)
                                continue;
                }

                /* uh oh, we lost out, drop every reservation and try
                 * to only reserve this buffer, then start over if
                 * this succeeds.
                 */
                ttm_eu_backoff_reservation_reverse(list, entry);

                if (ret == -EDEADLK) {
                        if (intr) {
                                ret = dma_resv_lock_slow_interruptible(bo->base.resv,
                                                                       ticket);
                        } else {
                                dma_resv_lock_slow(bo->base.resv, ticket);
                                ret = 0;
                        }
                }

                if (!ret && entry->num_shared)
                        ret = dma_resv_reserve_shared(bo->base.resv,
                                                      entry->num_shared);

                if (unlikely(ret != 0)) {
                        if (ret == -EINTR)
                                ret = -ERESTARTSYS;
                        if (ticket) {
                                ww_acquire_done(ticket);
                                ww_acquire_fini(ticket);
                        }
                        return ret;
                }

                /* move this item to the front of the list,
                 * forces correct iteration of the loop without keeping track
                 */
                list_del(&entry->head);
                list_add(&entry->head, list);
        }

        return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                                 struct list_head *list,
                                 struct dma_fence *fence)
{
        struct ttm_validate_buffer *entry;

        if (list_empty(list))
                return;

        spin_lock(&ttm_bo_glob.lru_lock);
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                /* Publish the fence as shared or exclusive, then move
                 * the buffer to the LRU tail and drop its reservation. */
                if (entry->num_shared)
                        dma_resv_add_shared_fence(bo->base.resv, fence);
                else
                        dma_resv_add_excl_fence(bo->base.resv, fence);
                ttm_bo_move_to_lru_tail(bo, NULL);
                dma_resv_unlock(bo->base.resv);
        }
        spin_unlock(&ttm_bo_glob.lru_lock);
        if (ticket)
                ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
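
The v5.9 flow looks similar from a driver's point of view, with the ww_acquire_ctx ticket threaded through. Again a minimal sketch under stated assumptions: my_submit() and my_validate() are hypothetical, and the fence would come from the driver's scheduler or equivalent.

/*
 * Hedged usage sketch against the v5.9 API above. my_submit() and
 * my_validate() are hypothetical driver code.
 */
#include <linux/slab.h>
#include <drm/ttm/ttm_execbuf_util.h>

static int my_validate(struct list_head *list); /* hypothetical */

static int my_submit(struct ttm_buffer_object **bos, unsigned int n,
                     struct dma_fence *fence)
{
        struct ttm_validate_buffer *bufs;
        struct ww_acquire_ctx ticket;
        struct list_head list, dups;
        unsigned int i;
        int ret;

        bufs = kcalloc(n, sizeof(*bufs), GFP_KERNEL);
        if (!bufs)
                return -ENOMEM;

        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&dups);
        for (i = 0; i < n; ++i) {
                bufs[i].bo = bos[i];
                bufs[i].num_shared = 1; /* reserve one shared fence slot */
                list_add_tail(&bufs[i].head, &list);
        }

        /* Interruptible reservation; duplicate BOs are parked on &dups. */
        ret = ttm_eu_reserve_buffers(&ticket, &list, true, &dups);
        if (ret)
                goto out; /* the ticket is already finalized on error */

        ret = my_validate(&list);
        if (ret) {
                /* Unlock everything and release the ticket. */
                ttm_eu_backoff_reservation(&ticket, &list);
                goto out;
        }

        /* Publish the fence on each BO and drop the reservations. */
        ttm_eu_fence_buffer_objects(&ticket, &list, fence);
out:
        kfree(bufs);
        return ret;
}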