v3.1
 
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

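/*
 * Undo the reservations taken so far: put any buffers we took off the
 * LRU back on it, clear the reserved flag and wake up anyone waiting
 * on the buffer. Called with the global LRU lock held.
 */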
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		entry->reserved = false;
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
	}
}

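/*
 * Take all reserved buffers on the list off the LRU, recording in each
 * entry's put_count how many LRU list references the removal dropped so
 * that ttm_eu_list_ref_sub() can release them later. Called with the
 * global LRU lock held.
 */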
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

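/*
 * Drop the LRU list references accumulated in each entry's put_count
 * by ttm_eu_del_from_lru_locked(). Called after the LRU lock has been
 * dropped.
 */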
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

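/*
 * Temporarily drop the LRU lock to wait for @bo to become unreserved.
 * The buffers already reserved are taken off the LRU first; on error
 * the reservations taken so far are backed off again. Returns with the
 * LRU lock re-held.
 */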
static int ttm_eu_wait_unreserved_locked(struct list_head *list,
					 struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ret = ttm_bo_wait_unreserved(bo, true);
	spin_lock(&glob->lru_lock);
	if (unlikely(ret != 0))
		ttm_eu_backoff_reservation_locked(list);
	return ret;
}

void ttm_eu_backoff_reservation(struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;
	uint32_t val_seq;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

retry:
	spin_lock(&glob->lru_lock);
	val_seq = entry->bo->bdev->val_seq++;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

retry_this_bo:
		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			ret = ttm_eu_wait_unreserved_locked(list, bo);
			if (unlikely(ret != 0)) {
				spin_unlock(&glob->lru_lock);
				ttm_eu_list_ref_sub(list);
				return ret;
			}
			goto retry_this_bo;
		case -EAGAIN:
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_wait_unreserved(bo, true);
			if (unlikely(ret != 0))
				return ret;
			goto retry;
		default:
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			return ret;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_wait_cpu(bo, false);
			if (ret)
				return ret;
			goto retry;
		}
	}

	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&bdev->fence_lock);
	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		bo->sync_obj_arg = entry->new_sync_obj_arg;
		ttm_bo_unreserve_locked(bo);
		entry->reserved = false;
	}
	spin_unlock(&glob->lru_lock);
	spin_unlock(&bdev->fence_lock);

	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
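
The typical execbuf flow around these three entry points is: collect the buffers a command submission touches into a list of struct ttm_validate_buffer, reserve them all, validate placements, then attach the fence to every buffer in one go (or back off on failure). A minimal sketch of that flow, assuming a hypothetical driver with its own my_validate() placement step and an already-created driver fence object:

	/* Hypothetical v3.1-era usage sketch, not from the kernel tree. */
	static int my_driver_submit(struct list_head *val_list, void *fence_obj)
	{
		struct ttm_validate_buffer *entry;
		int ret;

		/* Atomically reserve every buffer on the list (deadlock-safe). */
		ret = ttm_eu_reserve_buffers(val_list);
		if (ret)
			return ret;

		list_for_each_entry(entry, val_list, head) {
			ret = my_validate(entry->bo);	/* driver-specific placement */
			if (ret) {
				/* Drop all reservations taken above. */
				ttm_eu_backoff_reservation(val_list);
				return ret;
			}
		}

		/* Attach the sync object to every buffer and unreserve them. */
		ttm_eu_fence_buffer_objects(val_list, fence_obj);
		return 0;
	}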
v5.4
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

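/*
 * Walk the list backwards starting from @entry (exclusive) and unlock
 * the reservation of every buffer reserved before it. Used to unwind a
 * partially completed reservation pass.
 */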
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					      struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}

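/*
 * Take every buffer on the list off the LRU. Called with the global
 * LRU lock held.
 */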
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		ttm_bo_del_from_lru(bo);
	}
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->bdev->glob;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (list_empty(&bo->lru))
			ttm_bo_add_to_lru(bo);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&glob->lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups, bool del_lru)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->bdev->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			dma_resv_unlock(bo->base.resv);

			ret = -EBUSY;

		} else if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->num_shared)
				continue;

			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK) {
			if (intr) {
				ret = dma_resv_lock_slow_interruptible(bo->base.resv,
								       ticket);
			} else {
				dma_resv_lock_slow(bo->base.resv, ticket);
				ret = 0;
			}
		}

		if (!ret && entry->num_shared)
			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	if (del_lru) {
		spin_lock(&glob->lru_lock);
		ttm_eu_del_from_lru_locked(list);
		spin_unlock(&glob->lru_lock);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	glob = bo->bdev->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		if (entry->num_shared)
			dma_resv_add_shared_fence(bo->base.resv, fence);
		else
			dma_resv_add_excl_fence(bo->base.resv, fence);
		if (list_empty(&bo->lru))
			ttm_bo_add_to_lru(bo);
		else
			ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
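
By v5.4 the same flow is expressed with a ww_acquire_ctx ticket and a struct dma_fence instead of the driver-opaque sync object, and each ttm_validate_buffer carries num_shared to pre-allocate shared-fence slots. A minimal sketch under the same assumptions as above (my_validate() and the submit wrapper are hypothetical driver code, not from the kernel tree):

	/* Hypothetical v5.4-era usage sketch. */
	static int my_driver_submit(struct list_head *val_list,
				    struct dma_fence *fence)
	{
		struct ww_acquire_ctx ticket;
		struct ttm_validate_buffer *entry;
		int ret;

		/* Deadlock-safe reservation of the whole list: interruptible
		 * waits, no duplicates list, and take the buffers off the LRU. */
		ret = ttm_eu_reserve_buffers(&ticket, val_list, true, NULL, true);
		if (ret)
			return ret;

		list_for_each_entry(entry, val_list, head) {
			ret = my_validate(entry->bo);	/* driver-specific placement */
			if (ret) {
				/* Unlock all reservations and release the ticket. */
				ttm_eu_backoff_reservation(&ticket, val_list);
				return ret;
			}
		}

		/* Add the fence (shared or exclusive per num_shared), return
		 * the buffers to the LRU and release the ticket. */
		ttm_eu_fence_buffer_objects(&ticket, val_list, fence);
		return 0;
	}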