/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

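/* Unlock, in reverse list order, every buffer reserved before @entry. */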
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}

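/* Take every buffer on @list off the LRU lists; caller holds the LRU lock. */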
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_del_from_lru(bo);
	}
}

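/*
 * Undo a successful reservation of @list: put the buffers back on the LRU,
 * unlock their reservation objects and release the acquire ticket, if any.
 */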
void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->bdev->glob;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (list_empty(&bo->lru))
			ttm_bo_add_to_lru(bo);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&glob->lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups, bool del_lru)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->bdev->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			dma_resv_unlock(bo->base.resv);

			ret = -EBUSY;

		} else if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;

			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->num_shared)
				continue;

			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);
			if (!ret)
				continue;
		}

		/* Uh oh, we lost out. Drop every reservation, try to
		 * reserve only this buffer, then start over if that
		 * succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK) {
			if (intr) {
				ret = dma_resv_lock_slow_interruptible(bo->base.resv,
								       ticket);
			} else {
				dma_resv_lock_slow(bo->base.resv, ticket);
				ret = 0;
			}
		}

		if (!ret && entry->num_shared)
			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* Move this item to the front of the list so the loop
		 * restarts at the right place without extra bookkeeping.
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	if (del_lru) {
		spin_lock(&glob->lru_lock);
		ttm_eu_del_from_lru_locked(list);
		spin_unlock(&glob->lru_lock);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
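
/*
 * A minimal usage sketch (not part of this file): a driver's execbuf path
 * typically reserves the whole list, validates, then fences and unlocks on
 * success or backs off on failure. my_driver_validate() and the variable
 * names are hypothetical placeholders, not real TTM interfaces.
 *
 *	struct ww_acquire_ctx ticket;
 *	LIST_HEAD(dups);
 *	int ret;
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &val_list, true, &dups, true);
 *	if (ret)
 *		return ret;		// e.g. -ERESTARTSYS or -EBUSY
 *
 *	ret = my_driver_validate(&val_list);	// hypothetical per-BO step
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&ticket, &val_list);
 *		return ret;
 *	}
 *
 *	// publishes @fence on every reservation object and unlocks
 *	ttm_eu_fence_buffer_objects(&ticket, &val_list, fence);
 */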
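/*
 * Attach @fence to every buffer on @list (shared or exclusive slot,
 * depending on entry->num_shared), bump the buffers on the LRU and
 * unlock their reservation objects.
 */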
void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	glob = bo->bdev->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		if (entry->num_shared)
			dma_resv_add_shared_fence(bo->base.resv, fence);
		else
			dma_resv_add_excl_fence(bo->base.resv, fence);
		if (list_empty(&bo->lru))
			ttm_bo_add_to_lru(bo);
		else
			ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);