/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

/*
 * Undo a partly built-up reservation: put any buffers that were taken
 * off the LRU lists back on them and release their reservations.
 * Called with the global lru_lock held.
 */
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		entry->reserved = false;
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
	}
}

/*
 * Remove every reserved buffer from its LRU list, recording the
 * reference count returned by each removal so it can be dropped later,
 * after the lru_lock has been released.
 */
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

/*
 * Drop the buffer object references accumulated by LRU removal.
 * Called after the lru_lock has been dropped.
 */
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

/*
 * Temporarily drop the lru_lock and wait for the contended buffer to
 * become unreserved. On failure, back off the reservations made so far.
 */
static int ttm_eu_wait_unreserved_locked(struct list_head *list,
					 struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ret = ttm_bo_wait_unreserved(bo, true);
	spin_lock(&glob->lru_lock);
	if (unlikely(ret != 0))
		ttm_eu_backoff_reservation_locked(list);
	return ret;
}

void ttm_eu_backoff_reservation(struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;
	uint32_t val_seq;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

retry:
	spin_lock(&glob->lru_lock);
	val_seq = entry->bo->bdev->val_seq++;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

retry_this_bo:
		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
			ret = ttm_eu_wait_unreserved_locked(list, bo);
			if (unlikely(ret != 0)) {
				spin_unlock(&glob->lru_lock);
				ttm_eu_list_ref_sub(list);
				return ret;
			}
			goto retry_this_bo;
		case -EAGAIN:
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_wait_unreserved(bo, true);
			if (unlikely(ret != 0))
				return ret;
			goto retry;
		default:
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			return ret;
		}

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ttm_bo_wait_cpu(bo, false);
			if (ret)
				return ret;
			goto retry;
		}
	}

	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&bdev->fence_lock);
	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		bo->sync_obj_arg = entry->new_sync_obj_arg;
		ttm_bo_unreserve_locked(bo);
		entry->reserved = false;
	}
	spin_unlock(&glob->lru_lock);
	spin_unlock(&bdev->fence_lock);

	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
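
/*
 * Illustrative sketch (not part of the original file): roughly how a
 * driver's command-submission path would drive the three exported
 * functions above. The validate-buffer list is assumed to have been
 * filled in by the driver beforehand, and my_driver_create_fence() is
 * a hypothetical driver-specific helper, shown only to make the
 * reserve -> validate -> fence flow concrete.
 */
#if 0
static int example_submit(struct list_head *val_list, void *sync_obj_arg)
{
	struct ttm_validate_buffer *entry;
	void *sync_obj;
	int ret;

	/* Reserve every buffer on the list, backing off on contention. */
	ret = ttm_eu_reserve_buffers(val_list);
	if (unlikely(ret != 0))
		return ret;

	/* Driver-specific placement validation of each reserved buffer
	 * (e.g. ttm_bo_validate()) would go here. */
	list_for_each_entry(entry, val_list, head)
		entry->new_sync_obj_arg = sync_obj_arg;

	sync_obj = my_driver_create_fence();	/* hypothetical helper */
	if (unlikely(sync_obj == NULL)) {
		ttm_eu_backoff_reservation(val_list);
		return -ENOMEM;
	}

	/* Attach the fence to all buffers and unreserve them. */
	ttm_eu_fence_buffer_objects(val_list, sync_obj);
	return 0;
}
#endif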
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

/*
 * Undo a partly built-up reservation: put any buffers that were taken
 * off the LRU lists back on them and release their reservations.
 * Called with the global lru_lock held.
 */
static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		entry->reserved = false;
		if (entry->removed) {
			ttm_bo_add_to_lru(bo);
			entry->removed = false;
		}
		__ttm_bo_unreserve(bo);
	}
}

/*
 * Remove every reserved buffer from its LRU list, recording the
 * reference count returned by each removal so it can be dropped later,
 * after the lru_lock has been released.
 */
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		if (!entry->reserved)
			continue;

		if (!entry->removed) {
			entry->put_count = ttm_bo_del_from_lru(bo);
			entry->removed = true;
		}
	}
}

/*
 * Drop the buffer object references accumulated by LRU removal.
 * Called after the lru_lock has been dropped.
 */
static void ttm_eu_list_ref_sub(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->put_count) {
			ttm_bo_list_ref_sub(bo, entry->put_count, true);
			entry->put_count = 0;
		}
	}
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	if (ticket)
		ww_acquire_fini(ticket);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, ww_mutex deadlock
 * avoidance fails that reservation with -EDEADLK; the losing validator
 * then drops all of its reservations, waits for the contended buffer
 * to become unreserved on the slow path, and restarts the whole
 * sequence. This prevents deadlocks when validating multiple buffers
 * in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	list_for_each_entry(entry, list, head) {
		entry->reserved = false;
		entry->put_count = 0;
		entry->removed = false;
	}

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);
retry:
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		/* already slowpath reserved? */
		if (entry->reserved)
			continue;

		ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
				       ticket);

		if (ret == -EDEADLK) {
			/* uh oh, we lost out, drop every reservation and try
			 * to only reserve this buffer, then start over if
			 * this succeeds.
			 */
			BUG_ON(ticket == NULL);
			spin_lock(&glob->lru_lock);
			ttm_eu_backoff_reservation_locked(list);
			spin_unlock(&glob->lru_lock);
			ttm_eu_list_ref_sub(list);
			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
							       ticket);
			if (unlikely(ret != 0)) {
				if (ret == -EINTR)
					ret = -ERESTARTSYS;
				goto err_fini;
			}

			entry->reserved = true;
			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
				ret = -EBUSY;
				goto err;
			}
			goto retry;
		} else if (ret)
			goto err;

		entry->reserved = true;
		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			ret = -EBUSY;
			goto err;
		}
	}

	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
	return 0;

err:
	spin_lock(&glob->lru_lock);
	ttm_eu_backoff_reservation_locked(list);
	spin_unlock(&glob->lru_lock);
	ttm_eu_list_ref_sub(list);
err_fini:
	if (ticket) {
		ww_acquire_done(ticket);
		ww_acquire_fini(ticket);
	}
	return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);

void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list, void *sync_obj)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);
	spin_lock(&bdev->fence_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		entry->old_sync_obj = bo->sync_obj;
		bo->sync_obj = driver->sync_obj_ref(sync_obj);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
		entry->reserved = false;
	}
	spin_unlock(&bdev->fence_lock);
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);

	list_for_each_entry(entry, list, head) {
		if (entry->old_sync_obj)
			driver->sync_obj_unref(&entry->old_sync_obj);
	}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
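
/*
 * Illustrative sketch (not part of the original file): the same
 * submission flow against the ww_mutex-based interface above. The
 * acquire ticket tracks the reservations, so -EDEADLK back-off and the
 * slow-path retry happen inside ttm_eu_reserve_buffers() itself.
 * my_driver_create_fence() is again a hypothetical driver-specific
 * helper.
 */
#if 0
static int example_submit(struct list_head *val_list)
{
	struct ww_acquire_ctx ticket;
	void *sync_obj;
	int ret;

	/* Reserve all buffers on the list under one acquire ticket. */
	ret = ttm_eu_reserve_buffers(&ticket, val_list);
	if (unlikely(ret != 0))
		return ret;

	/* Driver-specific placement validation of each reserved buffer
	 * (e.g. ttm_bo_validate()) would go here. */

	sync_obj = my_driver_create_fence();	/* hypothetical helper */
	if (unlikely(sync_obj == NULL)) {
		ttm_eu_backoff_reservation(&ticket, val_list);
		return -ENOMEM;
	}

	/* Attach the fence, unreserve, and release the ticket. */
	ttm_eu_fence_buffer_objects(&ticket, val_list, sync_obj);
	return 0;
}
#endif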