/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		__ttm_bo_unreserve(bo);
	}
}

static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		unsigned put_count = ttm_bo_del_from_lru(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);
	}
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), true,
				       ticket);
		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			__ttm_bo_unreserve(bo);

			ret = -EBUSY;

		} else if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->shared)
				continue;

			ret = reservation_object_reserve_shared(bo->resv);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK && intr) {
			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
							       ticket);
		} else if (ret == -EDEADLK) {
			ww_mutex_lock_slow(&bo->resv->lock, ticket);
			ret = 0;
		}

		if (!ret && entry->shared)
			ret = reservation_object_reserve_shared(bo->resv);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
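
/*
 * A minimal caller-side sketch (not part of this file) of how the
 * helpers above fit together: reserve the whole list, validate, then
 * either fence or back off.  my_driver_submit() and my_driver_validate()
 * are hypothetical driver functions; only the ttm_eu_* calls and the
 * ww_acquire_ctx usage are taken from this file.
 */
static int my_driver_submit(struct list_head *val_list, struct fence *fence)
{
	struct ww_acquire_ctx ticket;
	int ret;

	/* Reserve every buffer on the list, deadlock-free. */
	ret = ttm_eu_reserve_buffers(&ticket, val_list, true, NULL);
	if (ret)
		return ret;

	ret = my_driver_validate(val_list);	/* hypothetical */
	if (ret) {
		/* Validation failed: drop every reservation again. */
		ttm_eu_backoff_reservation(&ticket, val_list);
		return ret;
	}

	/* Publish the fence on each buffer and unreserve. */
	ttm_eu_fence_buffer_objects(&ticket, val_list, fence);
	return 0;
}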

void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list, struct fence *fence)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		if (entry->shared)
			reservation_object_add_shared_fence(bo->resv, fence);
		else
			reservation_object_add_excl_fence(bo->resv, fence);
		ttm_bo_add_to_lru(bo);
		__ttm_bo_unreserve(bo);
	}
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
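
/*
 * Everything below is a later revision of the same file, after TTM's
 * switch from reservation_object/struct fence to the dma_resv/dma_fence
 * API: the bo->glob lookup is replaced by the single ttm_bo_glob
 * instance, entry->shared becomes an entry->num_shared count, and the
 * CPU-writer check is gone.
 */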
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	spin_lock(&ttm_bo_glob.lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&ttm_bo_glob.lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->num_shared)
				continue;

			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK) {
			if (intr) {
				ret = dma_resv_lock_slow_interruptible(bo->base.resv,
								       ticket);
			} else {
				dma_resv_lock_slow(bo->base.resv, ticket);
				ret = 0;
			}
		}

		if (!ret && entry->num_shared)
			ret = dma_resv_reserve_shared(bo->base.resv,
						      entry->num_shared);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
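
/*
 * A caller-side sketch (hypothetical my_add_to_val_list()/my_bo/my_list,
 * not from this file) of how a validation list entry is prepared for the
 * function above: num_shared is the number of shared-fence slots that
 * ttm_eu_reserve_buffers() preallocates via dma_resv_reserve_shared();
 * a count of zero means the buffer will later receive an exclusive fence
 * in ttm_eu_fence_buffer_objects().
 */
static void my_add_to_val_list(struct ttm_validate_buffer *entry,
			       struct ttm_buffer_object *my_bo,
			       struct list_head *my_list, bool write)
{
	entry->bo = my_bo;
	/* One shared slot for readers, exclusive access for writers. */
	entry->num_shared = write ? 0 : 1;
	list_add_tail(&entry->head, my_list);
}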

void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	spin_lock(&ttm_bo_glob.lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		if (entry->num_shared)
			dma_resv_add_shared_fence(bo->base.resv, fence);
		else
			dma_resv_add_excl_fence(bo->base.resv, fence);
		ttm_bo_move_to_lru_tail(bo, NULL);
		dma_resv_unlock(bo->base.resv);
	}
	spin_unlock(&ttm_bo_glob.lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
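
/*
 * The same caller-side flow as sketched after the older revision, now
 * against the dma_resv-era API.  For callers, only the fence type changes
 * (struct dma_fence instead of struct fence); my_driver_submit() and
 * my_driver_validate() remain hypothetical driver functions.
 */
static int my_driver_submit(struct list_head *val_list,
			    struct dma_fence *fence)
{
	struct ww_acquire_ctx ticket;
	int ret;

	ret = ttm_eu_reserve_buffers(&ticket, val_list, true, NULL);
	if (ret)
		return ret;

	ret = my_driver_validate(val_list);	/* hypothetical */
	if (ret) {
		ttm_eu_backoff_reservation(&ticket, val_list);
		return ret;
	}

	ttm_eu_fence_buffer_objects(&ticket, val_list, fence);
	return 0;
}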