/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

/* Undo, in reverse list order, every reservation taken before @entry. */
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					      struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		reservation_object_unlock(bo->resv);
	}
}

/* Take all buffers on @list off their LRU lists; caller holds lru_lock. */
static void ttm_eu_del_from_lru_locked(struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		ttm_bo_del_from_lru(bo);
	}
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;
	struct ttm_bo_global *glob;

	if (list_empty(list))
		return;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->bdev->glob;

	spin_lock(&glob->lru_lock);
	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_add_to_lru(bo);
		reservation_object_unlock(bo->resv);
	}
	spin_unlock(&glob->lru_lock);

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
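
/*
 * Usage sketch (hypothetical driver code, not part of this file): the
 * typical error-unwind after a successful ttm_eu_reserve_buffers() call.
 * "val_list" and "my_validate_all" are made-up names.
 *
 *	ret = my_validate_all(&val_list);
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&ticket, &val_list);
 *		return ret;
 *	}
 */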

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_bo_global *glob;
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	entry = list_first_entry(list, struct ttm_validate_buffer, head);
	glob = entry->bo->bdev->glob;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
			reservation_object_unlock(bo->resv);

			ret = -EBUSY;

		} else if (ret == -EALREADY && dups) {
			/* The same BO appears twice through this ticket:
			 * park the duplicate entry on @dups and carry on.
			 */
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		if (!ret) {
			if (!entry->shared)
				continue;

			ret = reservation_object_reserve_shared(bo->resv);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK) {
			if (intr) {
				ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
								       ticket);
			} else {
				ww_mutex_lock_slow(&bo->resv->lock, ticket);
				ret = 0;
			}
		}

		if (!ret && entry->shared)
			ret = reservation_object_reserve_shared(bo->resv);

		if (unlikely(ret != 0)) {
			if (ret == -EINTR)
				ret = -ERESTARTSYS;
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	if (ticket)
		ww_acquire_done(ticket);
	spin_lock(&glob->lru_lock);
	ttm_eu_del_from_lru_locked(list);
	spin_unlock(&glob->lru_lock);
	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
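
/*
 * Usage sketch (hypothetical, not from this file): reserving a validation
 * list under a ticket, letting duplicate list entries be parked on "dups".
 * "val_list" is a made-up name; on failure nothing is left reserved, so
 * the caller may simply return the error.
 *
 *	struct ww_acquire_ctx ticket;
 *	LIST_HEAD(dups);
 *	int ret;
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &val_list, true, &dups);
 *	if (ret)
 *		return ret;
 */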

void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;
	struct ttm_buffer_object *bo;
	struct ttm_bo_global *glob;
	struct ttm_bo_device *bdev;
	struct ttm_bo_driver *driver;

	if (list_empty(list))
		return;

	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
	bdev = bo->bdev;
	driver = bdev->driver;
	glob = bo->bdev->glob;

	spin_lock(&glob->lru_lock);

	list_for_each_entry(entry, list, head) {
		bo = entry->bo;
		if (entry->shared)
			reservation_object_add_shared_fence(bo->resv, fence);
		else
			reservation_object_add_excl_fence(bo->resv, fence);
		ttm_bo_add_to_lru(bo);
		reservation_object_unlock(bo->resv);
	}
	spin_unlock(&glob->lru_lock);
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
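
/*
 * Usage sketch (hypothetical, not from this file): the full execbuf-style
 * flow these three helpers are built for. "val_list" and "my_submit" are
 * made-up driver details.
 *
 *	struct ww_acquire_ctx ticket;
 *	LIST_HEAD(dups);
 *	struct dma_fence *fence;
 *	int ret;
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &val_list, true, &dups);
 *	if (ret)
 *		return ret;
 *
 *	fence = my_submit(&val_list);
 *	if (IS_ERR(fence)) {
 *		ttm_eu_backoff_reservation(&ticket, &val_list);
 *		return PTR_ERR(fence);
 *	}
 *
 *	ttm_eu_fence_buffer_objects(&ticket, &val_list, fence);
 *	dma_fence_put(fence);
 */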
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

/* Undo, in reverse list order, every reservation taken before @entry. */
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					      struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		unsigned int num_fences;

		ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (ret == -EALREADY && dups) {
			/* The same BO appears twice through this ticket:
			 * park the duplicate entry on @dups and carry on.
			 */
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		num_fences = max(entry->num_shared, 1u);
		if (!ret) {
			ret = dma_resv_reserve_fences(bo->base.resv,
						      num_fences);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK)
			ret = ttm_bo_reserve_slowpath(bo, intr, ticket);

		if (!ret)
			ret = dma_resv_reserve_fences(bo->base.resv,
						      num_fences);

		if (unlikely(ret != 0)) {
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
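
/*
 * Usage sketch (hypothetical, not from this file): each validation list
 * entry pre-declares how many shared fence slots it needs via num_shared
 * before the list is reserved. "val_list" and "vbuf" are made-up names.
 *
 *	struct ttm_validate_buffer vbuf = { .bo = bo, .num_shared = 1 };
 *	struct ww_acquire_ctx ticket;
 *	LIST_HEAD(val_list);
 *	LIST_HEAD(dups);
 *	int ret;
 *
 *	list_add(&vbuf.head, &val_list);
 *	ret = ttm_eu_reserve_buffers(&ticket, &val_list, true, &dups);
 */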

void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?
				   DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}
	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
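
/*
 * Usage sketch (hypothetical, not from this file): after submission, one
 * call publishes the job's fence to every reserved buffer; entries with
 * num_shared > 0 get it as a read fence, the rest as a write fence.
 * "val_list" and "job->fence" are made-up names.
 *
 *	ttm_eu_fence_buffer_objects(&ticket, &val_list, job->fence);
 *	dma_fence_put(job->fence);
 */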