/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo.h>

/* Unlock, in reverse order, every buffer on @list reserved before @entry. */
static void ttm_eu_backoff_reservation_reverse(struct list_head *list,
					       struct ttm_validate_buffer *entry)
{
	list_for_each_entry_continue_reverse(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_unlock(bo->base.resv);
	}
}

void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
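
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * error path that undoes a successful reservation without publishing a
 * fence.  my_build_cs() is a hypothetical per-driver step.
 *
 *	struct ww_acquire_ctx ticket;
 *	LIST_HEAD(validate_list);
 *	int ret;
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &validate_list, true, NULL);
 *	if (ret)
 *		return ret;
 *
 *	ret = my_build_cs(&validate_list);
 *	if (ret) {
 *		ttm_eu_backoff_reservation(&ticket, &validate_list);
 *		return ret;
 *	}
 */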

/*
 * Reserve buffers for validation.
 *
 * If a buffer in the list is marked for CPU access, we back off and
 * wait for that buffer to become free for GPU access.
 *
 * If a buffer is reserved for another validation, the validator with
 * the highest validation sequence backs off and waits for that buffer
 * to become unreserved. This prevents deadlocks when validating multiple
 * buffers in different orders.
 */

int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
			   struct list_head *list, bool intr,
			   struct list_head *dups)
{
	struct ttm_validate_buffer *entry;
	int ret;

	if (list_empty(list))
		return 0;

	if (ticket)
		ww_acquire_init(ticket, &reservation_ww_class);

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;
		unsigned int num_fences;

		ret = ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
		if (ret == -EALREADY && dups) {
			struct ttm_validate_buffer *safe = entry;
			entry = list_prev_entry(entry, head);
			list_del(&safe->head);
			list_add(&safe->head, dups);
			continue;
		}

		num_fences = max(entry->num_shared, 1u);
		if (!ret) {
			ret = dma_resv_reserve_fences(bo->base.resv,
						      num_fences);
			if (!ret)
				continue;
		}

		/* uh oh, we lost out, drop every reservation and try
		 * to only reserve this buffer, then start over if
		 * this succeeds.
		 */
		ttm_eu_backoff_reservation_reverse(list, entry);

		if (ret == -EDEADLK)
			ret = ttm_bo_reserve_slowpath(bo, intr, ticket);

		if (!ret)
			ret = dma_resv_reserve_fences(bo->base.resv,
						      num_fences);

		if (unlikely(ret != 0)) {
			if (ticket) {
				ww_acquire_done(ticket);
				ww_acquire_fini(ticket);
			}
			return ret;
		}

		/* move this item to the front of the list,
		 * forces correct iteration of the loop without keeping track
		 */
		list_del(&entry->head);
		list_add(&entry->head, list);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
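
/*
 * Example (illustrative sketch, not part of the original file): building a
 * validation list and reserving it.  A non-NULL @dups list soaks up buffers
 * that appear more than once in the same submission, which would otherwise
 * fail with -EALREADY.  my_bo and my_bo2 are hypothetical buffer objects;
 * num_shared selects a read (shared) fence slot when non-zero and a write
 * (exclusive) one when zero.
 *
 *	struct ttm_validate_buffer val[2];
 *	struct ww_acquire_ctx ticket;
 *	LIST_HEAD(validate_list);
 *	LIST_HEAD(duplicates);
 *	int ret;
 *
 *	val[0].bo = my_bo;
 *	val[0].num_shared = 1;
 *	list_add(&val[0].head, &validate_list);
 *
 *	val[1].bo = my_bo2;
 *	val[1].num_shared = 0;
 *	list_add(&val[1].head, &validate_list);
 *
 *	ret = ttm_eu_reserve_buffers(&ticket, &validate_list, true,
 *				     &duplicates);
 *	if (ret)
 *		return ret;
 */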

void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
				 struct list_head *list,
				 struct dma_fence *fence)
{
	struct ttm_validate_buffer *entry;

	if (list_empty(list))
		return;

	list_for_each_entry(entry, list, head) {
		struct ttm_buffer_object *bo = entry->bo;

		dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?
				   DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
		ttm_bo_move_to_lru_tail_unlocked(bo);
		dma_resv_unlock(bo->base.resv);
	}

	if (ticket)
		ww_acquire_fini(ticket);
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
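
/*
 * Example (illustrative sketch, not part of the original file): the happy
 * path of a submission.  One fence covering the whole command stream is
 * published to every reserved buffer, after which all locks are dropped.
 * my_validate_and_submit() is a hypothetical per-driver step returning the
 * submission fence; dma_resv_add_fence() takes its own fence reference, so
 * the caller may drop its reference afterwards.
 *
 *	struct dma_fence *fence;
 *
 *	fence = my_validate_and_submit(&validate_list);
 *	if (IS_ERR(fence)) {
 *		ttm_eu_backoff_reservation(&ticket, &validate_list);
 *		return PTR_ERR(fence);
 *	}
 *
 *	ttm_eu_fence_buffer_objects(&ticket, &validate_list, fence);
 *	dma_fence_put(fence);
 */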