/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole" and always try to allocate
 * after it. The principle is that in a linear GPU ring progression,
 * whatever comes after the last allocation is the oldest bo we
 * allocated and thus the first one that should no longer be in use by
 * the GPU.
 *
 * If that is not the case, we skip from the bo after last to the
 * closest already finished bo, if one exists. If none exists and we
 * are not asked to block, we report an allocation failure.
 *
 * If we are asked to block, we collect the oldest fence of every ring
 * and wait for any one of them to signal.
 */
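/*
 * Illustrative sketch (not part of the driver): with a 4 KiB manager and
 * three suballocations made in order A, B, C, the offset list and the
 * hole look roughly like this:
 *
 *   olist:  | A: 0..1K | B: 1K..2K | C: 2K..3K |  free: 3K..4K
 *   "hole" points at C, so the next allocation is tried in 3K..4K.
 *
 * When that gap is too small, the hole wraps back to the head of the
 * list and the allocator waits for A's fence (the oldest work) to
 * signal so that A's range can be reused.
 */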

#include "amdgpu.h"

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
                              struct amdgpu_sa_manager *sa_manager,
                              unsigned size, u32 align, u32 domain)
{
        int i, r;

        init_waitqueue_head(&sa_manager->wq);
        sa_manager->bo = NULL;
        sa_manager->size = size;
        sa_manager->domain = domain;
        sa_manager->align = align;
        sa_manager->hole = &sa_manager->olist;
        INIT_LIST_HEAD(&sa_manager->olist);
        for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
                INIT_LIST_HEAD(&sa_manager->flist[i]);

        r = amdgpu_bo_create_kernel(adev, size, align, domain, &sa_manager->bo,
                                    &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
        if (r) {
                dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
                return r;
        }

        memset(sa_manager->cpu_ptr, 0, sa_manager->size);
        return r;
}
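/*
 * Usage note (illustrative, not taken from this file): the driver creates
 * one such manager per suballocation pool. The size below is a made-up
 * placeholder, not a value from the driver:
 *
 *   struct amdgpu_sa_manager pool;
 *   int r = amdgpu_sa_bo_manager_init(adev, &pool,
 *                                     1024 * 1024,
 *                                     AMDGPU_GPU_PAGE_SIZE,
 *                                     AMDGPU_GEM_DOMAIN_GTT);
 *   if (r)
 *           return r;
 */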

void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
                               struct amdgpu_sa_manager *sa_manager)
{
        struct amdgpu_sa_bo *sa_bo, *tmp;

        if (sa_manager->bo == NULL) {
                dev_err(adev->dev, "no bo for sa manager\n");
                return;
        }

        if (!list_empty(&sa_manager->olist)) {
                sa_manager->hole = &sa_manager->olist;
                amdgpu_sa_bo_try_free(sa_manager);
                if (!list_empty(&sa_manager->olist)) {
                        dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
                }
        }
        list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
                amdgpu_sa_bo_remove_locked(sa_bo);
        }

        amdgpu_bo_free_kernel(&sa_manager->bo, &sa_manager->gpu_addr,
                              &sa_manager->cpu_ptr);
        sa_manager->size = 0;
}

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
        struct amdgpu_sa_manager *sa_manager = sa_bo->manager;

        if (sa_manager->hole == &sa_bo->olist) {
                sa_manager->hole = sa_bo->olist.prev;
        }
        list_del_init(&sa_bo->olist);
        list_del_init(&sa_bo->flist);
        dma_fence_put(sa_bo->fence);
        kfree(sa_bo);
}

static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
        struct amdgpu_sa_bo *sa_bo, *tmp;

        if (sa_manager->hole->next == &sa_manager->olist)
                return;

        sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
        list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
                if (sa_bo->fence == NULL ||
                    !dma_fence_is_signaled(sa_bo->fence)) {
                        return;
                }
                amdgpu_sa_bo_remove_locked(sa_bo);
        }
}

static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
        struct list_head *hole = sa_manager->hole;

        if (hole != &sa_manager->olist) {
                return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
        }
        return 0;
}

static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
        struct list_head *hole = sa_manager->hole;

        if (hole->next != &sa_manager->olist) {
                return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
        }
        return sa_manager->size;
}

static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
                                   struct amdgpu_sa_bo *sa_bo,
                                   unsigned size, unsigned align)
{
        unsigned soffset, eoffset, wasted;

        soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
        eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
        wasted = (align - (soffset % align)) % align;

        if ((eoffset - soffset) >= (size + wasted)) {
                soffset += wasted;

                sa_bo->manager = sa_manager;
                sa_bo->soffset = soffset;
                sa_bo->eoffset = soffset + size;
                list_add(&sa_bo->olist, sa_manager->hole);
                INIT_LIST_HEAD(&sa_bo->flist);
                sa_manager->hole = &sa_bo->olist;
                return true;
        }
        return false;
}
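/*
 * Worked example (illustrative) for the padding computed above: with
 * soffset = 100 and align = 64,
 *
 *   wasted = (64 - (100 % 64)) % 64 = (64 - 36) % 64 = 28
 *
 * so the allocation starts at 128, the next 64-byte boundary. When
 * soffset is already aligned, the outer "% align" makes wasted 0
 * instead of a full align.
 */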

/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly.
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
                            unsigned size, unsigned align)
{
        unsigned soffset, eoffset, wasted;
        int i;

        for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
                if (!list_empty(&sa_manager->flist[i]))
                        return true;

        soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
        eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
        wasted = (align - (soffset % align)) % align;

        if ((eoffset - soffset) >= (size + wasted)) {
                return true;
        }

        return false;
}

static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
                                   struct dma_fence **fences,
                                   unsigned *tries)
{
        struct amdgpu_sa_bo *best_bo = NULL;
        unsigned i, soffset, best, tmp;

        /* if hole points to the end of the buffer */
        if (sa_manager->hole->next == &sa_manager->olist) {
                /* try again with its beginning */
                sa_manager->hole = &sa_manager->olist;
                return true;
        }

        soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
        /* to handle wrap around we add sa_manager->size */
        best = sa_manager->size * 2;
        /* go over all fence lists and try to find the closest sa_bo
         * after the current last
         */
        for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
                struct amdgpu_sa_bo *sa_bo;

                fences[i] = NULL;

                if (list_empty(&sa_manager->flist[i]))
                        continue;

                sa_bo = list_first_entry(&sa_manager->flist[i],
                                         struct amdgpu_sa_bo, flist);

                if (!dma_fence_is_signaled(sa_bo->fence)) {
                        fences[i] = sa_bo->fence;
                        continue;
                }

                /* limit the number of tries each ring gets */
                if (tries[i] > 2) {
                        continue;
                }

                tmp = sa_bo->soffset;
                if (tmp < soffset) {
                        /* wrap around, pretend it's after */
                        tmp += sa_manager->size;
                }
                tmp -= soffset;
                if (tmp < best) {
                        /* this sa bo is the closest one */
                        best = tmp;
                        best_bo = sa_bo;
                }
        }

        if (best_bo) {
                uint32_t idx = best_bo->fence->context;

                idx %= AMDGPU_SA_NUM_FENCE_LISTS;
                ++tries[idx];
                sa_manager->hole = best_bo->olist.prev;

                /* we know that this one is signaled,
                 * so it's safe to remove it
                 */
                amdgpu_sa_bo_remove_locked(best_bo);
                return true;
        }
        return false;
}
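/*
 * Worked example (illustrative) of the wrap-around distance above: with a
 * 512 KiB manager, soffset = 400 KiB and a signaled candidate at 100 KiB,
 * the candidate lies "behind" the hole, so its distance becomes
 * 100 KiB + 512 KiB - 400 KiB = 212 KiB. A signaled candidate at 450 KiB
 * would win with a distance of only 50 KiB, which keeps the hole moving
 * forward through the ring.
 */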

int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
                     struct amdgpu_sa_bo **sa_bo,
                     unsigned size, unsigned align)
{
        struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
        unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
        unsigned count;
        int i, r;
        signed long t;

        if (WARN_ON_ONCE(align > sa_manager->align))
                return -EINVAL;

        if (WARN_ON_ONCE(size > sa_manager->size))
                return -EINVAL;

        *sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
        if (!(*sa_bo))
                return -ENOMEM;
        (*sa_bo)->manager = sa_manager;
        (*sa_bo)->fence = NULL;
        INIT_LIST_HEAD(&(*sa_bo)->olist);
        INIT_LIST_HEAD(&(*sa_bo)->flist);

        spin_lock(&sa_manager->wq.lock);
        do {
                for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
                        tries[i] = 0;

                do {
                        amdgpu_sa_bo_try_free(sa_manager);

                        if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
                                                   size, align)) {
                                spin_unlock(&sa_manager->wq.lock);
                                return 0;
                        }

                        /* see if we can skip over some allocations */
                } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

                for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
                        if (fences[i])
                                fences[count++] = dma_fence_get(fences[i]);

                if (count) {
                        spin_unlock(&sa_manager->wq.lock);
                        t = dma_fence_wait_any_timeout(fences, count, false,
                                                       MAX_SCHEDULE_TIMEOUT,
                                                       NULL);
                        for (i = 0; i < count; ++i)
                                dma_fence_put(fences[i]);

                        r = (t > 0) ? 0 : t;
                        spin_lock(&sa_manager->wq.lock);
                } else {
                        /* if we have nothing to wait for, block */
                        r = wait_event_interruptible_locked(
                                sa_manager->wq,
                                amdgpu_sa_event(sa_manager, size, align));
                }

        } while (!r);

        spin_unlock(&sa_manager->wq.lock);
        kfree(*sa_bo);
        *sa_bo = NULL;
        return r;
}
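/*
 * Caller-side sketch (illustrative; emit_work_and_get_fence() is a made-up
 * placeholder for however the caller produces the dma_fence protecting the
 * GPU work that uses this range):
 *
 *   struct amdgpu_sa_bo *sa_bo;
 *   struct dma_fence *f;
 *   int r;
 *
 *   r = amdgpu_sa_bo_new(&sa_manager, &sa_bo, 256, 16);
 *   if (r)
 *           return r;
 *
 *   // the range is sa_bo->soffset..sa_bo->eoffset inside sa_manager->bo
 *   f = emit_work_and_get_fence();
 *
 *   // hand the range back; it is reused once f signals
 *   amdgpu_sa_bo_free(adev, &sa_bo, f);
 */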

void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
                       struct dma_fence *fence)
{
        struct amdgpu_sa_manager *sa_manager;

        if (sa_bo == NULL || *sa_bo == NULL) {
                return;
        }

        sa_manager = (*sa_bo)->manager;
        spin_lock(&sa_manager->wq.lock);
        if (fence && !dma_fence_is_signaled(fence)) {
                uint32_t idx;

                (*sa_bo)->fence = dma_fence_get(fence);
                idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
                list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
        } else {
                amdgpu_sa_bo_remove_locked(*sa_bo);
        }
        wake_up_all_locked(&sa_manager->wq);
        spin_unlock(&sa_manager->wq.lock);
        *sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)

void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
                                  struct seq_file *m)
{
        struct amdgpu_sa_bo *i;

        spin_lock(&sa_manager->wq.lock);
        list_for_each_entry(i, &sa_manager->olist, olist) {
                uint64_t soffset = i->soffset + sa_manager->gpu_addr;
                uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;

                if (&i->olist == sa_manager->hole) {
                        seq_printf(m, ">");
                } else {
                        seq_printf(m, " ");
                }
                seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
                           soffset, eoffset, eoffset - soffset);

                if (i->fence)
                        seq_printf(m, " protected by 0x%016llx on context %llu",
                                   i->fence->seqno, i->fence->context);

                seq_printf(m, "\n");
        }
        spin_unlock(&sa_manager->wq.lock);
}
#endif