drivers/gpu/drm/radeon/radeon_sa.c (v6.13.7)
/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole"; we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU
 * ring progression, what comes after last is the oldest bo we allocated
 * and thus the first one that should no longer be in use by the GPU.
 *
 * If that's not the case, we skip over the bo after last to the closest
 * done bo, if one exists. If none exists and we are not asked to block,
 * we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring,
 * returning as soon as any of those fences completes.
 */

#include "radeon.h"

int radeon_sa_bo_manager_init(struct radeon_device *rdev,
			      struct radeon_sa_manager *sa_manager,
			      unsigned int size, u32 sa_align, u32 domain,
			      u32 flags)
{
	int r;

	r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
			     domain, flags, NULL, NULL, &sa_manager->bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	sa_manager->domain = domain;

	drm_suballoc_manager_init(&sa_manager->base, size, sa_align);

	return r;
}

void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	drm_suballoc_manager_fini(&sa_manager->base);
	radeon_bo_unref(&sa_manager->bo);
}

int radeon_sa_bo_manager_start(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = radeon_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		radeon_bo_unreserve(sa_manager->bo);
		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	radeon_bo_unreserve(sa_manager->bo);
	return r;
}

int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
				 struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = radeon_bo_reserve(sa_manager->bo, false);
	if (!r) {
		radeon_bo_kunmap(sa_manager->bo);
		radeon_bo_unpin(sa_manager->bo);
		radeon_bo_unreserve(sa_manager->bo);
	}
	return r;
}

int radeon_sa_bo_new(struct radeon_sa_manager *sa_manager,
		     struct drm_suballoc **sa_bo,
		     unsigned int size, unsigned int align)
{
	struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size,
						   GFP_KERNEL, false, align);

	if (IS_ERR(sa)) {
		*sa_bo = NULL;
		return PTR_ERR(sa);
	}

	*sa_bo = sa;
	return 0;
}

void radeon_sa_bo_free(struct drm_suballoc **sa_bo,
		       struct radeon_fence *fence)
{
	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	if (fence)
		drm_suballoc_free(*sa_bo, &fence->base);
	else
		drm_suballoc_free(*sa_bo, NULL);

	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);

	drm_suballoc_dump_debug_info(&sa_manager->base, &p, sa_manager->gpu_addr);
}
#endif
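
For orientation, here is a minimal usage sketch of the v6.13.7 API above, under stated assumptions: example_sa_usage() is a hypothetical helper, the 64 KiB pool, 4 KiB allocation, and 256-byte alignment are arbitrary example values, the manager would normally be embedded in struct radeon_device rather than stack-allocated, and the drm_suballoc_soffset() helper is assumed from <drm/drm_suballoc.h>, used the same way the converted radeon ring code uses it.

/* Hypothetical usage sketch, not driver code: illustrates the call order. */
static int example_sa_usage(struct radeon_device *rdev,
			    struct radeon_fence *fence)
{
	struct radeon_sa_manager mgr;	/* normally lives in struct radeon_device */
	struct drm_suballoc *sub;
	u64 gpu_addr;
	int r;

	/* create the backing bo and the suballocator over it */
	r = radeon_sa_bo_manager_init(rdev, &mgr, 64 * 1024,
				      RADEON_GPU_PAGE_SIZE,
				      RADEON_GEM_DOMAIN_GTT, 0);
	if (r)
		return r;

	/* pin the backing bo and kmap it for CPU access */
	r = radeon_sa_bo_manager_start(rdev, &mgr);
	if (r)
		goto out_fini;

	/* carve out 4 KiB aligned to 256 bytes; may sleep until space frees up */
	r = radeon_sa_bo_new(&mgr, &sub, 4096, 256);
	if (r)
		goto out_suspend;

	/* GPU address of the carved-out range */
	gpu_addr = mgr.gpu_addr + drm_suballoc_soffset(sub);
	(void)gpu_addr;	/* a real caller would emit commands using it */

	/* hand the range back; it is recycled once @fence signals (at once if NULL) */
	radeon_sa_bo_free(&sub, fence);

out_suspend:
	radeon_sa_bo_manager_suspend(rdev, &mgr);
out_fini:
	radeon_sa_bo_manager_fini(rdev, &mgr);
	return r;
}
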
drivers/gpu/drm/radeon/radeon_sa.c (v3.5.6)
/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole"; we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU
 * ring progression, what comes after last is the oldest bo we allocated
 * and thus the first one that should no longer be in use by the GPU.
 *
 * If that's not the case, we skip over the bo after last to the closest
 * done bo, if one exists. If none exists and we are not asked to block,
 * we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring,
 * returning as soon as any of those fences completes.
 */
#include "drmP.h"
#include "drm.h"
#include "radeon.h"

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);

int radeon_sa_bo_manager_init(struct radeon_device *rdev,
			      struct radeon_sa_manager *sa_manager,
			      unsigned size, u32 domain)
{
	int i, r;

	spin_lock_init(&sa_manager->lock);
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		radeon_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		radeon_sa_bo_remove_locked(sa_bo);
	}
	radeon_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
}

int radeon_sa_bo_manager_start(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = radeon_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		radeon_bo_unreserve(sa_manager->bo);
		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	radeon_bo_unreserve(sa_manager->bo);
	return r;
}

int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
				 struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = radeon_bo_reserve(sa_manager->bo, false);
	if (!r) {
		radeon_bo_kunmap(sa_manager->bo);
		radeon_bo_unpin(sa_manager->bo);
		radeon_bo_unreserve(sa_manager->bo);
	}
	return r;
}

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
{
	struct radeon_sa_manager *sa_manager = sa_bo->manager;

	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	radeon_fence_unref(&sa_bo->fence);
	kfree(sa_bo);
}

static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
			return;
		}
		radeon_sa_bo_remove_locked(sa_bo);
	}
}

static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
	}
	return 0;
}

static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
				   struct radeon_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
				   struct radeon_fence **fences,
				   unsigned *tries)
{
	struct radeon_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the sa_bo
	 * closest to the current hole
	 */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct radeon_sa_bo, flist);

		if (!radeon_fence_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		++tries[best_bo->fence->ring];
		sa_manager->hole = best_bo->olist.prev;

		/* we know that this one is signaled,
		 * so it's safe to remove it
		 */
		radeon_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

int radeon_sa_bo_new(struct radeon_device *rdev,
		     struct radeon_sa_manager *sa_manager,
		     struct radeon_sa_bo **sa_bo,
		     unsigned size, unsigned align, bool block)
{
	struct radeon_fence *fences[RADEON_NUM_RINGS];
	unsigned tries[RADEON_NUM_RINGS];
	int i, r = -ENOMEM;

	BUG_ON(align > RADEON_GPU_PAGE_SIZE);
	BUG_ON(size > sa_manager->size);

	*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->lock);
	do {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			radeon_sa_bo_try_free(sa_manager);

			if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));

		if (block) {
			spin_unlock(&sa_manager->lock);
			r = radeon_fence_wait_any(rdev, fences, false);
			spin_lock(&sa_manager->lock);
			if (r) {
				/* if we have nothing to wait for, we are
				 * practically out of memory
				 */
				if (r == -ENOENT) {
					r = -ENOMEM;
				}
				goto out_err;
			}
		}
	} while (block);

out_err:
	spin_unlock(&sa_manager->lock);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
		       struct radeon_fence *fence)
{
	struct radeon_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->lock);
	if (fence && fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
		(*sa_bo)->fence = radeon_fence_ref(fence);
		list_add_tail(&(*sa_bo)->flist,
			      &sa_manager->flist[fence->ring]);
	} else {
		radeon_sa_bo_remove_locked(*sa_bo);
	}
	spin_unlock(&sa_manager->lock);
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct radeon_sa_bo *i;

	spin_lock(&sa_manager->lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%08x 0x%08x] size %8d",
			   i->soffset, i->eoffset, i->eoffset - i->soffset);
		if (i->fence) {
			seq_printf(m, " protected by 0x%016llx on ring %d",
				   i->fence->seq, i->fence->ring);
		}
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->lock);
}
#endif
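
Two bits of arithmetic in the v3.5.6 allocator are worth a worked example: the alignment padding computed in radeon_sa_bo_try_alloc() and the wrap-around distance radeon_sa_bo_next_hole() uses to pick the closest signaled bo. Below is a standalone sketch with made-up numbers (plain userspace C, not driver code; the manager size, offsets, and alignment are arbitrary).

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned size = 512 * 1024;	/* hypothetical manager size */

	/* padding needed to round soffset up to the next align boundary */
	unsigned soffset = 100, align = 64;
	unsigned wasted = (align - (soffset % align)) % align;	/* 28 here */
	assert((soffset + wasted) % align == 0);

	/*
	 * Distance from the hole to a fenced bo, treating the buffer as a
	 * ring: an offset numerically below the hole lies "after" it once
	 * the allocator wraps, so pretend it sits one full buffer later.
	 */
	unsigned hole = 400 * 1024, candidate = 16 * 1024;
	unsigned tmp = candidate;
	if (tmp < hole)
		tmp += size;	/* wrap around, pretend it's after */
	tmp -= hole;		/* 128 KiB from the hole, ring-wise */

	printf("wasted=%u distance=%u\n", wasted, tmp);
	return 0;
}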