v3.1
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions here call it.
 */

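/*
 * Editor's note: a minimal sketch (not part of the original file) of the
 * reservation convention described above. radeon_example_touch_bo() is a
 * hypothetical helper; radeon_bo_reserve()/radeon_bo_unreserve() are the
 * driver's thin wrappers around TTM reservation, declared in
 * radeon_object.h in this version.
 */
static int radeon_example_touch_bo(struct radeon_bo *bo)
{
        int r;

        /* may sleep and may return -ERESTARTSYS on a signal */
        r = radeon_bo_reserve(bo, false);
        if (unlikely(r != 0))
                return r;
        /* BO state (placement, tiling, surface regs) is now safe to touch */
        radeon_bo_unreserve(bo);
        return 0;
}
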
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct radeon_bo *bo;

        bo = container_of(tbo, struct radeon_bo, tbo);
        mutex_lock(&bo->rdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&bo->rdev->gem.mutex);
        radeon_bo_clear_surface_reg(bo);
        drm_gem_object_release(&bo->gem_base);
        kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &radeon_ttm_bo_destroy)
                return true;
        return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
        u32 c = 0;

        rbo->placement.fpfn = 0;
        rbo->placement.lpfn = 0;
        rbo->placement.placement = rbo->placements;
        rbo->placement.busy_placement = rbo->placements;
        if (domain & RADEON_GEM_DOMAIN_VRAM)
                rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                                        TTM_PL_FLAG_VRAM;
        if (domain & RADEON_GEM_DOMAIN_GTT)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        if (domain & RADEON_GEM_DOMAIN_CPU)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        if (!c)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        rbo->placement.num_placement = c;
        rbo->placement.num_busy_placement = c;
}
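
/*
 * Editor's note: a hedged sketch (not in the original file) showing that
 * domains may be OR'ed together; each requested domain adds one placement
 * entry, giving TTM a fallback order. The same VRAM-to-GTT fallback is
 * arranged by hand in radeon_bo_create() below when a VRAM-only
 * allocation fails. The helper name is illustrative.
 */
static void radeon_example_vram_with_gtt_fallback(struct radeon_bo *rbo)
{
        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
                                              RADEON_GEM_DOMAIN_GTT);
        /* rbo->placement.num_placement is now 2: VRAM first, then GTT */
}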

int radeon_bo_create(struct radeon_device *rdev,
                     unsigned long size, int byte_align, bool kernel, u32 domain,
                     struct radeon_bo **bo_ptr)
{
        struct radeon_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        unsigned long max_size = 0;
        int r;

        size = ALIGN(size, PAGE_SIZE);

        if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
                rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
        }
        if (kernel) {
                type = ttm_bo_type_kernel;
        } else {
                type = ttm_bo_type_device;
        }
        *bo_ptr = NULL;

        /* maximum bo size is the minimum of visible vram and gtt size */
        max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
        if ((page_align << PAGE_SHIFT) >= max_size) {
                printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldM limit\n",
                        __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
                return -ENOMEM;
        }

retry:
        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->rdev = rdev;
        bo->gem_base.driver_private = NULL;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocations are uninterruptible */
        mutex_lock(&rdev->vram_mutex);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, page_align, 0, !kernel, NULL, size,
                        &radeon_ttm_bo_destroy);
        mutex_unlock(&rdev->vram_mutex);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS) {
                        if (domain == RADEON_GEM_DOMAIN_VRAM) {
                                domain |= RADEON_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        dev_err(rdev->dev,
                                "object_init failed for (%lu, 0x%08X)\n",
                                size, domain);
                }
                return r;
        }
        *bo_ptr = bo;

        trace_radeon_bo_create(bo);

        return 0;
}
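
/*
 * Editor's sketch (not in the original file): the typical lifecycle of a
 * kernel-owned BO built from the helpers in this file. Error unwinding is
 * kept minimal and the function name is illustrative.
 */
static int radeon_example_scratch_bo(struct radeon_device *rdev,
                                     struct radeon_bo **bo_ptr)
{
        u64 gpu_addr;
        void *cpu_ptr;
        int r;

        r = radeon_bo_create(rdev, PAGE_SIZE, PAGE_SIZE, true,
                             RADEON_GEM_DOMAIN_GTT, bo_ptr);
        if (r)
                return r;
        r = radeon_bo_reserve(*bo_ptr, false);
        if (r) {
                radeon_bo_unref(bo_ptr);
                return r;
        }
        r = radeon_bo_pin(*bo_ptr, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
        if (r == 0) {
                r = radeon_bo_kmap(*bo_ptr, &cpu_ptr);
                if (r)
                        radeon_bo_unpin(*bo_ptr);
        }
        radeon_bo_unreserve(*bo_ptr);
        if (r)
                radeon_bo_unref(bo_ptr);
        /* a real caller would keep gpu_addr and cpu_ptr for later use */
        return r;
}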

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
        bool is_iomem;
        int r;

        if (bo->kptr) {
                if (ptr) {
                        *ptr = bo->kptr;
                }
                return 0;
        }
        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r) {
                return r;
        }
        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr) {
                *ptr = bo->kptr;
        }
        radeon_bo_check_tiling(bo, 0, 0);
        return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->kptr = NULL;
        radeon_bo_check_tiling(bo, 0, 0);
        ttm_bo_kunmap(&bo->kmap);
}

void radeon_bo_unref(struct radeon_bo **bo)
{
        struct ttm_buffer_object *tbo;
        struct radeon_device *rdev;

        if ((*bo) == NULL)
                return;
        rdev = (*bo)->rdev;
        tbo = &((*bo)->tbo);
        mutex_lock(&rdev->vram_mutex);
        ttm_bo_unref(&tbo);
        mutex_unlock(&rdev->vram_mutex);
        if (tbo == NULL)
                *bo = NULL;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
        int r, i;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = radeon_bo_gpu_offset(bo);
                return 0;
        }
        radeon_ttm_placement_from_domain(bo, domain);
        if (domain == RADEON_GEM_DOMAIN_VRAM) {
                /* force to pin into visible video ram */
                bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
        }
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
                        *gpu_addr = radeon_bo_gpu_offset(bo);
        }
        if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p pin failed\n", bo);
        return r;
}

int radeon_bo_unpin(struct radeon_bo *bo)
{
        int r, i;

        if (!bo->pin_count) {
                dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
        if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
        return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
        if (0 && (rdev->flags & RADEON_IS_IGP)) {
                if (rdev->mc.igp_sideport_enabled == false)
                        /* Useless to evict on IGP chips */
                        return 0;
        }
        return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
        struct radeon_bo *bo, *n;

        if (list_empty(&rdev->gem.objects)) {
                return;
        }
        dev_err(rdev->dev, "Userspace still has active objects!\n");
        list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
                mutex_lock(&rdev->ddev->struct_mutex);
                dev_err(rdev->dev, "%p %p %lu %lu force free\n",
                        &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
                        *((unsigned long *)&bo->gem_base.refcount));
                mutex_lock(&bo->rdev->gem.mutex);
                list_del_init(&bo->list);
                mutex_unlock(&bo->rdev->gem.mutex);
                /* this should unref the ttm bo */
                drm_gem_object_unreference(&bo->gem_base);
                mutex_unlock(&rdev->ddev->struct_mutex);
        }
}

int radeon_bo_init(struct radeon_device *rdev)
{
        /* Add an MTRR for the VRAM */
        rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
                        MTRR_TYPE_WRCOMB, 1);
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                rdev->mc.mc_vram_size >> 20,
                (unsigned long long)rdev->mc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %cDR\n",
                        rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
        return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
        radeon_ttm_fini(rdev);
}

void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
                                struct list_head *head)
{
        if (lobj->wdomain) {
                list_add(&lobj->tv.head, head);
        } else {
                list_add_tail(&lobj->tv.head, head);
        }
}

int radeon_bo_list_validate(struct list_head *head)
{
        struct radeon_bo_list *lobj;
        struct radeon_bo *bo;
        u32 domain;
        int r;

        r = ttm_eu_reserve_buffers(head);
        if (unlikely(r != 0)) {
                return r;
        }
        list_for_each_entry(lobj, head, tv.head) {
                bo = lobj->bo;
                if (!bo->pin_count) {
                        domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;

                retry:
                        radeon_ttm_placement_from_domain(bo, domain);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement,
                                                true, false, false);
                        if (unlikely(r)) {
                                if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
                                        domain |= RADEON_GEM_DOMAIN_GTT;
                                        goto retry;
                                }
                                return r;
                        }
                }
                lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                lobj->tiling_flags = bo->tiling_flags;
        }
        return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
                             struct vm_area_struct *vma)
{
        return ttm_fbdev_mmap(vma, &bo->tbo);
}

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;
        struct radeon_bo *old_object;
        int steal;
        int i;

        BUG_ON(!atomic_read(&bo->tbo.reserved));

        if (!bo->tiling_flags)
                return 0;

        if (bo->surface_reg >= 0) {
                reg = &rdev->surface_regs[bo->surface_reg];
                i = bo->surface_reg;
                goto out;
        }

        steal = -1;
        for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

                reg = &rdev->surface_regs[i];
                if (!reg->bo)
                        break;

                old_object = reg->bo;
                if (old_object->pin_count == 0)
                        steal = i;
        }

        /* if we are all out */
        if (i == RADEON_GEM_MAX_SURFACES) {
                if (steal == -1)
                        return -ENOMEM;
                /* find someone with a surface reg and nuke their BO */
                reg = &rdev->surface_regs[steal];
                old_object = reg->bo;
                /* blow away the mapping */
                DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
                ttm_bo_unmap_virtual(&old_object->tbo);
                old_object->surface_reg = -1;
                i = steal;
        }

        bo->surface_reg = i;
        reg->bo = bo;

out:
        radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
                               bo->tbo.mem.start << PAGE_SHIFT,
                               bo->tbo.num_pages << PAGE_SHIFT);
        return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;

        if (bo->surface_reg == -1)
                return;

        reg = &rdev->surface_regs[bo->surface_reg];
        radeon_clear_surface_reg(rdev, bo->surface_reg);

        reg->bo = NULL;
        bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
                                uint32_t tiling_flags, uint32_t pitch)
{
        int r;

        r = radeon_bo_reserve(bo, false);
        if (unlikely(r != 0))
                return r;
        bo->tiling_flags = tiling_flags;
        bo->pitch = pitch;
        radeon_bo_unreserve(bo);
        return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
                                uint32_t *tiling_flags,
                                uint32_t *pitch)
{
        BUG_ON(!atomic_read(&bo->tbo.reserved));
        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
        if (pitch)
                *pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                                bool force_drop)
{
        BUG_ON(!atomic_read(&bo->tbo.reserved));

        if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
                return 0;

        if (force_drop) {
                radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
                if (!has_moved)
                        return 0;

                if (bo->surface_reg >= 0)
                        radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if ((bo->surface_reg >= 0) && !has_moved)
                return 0;

        return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem)
{
        struct radeon_bo *rbo;
        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 1);
}

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
        unsigned long offset, size;
        int r;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return 0;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
        rdev = rbo->rdev;
        if (bo->mem.mem_type == TTM_PL_VRAM) {
                size = bo->mem.num_pages << PAGE_SHIFT;
                offset = bo->mem.start << PAGE_SHIFT;
                if ((offset + size) > rdev->mc.visible_vram_size) {
                        /* hurrah, the memory is not visible! */
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
                        rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
                        r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
                        if (unlikely(r != 0))
                                return r;
                        offset = bo->mem.start << PAGE_SHIFT;
                        /* this should not happen */
                        if ((offset + size) > rdev->mc.visible_vram_size)
                                return -EINVAL;
                }
        }
        return 0;
}
v3.5.6
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions here call it.
 */

void radeon_bo_clear_va(struct radeon_bo *bo)
{
        struct radeon_bo_va *bo_va, *tmp;

        list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
                /* remove from all vm address space */
                radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo);
        }
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
        struct radeon_bo *bo;

        bo = container_of(tbo, struct radeon_bo, tbo);
        mutex_lock(&bo->rdev->gem.mutex);
        list_del_init(&bo->list);
        mutex_unlock(&bo->rdev->gem.mutex);
        radeon_bo_clear_surface_reg(bo);
        radeon_bo_clear_va(bo);
        drm_gem_object_release(&bo->gem_base);
        kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
        if (bo->destroy == &radeon_ttm_bo_destroy)
                return true;
        return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
        u32 c = 0;

        rbo->placement.fpfn = 0;
        rbo->placement.lpfn = 0;
        rbo->placement.placement = rbo->placements;
        rbo->placement.busy_placement = rbo->placements;
        if (domain & RADEON_GEM_DOMAIN_VRAM)
                rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
                                        TTM_PL_FLAG_VRAM;
        if (domain & RADEON_GEM_DOMAIN_GTT)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        if (domain & RADEON_GEM_DOMAIN_CPU)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        if (!c)
                rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
        rbo->placement.num_placement = c;
        rbo->placement.num_busy_placement = c;
}

int radeon_bo_create(struct radeon_device *rdev,
                     unsigned long size, int byte_align, bool kernel, u32 domain,
                     struct sg_table *sg, struct radeon_bo **bo_ptr)
{
        struct radeon_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        unsigned long max_size = 0;
        size_t acc_size;
        int r;

        size = ALIGN(size, PAGE_SIZE);

        if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
                rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
        }
        if (kernel) {
                type = ttm_bo_type_kernel;
        } else if (sg) {
                type = ttm_bo_type_sg;
        } else {
                type = ttm_bo_type_device;
        }
        *bo_ptr = NULL;

        /* maximum bo size is the minimum of visible vram and gtt size */
        max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
        if ((page_align << PAGE_SHIFT) >= max_size) {
                printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldM limit\n",
                        __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
                return -ENOMEM;
        }

        acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
                                       sizeof(struct radeon_bo));

retry:
        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
        r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
        if (unlikely(r)) {
                kfree(bo);
                return r;
        }
        bo->rdev = rdev;
        bo->gem_base.driver_private = NULL;
        bo->surface_reg = -1;
        INIT_LIST_HEAD(&bo->list);
        INIT_LIST_HEAD(&bo->va);
        radeon_ttm_placement_from_domain(bo, domain);
        /* Kernel allocations are uninterruptible */
        mutex_lock(&rdev->vram_mutex);
        r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, page_align, 0, !kernel, NULL,
                        acc_size, sg, &radeon_ttm_bo_destroy);
        mutex_unlock(&rdev->vram_mutex);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS) {
                        if (domain == RADEON_GEM_DOMAIN_VRAM) {
                                domain |= RADEON_GEM_DOMAIN_GTT;
                                goto retry;
                        }
                        dev_err(rdev->dev,
                                "object_init failed for (%lu, 0x%08X)\n",
                                size, domain);
                }
                return r;
        }
        *bo_ptr = bo;

        trace_radeon_bo_create(bo);

        return 0;
}
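
/*
 * Editor's note: relative to v3.1, radeon_bo_create() gains a struct
 * sg_table parameter for PRIME buffer sharing; a non-NULL table creates a
 * ttm_bo_type_sg object backed by imported pages. A hedged sketch of an
 * ordinary (non-imported) allocation, with an illustrative helper name:
 */
static int radeon_example_create_plain(struct radeon_device *rdev,
                                       unsigned long size,
                                       struct radeon_bo **bo_ptr)
{
        /* sg == NULL: a normal device-memory allocation */
        return radeon_bo_create(rdev, size, PAGE_SIZE, true,
                                RADEON_GEM_DOMAIN_VRAM, NULL, bo_ptr);
}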

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
        bool is_iomem;
        int r;

        if (bo->kptr) {
                if (ptr) {
                        *ptr = bo->kptr;
                }
                return 0;
        }
        r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
        if (r) {
                return r;
        }
        bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
        if (ptr) {
                *ptr = bo->kptr;
        }
        radeon_bo_check_tiling(bo, 0, 0);
        return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
        if (bo->kptr == NULL)
                return;
        bo->kptr = NULL;
        radeon_bo_check_tiling(bo, 0, 0);
        ttm_bo_kunmap(&bo->kmap);
}

void radeon_bo_unref(struct radeon_bo **bo)
{
        struct ttm_buffer_object *tbo;
        struct radeon_device *rdev;

        if ((*bo) == NULL)
                return;
        rdev = (*bo)->rdev;
        tbo = &((*bo)->tbo);
        mutex_lock(&rdev->vram_mutex);
        ttm_bo_unref(&tbo);
        mutex_unlock(&rdev->vram_mutex);
        if (tbo == NULL)
                *bo = NULL;
}

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
                             u64 *gpu_addr)
{
        int r, i;

        if (bo->pin_count) {
                bo->pin_count++;
                if (gpu_addr)
                        *gpu_addr = radeon_bo_gpu_offset(bo);

                if (max_offset != 0) {
                        u64 domain_start;

                        if (domain == RADEON_GEM_DOMAIN_VRAM)
                                domain_start = bo->rdev->mc.vram_start;
                        else
                                domain_start = bo->rdev->mc.gtt_start;
                        WARN_ON_ONCE(max_offset <
                                     (radeon_bo_gpu_offset(bo) - domain_start));
                }

                return 0;
        }
        radeon_ttm_placement_from_domain(bo, domain);
        if (domain == RADEON_GEM_DOMAIN_VRAM) {
                /* force to pin into visible video ram */
                bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
        }
        if (max_offset) {
                u64 lpfn = max_offset >> PAGE_SHIFT;

                if (!bo->placement.lpfn)
                        bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

                if (lpfn < bo->placement.lpfn)
                        bo->placement.lpfn = lpfn;
        }
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
        if (likely(r == 0)) {
                bo->pin_count = 1;
                if (gpu_addr != NULL)
                        *gpu_addr = radeon_bo_gpu_offset(bo);
        }
        if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p pin failed\n", bo);
        return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
        return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}
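
/*
 * Editor's note: a hedged sketch (not in the original file) of the new
 * restricted pin. max_offset caps the last valid page (lpfn), e.g. to keep
 * a buffer inside some aperture; the 256 MiB bound below and the helper
 * name are illustrative only.
 */
static int radeon_example_pin_low_vram(struct radeon_bo *bo, u64 *gpu_addr)
{
        return radeon_bo_pin_restricted(bo, RADEON_GEM_DOMAIN_VRAM,
                                        256ULL << 20, gpu_addr);
}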

int radeon_bo_unpin(struct radeon_bo *bo)
{
        int r, i;

        if (!bo->pin_count) {
                dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
                return 0;
        }
        bo->pin_count--;
        if (bo->pin_count)
                return 0;
        for (i = 0; i < bo->placement.num_placement; i++)
                bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
        r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
        if (unlikely(r != 0))
                dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
        return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
        if (0 && (rdev->flags & RADEON_IS_IGP)) {
                if (rdev->mc.igp_sideport_enabled == false)
                        /* Useless to evict on IGP chips */
                        return 0;
        }
        return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
        struct radeon_bo *bo, *n;

        if (list_empty(&rdev->gem.objects)) {
                return;
        }
        dev_err(rdev->dev, "Userspace still has active objects!\n");
        list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
                mutex_lock(&rdev->ddev->struct_mutex);
                dev_err(rdev->dev, "%p %p %lu %lu force free\n",
                        &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
                        *((unsigned long *)&bo->gem_base.refcount));
                mutex_lock(&bo->rdev->gem.mutex);
                list_del_init(&bo->list);
                mutex_unlock(&bo->rdev->gem.mutex);
                /* this should unref the ttm bo */
                drm_gem_object_unreference(&bo->gem_base);
                mutex_unlock(&rdev->ddev->struct_mutex);
        }
}

int radeon_bo_init(struct radeon_device *rdev)
{
        /* Add an MTRR for the VRAM */
        rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
                        MTRR_TYPE_WRCOMB, 1);
        DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
                rdev->mc.mc_vram_size >> 20,
                (unsigned long long)rdev->mc.aper_size >> 20);
        DRM_INFO("RAM width %dbits %cDR\n",
                        rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
        return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
        radeon_ttm_fini(rdev);
}

void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
                                struct list_head *head)
{
        if (lobj->wdomain) {
                list_add(&lobj->tv.head, head);
        } else {
                list_add_tail(&lobj->tv.head, head);
        }
}

int radeon_bo_list_validate(struct list_head *head)
{
        struct radeon_bo_list *lobj;
        struct radeon_bo *bo;
        u32 domain;
        int r;

        r = ttm_eu_reserve_buffers(head);
        if (unlikely(r != 0)) {
                return r;
        }
        list_for_each_entry(lobj, head, tv.head) {
                bo = lobj->bo;
                if (!bo->pin_count) {
                        domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;

                retry:
                        radeon_ttm_placement_from_domain(bo, domain);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement,
                                                true, false, false);
                        if (unlikely(r)) {
                                if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
                                        domain |= RADEON_GEM_DOMAIN_GTT;
                                        goto retry;
                                }
                                return r;
                        }
                }
                lobj->gpu_offset = radeon_bo_gpu_offset(bo);
                lobj->tiling_flags = bo->tiling_flags;
        }
        return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
                             struct vm_area_struct *vma)
{
        return ttm_fbdev_mmap(vma, &bo->tbo);
}

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;
        struct radeon_bo *old_object;
        int steal;
        int i;

        BUG_ON(!atomic_read(&bo->tbo.reserved));

        if (!bo->tiling_flags)
                return 0;

        if (bo->surface_reg >= 0) {
                reg = &rdev->surface_regs[bo->surface_reg];
                i = bo->surface_reg;
                goto out;
        }

        steal = -1;
        for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

                reg = &rdev->surface_regs[i];
                if (!reg->bo)
                        break;

                old_object = reg->bo;
                if (old_object->pin_count == 0)
                        steal = i;
        }

        /* if we are all out */
        if (i == RADEON_GEM_MAX_SURFACES) {
                if (steal == -1)
                        return -ENOMEM;
                /* find someone with a surface reg and nuke their BO */
                reg = &rdev->surface_regs[steal];
                old_object = reg->bo;
                /* blow away the mapping */
                DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
                ttm_bo_unmap_virtual(&old_object->tbo);
                old_object->surface_reg = -1;
                i = steal;
        }

        bo->surface_reg = i;
        reg->bo = bo;

out:
        radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
                               bo->tbo.mem.start << PAGE_SHIFT,
                               bo->tbo.num_pages << PAGE_SHIFT);
        return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_surface_reg *reg;

        if (bo->surface_reg == -1)
                return;

        reg = &rdev->surface_regs[bo->surface_reg];
        radeon_clear_surface_reg(rdev, bo->surface_reg);

        reg->bo = NULL;
        bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
                                uint32_t tiling_flags, uint32_t pitch)
{
        struct radeon_device *rdev = bo->rdev;
        int r;

        if (rdev->family >= CHIP_CEDAR) {
                unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

                bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
                bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
                mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
                tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
                stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
                switch (bankw) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (bankh) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                switch (mtaspect) {
                case 0:
                case 1:
                case 2:
                case 4:
                case 8:
                        break;
                default:
                        return -EINVAL;
                }
                if (tilesplit > 6) {
                        return -EINVAL;
                }
                if (stilesplit > 6) {
                        return -EINVAL;
                }
        }
        r = radeon_bo_reserve(bo, false);
        if (unlikely(r != 0))
                return r;
        bo->tiling_flags = tiling_flags;
        bo->pitch = pitch;
        radeon_bo_unreserve(bo);
        return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
                                uint32_t *tiling_flags,
                                uint32_t *pitch)
{
        BUG_ON(!atomic_read(&bo->tbo.reserved));
        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
        if (pitch)
                *pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
                                bool force_drop)
{
        BUG_ON(!atomic_read(&bo->tbo.reserved));

        if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
                return 0;

        if (force_drop) {
                radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
                if (!has_moved)
                        return 0;

                if (bo->surface_reg >= 0)
                        radeon_bo_clear_surface_reg(bo);
                return 0;
        }

        if ((bo->surface_reg >= 0) && !has_moved)
                return 0;

        return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem)
{
        struct radeon_bo *rbo;
        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 1);
        radeon_vm_bo_invalidate(rbo->rdev, rbo);
}

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
        unsigned long offset, size;
        int r;

        if (!radeon_ttm_bo_is_radeon_bo(bo))
                return 0;
        rbo = container_of(bo, struct radeon_bo, tbo);
        radeon_bo_check_tiling(rbo, 0, 0);
        rdev = rbo->rdev;
        if (bo->mem.mem_type == TTM_PL_VRAM) {
                size = bo->mem.num_pages << PAGE_SHIFT;
                offset = bo->mem.start << PAGE_SHIFT;
                if ((offset + size) > rdev->mc.visible_vram_size) {
                        /* hurrah, the memory is not visible! */
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
                        rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
                        r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
                        if (unlikely(r != 0))
                                return r;
                        offset = bo->mem.start << PAGE_SHIFT;
                        /* this should not happen */
                        if ((offset + size) > rdev->mc.visible_vram_size)
                                return -EINVAL;
                }
        }
        return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
        int r;

        r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
        if (unlikely(r != 0))
                return r;
        spin_lock(&bo->tbo.bdev->fence_lock);
        if (mem_type)
                *mem_type = bo->tbo.mem.mem_type;
        if (bo->tbo.sync_obj)
                r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
        spin_unlock(&bo->tbo.bdev->fence_lock);
        ttm_bo_unreserve(&bo->tbo);
        return r;
}
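
/*
 * Editor's note: a hedged sketch (not in the original file) of using
 * radeon_bo_wait() before CPU access. With no_wait == false the call
 * blocks on the BO's fence; with no_wait == true it is a poll that
 * returns -EBUSY while the GPU still owns the buffer. The helper name is
 * illustrative.
 */
static int radeon_example_wait_idle(struct radeon_bo *bo)
{
        u32 mem_type;
        int r;

        r = radeon_bo_wait(bo, &mem_type, false);
        if (r)
                return r;
        /* mem_type reports where the BO currently lives (e.g. TTM_PL_VRAM) */
        return 0;
}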

/**
 * radeon_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_wait:	don't sleep while trying to reserve (return -EBUSY)
 *
 * Returns:
 * -EBUSY: buffer is busy and @no_wait is true
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
{
        int r;

        r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
                return r;
        }
        return 0;
}

/* the object has to be reserved */
struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
{
        struct radeon_bo_va *bo_va;

        list_for_each_entry(bo_va, &rbo->va, bo_list) {
                if (bo_va->vm == vm) {
                        return bo_va;
                }
        }
        return NULL;
}
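
/*
 * Editor's note: a hedged sketch (not in the original file) of a per-VM
 * lookup; the caller must hold the BO reservation, per the comment above.
 * The helper name is illustrative, and the use of bo_va->soffset assumes
 * that field holds the mapping's start offset in this kernel version.
 */
static u64 radeon_example_bo_gpu_va(struct radeon_bo *rbo,
                                    struct radeon_vm *vm)
{
        struct radeon_bo_va *bo_va = radeon_bo_va(rbo, vm);

        return bo_va ? bo_va->soffset : 0;
}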