drivers/gpu/drm/radeon/radeon_object.c (Linux v3.1)
  1/*
  2 * Copyright 2009 Jerome Glisse.
  3 * All Rights Reserved.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining a
  6 * copy of this software and associated documentation files (the
  7 * "Software"), to deal in the Software without restriction, including
  8 * without limitation the rights to use, copy, modify, merge, publish,
  9 * distribute, sub license, and/or sell copies of the Software, and to
 10 * permit persons to whom the Software is furnished to do so, subject to
 11 * the following conditions:
 12 *
 13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 20 *
 21 * The above copyright notice and this permission notice (including the
 22 * next paragraph) shall be included in all copies or substantial portions
 23 * of the Software.
 24 *
 25 */
 26/*
 27 * Authors:
 28 *    Jerome Glisse <glisse@freedesktop.org>
 29 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 30 *    Dave Airlie
 31 */
 32#include <linux/list.h>
 33#include <linux/slab.h>
 34#include <drm/drmP.h>
 35#include "radeon_drm.h"
 
 36#include "radeon.h"
 37#include "radeon_trace.h"
 38
 39
 40int radeon_ttm_init(struct radeon_device *rdev);
 41void radeon_ttm_fini(struct radeon_device *rdev);
 42static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
 43
 44/*
 45 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 46 * functions are calling it.
 47 */
 48
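An illustrative caller-side sketch (an editorial addition, not part of the original file): every helper below assumes the caller holds the BO reservation, normally taken through the radeon_bo_reserve()/radeon_bo_unreserve() inlines from radeon_object.h. The wrapper name example_query_tiling() is an assumption made for this example.

/* Hypothetical caller following the bo_reserve rule described above. */
static int example_query_tiling(struct radeon_bo *bo,
				uint32_t *tiling_flags, uint32_t *pitch)
{
	int r;

	/* Take the reservation that radeon_bo_get_tiling_flags() expects. */
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	radeon_bo_get_tiling_flags(bo, tiling_flags, pitch);
	radeon_bo_unreserve(bo);
	return 0;
}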
 49static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 50{
 51	struct radeon_bo *bo;
 52
 53	bo = container_of(tbo, struct radeon_bo, tbo);
 54	mutex_lock(&bo->rdev->gem.mutex);
 55	list_del_init(&bo->list);
 56	mutex_unlock(&bo->rdev->gem.mutex);
 57	radeon_bo_clear_surface_reg(bo);
 58	drm_gem_object_release(&bo->gem_base);
 59	kfree(bo);
 60}
 61
 62bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
 63{
 64	if (bo->destroy == &radeon_ttm_bo_destroy)
 65		return true;
 66	return false;
 67}
 68
 69void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 70{
 71	u32 c = 0;
 72
 73	rbo->placement.fpfn = 0;
 74	rbo->placement.lpfn = 0;
 75	rbo->placement.placement = rbo->placements;
 76	rbo->placement.busy_placement = rbo->placements;
 77	if (domain & RADEON_GEM_DOMAIN_VRAM)
 78		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
 79					TTM_PL_FLAG_VRAM;
 80	if (domain & RADEON_GEM_DOMAIN_GTT)
 81		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
 82	if (domain & RADEON_GEM_DOMAIN_CPU)
 83		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
 84	if (!c)
 85		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
 86	rbo->placement.num_placement = c;
 87	rbo->placement.num_busy_placement = c;
 88}
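A worked example (editorial, with assumed input): calling radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT) fills placements[0] with TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM and placements[1] with TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT, with fpfn and lpfn left at 0, so TTM tries VRAM first and falls back to GTT; num_placement and num_busy_placement both end up as 2.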
 89
 90int radeon_bo_create(struct radeon_device *rdev,
 91		     unsigned long size, int byte_align, bool kernel, u32 domain,
 92		     struct radeon_bo **bo_ptr)
 93{
 94	struct radeon_bo *bo;
 95	enum ttm_bo_type type;
 96	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
 97	unsigned long max_size = 0;
 98	int r;
 99
100	size = ALIGN(size, PAGE_SIZE);
101
102	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
103		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
104	}
105	if (kernel) {
106		type = ttm_bo_type_kernel;
107	} else {
108		type = ttm_bo_type_device;
109	}
110	*bo_ptr = NULL;
111
112	/* maximum bo size is the minimum of visible vram and gtt size */
113	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
114	if ((page_align << PAGE_SHIFT) >= max_size) {
115		printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n",
116			__func__, __LINE__, page_align  >> (20 - PAGE_SHIFT), max_size >> 20);
117		return -ENOMEM;
118	}
119
120retry:
121	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
122	if (bo == NULL)
123		return -ENOMEM;
124	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
125	if (unlikely(r)) {
126		kfree(bo);
127		return r;
128	}
129	bo->rdev = rdev;
130	bo->gem_base.driver_private = NULL;
131	bo->surface_reg = -1;
132	INIT_LIST_HEAD(&bo->list);
133	radeon_ttm_placement_from_domain(bo, domain);
134	/* Kernel allocations are uninterruptible */
135	mutex_lock(&rdev->vram_mutex);
136	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
137			&bo->placement, page_align, 0, !kernel, NULL, size,
138			&radeon_ttm_bo_destroy);
139	mutex_unlock(&rdev->vram_mutex);
140	if (unlikely(r != 0)) {
141		if (r != -ERESTARTSYS) {
142			if (domain == RADEON_GEM_DOMAIN_VRAM) {
143				domain |= RADEON_GEM_DOMAIN_GTT;
144				goto retry;
145			}
146			dev_err(rdev->dev,
147				"object_init failed for (%lu, 0x%08X)\n",
148				size, domain);
149		}
150		return r;
151	}
152	*bo_ptr = bo;
153
154	trace_radeon_bo_create(bo);
155
156	return 0;
157}
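A hedged usage sketch (editorial addition): kernel-internal buffers are typically allocated with radeon_bo_create() and then reserved, pinned and CPU-mapped, much like the GART table setup elsewhere in the driver. The function name example_alloc_kernel_vram_bo() and its exact error handling are assumptions made for illustration.

/* Illustrative only: allocate, pin and CPU-map a kernel BO in VRAM. */
static int example_alloc_kernel_vram_bo(struct radeon_device *rdev,
					unsigned long size,
					struct radeon_bo **bo_ptr,
					u64 *gpu_addr, void **cpu_ptr)
{
	struct radeon_bo *bo;
	int r;

	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, &bo);
	if (r)
		return r;
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0)) {
		radeon_bo_unref(&bo);
		return r;
	}
	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, gpu_addr);
	if (r == 0) {
		r = radeon_bo_kmap(bo, cpu_ptr);
		if (r)
			radeon_bo_unpin(bo);
	}
	radeon_bo_unreserve(bo);
	if (r) {
		radeon_bo_unref(&bo);
		return r;
	}
	*bo_ptr = bo;
	return 0;
}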
158
159int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
160{
161	bool is_iomem;
162	int r;
163
164	if (bo->kptr) {
165		if (ptr) {
166			*ptr = bo->kptr;
167		}
168		return 0;
169	}
170	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
171	if (r) {
172		return r;
173	}
174	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
175	if (ptr) {
176		*ptr = bo->kptr;
177	}
178	radeon_bo_check_tiling(bo, 0, 0);
179	return 0;
180}
181
182void radeon_bo_kunmap(struct radeon_bo *bo)
183{
184	if (bo->kptr == NULL)
185		return;
186	bo->kptr = NULL;
187	radeon_bo_check_tiling(bo, 0, 0);
188	ttm_bo_kunmap(&bo->kmap);
189}
190
191void radeon_bo_unref(struct radeon_bo **bo)
192{
193	struct ttm_buffer_object *tbo;
194	struct radeon_device *rdev;
195
196	if ((*bo) == NULL)
197		return;
198	rdev = (*bo)->rdev;
199	tbo = &((*bo)->tbo);
200	mutex_lock(&rdev->vram_mutex);
201	ttm_bo_unref(&tbo);
202	mutex_unlock(&rdev->vram_mutex);
203	if (tbo == NULL)
204		*bo = NULL;
205}
206
207int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 
208{
 
209	int r, i;
210
211	if (bo->pin_count) {
212		bo->pin_count++;
213		if (gpu_addr)
214			*gpu_addr = radeon_bo_gpu_offset(bo);
215		return 0;
216	}
217	radeon_ttm_placement_from_domain(bo, domain);
218	if (domain == RADEON_GEM_DOMAIN_VRAM) {
219		/* force to pin into visible video ram */
220		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
221	}
222	for (i = 0; i < bo->placement.num_placement; i++)
223		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
224	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
225	if (likely(r == 0)) {
226		bo->pin_count = 1;
227		if (gpu_addr != NULL)
228			*gpu_addr = radeon_bo_gpu_offset(bo);
229	}
230	if (unlikely(r != 0))
231		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
 
232	return r;
233}
234
235int radeon_bo_unpin(struct radeon_bo *bo)
236{
 
237	int r, i;
238
239	if (!bo->pin_count) {
240		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
241		return 0;
242	}
243	bo->pin_count--;
244	if (bo->pin_count)
245		return 0;
246	for (i = 0; i < bo->placement.num_placement; i++)
247		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
248	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
249	if (unlikely(r != 0))
250		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
 
251	return r;
252}
253
254int radeon_bo_evict_vram(struct radeon_device *rdev)
255{
256	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
257	if (0 && (rdev->flags & RADEON_IS_IGP)) {
258		if (rdev->mc.igp_sideport_enabled == false)
259			/* Useless to evict on IGP chips */
260			return 0;
261	}
262	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
263}
264
265void radeon_bo_force_delete(struct radeon_device *rdev)
266{
267	struct radeon_bo *bo, *n;
268
269	if (list_empty(&rdev->gem.objects)) {
270		return;
271	}
272	dev_err(rdev->dev, "Userspace still has active objects !\n");
273	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
274		mutex_lock(&rdev->ddev->struct_mutex);
275		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
276			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
277			*((unsigned long *)&bo->gem_base.refcount));
278		mutex_lock(&bo->rdev->gem.mutex);
279		list_del_init(&bo->list);
280		mutex_unlock(&bo->rdev->gem.mutex);
281		/* this should unref the ttm bo */
282		drm_gem_object_unreference(&bo->gem_base);
283		mutex_unlock(&rdev->ddev->struct_mutex);
284	}
285}
286
287int radeon_bo_init(struct radeon_device *rdev)
288{
289	/* Add an MTRR for the VRAM */
290	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
291			MTRR_TYPE_WRCOMB, 1);
292	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
293		rdev->mc.mc_vram_size >> 20,
294		(unsigned long long)rdev->mc.aper_size >> 20);
295	DRM_INFO("RAM width %dbits %cDR\n",
296			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
297	return radeon_ttm_init(rdev);
298}
299
300void radeon_bo_fini(struct radeon_device *rdev)
301{
302	radeon_ttm_fini(rdev);
303}
304
305void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
306				struct list_head *head)
 
307{
308	if (lobj->wdomain) {
309		list_add(&lobj->tv.head, head);
310	} else {
311		list_add_tail(&lobj->tv.head, head);
312	}
313}
314
315int radeon_bo_list_validate(struct list_head *head)
316{
 
317	struct radeon_bo_list *lobj;
318	struct radeon_bo *bo;
319	u32 domain;
320	int r;
321
322	r = ttm_eu_reserve_buffers(head);
 
323	if (unlikely(r != 0)) {
324		return r;
325	}
 
326	list_for_each_entry(lobj, head, tv.head) {
327		bo = lobj->bo;
328		if (!bo->pin_count) {
329			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
330			
331		retry:
332			radeon_ttm_placement_from_domain(bo, domain);
333			r = ttm_bo_validate(&bo->tbo, &bo->placement,
334						true, false, false);
335			if (unlikely(r)) {
336				if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
337					domain |= RADEON_GEM_DOMAIN_GTT;
 
338					goto retry;
339				}
 
340				return r;
341			}
342		}
343		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
344		lobj->tiling_flags = bo->tiling_flags;
345	}
346	return 0;
347}
348
349int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
350			     struct vm_area_struct *vma)
351{
352	return ttm_fbdev_mmap(vma, &bo->tbo);
353}
354
355int radeon_bo_get_surface_reg(struct radeon_bo *bo)
356{
357	struct radeon_device *rdev = bo->rdev;
358	struct radeon_surface_reg *reg;
359	struct radeon_bo *old_object;
360	int steal;
361	int i;
362
363	BUG_ON(!atomic_read(&bo->tbo.reserved));
364
365	if (!bo->tiling_flags)
366		return 0;
367
368	if (bo->surface_reg >= 0) {
369		reg = &rdev->surface_regs[bo->surface_reg];
370		i = bo->surface_reg;
371		goto out;
372	}
373
374	steal = -1;
375	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
376
377		reg = &rdev->surface_regs[i];
378		if (!reg->bo)
379			break;
380
381		old_object = reg->bo;
382		if (old_object->pin_count == 0)
383			steal = i;
384	}
385
386	/* if we are all out */
387	if (i == RADEON_GEM_MAX_SURFACES) {
388		if (steal == -1)
389			return -ENOMEM;
390		/* find someone with a surface reg and nuke their BO */
391		reg = &rdev->surface_regs[steal];
392		old_object = reg->bo;
393		/* blow away the mapping */
394		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
395		ttm_bo_unmap_virtual(&old_object->tbo);
396		old_object->surface_reg = -1;
397		i = steal;
398	}
399
400	bo->surface_reg = i;
401	reg->bo = bo;
402
403out:
404	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
405			       bo->tbo.mem.start << PAGE_SHIFT,
406			       bo->tbo.num_pages << PAGE_SHIFT);
407	return 0;
408}
409
410static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
411{
412	struct radeon_device *rdev = bo->rdev;
413	struct radeon_surface_reg *reg;
414
415	if (bo->surface_reg == -1)
416		return;
417
418	reg = &rdev->surface_regs[bo->surface_reg];
419	radeon_clear_surface_reg(rdev, bo->surface_reg);
420
421	reg->bo = NULL;
422	bo->surface_reg = -1;
423}
424
425int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
426				uint32_t tiling_flags, uint32_t pitch)
427{
 
428	int r;
429
430	r = radeon_bo_reserve(bo, false);
431	if (unlikely(r != 0))
432		return r;
433	bo->tiling_flags = tiling_flags;
434	bo->pitch = pitch;
435	radeon_bo_unreserve(bo);
436	return 0;
437}
438
439void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
440				uint32_t *tiling_flags,
441				uint32_t *pitch)
442{
443	BUG_ON(!atomic_read(&bo->tbo.reserved));
 
444	if (tiling_flags)
445		*tiling_flags = bo->tiling_flags;
446	if (pitch)
447		*pitch = bo->pitch;
448}
449
450int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
451				bool force_drop)
452{
453	BUG_ON(!atomic_read(&bo->tbo.reserved));
 
454
455	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
456		return 0;
457
458	if (force_drop) {
459		radeon_bo_clear_surface_reg(bo);
460		return 0;
461	}
462
463	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
464		if (!has_moved)
465			return 0;
466
467		if (bo->surface_reg >= 0)
468			radeon_bo_clear_surface_reg(bo);
469		return 0;
470	}
471
472	if ((bo->surface_reg >= 0) && !has_moved)
473		return 0;
474
475	return radeon_bo_get_surface_reg(bo);
476}
477
478void radeon_bo_move_notify(struct ttm_buffer_object *bo,
479			   struct ttm_mem_reg *mem)
 
480{
481	struct radeon_bo *rbo;
 
482	if (!radeon_ttm_bo_is_radeon_bo(bo))
483		return;
 
484	rbo = container_of(bo, struct radeon_bo, tbo);
485	radeon_bo_check_tiling(rbo, 0, 1);
486}
487
488int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
489{
 
490	struct radeon_device *rdev;
491	struct radeon_bo *rbo;
492	unsigned long offset, size;
493	int r;
494
495	if (!radeon_ttm_bo_is_radeon_bo(bo))
496		return 0;
497	rbo = container_of(bo, struct radeon_bo, tbo);
498	radeon_bo_check_tiling(rbo, 0, 0);
499	rdev = rbo->rdev;
500	if (bo->mem.mem_type == TTM_PL_VRAM) {
501		size = bo->mem.num_pages << PAGE_SHIFT;
502		offset = bo->mem.start << PAGE_SHIFT;
503		if ((offset + size) > rdev->mc.visible_vram_size) {
504			/* hurrah the memory is not visible ! */
505			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
506			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
507			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
508			if (unlikely(r != 0))
509				return r;
510			offset = bo->mem.start << PAGE_SHIFT;
511			/* this should not happen */
512			if ((offset + size) > rdev->mc.visible_vram_size)
513				return -EINVAL;
514		}
515	}
516	return 0;
517}
drivers/gpu/drm/radeon/radeon_object.c (Linux v4.17)
  1/*
  2 * Copyright 2009 Jerome Glisse.
  3 * All Rights Reserved.
  4 *
  5 * Permission is hereby granted, free of charge, to any person obtaining a
  6 * copy of this software and associated documentation files (the
  7 * "Software"), to deal in the Software without restriction, including
  8 * without limitation the rights to use, copy, modify, merge, publish,
  9 * distribute, sub license, and/or sell copies of the Software, and to
 10 * permit persons to whom the Software is furnished to do so, subject to
 11 * the following conditions:
 12 *
 13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 20 *
 21 * The above copyright notice and this permission notice (including the
 22 * next paragraph) shall be included in all copies or substantial portions
 23 * of the Software.
 24 *
 25 */
 26/*
 27 * Authors:
 28 *    Jerome Glisse <glisse@freedesktop.org>
 29 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 30 *    Dave Airlie
 31 */
 32#include <linux/list.h>
 33#include <linux/slab.h>
 34#include <drm/drmP.h>
 35#include <drm/radeon_drm.h>
 36#include <drm/drm_cache.h>
 37#include "radeon.h"
 38#include "radeon_trace.h"
 39
 40
 41int radeon_ttm_init(struct radeon_device *rdev);
 42void radeon_ttm_fini(struct radeon_device *rdev);
 43static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
 44
 45/*
 46 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 47 * functions are calling it.
 48 */
 49
 50static void radeon_update_memory_usage(struct radeon_bo *bo,
 51				       unsigned mem_type, int sign)
 52{
 53	struct radeon_device *rdev = bo->rdev;
 54	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
 55
 56	switch (mem_type) {
 57	case TTM_PL_TT:
 58		if (sign > 0)
 59			atomic64_add(size, &rdev->gtt_usage);
 60		else
 61			atomic64_sub(size, &rdev->gtt_usage);
 62		break;
 63	case TTM_PL_VRAM:
 64		if (sign > 0)
 65			atomic64_add(size, &rdev->vram_usage);
 66		else
 67			atomic64_sub(size, &rdev->vram_usage);
 68		break;
 69	}
 70}
 71
 72static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 73{
 74	struct radeon_bo *bo;
 75
 76	bo = container_of(tbo, struct radeon_bo, tbo);
 77
 78	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
 79
 80	mutex_lock(&bo->rdev->gem.mutex);
 81	list_del_init(&bo->list);
 82	mutex_unlock(&bo->rdev->gem.mutex);
 83	radeon_bo_clear_surface_reg(bo);
 84	WARN_ON_ONCE(!list_empty(&bo->va));
 85	if (bo->gem_base.import_attach)
 86		drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
 87	drm_gem_object_release(&bo->gem_base);
 88	kfree(bo);
 89}
 90
 91bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
 92{
 93	if (bo->destroy == &radeon_ttm_bo_destroy)
 94		return true;
 95	return false;
 96}
 97
 98void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 99{
100	u32 c = 0, i;
101
102	rbo->placement.placement = rbo->placements;
103	rbo->placement.busy_placement = rbo->placements;
104	if (domain & RADEON_GEM_DOMAIN_VRAM) {
105		/* Try placing BOs which don't need CPU access outside of the
106		 * CPU accessible part of VRAM
107		 */
108		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
109		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
110			rbo->placements[c].fpfn =
111				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
112			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
113						     TTM_PL_FLAG_UNCACHED |
114						     TTM_PL_FLAG_VRAM;
115		}
116
117		rbo->placements[c].fpfn = 0;
118		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
119					     TTM_PL_FLAG_UNCACHED |
120					     TTM_PL_FLAG_VRAM;
121	}
122
123	if (domain & RADEON_GEM_DOMAIN_GTT) {
124		if (rbo->flags & RADEON_GEM_GTT_UC) {
125			rbo->placements[c].fpfn = 0;
126			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
127				TTM_PL_FLAG_TT;
128
129		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
130			   (rbo->rdev->flags & RADEON_IS_AGP)) {
131			rbo->placements[c].fpfn = 0;
132			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
133				TTM_PL_FLAG_UNCACHED |
134				TTM_PL_FLAG_TT;
135		} else {
136			rbo->placements[c].fpfn = 0;
137			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
138						     TTM_PL_FLAG_TT;
139		}
140	}
141
142	if (domain & RADEON_GEM_DOMAIN_CPU) {
143		if (rbo->flags & RADEON_GEM_GTT_UC) {
144			rbo->placements[c].fpfn = 0;
145			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
146				TTM_PL_FLAG_SYSTEM;
147
148		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
149		    rbo->rdev->flags & RADEON_IS_AGP) {
150			rbo->placements[c].fpfn = 0;
151			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
152				TTM_PL_FLAG_UNCACHED |
153				TTM_PL_FLAG_SYSTEM;
154		} else {
155			rbo->placements[c].fpfn = 0;
156			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
157						     TTM_PL_FLAG_SYSTEM;
158		}
159	}
160	if (!c) {
161		rbo->placements[c].fpfn = 0;
162		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
163					     TTM_PL_FLAG_SYSTEM;
164	}
165
166	rbo->placement.num_placement = c;
167	rbo->placement.num_busy_placement = c;
168
169	for (i = 0; i < c; ++i) {
170		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
171		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
172		    !rbo->placements[i].fpfn)
173			rbo->placements[i].lpfn =
174				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
175		else
176			rbo->placements[i].lpfn = 0;
177	}
178}
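A worked example (editorial, with assumed input): for a BO with domain RADEON_GEM_DOMAIN_VRAM and flags containing RADEON_GEM_NO_CPU_ACCESS on a board whose CPU-visible VRAM is smaller than real VRAM, the code above produces placements[0] with fpfn = visible_vram_size >> PAGE_SHIFT (preferring the part of VRAM the CPU cannot reach) and placements[1] with fpfn = 0 (any VRAM), both flagged TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; num_placement and num_busy_placement become 2, and the final loop leaves lpfn at 0 for both entries because RADEON_GEM_CPU_ACCESS is not set.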
179
180int radeon_bo_create(struct radeon_device *rdev,
181		     unsigned long size, int byte_align, bool kernel,
182		     u32 domain, u32 flags, struct sg_table *sg,
183		     struct reservation_object *resv,
184		     struct radeon_bo **bo_ptr)
185{
186	struct radeon_bo *bo;
187	enum ttm_bo_type type;
188	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
189	size_t acc_size;
190	int r;
191
192	size = ALIGN(size, PAGE_SIZE);
193
194	if (kernel) {
195		type = ttm_bo_type_kernel;
196	} else if (sg) {
197		type = ttm_bo_type_sg;
198	} else {
199		type = ttm_bo_type_device;
200	}
201	*bo_ptr = NULL;
202
203	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
204				       sizeof(struct radeon_bo));
205
 
206	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
207	if (bo == NULL)
208		return -ENOMEM;
209	drm_gem_private_object_init(rdev->ddev, &bo->gem_base, size);
210	bo->rdev = rdev;
 
211	bo->surface_reg = -1;
212	INIT_LIST_HEAD(&bo->list);
213	INIT_LIST_HEAD(&bo->va);
214	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
215				       RADEON_GEM_DOMAIN_GTT |
216				       RADEON_GEM_DOMAIN_CPU);
217
218	bo->flags = flags;
219	/* PCI GART is always snooped */
220	if (!(rdev->flags & RADEON_IS_PCIE))
221		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
222
223	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
224	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
225	 */
226	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
227		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
228
229#ifdef CONFIG_X86_32
230	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
231	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
232	 */
233	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
234#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
235	/* Don't try to enable write-combining when it can't work, or things
236	 * may be slow
237	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
238	 */
239#ifndef CONFIG_COMPILE_TEST
240#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
241	 thanks to write-combining
242#endif
243
244	if (bo->flags & RADEON_GEM_GTT_WC)
245		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
246			      "better performance thanks to write-combining\n");
247	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
248#else
249	/* For architectures that don't support WC memory,
250	 * mask out the WC flag from the BO
251	 */
252	if (!drm_arch_can_wc_memory())
253		bo->flags &= ~RADEON_GEM_GTT_WC;
254#endif
255
256	radeon_ttm_placement_from_domain(bo, domain);
257	/* Kernel allocations are uninterruptible */
258	down_read(&rdev->pm.mclk_lock);
259	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
260			&bo->placement, page_align, !kernel, acc_size,
261			sg, resv, &radeon_ttm_bo_destroy);
262	up_read(&rdev->pm.mclk_lock);
263	if (unlikely(r != 0)) {
264		return r;
265	}
266	*bo_ptr = bo;
267
268	trace_radeon_bo_create(bo);
269
270	return 0;
271}
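A hedged sketch of the v4.17 calling convention (editorial addition): plain driver-internal allocations pass NULL for the sg table and the reservation object, which are only needed for dma-buf imports and shared reservations. The wrapper name example_create_kernel_gtt_bo() is an assumption for the example.

/* Illustrative only: a simple kernel GTT allocation with the v4.17 arguments. */
static int example_create_kernel_gtt_bo(struct radeon_device *rdev,
					unsigned long size,
					struct radeon_bo **bo_ptr)
{
	/* RADEON_GEM_GTT_WC asks for a write-combined CPU mapping; the flag
	 * filtering in radeon_bo_create() above may clear it on chips or
	 * configurations where it is known not to work.
	 */
	return radeon_bo_create(rdev, size, PAGE_SIZE, true,
				RADEON_GEM_DOMAIN_GTT, RADEON_GEM_GTT_WC,
				NULL, NULL, bo_ptr);
}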
272
273int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
274{
275	bool is_iomem;
276	int r;
277
278	if (bo->kptr) {
279		if (ptr) {
280			*ptr = bo->kptr;
281		}
282		return 0;
283	}
284	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
285	if (r) {
286		return r;
287	}
288	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
289	if (ptr) {
290		*ptr = bo->kptr;
291	}
292	radeon_bo_check_tiling(bo, 0, 0);
293	return 0;
294}
295
296void radeon_bo_kunmap(struct radeon_bo *bo)
297{
298	if (bo->kptr == NULL)
299		return;
300	bo->kptr = NULL;
301	radeon_bo_check_tiling(bo, 0, 0);
302	ttm_bo_kunmap(&bo->kmap);
303}
304
305struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
306{
307	if (bo == NULL)
308		return NULL;
309
310	ttm_bo_reference(&bo->tbo);
311	return bo;
312}
313
314void radeon_bo_unref(struct radeon_bo **bo)
315{
316	struct ttm_buffer_object *tbo;
317	struct radeon_device *rdev;
318
319	if ((*bo) == NULL)
320		return;
321	rdev = (*bo)->rdev;
322	tbo = &((*bo)->tbo);
 
323	ttm_bo_unref(&tbo);
 
324	if (tbo == NULL)
325		*bo = NULL;
326}
327
328int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
329			     u64 *gpu_addr)
330{
331	struct ttm_operation_ctx ctx = { false, false };
332	int r, i;
333
334	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
335		return -EPERM;
336
337	if (bo->pin_count) {
338		bo->pin_count++;
339		if (gpu_addr)
340			*gpu_addr = radeon_bo_gpu_offset(bo);
341
342		if (max_offset != 0) {
343			u64 domain_start;
344
345			if (domain == RADEON_GEM_DOMAIN_VRAM)
346				domain_start = bo->rdev->mc.vram_start;
347			else
348				domain_start = bo->rdev->mc.gtt_start;
349			WARN_ON_ONCE(max_offset <
350				     (radeon_bo_gpu_offset(bo) - domain_start));
351		}
352
353		return 0;
354	}
355	if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
356		/* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
357		return -EINVAL;
358	}
359
360	radeon_ttm_placement_from_domain(bo, domain);
361	for (i = 0; i < bo->placement.num_placement; i++) {
362		/* force to pin into visible video ram */
363		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
364		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
365		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
366			bo->placements[i].lpfn =
367				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
368		else
369			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
370
371		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
372	}
373
374	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 
375	if (likely(r == 0)) {
376		bo->pin_count = 1;
377		if (gpu_addr != NULL)
378			*gpu_addr = radeon_bo_gpu_offset(bo);
379		if (domain == RADEON_GEM_DOMAIN_VRAM)
380			bo->rdev->vram_pin_size += radeon_bo_size(bo);
381		else
382			bo->rdev->gart_pin_size += radeon_bo_size(bo);
383	} else {
384		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
385	}
386	return r;
387}
388
389int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
390{
391	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
392}
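A hedged sketch (editorial addition): radeon_bo_pin_restricted() is the general form of pinning, where max_offset caps how far into the domain the buffer may be placed. The helper name example_pin_low_vram() and the 256 MB cap are assumptions for illustration.

/* Illustrative only: pin a BO into the first 256 MB of VRAM. */
static int example_pin_low_vram(struct radeon_bo *bo, u64 *gpu_addr)
{
	int r;

	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin_restricted(bo, RADEON_GEM_DOMAIN_VRAM,
				     256ULL << 20, gpu_addr);
	radeon_bo_unreserve(bo);
	return r;
}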
393
394int radeon_bo_unpin(struct radeon_bo *bo)
395{
396	struct ttm_operation_ctx ctx = { false, false };
397	int r, i;
398
399	if (!bo->pin_count) {
400		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
401		return 0;
402	}
403	bo->pin_count--;
404	if (bo->pin_count)
405		return 0;
406	for (i = 0; i < bo->placement.num_placement; i++) {
407		bo->placements[i].lpfn = 0;
408		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
409	}
410	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
411	if (likely(r == 0)) {
412		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
413			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
414		else
415			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
416	} else {
417		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
418	}
419	return r;
420}
421
422int radeon_bo_evict_vram(struct radeon_device *rdev)
423{
424	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
425	if (0 && (rdev->flags & RADEON_IS_IGP)) {
426		if (rdev->mc.igp_sideport_enabled == false)
427			/* Useless to evict on IGP chips */
428			return 0;
429	}
430	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
431}
432
433void radeon_bo_force_delete(struct radeon_device *rdev)
434{
435	struct radeon_bo *bo, *n;
436
437	if (list_empty(&rdev->gem.objects)) {
438		return;
439	}
440	dev_err(rdev->dev, "Userspace still has active objects !\n");
441	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
 
442		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
443			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
444			*((unsigned long *)&bo->gem_base.refcount));
445		mutex_lock(&bo->rdev->gem.mutex);
446		list_del_init(&bo->list);
447		mutex_unlock(&bo->rdev->gem.mutex);
448		/* this should unref the ttm bo */
449		drm_gem_object_put_unlocked(&bo->gem_base);
 
450	}
451}
452
453int radeon_bo_init(struct radeon_device *rdev)
454{
455	/* reserve PAT memory space to WC for VRAM */
456	arch_io_reserve_memtype_wc(rdev->mc.aper_base,
457				   rdev->mc.aper_size);
458
459	/* Add an MTRR for the VRAM */
460	if (!rdev->fastfb_working) {
461		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
462						      rdev->mc.aper_size);
463	}
464	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
465		rdev->mc.mc_vram_size >> 20,
466		(unsigned long long)rdev->mc.aper_size >> 20);
467	DRM_INFO("RAM width %dbits %cDR\n",
468			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
469	return radeon_ttm_init(rdev);
470}
471
472void radeon_bo_fini(struct radeon_device *rdev)
473{
474	radeon_ttm_fini(rdev);
475	arch_phys_wc_del(rdev->mc.vram_mtrr);
476	arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
477}
478
479/* Returns how many bytes TTM can move per IB.
480 */
481static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
482{
483	u64 real_vram_size = rdev->mc.real_vram_size;
484	u64 vram_usage = atomic64_read(&rdev->vram_usage);
485
486	/* This function is based on the current VRAM usage.
487	 *
488	 * - If all of VRAM is free, allow relocating the number of bytes that
489	 *   is equal to 1/4 of the size of VRAM for this IB.
490	 *
491	 * - If more than one half of VRAM is occupied, only allow relocating
492	 *   1 MB of data for this IB.
493	 *
494	 * - From 0 to one half of used VRAM, the threshold decreases
495	 *   linearly.
496	 *         __________________
497	 * 1/4 of -|\               |
498	 * VRAM    | \              |
499	 *         |  \             |
500	 *         |   \            |
501	 *         |    \           |
502	 *         |     \          |
503	 *         |      \         |
504	 *         |       \________|1 MB
505	 *         |----------------|
506	 *    VRAM 0 %             100 %
507	 *         used            used
508	 *
509	 * Note: It's a threshold, not a limit. The threshold must be crossed
510	 * for buffer relocations to stop, so any buffer of an arbitrary size
511	 * can be moved as long as the threshold isn't crossed before
512	 * the relocation takes place. We don't want to disable buffer
513	 * relocations completely.
514	 *
515	 * The idea is that buffers should be placed in VRAM at creation time
516	 * and TTM should only do a minimum number of relocations during
517	 * command submission. In practice, you need to submit at least
518	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
519	 *
520	 * Also, things can get pretty crazy under memory pressure and actual
521	 * VRAM usage can change a lot, so playing safe even at 50% does
522	 * consistently increase performance.
523	 */
524
525	u64 half_vram = real_vram_size >> 1;
526	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
527	u64 bytes_moved_threshold = half_free_vram >> 1;
528	return max(bytes_moved_threshold, 1024*1024ull);
529}
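A worked example of the formula above (editorial, with assumed numbers): with 2048 MB of real VRAM and 512 MB currently in use, half_vram is 1024 MB, half_free_vram is 512 MB, and the returned threshold is 256 MB per IB; with no VRAM used at all the threshold is 512 MB (one quarter of VRAM), and once usage reaches 1024 MB or more it clamps to the 1 MB floor.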
530
531int radeon_bo_list_validate(struct radeon_device *rdev,
532			    struct ww_acquire_ctx *ticket,
533			    struct list_head *head, int ring)
534{
535	struct ttm_operation_ctx ctx = { true, false };
536	struct radeon_bo_list *lobj;
537	struct list_head duplicates;
 
538	int r;
539	u64 bytes_moved = 0, initial_bytes_moved;
540	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
541
542	INIT_LIST_HEAD(&duplicates);
543	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
544	if (unlikely(r != 0)) {
545		return r;
546	}
547
548	list_for_each_entry(lobj, head, tv.head) {
549		struct radeon_bo *bo = lobj->robj;
550		if (!bo->pin_count) {
551			u32 domain = lobj->preferred_domains;
552			u32 allowed = lobj->allowed_domains;
553			u32 current_domain =
554				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
555
556			/* Check if this buffer will be moved and don't move it
557			 * if we have moved too many buffers for this IB already.
558			 *
559			 * Note that this allows moving at least one buffer of
560			 * any size, because it doesn't take the current "bo"
561			 * into account. We don't want to disallow buffer moves
562			 * completely.
563			 */
564			if ((allowed & current_domain) != 0 &&
565			    (domain & current_domain) == 0 && /* will be moved */
566			    bytes_moved > bytes_moved_threshold) {
567				/* don't move it */
568				domain = current_domain;
569			}
570
571		retry:
572			radeon_ttm_placement_from_domain(bo, domain);
573			if (ring == R600_RING_TYPE_UVD_INDEX)
574				radeon_uvd_force_into_uvd_segment(bo, allowed);
575
576			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
577			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
578			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
579				       initial_bytes_moved;
580
581			if (unlikely(r)) {
582				if (r != -ERESTARTSYS &&
583				    domain != lobj->allowed_domains) {
584					domain = lobj->allowed_domains;
585					goto retry;
586				}
587				ttm_eu_backoff_reservation(ticket, head);
588				return r;
589			}
590		}
591		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
592		lobj->tiling_flags = bo->tiling_flags;
593	}
594
595	list_for_each_entry(lobj, &duplicates, tv.head) {
596		lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
597		lobj->tiling_flags = lobj->robj->tiling_flags;
598	}
599
600	return 0;
601}
602
603int radeon_bo_get_surface_reg(struct radeon_bo *bo)
604{
605	struct radeon_device *rdev = bo->rdev;
606	struct radeon_surface_reg *reg;
607	struct radeon_bo *old_object;
608	int steal;
609	int i;
610
611	lockdep_assert_held(&bo->tbo.resv->lock.base);
612
613	if (!bo->tiling_flags)
614		return 0;
615
616	if (bo->surface_reg >= 0) {
617		reg = &rdev->surface_regs[bo->surface_reg];
618		i = bo->surface_reg;
619		goto out;
620	}
621
622	steal = -1;
623	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
624
625		reg = &rdev->surface_regs[i];
626		if (!reg->bo)
627			break;
628
629		old_object = reg->bo;
630		if (old_object->pin_count == 0)
631			steal = i;
632	}
633
634	/* if we are all out */
635	if (i == RADEON_GEM_MAX_SURFACES) {
636		if (steal == -1)
637			return -ENOMEM;
638		/* find someone with a surface reg and nuke their BO */
639		reg = &rdev->surface_regs[steal];
640		old_object = reg->bo;
641		/* blow away the mapping */
642		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
643		ttm_bo_unmap_virtual(&old_object->tbo);
644		old_object->surface_reg = -1;
645		i = steal;
646	}
647
648	bo->surface_reg = i;
649	reg->bo = bo;
650
651out:
652	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
653			       bo->tbo.mem.start << PAGE_SHIFT,
654			       bo->tbo.num_pages << PAGE_SHIFT);
655	return 0;
656}
657
658static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
659{
660	struct radeon_device *rdev = bo->rdev;
661	struct radeon_surface_reg *reg;
662
663	if (bo->surface_reg == -1)
664		return;
665
666	reg = &rdev->surface_regs[bo->surface_reg];
667	radeon_clear_surface_reg(rdev, bo->surface_reg);
668
669	reg->bo = NULL;
670	bo->surface_reg = -1;
671}
672
673int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
674				uint32_t tiling_flags, uint32_t pitch)
675{
676	struct radeon_device *rdev = bo->rdev;
677	int r;
678
679	if (rdev->family >= CHIP_CEDAR) {
680		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;
681
682		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
683		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
684		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
685		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
686		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
687		switch (bankw) {
688		case 0:
689		case 1:
690		case 2:
691		case 4:
692		case 8:
693			break;
694		default:
695			return -EINVAL;
696		}
697		switch (bankh) {
698		case 0:
699		case 1:
700		case 2:
701		case 4:
702		case 8:
703			break;
704		default:
705			return -EINVAL;
706		}
707		switch (mtaspect) {
708		case 0:
709		case 1:
710		case 2:
711		case 4:
712		case 8:
713			break;
714		default:
715			return -EINVAL;
716		}
717		if (tilesplit > 6) {
718			return -EINVAL;
719		}
720		if (stilesplit > 6) {
721			return -EINVAL;
722		}
723	}
724	r = radeon_bo_reserve(bo, false);
725	if (unlikely(r != 0))
726		return r;
727	bo->tiling_flags = tiling_flags;
728	bo->pitch = pitch;
729	radeon_bo_unreserve(bo);
730	return 0;
731}
732
733void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
734				uint32_t *tiling_flags,
735				uint32_t *pitch)
736{
737	lockdep_assert_held(&bo->tbo.resv->lock.base);
738
739	if (tiling_flags)
740		*tiling_flags = bo->tiling_flags;
741	if (pitch)
742		*pitch = bo->pitch;
743}
744
745int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
746				bool force_drop)
747{
748	if (!force_drop)
749		lockdep_assert_held(&bo->tbo.resv->lock.base);
750
751	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
752		return 0;
753
754	if (force_drop) {
755		radeon_bo_clear_surface_reg(bo);
756		return 0;
757	}
758
759	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
760		if (!has_moved)
761			return 0;
762
763		if (bo->surface_reg >= 0)
764			radeon_bo_clear_surface_reg(bo);
765		return 0;
766	}
767
768	if ((bo->surface_reg >= 0) && !has_moved)
769		return 0;
770
771	return radeon_bo_get_surface_reg(bo);
772}
773
774void radeon_bo_move_notify(struct ttm_buffer_object *bo,
775			   bool evict,
776			   struct ttm_mem_reg *new_mem)
777{
778	struct radeon_bo *rbo;
779
780	if (!radeon_ttm_bo_is_radeon_bo(bo))
781		return;
782
783	rbo = container_of(bo, struct radeon_bo, tbo);
784	radeon_bo_check_tiling(rbo, 0, 1);
785	radeon_vm_bo_invalidate(rbo->rdev, rbo);
786
787	/* update statistics */
788	if (!new_mem)
789		return;
790
791	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
792	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
793}
794
795int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
796{
797	struct ttm_operation_ctx ctx = { false, false };
798	struct radeon_device *rdev;
799	struct radeon_bo *rbo;
800	unsigned long offset, size, lpfn;
801	int i, r;
802
803	if (!radeon_ttm_bo_is_radeon_bo(bo))
804		return 0;
805	rbo = container_of(bo, struct radeon_bo, tbo);
806	radeon_bo_check_tiling(rbo, 0, 0);
807	rdev = rbo->rdev;
808	if (bo->mem.mem_type != TTM_PL_VRAM)
809		return 0;
810
811	size = bo->mem.num_pages << PAGE_SHIFT;
812	offset = bo->mem.start << PAGE_SHIFT;
813	if ((offset + size) <= rdev->mc.visible_vram_size)
814		return 0;
815
816	/* Can't move a pinned BO to visible VRAM */
817	if (rbo->pin_count > 0)
818		return -EINVAL;
819
820	/* hurrah the memory is not visible ! */
821	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
822	lpfn =	rdev->mc.visible_vram_size >> PAGE_SHIFT;
823	for (i = 0; i < rbo->placement.num_placement; i++) {
824		/* Force into visible VRAM */
825		if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
826		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
827			rbo->placements[i].lpfn = lpfn;
828	}
829	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
830	if (unlikely(r == -ENOMEM)) {
831		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
832		return ttm_bo_validate(bo, &rbo->placement, &ctx);
833	} else if (unlikely(r != 0)) {
834		return r;
835	}
836
837	offset = bo->mem.start << PAGE_SHIFT;
838	/* this should never happen */
839	if ((offset + size) > rdev->mc.visible_vram_size)
840		return -EINVAL;
841
842	return 0;
843}
844
845int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
846{
847	int r;
848
849	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
850	if (unlikely(r != 0))
851		return r;
852	if (mem_type)
853		*mem_type = bo->tbo.mem.mem_type;
854
855	r = ttm_bo_wait(&bo->tbo, true, no_wait);
856	ttm_bo_unreserve(&bo->tbo);
857	return r;
858}
859
860/**
861 * radeon_bo_fence - add fence to buffer object
862 *
863 * @bo: buffer object in question
864 * @fence: fence to add
865 * @shared: true if fence should be added shared
866 *
867 */
868void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
869		     bool shared)
870{
871	struct reservation_object *resv = bo->tbo.resv;
872
873	if (shared)
874		reservation_object_add_shared_fence(resv, &fence->base);
875	else
876		reservation_object_add_excl_fence(resv, &fence->base);
877}