/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_reg.h"

/*
 * Common GART table functions.
 */
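/*
 * Allocate the GART page table from coherent system memory.  On x86 IGP
 * chips (RS400/RS480/RS690/RS740) the mapping is switched to uncached
 * before the table is cleared.
 */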
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
	void *ptr;

	ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
				   &rdev->gart.table_addr);
	if (ptr == NULL) {
		return -ENOMEM;
	}
#ifdef CONFIG_X86
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_uc((unsigned long)ptr,
			      rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	rdev->gart.ptr = ptr;
	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
	return 0;
}

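/*
 * Free the system-memory GART table, restoring a write-back mapping on
 * the affected x86 IGP chips first.
 */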
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
	if (rdev->gart.ptr == NULL) {
		return;
	}
#ifdef CONFIG_X86
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_wb((unsigned long)rdev->gart.ptr,
			      rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	pci_free_consistent(rdev->pdev, rdev->gart.table_size,
			    (void *)rdev->gart.ptr,
			    rdev->gart.table_addr);
	rdev->gart.ptr = NULL;
	rdev->gart.table_addr = 0;
}

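/*
 * Create, if not already present, the buffer object backing the GART
 * table in VRAM.  The table is pinned separately by
 * radeon_gart_table_vram_pin().
 */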
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		r = radeon_bo_create(rdev, rdev->gart.table_size,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     NULL, &rdev->gart.robj);
		if (r) {
			return r;
		}
	}
	return 0;
}

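/*
 * Pin the VRAM GART table object, kmap it for CPU access and record its
 * GPU address in gart.table_addr.
 */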
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
	uint64_t gpu_addr;
	int r;

	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->gart.robj,
				RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->gart.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
	if (r)
		radeon_bo_unpin(rdev->gart.robj);
	radeon_bo_unreserve(rdev->gart.robj);
	rdev->gart.table_addr = gpu_addr;
	return r;
}

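/*
 * Unmap and unpin the VRAM GART table object; the object itself is kept
 * for the next pin.
 */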
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->gart.robj);
		radeon_bo_unpin(rdev->gart.robj);
		radeon_bo_unreserve(rdev->gart.robj);
		rdev->gart.ptr = NULL;
	}
}

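/*
 * Release the VRAM GART table object completely (unpinning it first).
 */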
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
	if (rdev->gart.robj == NULL) {
		return;
	}
	radeon_gart_table_vram_unpin(rdev);
	radeon_bo_unref(&rdev->gart.robj);
}

/*
 * Common GART functions.
 */
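/*
 * Unbind 'pages' CPU pages starting at byte 'offset' into the GART
 * aperture: the affected GART entries are pointed back at the dummy page
 * and the GART TLB is flushed.
 */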
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages)
{
	unsigned t;
	unsigned p;
	int i, j;
	u64 page_base;

	if (!rdev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART !\n");
		return;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
		if (rdev->gart.pages[p]) {
			rdev->gart.pages[p] = NULL;
			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				if (rdev->gart.ptr) {
					radeon_gart_set_page(rdev, t, page_base);
				}
				page_base += RADEON_GPU_PAGE_SIZE;
			}
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
}

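/*
 * Bind 'pages' CPU pages and their DMA addresses at byte 'offset' into
 * the GART aperture, writing one GART entry per GPU page and flushing
 * the GART TLB afterwards.
 */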
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
{
	unsigned t;
	unsigned p;
	uint64_t page_base;
	int i, j;

	if (!rdev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART !\n");
		return -EINVAL;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

	for (i = 0; i < pages; i++, p++) {
		rdev->gart.pages_addr[p] = dma_addr[i];
		rdev->gart.pages[p] = pagelist[i];
		if (rdev->gart.ptr) {
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				radeon_gart_set_page(rdev, t, page_base);
				page_base += RADEON_GPU_PAGE_SIZE;
			}
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
	return 0;
}

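/*
 * Rewrite the whole GART table from the cached pages_addr[] array,
 * presumably after the table contents were lost (e.g. across suspend or
 * a GPU reset).
 */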
void radeon_gart_restore(struct radeon_device *rdev)
{
	int i, j, t;
	u64 page_base;

	if (!rdev->gart.ptr) {
		return;
	}
	for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
		page_base = rdev->gart.pages_addr[i];
		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
			radeon_gart_set_page(rdev, t, page_base);
			page_base += RADEON_GPU_PAGE_SIZE;
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
}

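/*
 * Set up the software side of the GART: allocate the dummy page and the
 * pages/pages_addr bookkeeping arrays sized from mc.gtt_size, with every
 * entry initially pointing at the dummy page.
 */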
int radeon_gart_init(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.pages) {
		return 0;
	}
	/* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
	if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	/* Compute table size */
	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
	/* Allocate pages table */
	rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
				   GFP_KERNEL);
	if (rdev->gart.pages == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
					rdev->gart.num_cpu_pages, GFP_KERNEL);
	if (rdev->gart.pages_addr == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	/* set GART entry to point to the dummy page by default */
	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
	}
	return 0;
}

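/*
 * Tear down the software GART state: unbind everything, free the
 * bookkeeping arrays and release the dummy page.
 */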
void radeon_gart_fini(struct radeon_device *rdev)
{
	if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
		/* unbind pages */
		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
	}
	rdev->gart.ready = false;
	kfree(rdev->gart.pages);
	kfree(rdev->gart.pages_addr);
	rdev->gart.pages = NULL;
	rdev->gart.pages_addr = NULL;

	radeon_dummy_page_fini(rdev);
}

/*
 * vm helpers
 *
 * TODO bind a default page at vm initialization for default address
 */
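/*
 * Initialize the per-device VM manager: reserve a VRAM suballocator big
 * enough for two full sets of page tables and call the ASIC-specific
 * init hook.
 */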
int radeon_vm_manager_init(struct radeon_device *rdev)
{
	int r;

	rdev->vm_manager.enabled = false;

	/* mark first vm as always in use, it's the system one */
	/* allocate enough for 2 full VM pts */
	r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
				      rdev->vm_manager.max_pfn * 8 * 2,
				      RADEON_GEM_DOMAIN_VRAM);
	if (r) {
		dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
			(rdev->vm_manager.max_pfn * 8) >> 10);
		return r;
	}

	r = rdev->vm_manager.funcs->init(rdev);
	if (r == 0)
		rdev->vm_manager.enabled = true;

	return r;
}

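/*
 * Tear down a VM's hardware binding: wait for its last fence, call the
 * backend unbind hook, release its page table allocation and VM id, and
 * mark all of its bo_va mappings invalid.
 */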
/* cs mutex must be locked */
static void radeon_vm_unbind_locked(struct radeon_device *rdev,
				    struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;

	if (vm->id == -1) {
		return;
	}

	/* wait for vm use to end */
	if (vm->fence) {
		radeon_fence_wait(vm->fence, false);
		radeon_fence_unref(&vm->fence);
	}

	/* hw unbind */
	rdev->vm_manager.funcs->unbind(rdev, vm);
	rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
	list_del_init(&vm->list);
	vm->id = -1;
	radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
	vm->pt = NULL;

	list_for_each_entry(bo_va, &vm->va, vm_list) {
		bo_va->valid = false;
	}
}

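/*
 * Shut the VM manager down: suspend it (unbinding every active VM), then
 * release the ASIC state and the suballocator.
 */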
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
	if (rdev->vm_manager.sa_manager.bo == NULL)
		return;
	radeon_vm_manager_suspend(rdev);
	rdev->vm_manager.funcs->fini(rdev);
	radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
	rdev->vm_manager.enabled = false;
}

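/*
 * (Re)start the VM manager's suballocator, e.g. during startup/resume.
 */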
int radeon_vm_manager_start(struct radeon_device *rdev)
{
	if (rdev->vm_manager.sa_manager.bo == NULL) {
		return -EINVAL;
	}
	return radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
}

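/*
 * Suspend the VM manager: unbind every VM on the LRU list under the cs
 * mutex, tear down the ASIC state and suspend the suballocator.
 */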
int radeon_vm_manager_suspend(struct radeon_device *rdev)
{
	struct radeon_vm *vm, *tmp;

	radeon_mutex_lock(&rdev->cs_mutex);
	/* unbind all active vm */
	list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
		radeon_vm_unbind_locked(rdev, vm);
	}
	rdev->vm_manager.funcs->fini(rdev);
	radeon_mutex_unlock(&rdev->cs_mutex);
	return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
}

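/*
 * Wrapper around radeon_vm_unbind_locked() that takes the per-VM mutex.
 */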
/* cs mutex must be locked */
void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
{
	mutex_lock(&vm->mutex);
	radeon_vm_unbind_locked(rdev, vm);
	mutex_unlock(&vm->mutex);
}

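/*
 * Bind a VM to the hardware: allocate page table space from the
 * suballocator and a free VM id, evicting the least recently used VM
 * when either runs out, then map the ring temporary BO through
 * radeon_vm_bo_update_pte().
 */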
/* cs mutex and vm mutex must be locked */
int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_vm *vm_evict;
	unsigned i;
	int id = -1, r;

	if (vm == NULL) {
		return -EINVAL;
	}

	if (vm->id != -1) {
		/* update lru */
		list_del_init(&vm->list);
		list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
		return 0;
	}

retry:
	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
			     RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
			     RADEON_GPU_PAGE_SIZE, false);
	if (r) {
		if (list_empty(&rdev->vm_manager.lru_vm)) {
			return r;
		}
		vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
		radeon_vm_unbind(rdev, vm_evict);
		goto retry;
	}
	vm->pt = radeon_sa_bo_cpu_addr(vm->sa_bo);
	vm->pt_gpu_addr = radeon_sa_bo_gpu_addr(vm->sa_bo);
	memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));

retry_id:
	/* search for free vm */
	for (i = 0; i < rdev->vm_manager.nvm; i++) {
		if (!(rdev->vm_manager.use_bitmap & (1 << i))) {
			id = i;
			break;
		}
	}
	/* evict vm if necessary */
	if (id == -1) {
		vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
		radeon_vm_unbind(rdev, vm_evict);
		goto retry_id;
	}

	/* do hw bind */
	r = rdev->vm_manager.funcs->bind(rdev, vm, id);
	if (r) {
		radeon_sa_bo_free(rdev, &vm->sa_bo, NULL);
		return r;
	}
	rdev->vm_manager.use_bitmap |= 1 << id;
	vm->id = id;
	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
	return radeon_vm_bo_update_pte(rdev, vm, rdev->ring_tmp_bo.bo,
				       &rdev->ring_tmp_bo.bo->tbo.mem);
}

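/*
 * Add a buffer object to a VM at the given virtual offset: allocate a
 * bo_va, grow the VA space if needed (32M at a time) and insert the
 * mapping into the VM's sorted, non-overlapping va list.
 */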
/* object must be reserved */
int radeon_vm_bo_add(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_bo *bo,
		     uint64_t offset,
		     uint32_t flags)
{
	struct radeon_bo_va *bo_va, *tmp;
	struct list_head *head;
	uint64_t size = radeon_bo_size(bo), last_offset = 0;
	unsigned last_pfn;

	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return -ENOMEM;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->soffset = offset;
	bo_va->eoffset = offset + size;
	bo_va->flags = flags;
	bo_va->valid = false;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->vm_list);
	/* make sure the object fits at this offset */
	if (bo_va->soffset >= bo_va->eoffset) {
		kfree(bo_va);
		return -EINVAL;
	}

	last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE;
	if (last_pfn > rdev->vm_manager.max_pfn) {
		kfree(bo_va);
		dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
			last_pfn, rdev->vm_manager.max_pfn);
		return -EINVAL;
	}

	mutex_lock(&vm->mutex);
	if (last_pfn > vm->last_pfn) {
		/* release mutex and lock in right order */
		mutex_unlock(&vm->mutex);
		radeon_mutex_lock(&rdev->cs_mutex);
		mutex_lock(&vm->mutex);
		/* and check again */
		if (last_pfn > vm->last_pfn) {
			/* grow va space 32M by 32M */
			unsigned align = ((32 << 20) >> 12) - 1;
			radeon_vm_unbind_locked(rdev, vm);
			vm->last_pfn = (last_pfn + align) & ~align;
		}
		radeon_mutex_unlock(&rdev->cs_mutex);
	}
	head = &vm->va;
	last_offset = 0;
	list_for_each_entry(tmp, &vm->va, vm_list) {
		if (bo_va->soffset >= last_offset && bo_va->eoffset < tmp->soffset) {
			/* bo can be added before this one */
			break;
		}
		if (bo_va->soffset >= tmp->soffset && bo_va->soffset < tmp->eoffset) {
			/* bo and tmp overlap, invalid offset */
			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
				bo, (unsigned)bo_va->soffset, tmp->bo,
				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
			kfree(bo_va);
			mutex_unlock(&vm->mutex);
			return -EINVAL;
		}
		last_offset = tmp->eoffset;
		head = &tmp->vm_list;
	}
	list_add(&bo_va->vm_list, head);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);
	return 0;
}

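/*
 * Translate GPU page 'pfn' of a TTM placement into the address to write
 * into a page table entry: VRAM placements use the MC address plus
 * vram_base_offset, GTT placements resolve to the backing page's DMA
 * address.
 */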
static u64 radeon_vm_get_addr(struct radeon_device *rdev,
			      struct ttm_mem_reg *mem,
			      unsigned pfn)
{
	u64 addr = 0;

	switch (mem->mem_type) {
	case TTM_PL_VRAM:
		addr = (mem->start << PAGE_SHIFT);
		addr += pfn * RADEON_GPU_PAGE_SIZE;
		addr += rdev->vm_manager.vram_base_offset;
		break;
	case TTM_PL_TT:
		/* offset inside page table */
		addr = mem->start << PAGE_SHIFT;
		addr += pfn * RADEON_GPU_PAGE_SIZE;
		addr = addr >> PAGE_SHIFT;
		/* page table offset */
		addr = rdev->gart.pages_addr[addr];
		/* in case cpu page size != gpu page size */
		addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK);
		break;
	default:
		break;
	}
	return addr;
}

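/*
 * (Re)write the page table entries covering a BO's mapping in the given
 * VM.  A NULL 'mem' invalidates the mapping: entries are then written
 * with address 0 and the VALID/SYSTEM flags cleared.
 */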
/* object must be reserved & cs mutex and vm mutex must be locked */
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
			    struct radeon_vm *vm,
			    struct radeon_bo *bo,
			    struct ttm_mem_reg *mem)
{
	struct radeon_bo_va *bo_va;
	unsigned ngpu_pages, i;
	uint64_t addr = 0, pfn;
	uint32_t flags;

	/* nothing to do if vm isn't bound */
	if (vm->id == -1)
		return 0;

	bo_va = radeon_bo_va(bo, vm);
	if (bo_va == NULL) {
		dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
		return -EINVAL;
	}

	if (bo_va->valid && mem)
		return 0;

	ngpu_pages = radeon_bo_ngpu_pages(bo);
	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
	if (mem) {
		if (mem->mem_type != TTM_PL_SYSTEM) {
			bo_va->flags |= RADEON_VM_PAGE_VALID;
			bo_va->valid = true;
		}
		if (mem->mem_type == TTM_PL_TT) {
			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
		}
	}
	pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
	flags = rdev->vm_manager.funcs->page_flags(rdev, bo_va->vm, bo_va->flags);
	for (i = 0, addr = 0; i < ngpu_pages; i++) {
		if (mem && bo_va->valid) {
			addr = radeon_vm_get_addr(rdev, mem, i);
		}
		rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags);
	}
	rdev->vm_manager.funcs->tlb_flush(rdev, bo_va->vm);
	return 0;
}

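/*
 * Remove a BO from a VM: wait for the last use of the mapping (resetting
 * the GPU on -EDEADLK), clear its page table entries and free the bo_va.
 */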
/* object must be reserved */
int radeon_vm_bo_rmv(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;
	int r;

	bo_va = radeon_bo_va(bo, vm);
	if (bo_va == NULL)
		return 0;

	/* wait for va use to end */
	while (bo_va->fence) {
		r = radeon_fence_wait(bo_va->fence, false);
		if (r) {
			DRM_ERROR("error while waiting for fence: %d\n", r);
		}
		if (r == -EDEADLK) {
			r = radeon_gpu_reset(rdev);
			if (!r)
				continue;
		}
		break;
	}
	radeon_fence_unref(&bo_va->fence);

	radeon_mutex_lock(&rdev->cs_mutex);
	mutex_lock(&vm->mutex);
	radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
	radeon_mutex_unlock(&rdev->cs_mutex);
	list_del(&bo_va->vm_list);
	mutex_unlock(&vm->mutex);
	list_del(&bo_va->bo_list);

	kfree(bo_va);
	return 0;
}

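/*
 * Mark every VM mapping of a BO invalid, presumably after the BO has
 * been moved, so the page tables are rewritten on next use.
 */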
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	BUG_ON(!atomic_read(&bo->tbo.reserved));
	list_for_each_entry(bo_va, &bo->va, bo_list) {
		bo_va->valid = false;
	}
}

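/*
 * Initialize a per-client VM structure and map the IB pool buffer
 * read-only at virtual address 0.
 */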
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
	int r;

	vm->id = -1;
	vm->fence = NULL;
	mutex_init(&vm->mutex);
	INIT_LIST_HEAD(&vm->list);
	INIT_LIST_HEAD(&vm->va);
	/* SI requires equal sized PTs for all VMs, so always set
	 * last_pfn to max_pfn.  Cayman allows variable sized PTs,
	 * so we can grow them as needed.  Once we switch to two
	 * level PTs we can unify this again.
	 */
	if (rdev->family >= CHIP_TAHITI)
		vm->last_pfn = rdev->vm_manager.max_pfn;
	else
		vm->last_pfn = 0;
	/* map the ib pool buffer at 0 in virtual address space, set
	 * read only
	 */
	r = radeon_vm_bo_add(rdev, vm, rdev->ring_tmp_bo.bo, 0,
			     RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
	return r;
}

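/*
 * Destroy a VM: unbind it, then drop the ring temporary BO mapping and
 * any remaining bo_va entries left behind.
 */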
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int r;

	radeon_mutex_lock(&rdev->cs_mutex);
	mutex_lock(&vm->mutex);
	radeon_vm_unbind_locked(rdev, vm);
	radeon_mutex_unlock(&rdev->cs_mutex);

	/* remove all bo; at this point none are busy any more because
	 * unbind waited for the last vm fence to signal
	 */
	r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
	if (!r) {
		bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm);
		list_del_init(&bo_va->bo_list);
		list_del_init(&bo_va->vm_list);
		radeon_fence_unref(&bo_va->fence);
		radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
		kfree(bo_va);
	}
	if (!list_empty(&vm->va)) {
		dev_err(rdev->dev, "still active bo inside vm\n");
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
		list_del_init(&bo_va->vm_list);
		r = radeon_bo_reserve(bo_va->bo, false);
		if (!r) {
			list_del_init(&bo_va->bo_list);
			radeon_fence_unref(&bo_va->fence);
			radeon_bo_unreserve(bo_va->bo);
			kfree(bo_va);
		}
	}
	mutex_unlock(&vm->mutex);
}