v4.6 (drivers/gpu/drm/ttm/ttm_bo_util.c)
  1/**************************************************************************
  2 *
  3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
  4 * All Rights Reserved.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27/*
 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 29 */
 30
 31#include <drm/ttm/ttm_bo_driver.h>
 32#include <drm/ttm/ttm_placement.h>
 33#include <drm/drm_vma_manager.h>
 34#include <linux/io.h>
 35#include <linux/highmem.h>
 36#include <linux/wait.h>
 37#include <linux/slab.h>
 38#include <linux/vmalloc.h>
 39#include <linux/module.h>
 40#include <linux/reservation.h>
 41
 42void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 43{
 44	ttm_bo_mem_put(bo, &bo->mem);
 45}
 46
 47int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 48		    bool evict,
 49		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 50{
 51	struct ttm_tt *ttm = bo->ttm;
 52	struct ttm_mem_reg *old_mem = &bo->mem;
 53	int ret;
 54
 55	if (old_mem->mem_type != TTM_PL_SYSTEM) {
 56		ttm_tt_unbind(ttm);
 57		ttm_bo_free_old_node(bo);
 58		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
 59				TTM_PL_MASK_MEM);
 60		old_mem->mem_type = TTM_PL_SYSTEM;
 61	}
 62
 63	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
 64	if (unlikely(ret != 0))
 65		return ret;
 66
 67	if (new_mem->mem_type != TTM_PL_SYSTEM) {
 68		ret = ttm_tt_bind(ttm, new_mem);
 69		if (unlikely(ret != 0))
 70			return ret;
 71	}
 72
 73	*old_mem = *new_mem;
 74	new_mem->mm_node = NULL;
 75
 76	return 0;
 77}
 78EXPORT_SYMBOL(ttm_bo_move_ttm);
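
/*
 * Editor's sketch, not part of ttm_bo_util.c: a driver's move handler can
 * dispatch to ttm_bo_move_ttm() for moves that only require the ttm_tt to
 * be unbound and rebound (typically SYSTEM <-> TT), and fall back to
 * ttm_bo_move_memcpy() otherwise.  The callback prototype below and the
 * TTM_PL_TT placement are assumptions modelled on drivers of this era;
 * check struct ttm_bo_driver for the exact signature in your tree.
 */
static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
			   bool interruptible, bool no_wait_gpu,
			   struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	/* System <-> GART-style moves need only a ttm_tt rebind. */
	if ((old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT) ||
	    (old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM))
		return ttm_bo_move_ttm(bo, evict, no_wait_gpu, new_mem);

	/*
	 * Anything else: a CPU copy here, or a GPU blit followed by
	 * ttm_bo_move_accel_cleanup() (see below).
	 */
	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}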
 79
 80int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
 81{
 82	if (likely(man->io_reserve_fastpath))
 83		return 0;
 84
 85	if (interruptible)
 86		return mutex_lock_interruptible(&man->io_reserve_mutex);
 87
 88	mutex_lock(&man->io_reserve_mutex);
 89	return 0;
 90}
 91EXPORT_SYMBOL(ttm_mem_io_lock);
 92
 93void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
 94{
 95	if (likely(man->io_reserve_fastpath))
 96		return;
 97
 98	mutex_unlock(&man->io_reserve_mutex);
 99}
100EXPORT_SYMBOL(ttm_mem_io_unlock);
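
/*
 * Editor's note on the io_reserve scheme used by the helpers above and
 * below: when a driver sets man->io_reserve_fastpath, its io_mem_reserve()
 * and io_mem_free() hooks are cheap enough to call directly, so no locking
 * or reference counting is done.  Otherwise io_reserve_mutex serializes
 * reservations, bus.io_reserved_count tracks nesting, and when
 * io_mem_reserve() returns -EAGAIN (for example because aperture space is
 * exhausted) ttm_mem_io_evict() unmaps the buffer at the head of
 * man->io_reserve_lru so that its reservation can be released and the
 * reservation retried.
 */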
101
102static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
103{
104	struct ttm_buffer_object *bo;
105
106	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
107		return -EAGAIN;
108
109	bo = list_first_entry(&man->io_reserve_lru,
110			      struct ttm_buffer_object,
111			      io_reserve_lru);
112	list_del_init(&bo->io_reserve_lru);
113	ttm_bo_unmap_virtual_locked(bo);
114
115	return 0;
116}
117
118
119int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
120		       struct ttm_mem_reg *mem)
121{
122	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
123	int ret = 0;
124
125	if (!bdev->driver->io_mem_reserve)
126		return 0;
127	if (likely(man->io_reserve_fastpath))
128		return bdev->driver->io_mem_reserve(bdev, mem);
129
130	if (bdev->driver->io_mem_reserve &&
131	    mem->bus.io_reserved_count++ == 0) {
132retry:
133		ret = bdev->driver->io_mem_reserve(bdev, mem);
134		if (ret == -EAGAIN) {
135			ret = ttm_mem_io_evict(man);
136			if (ret == 0)
137				goto retry;
138		}
139	}
140	return ret;
141}
142EXPORT_SYMBOL(ttm_mem_io_reserve);
143
144void ttm_mem_io_free(struct ttm_bo_device *bdev,
145		     struct ttm_mem_reg *mem)
146{
147	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
148
149	if (likely(man->io_reserve_fastpath))
150		return;
151
152	if (bdev->driver->io_mem_reserve &&
153	    --mem->bus.io_reserved_count == 0 &&
154	    bdev->driver->io_mem_free)
155		bdev->driver->io_mem_free(bdev, mem);
156
157}
158EXPORT_SYMBOL(ttm_mem_io_free);
159
160int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
161{
162	struct ttm_mem_reg *mem = &bo->mem;
163	int ret;
164
165	if (!mem->bus.io_reserved_vm) {
166		struct ttm_mem_type_manager *man =
167			&bo->bdev->man[mem->mem_type];
168
169		ret = ttm_mem_io_reserve(bo->bdev, mem);
170		if (unlikely(ret != 0))
171			return ret;
172		mem->bus.io_reserved_vm = true;
173		if (man->use_io_reserve_lru)
174			list_add_tail(&bo->io_reserve_lru,
175				      &man->io_reserve_lru);
176	}
177	return 0;
178}
179
180void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
181{
182	struct ttm_mem_reg *mem = &bo->mem;
183
184	if (mem->bus.io_reserved_vm) {
185		mem->bus.io_reserved_vm = false;
186		list_del_init(&bo->io_reserve_lru);
187		ttm_mem_io_free(bo->bdev, mem);
188	}
189}
190
191static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
192			void **virtual)
193{
194	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
195	int ret;
196	void *addr;
197
198	*virtual = NULL;
199	(void) ttm_mem_io_lock(man, false);
200	ret = ttm_mem_io_reserve(bdev, mem);
201	ttm_mem_io_unlock(man);
202	if (ret || !mem->bus.is_iomem)
203		return ret;
204
205	if (mem->bus.addr) {
206		addr = mem->bus.addr;
207	} else {
208		if (mem->placement & TTM_PL_FLAG_WC)
209			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
210		else
211			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
212		if (!addr) {
213			(void) ttm_mem_io_lock(man, false);
214			ttm_mem_io_free(bdev, mem);
215			ttm_mem_io_unlock(man);
216			return -ENOMEM;
217		}
218	}
219	*virtual = addr;
220	return 0;
221}
222
223static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
224			 void *virtual)
225{
226	struct ttm_mem_type_manager *man;
227
228	man = &bdev->man[mem->mem_type];
229
230	if (virtual && mem->bus.addr == NULL)
231		iounmap(virtual);
232	(void) ttm_mem_io_lock(man, false);
233	ttm_mem_io_free(bdev, mem);
234	ttm_mem_io_unlock(man);
235}
236
237static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
238{
239	uint32_t *dstP =
240	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
241	uint32_t *srcP =
242	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
243
244	int i;
245	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
246		iowrite32(ioread32(srcP++), dstP++);
247	return 0;
248}
249
250static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
251				unsigned long page,
252				pgprot_t prot)
253{
254	struct page *d = ttm->pages[page];
255	void *dst;
256
257	if (!d)
258		return -ENOMEM;
259
260	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
261
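	/*
	 * x86 can map a page with an arbitrary protection via
	 * kmap_atomic_prot().  Other architectures fall back to vmap()
	 * when a non-default protection is needed, and to plain kmap()
	 * otherwise.
	 */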
262#ifdef CONFIG_X86
263	dst = kmap_atomic_prot(d, prot);
264#else
265	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
266		dst = vmap(&d, 1, 0, prot);
267	else
268		dst = kmap(d);
269#endif
270	if (!dst)
271		return -ENOMEM;
272
273	memcpy_fromio(dst, src, PAGE_SIZE);
274
275#ifdef CONFIG_X86
276	kunmap_atomic(dst);
277#else
278	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
279		vunmap(dst);
280	else
281		kunmap(d);
282#endif
283
284	return 0;
285}
286
287static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
288				unsigned long page,
289				pgprot_t prot)
290{
291	struct page *s = ttm->pages[page];
292	void *src;
293
294	if (!s)
295		return -ENOMEM;
296
297	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
298#ifdef CONFIG_X86
299	src = kmap_atomic_prot(s, prot);
300#else
301	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
302		src = vmap(&s, 1, 0, prot);
303	else
304		src = kmap(s);
305#endif
306	if (!src)
307		return -ENOMEM;
308
309	memcpy_toio(dst, src, PAGE_SIZE);
310
311#ifdef CONFIG_X86
312	kunmap_atomic(src);
313#else
314	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
315		vunmap(src);
316	else
317		kunmap(s);
318#endif
319
320	return 0;
321}
322
323int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
324		       bool evict, bool no_wait_gpu,
325		       struct ttm_mem_reg *new_mem)
326{
327	struct ttm_bo_device *bdev = bo->bdev;
328	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
329	struct ttm_tt *ttm = bo->ttm;
330	struct ttm_mem_reg *old_mem = &bo->mem;
331	struct ttm_mem_reg old_copy = *old_mem;
332	void *old_iomap;
333	void *new_iomap;
334	int ret;
335	unsigned long i;
336	unsigned long page;
337	unsigned long add = 0;
338	int dir;
339
340	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
341	if (ret)
342		return ret;
343	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
344	if (ret)
345		goto out;
346
347	/*
348	 * Single TTM move. NOP.
349	 */
350	if (old_iomap == NULL && new_iomap == NULL)
351		goto out2;
352
353	/*
354	 * Don't move nonexistent data. Clear destination instead.
355	 */
356	if (old_iomap == NULL &&
357	    (ttm == NULL || (ttm->state == tt_unpopulated &&
358			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
359		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
360		goto out2;
361	}
362
363	/*
364	 * TTM might be null for moves within the same region.
365	 */
366	if (ttm && ttm->state == tt_unpopulated) {
367		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
368		if (ret)
369			goto out1;
370	}
371
372	add = 0;
373	dir = 1;
374
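	/*
	 * If source and destination are in the same memory type and may
	 * overlap, copy the pages in reverse order (memmove-style) so that
	 * source pages are not clobbered before they have been copied:
	 * for a 4-page move this gives add = 3, dir = -1 and the copy
	 * order 3, 2, 1, 0.
	 */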
375	if ((old_mem->mem_type == new_mem->mem_type) &&
376	    (new_mem->start < old_mem->start + old_mem->size)) {
377		dir = -1;
378		add = new_mem->num_pages - 1;
379	}
380
381	for (i = 0; i < new_mem->num_pages; ++i) {
382		page = i * dir + add;
383		if (old_iomap == NULL) {
384			pgprot_t prot = ttm_io_prot(old_mem->placement,
385						    PAGE_KERNEL);
386			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
387						   prot);
388		} else if (new_iomap == NULL) {
389			pgprot_t prot = ttm_io_prot(new_mem->placement,
390						    PAGE_KERNEL);
391			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
392						   prot);
393		} else
394			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
395		if (ret)
396			goto out1;
397	}
398	mb();
399out2:
400	old_copy = *old_mem;
401	*old_mem = *new_mem;
402	new_mem->mm_node = NULL;
403
404	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
405		ttm_tt_unbind(ttm);
406		ttm_tt_destroy(ttm);
407		bo->ttm = NULL;
408	}
409
410out1:
411	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
412out:
413	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
414
415	/*
416	 * On error, keep the mm node!
417	 */
418	if (!ret)
419		ttm_bo_mem_put(bo, &old_copy);
420	return ret;
421}
422EXPORT_SYMBOL(ttm_bo_move_memcpy);
423
424static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
425{
426	kfree(bo);
427}
428
429/**
430 * ttm_buffer_object_transfer
431 *
432 * @bo: A pointer to a struct ttm_buffer_object.
433 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
434 * holding the data of @bo with the old placement.
435 *
436 * This is a utility function that may be called after an accelerated move
437 * has been scheduled. A new buffer object is created as a placeholder for
438 * the old data while it's being copied. When that buffer object is idle,
439 * it can be destroyed, releasing the space of the old placement.
440 * Returns:
441 * !0: Failure.
442 */
443
444static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
445				      struct ttm_buffer_object **new_obj)
446{
447	struct ttm_buffer_object *fbo;
448	int ret;
449
450	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
451	if (!fbo)
452		return -ENOMEM;
453
454	*fbo = *bo;
455
456	/**
457	 * Fix up members that we shouldn't copy directly:
458	 * TODO: Explicit member copy would probably be better here.
459	 */
460
461	INIT_LIST_HEAD(&fbo->ddestroy);
462	INIT_LIST_HEAD(&fbo->lru);
463	INIT_LIST_HEAD(&fbo->swap);
464	INIT_LIST_HEAD(&fbo->io_reserve_lru);
465	drm_vma_node_reset(&fbo->vma_node);
466	atomic_set(&fbo->cpu_writers, 0);
467
468	kref_init(&fbo->list_kref);
469	kref_init(&fbo->kref);
470	fbo->destroy = &ttm_transfered_destroy;
471	fbo->acc_size = 0;
472	fbo->resv = &fbo->ttm_resv;
473	reservation_object_init(fbo->resv);
474	ret = ww_mutex_trylock(&fbo->resv->lock);
475	WARN_ON(!ret);
476
477	*new_obj = fbo;
478	return 0;
479}
480
481pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
482{
483	/* Cached mappings need no adjustment */
484	if (caching_flags & TTM_PL_FLAG_CACHED)
485		return tmp;
486
487#if defined(__i386__) || defined(__x86_64__)
488	if (caching_flags & TTM_PL_FLAG_WC)
489		tmp = pgprot_writecombine(tmp);
490	else if (boot_cpu_data.x86 > 3)
491		tmp = pgprot_noncached(tmp);
492#endif
493#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
494    defined(__powerpc__)
495	if (caching_flags & TTM_PL_FLAG_WC)
496		tmp = pgprot_writecombine(tmp);
497	else
498		tmp = pgprot_noncached(tmp);
499#endif
500#if defined(__sparc__) || defined(__mips__)
501	tmp = pgprot_noncached(tmp);
502#endif
503	return tmp;
504}
505EXPORT_SYMBOL(ttm_io_prot);
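
/*
 * Editor's sketch, not part of ttm_bo_util.c: ttm_io_prot() is used
 * wherever a CPU mapping of a buffer is created, so that the page
 * protection matches the placement's caching flags.  One use is the
 * vmap() in ttm_bo_kmap_ttm() below; another, modelled here on TTM's
 * fault path and therefore an assumption, is adjusting a vma's
 * vm_page_prot before inserting PFNs.
 */
static void example_apply_prot(struct ttm_buffer_object *bo,
			       struct vm_area_struct *vma)
{
	vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
					vma->vm_page_prot);
}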
506
507static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
508			  unsigned long offset,
509			  unsigned long size,
510			  struct ttm_bo_kmap_obj *map)
511{
512	struct ttm_mem_reg *mem = &bo->mem;
513
514	if (bo->mem.bus.addr) {
515		map->bo_kmap_type = ttm_bo_map_premapped;
516		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
517	} else {
518		map->bo_kmap_type = ttm_bo_map_iomap;
519		if (mem->placement & TTM_PL_FLAG_WC)
520			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
521						  size);
522		else
523			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
524						       size);
525	}
526	return (!map->virtual) ? -ENOMEM : 0;
527}
528
529static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
530			   unsigned long start_page,
531			   unsigned long num_pages,
532			   struct ttm_bo_kmap_obj *map)
533{
534	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
535	struct ttm_tt *ttm = bo->ttm;
536	int ret;
537
538	BUG_ON(!ttm);
539
540	if (ttm->state == tt_unpopulated) {
541		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
542		if (ret)
543			return ret;
544	}
545
546	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
547		/*
548		 * We're mapping a single page, and the desired
549		 * page protection is consistent with the bo.
550		 */
551
552		map->bo_kmap_type = ttm_bo_map_kmap;
553		map->page = ttm->pages[start_page];
554		map->virtual = kmap(map->page);
555	} else {
556		/*
557		 * We need to use vmap to get the desired page protection
558		 * or to make the buffer object look contiguous.
559		 */
560		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
561		map->bo_kmap_type = ttm_bo_map_vmap;
562		map->virtual = vmap(ttm->pages + start_page, num_pages,
563				    0, prot);
564	}
565	return (!map->virtual) ? -ENOMEM : 0;
566}
567
568int ttm_bo_kmap(struct ttm_buffer_object *bo,
569		unsigned long start_page, unsigned long num_pages,
570		struct ttm_bo_kmap_obj *map)
571{
572	struct ttm_mem_type_manager *man =
573		&bo->bdev->man[bo->mem.mem_type];
574	unsigned long offset, size;
575	int ret;
576
577	BUG_ON(!list_empty(&bo->swap));
578	map->virtual = NULL;
579	map->bo = bo;
580	if (num_pages > bo->num_pages)
581		return -EINVAL;
582	if (start_page > bo->num_pages)
583		return -EINVAL;
584#if 0
585	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
586		return -EPERM;
587#endif
588	(void) ttm_mem_io_lock(man, false);
589	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
590	ttm_mem_io_unlock(man);
591	if (ret)
592		return ret;
593	if (!bo->mem.bus.is_iomem) {
594		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
595	} else {
596		offset = start_page << PAGE_SHIFT;
597		size = num_pages << PAGE_SHIFT;
598		return ttm_bo_ioremap(bo, offset, size, map);
599	}
600}
601EXPORT_SYMBOL(ttm_bo_kmap);
602
603void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
604{
605	struct ttm_buffer_object *bo = map->bo;
606	struct ttm_mem_type_manager *man =
607		&bo->bdev->man[bo->mem.mem_type];
608
609	if (!map->virtual)
610		return;
611	switch (map->bo_kmap_type) {
612	case ttm_bo_map_iomap:
613		iounmap(map->virtual);
614		break;
615	case ttm_bo_map_vmap:
616		vunmap(map->virtual);
617		break;
618	case ttm_bo_map_kmap:
619		kunmap(map->page);
620		break;
621	case ttm_bo_map_premapped:
622		break;
623	default:
624		BUG();
625	}
626	(void) ttm_mem_io_lock(man, false);
627	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
628	ttm_mem_io_unlock(man);
629	map->virtual = NULL;
630	map->page = NULL;
631}
632EXPORT_SYMBOL(ttm_bo_kunmap);
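
/*
 * Editor's sketch, not part of ttm_bo_util.c: typical CPU access to a
 * reserved buffer object through ttm_bo_kmap()/ttm_bo_kunmap().  The
 * mapping may be I/O memory, in which case memcpy_toio() must be used
 * instead of memcpy(); the caller is assumed to have checked that size
 * fits within the buffer.
 */
static int example_write_to_bo(struct ttm_buffer_object *bo,
			       const void *data, size_t size)
{
	struct ttm_bo_kmap_obj map;
	int ret;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
	if (ret)
		return ret;

	if (bo->mem.bus.is_iomem)
		memcpy_toio((void __iomem *)map.virtual, data, size);
	else
		memcpy(map.virtual, data, size);

	ttm_bo_kunmap(&map);
	return 0;
}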
633
634int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
635			      struct fence *fence,
636			      bool evict,
637			      bool no_wait_gpu,
638			      struct ttm_mem_reg *new_mem)
639{
640	struct ttm_bo_device *bdev = bo->bdev;
641	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
642	struct ttm_mem_reg *old_mem = &bo->mem;
643	int ret;
644	struct ttm_buffer_object *ghost_obj;
645
646	reservation_object_add_excl_fence(bo->resv, fence);
647	if (evict) {
648		ret = ttm_bo_wait(bo, false, false, false);
649		if (ret)
650			return ret;
651
652		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
653		    (bo->ttm != NULL)) {
654			ttm_tt_unbind(bo->ttm);
655			ttm_tt_destroy(bo->ttm);
656			bo->ttm = NULL;
657		}
658		ttm_bo_free_old_node(bo);
659	} else {
660		/**
661		 * This should help pipeline ordinary buffer moves.
662		 *
663		 * Hang old buffer memory on a new buffer object,
664		 * and leave it to be released when the GPU
665		 * operation has completed.
666		 */
667
668		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
669
670		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
671		if (ret)
672			return ret;
673
674		reservation_object_add_excl_fence(ghost_obj->resv, fence);
675
676		/**
677		 * If we're not moving to fixed memory, the TTM object
 678		 * needs to stay alive. Otherwise hang it on the ghost
679		 * bo to be unbound and destroyed.
680		 */
681
682		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
683			ghost_obj->ttm = NULL;
684		else
685			bo->ttm = NULL;
686
687		ttm_bo_unreserve(ghost_obj);
688		ttm_bo_unref(&ghost_obj);
689	}
690
691	*old_mem = *new_mem;
692	new_mem->mm_node = NULL;
693
694	return 0;
695}
696EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
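
/*
 * Editor's sketch, not part of ttm_bo_util.c: outline of an accelerated
 * move in a driver.  The hardware copy is queued, and the fence that
 * signals its completion is handed to ttm_bo_move_accel_cleanup(), which
 * either waits (on eviction) or creates a ghost object so the old backing
 * store is released only once the copy has finished.
 * example_copy_buffer() is a hypothetical driver helper, not a TTM
 * function.
 */
static int example_copy_buffer(struct ttm_buffer_object *bo,
			       struct ttm_mem_reg *old_mem,
			       struct ttm_mem_reg *new_mem,
			       struct fence **fence); /* hypothetical */

static int example_move_blit(struct ttm_buffer_object *bo, bool evict,
			     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct fence *fence;
	int ret;

	/* Queue the copy from bo->mem to new_mem on the GPU. */
	ret = example_copy_buffer(bo, &bo->mem, new_mem, &fence);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, no_wait_gpu,
					new_mem);
	fence_put(fence); /* drop the driver's reference */
	return ret;
}
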
v3.5.6 (drivers/gpu/drm/ttm/ttm_bo_util.c)
  1/**************************************************************************
  2 *
  3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
  4 * All Rights Reserved.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27/*
 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 29 */
 30
 31#include "ttm/ttm_bo_driver.h"
 32#include "ttm/ttm_placement.h"
 33#include <linux/io.h>
 34#include <linux/highmem.h>
 35#include <linux/wait.h>
 36#include <linux/slab.h>
 37#include <linux/vmalloc.h>
 38#include <linux/module.h>
 39
 40void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 41{
 42	ttm_bo_mem_put(bo, &bo->mem);
 43}
 44
 45int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 46		    bool evict, bool no_wait_reserve,
 47		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 48{
 49	struct ttm_tt *ttm = bo->ttm;
 50	struct ttm_mem_reg *old_mem = &bo->mem;
 51	int ret;
 52
 53	if (old_mem->mem_type != TTM_PL_SYSTEM) {
 54		ttm_tt_unbind(ttm);
 55		ttm_bo_free_old_node(bo);
 56		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
 57				TTM_PL_MASK_MEM);
 58		old_mem->mem_type = TTM_PL_SYSTEM;
 59	}
 60
 61	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
 62	if (unlikely(ret != 0))
 63		return ret;
 64
 65	if (new_mem->mem_type != TTM_PL_SYSTEM) {
 66		ret = ttm_tt_bind(ttm, new_mem);
 67		if (unlikely(ret != 0))
 68			return ret;
 69	}
 70
 71	*old_mem = *new_mem;
 72	new_mem->mm_node = NULL;
 73
 74	return 0;
 75}
 76EXPORT_SYMBOL(ttm_bo_move_ttm);
 77
 78int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
 79{
 80	if (likely(man->io_reserve_fastpath))
 81		return 0;
 82
 83	if (interruptible)
 84		return mutex_lock_interruptible(&man->io_reserve_mutex);
 85
 86	mutex_lock(&man->io_reserve_mutex);
 87	return 0;
 88}
 89
 90void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
 91{
 92	if (likely(man->io_reserve_fastpath))
 93		return;
 94
 95	mutex_unlock(&man->io_reserve_mutex);
 96}
 97
 98static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
 99{
100	struct ttm_buffer_object *bo;
101
102	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
103		return -EAGAIN;
104
105	bo = list_first_entry(&man->io_reserve_lru,
106			      struct ttm_buffer_object,
107			      io_reserve_lru);
108	list_del_init(&bo->io_reserve_lru);
109	ttm_bo_unmap_virtual_locked(bo);
110
111	return 0;
112}
113
114static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
115			      struct ttm_mem_reg *mem)
116{
117	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
118	int ret = 0;
119
120	if (!bdev->driver->io_mem_reserve)
121		return 0;
122	if (likely(man->io_reserve_fastpath))
123		return bdev->driver->io_mem_reserve(bdev, mem);
124
125	if (bdev->driver->io_mem_reserve &&
126	    mem->bus.io_reserved_count++ == 0) {
127retry:
128		ret = bdev->driver->io_mem_reserve(bdev, mem);
129		if (ret == -EAGAIN) {
130			ret = ttm_mem_io_evict(man);
131			if (ret == 0)
132				goto retry;
133		}
134	}
135	return ret;
136}
137
138static void ttm_mem_io_free(struct ttm_bo_device *bdev,
139			    struct ttm_mem_reg *mem)
140{
141	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
142
143	if (likely(man->io_reserve_fastpath))
144		return;
145
146	if (bdev->driver->io_mem_reserve &&
147	    --mem->bus.io_reserved_count == 0 &&
148	    bdev->driver->io_mem_free)
149		bdev->driver->io_mem_free(bdev, mem);
150
151}
152
153int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
154{
155	struct ttm_mem_reg *mem = &bo->mem;
156	int ret;
157
158	if (!mem->bus.io_reserved_vm) {
159		struct ttm_mem_type_manager *man =
160			&bo->bdev->man[mem->mem_type];
161
162		ret = ttm_mem_io_reserve(bo->bdev, mem);
163		if (unlikely(ret != 0))
164			return ret;
165		mem->bus.io_reserved_vm = true;
166		if (man->use_io_reserve_lru)
167			list_add_tail(&bo->io_reserve_lru,
168				      &man->io_reserve_lru);
169	}
170	return 0;
171}
172
173void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
174{
175	struct ttm_mem_reg *mem = &bo->mem;
176
177	if (mem->bus.io_reserved_vm) {
178		mem->bus.io_reserved_vm = false;
179		list_del_init(&bo->io_reserve_lru);
180		ttm_mem_io_free(bo->bdev, mem);
181	}
182}
183
184int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
185			void **virtual)
186{
187	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
188	int ret;
189	void *addr;
190
191	*virtual = NULL;
192	(void) ttm_mem_io_lock(man, false);
193	ret = ttm_mem_io_reserve(bdev, mem);
194	ttm_mem_io_unlock(man);
195	if (ret || !mem->bus.is_iomem)
196		return ret;
197
198	if (mem->bus.addr) {
199		addr = mem->bus.addr;
200	} else {
201		if (mem->placement & TTM_PL_FLAG_WC)
202			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
203		else
204			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
205		if (!addr) {
206			(void) ttm_mem_io_lock(man, false);
207			ttm_mem_io_free(bdev, mem);
208			ttm_mem_io_unlock(man);
209			return -ENOMEM;
210		}
211	}
212	*virtual = addr;
213	return 0;
214}
215
216void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
217			 void *virtual)
218{
219	struct ttm_mem_type_manager *man;
220
221	man = &bdev->man[mem->mem_type];
222
223	if (virtual && mem->bus.addr == NULL)
224		iounmap(virtual);
225	(void) ttm_mem_io_lock(man, false);
226	ttm_mem_io_free(bdev, mem);
227	ttm_mem_io_unlock(man);
228}
229
230static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
231{
232	uint32_t *dstP =
233	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
234	uint32_t *srcP =
235	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
236
237	int i;
238	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
239		iowrite32(ioread32(srcP++), dstP++);
240	return 0;
241}
242
243static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
244				unsigned long page,
245				pgprot_t prot)
246{
247	struct page *d = ttm->pages[page];
248	void *dst;
249
250	if (!d)
251		return -ENOMEM;
252
253	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
254
255#ifdef CONFIG_X86
256	dst = kmap_atomic_prot(d, prot);
257#else
258	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
259		dst = vmap(&d, 1, 0, prot);
260	else
261		dst = kmap(d);
262#endif
263	if (!dst)
264		return -ENOMEM;
265
266	memcpy_fromio(dst, src, PAGE_SIZE);
267
268#ifdef CONFIG_X86
269	kunmap_atomic(dst);
270#else
271	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
272		vunmap(dst);
273	else
274		kunmap(d);
275#endif
276
277	return 0;
278}
279
280static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
281				unsigned long page,
282				pgprot_t prot)
283{
284	struct page *s = ttm->pages[page];
285	void *src;
286
287	if (!s)
288		return -ENOMEM;
289
290	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
291#ifdef CONFIG_X86
292	src = kmap_atomic_prot(s, prot);
293#else
294	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
295		src = vmap(&s, 1, 0, prot);
296	else
297		src = kmap(s);
298#endif
299	if (!src)
300		return -ENOMEM;
301
302	memcpy_toio(dst, src, PAGE_SIZE);
303
304#ifdef CONFIG_X86
305	kunmap_atomic(src);
306#else
307	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
308		vunmap(src);
309	else
310		kunmap(s);
311#endif
312
313	return 0;
314}
315
316int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
317		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
318		       struct ttm_mem_reg *new_mem)
319{
320	struct ttm_bo_device *bdev = bo->bdev;
321	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
322	struct ttm_tt *ttm = bo->ttm;
323	struct ttm_mem_reg *old_mem = &bo->mem;
324	struct ttm_mem_reg old_copy = *old_mem;
325	void *old_iomap;
326	void *new_iomap;
327	int ret;
328	unsigned long i;
329	unsigned long page;
330	unsigned long add = 0;
331	int dir;
332
333	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
334	if (ret)
335		return ret;
336	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
337	if (ret)
338		goto out;
339
340	if (old_iomap == NULL && new_iomap == NULL)
341		goto out2;
342	if (old_iomap == NULL && ttm == NULL)
343		goto out2;
344
345	if (ttm->state == tt_unpopulated) {
346		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
347		if (ret)
348			goto out1;
349	}
350
351	add = 0;
352	dir = 1;
353
354	if ((old_mem->mem_type == new_mem->mem_type) &&
355	    (new_mem->start < old_mem->start + old_mem->size)) {
356		dir = -1;
357		add = new_mem->num_pages - 1;
358	}
359
360	for (i = 0; i < new_mem->num_pages; ++i) {
361		page = i * dir + add;
362		if (old_iomap == NULL) {
363			pgprot_t prot = ttm_io_prot(old_mem->placement,
364						    PAGE_KERNEL);
365			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
366						   prot);
367		} else if (new_iomap == NULL) {
368			pgprot_t prot = ttm_io_prot(new_mem->placement,
369						    PAGE_KERNEL);
370			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
371						   prot);
372		} else
373			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
374		if (ret)
375			goto out1;
376	}
377	mb();
378out2:
379	old_copy = *old_mem;
380	*old_mem = *new_mem;
381	new_mem->mm_node = NULL;
382
383	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
384		ttm_tt_unbind(ttm);
385		ttm_tt_destroy(ttm);
386		bo->ttm = NULL;
387	}
388
389out1:
390	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
391out:
392	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
393	ttm_bo_mem_put(bo, &old_copy);
394	return ret;
395}
396EXPORT_SYMBOL(ttm_bo_move_memcpy);
397
398static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
399{
400	kfree(bo);
401}
402
403/**
404 * ttm_buffer_object_transfer
405 *
406 * @bo: A pointer to a struct ttm_buffer_object.
407 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
408 * holding the data of @bo with the old placement.
409 *
410 * This is a utility function that may be called after an accelerated move
411 * has been scheduled. A new buffer object is created as a placeholder for
412 * the old data while it's being copied. When that buffer object is idle,
413 * it can be destroyed, releasing the space of the old placement.
414 * Returns:
415 * !0: Failure.
416 */
417
418static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
419				      struct ttm_buffer_object **new_obj)
420{
421	struct ttm_buffer_object *fbo;
422	struct ttm_bo_device *bdev = bo->bdev;
423	struct ttm_bo_driver *driver = bdev->driver;
424
425	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
426	if (!fbo)
427		return -ENOMEM;
428
429	*fbo = *bo;
430
431	/**
432	 * Fix up members that we shouldn't copy directly:
433	 * TODO: Explicit member copy would probably be better here.
434	 */
435
436	init_waitqueue_head(&fbo->event_queue);
437	INIT_LIST_HEAD(&fbo->ddestroy);
438	INIT_LIST_HEAD(&fbo->lru);
439	INIT_LIST_HEAD(&fbo->swap);
440	INIT_LIST_HEAD(&fbo->io_reserve_lru);
441	fbo->vm_node = NULL;
442	atomic_set(&fbo->cpu_writers, 0);
443
444	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
445	kref_init(&fbo->list_kref);
446	kref_init(&fbo->kref);
447	fbo->destroy = &ttm_transfered_destroy;
448	fbo->acc_size = 0;
449
450	*new_obj = fbo;
451	return 0;
452}
453
454pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
455{
456#if defined(__i386__) || defined(__x86_64__)
457	if (caching_flags & TTM_PL_FLAG_WC)
458		tmp = pgprot_writecombine(tmp);
459	else if (boot_cpu_data.x86 > 3)
460		tmp = pgprot_noncached(tmp);
461
462#elif defined(__powerpc__)
463	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
464		pgprot_val(tmp) |= _PAGE_NO_CACHE;
465		if (caching_flags & TTM_PL_FLAG_UNCACHED)
466			pgprot_val(tmp) |= _PAGE_GUARDED;
467	}
468#endif
469#if defined(__ia64__)
470	if (caching_flags & TTM_PL_FLAG_WC)
471		tmp = pgprot_writecombine(tmp);
472	else
473		tmp = pgprot_noncached(tmp);
474#endif
475#if defined(__sparc__)
476	if (!(caching_flags & TTM_PL_FLAG_CACHED))
477		tmp = pgprot_noncached(tmp);
478#endif
479	return tmp;
480}
481EXPORT_SYMBOL(ttm_io_prot);
482
483static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
484			  unsigned long offset,
485			  unsigned long size,
486			  struct ttm_bo_kmap_obj *map)
487{
488	struct ttm_mem_reg *mem = &bo->mem;
489
490	if (bo->mem.bus.addr) {
491		map->bo_kmap_type = ttm_bo_map_premapped;
492		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
493	} else {
494		map->bo_kmap_type = ttm_bo_map_iomap;
495		if (mem->placement & TTM_PL_FLAG_WC)
496			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
497						  size);
498		else
499			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
500						       size);
501	}
502	return (!map->virtual) ? -ENOMEM : 0;
503}
504
505static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
506			   unsigned long start_page,
507			   unsigned long num_pages,
508			   struct ttm_bo_kmap_obj *map)
509{
510	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
511	struct ttm_tt *ttm = bo->ttm;
512	int ret;
513
514	BUG_ON(!ttm);
515
516	if (ttm->state == tt_unpopulated) {
517		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
518		if (ret)
519			return ret;
520	}
521
522	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
523		/*
524		 * We're mapping a single page, and the desired
525		 * page protection is consistent with the bo.
526		 */
527
528		map->bo_kmap_type = ttm_bo_map_kmap;
529		map->page = ttm->pages[start_page];
530		map->virtual = kmap(map->page);
531	} else {
532		/*
533		 * We need to use vmap to get the desired page protection
534		 * or to make the buffer object look contiguous.
535		 */
536		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
537			PAGE_KERNEL :
538			ttm_io_prot(mem->placement, PAGE_KERNEL);
539		map->bo_kmap_type = ttm_bo_map_vmap;
540		map->virtual = vmap(ttm->pages + start_page, num_pages,
541				    0, prot);
542	}
543	return (!map->virtual) ? -ENOMEM : 0;
544}
545
546int ttm_bo_kmap(struct ttm_buffer_object *bo,
547		unsigned long start_page, unsigned long num_pages,
548		struct ttm_bo_kmap_obj *map)
549{
550	struct ttm_mem_type_manager *man =
551		&bo->bdev->man[bo->mem.mem_type];
552	unsigned long offset, size;
553	int ret;
554
555	BUG_ON(!list_empty(&bo->swap));
556	map->virtual = NULL;
557	map->bo = bo;
558	if (num_pages > bo->num_pages)
559		return -EINVAL;
560	if (start_page > bo->num_pages)
561		return -EINVAL;
562#if 0
563	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
564		return -EPERM;
565#endif
566	(void) ttm_mem_io_lock(man, false);
567	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
568	ttm_mem_io_unlock(man);
569	if (ret)
570		return ret;
571	if (!bo->mem.bus.is_iomem) {
572		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
573	} else {
574		offset = start_page << PAGE_SHIFT;
575		size = num_pages << PAGE_SHIFT;
576		return ttm_bo_ioremap(bo, offset, size, map);
577	}
578}
579EXPORT_SYMBOL(ttm_bo_kmap);
580
581void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
582{
583	struct ttm_buffer_object *bo = map->bo;
584	struct ttm_mem_type_manager *man =
585		&bo->bdev->man[bo->mem.mem_type];
586
587	if (!map->virtual)
588		return;
589	switch (map->bo_kmap_type) {
590	case ttm_bo_map_iomap:
591		iounmap(map->virtual);
592		break;
593	case ttm_bo_map_vmap:
594		vunmap(map->virtual);
595		break;
596	case ttm_bo_map_kmap:
597		kunmap(map->page);
598		break;
599	case ttm_bo_map_premapped:
600		break;
601	default:
602		BUG();
603	}
604	(void) ttm_mem_io_lock(man, false);
605	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
606	ttm_mem_io_unlock(man);
607	map->virtual = NULL;
608	map->page = NULL;
609}
610EXPORT_SYMBOL(ttm_bo_kunmap);
611
612int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
613			      void *sync_obj,
614			      void *sync_obj_arg,
615			      bool evict, bool no_wait_reserve,
616			      bool no_wait_gpu,
617			      struct ttm_mem_reg *new_mem)
618{
619	struct ttm_bo_device *bdev = bo->bdev;
620	struct ttm_bo_driver *driver = bdev->driver;
621	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
622	struct ttm_mem_reg *old_mem = &bo->mem;
623	int ret;
624	struct ttm_buffer_object *ghost_obj;
625	void *tmp_obj = NULL;
626
627	spin_lock(&bdev->fence_lock);
628	if (bo->sync_obj) {
629		tmp_obj = bo->sync_obj;
630		bo->sync_obj = NULL;
631	}
632	bo->sync_obj = driver->sync_obj_ref(sync_obj);
633	bo->sync_obj_arg = sync_obj_arg;
634	if (evict) {
635		ret = ttm_bo_wait(bo, false, false, false);
636		spin_unlock(&bdev->fence_lock);
637		if (tmp_obj)
638			driver->sync_obj_unref(&tmp_obj);
639		if (ret)
640			return ret;
641
642		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
643		    (bo->ttm != NULL)) {
644			ttm_tt_unbind(bo->ttm);
645			ttm_tt_destroy(bo->ttm);
646			bo->ttm = NULL;
647		}
648		ttm_bo_free_old_node(bo);
649	} else {
650		/**
651		 * This should help pipeline ordinary buffer moves.
652		 *
653		 * Hang old buffer memory on a new buffer object,
654		 * and leave it to be released when the GPU
655		 * operation has completed.
656		 */
657
658		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
659		spin_unlock(&bdev->fence_lock);
660		if (tmp_obj)
661			driver->sync_obj_unref(&tmp_obj);
662
663		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
664		if (ret)
665			return ret;
666
667		/**
668		 * If we're not moving to fixed memory, the TTM object
 669		 * needs to stay alive. Otherwise hang it on the ghost
670		 * bo to be unbound and destroyed.
671		 */
672
673		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
674			ghost_obj->ttm = NULL;
675		else
676			bo->ttm = NULL;
677
678		ttm_bo_unreserve(ghost_obj);
679		ttm_bo_unref(&ghost_obj);
680	}
681
682	*old_mem = *new_mem;
683	new_mem->mm_node = NULL;
684
685	return 0;
686}
687EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
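
/*
 * Editor's note, not part of the file: in this v3.5.6 version the
 * accelerated-move cleanup is driven by the driver's opaque sync_obj and
 * sync_obj_arg instead of a struct fence, and an extra no_wait_reserve
 * argument still exists.  A driver of this era would call it roughly as
 *
 *	ret = ttm_bo_move_accel_cleanup(bo, (void *)my_fence, NULL,
 *					evict, no_wait_reserve,
 *					no_wait_gpu, new_mem);
 *
 * where my_fence is whatever object the driver's sync_obj_* hooks in
 * struct ttm_bo_driver understand; the exact types are driver-specific
 * and the NULL sync_obj_arg is an assumption.
 */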