v4.6
 
  1/**************************************************************************
  2 *
  3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
  4 * All Rights Reserved.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27/*
 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 29 */
 30
 31#include <drm/ttm/ttm_bo_driver.h>
 32#include <drm/ttm/ttm_placement.h>
 33#include <drm/drm_vma_manager.h>
 34#include <linux/io.h>
 35#include <linux/highmem.h>
 36#include <linux/wait.h>
 37#include <linux/slab.h>
 38#include <linux/vmalloc.h>
 39#include <linux/module.h>
 40#include <linux/reservation.h>
 41
 42void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 43{
 44	ttm_bo_mem_put(bo, &bo->mem);
 45}
 46
 47int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 48		    bool evict,
 49		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 50{
 51	struct ttm_tt *ttm = bo->ttm;
 52	struct ttm_mem_reg *old_mem = &bo->mem;
 53	int ret;
 54
 55	if (old_mem->mem_type != TTM_PL_SYSTEM) {
 56		ttm_tt_unbind(ttm);
 57		ttm_bo_free_old_node(bo);
 58		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
 59				TTM_PL_MASK_MEM);
 60		old_mem->mem_type = TTM_PL_SYSTEM;
 61	}
 62
 63	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
 64	if (unlikely(ret != 0))
 65		return ret;
 66
 67	if (new_mem->mem_type != TTM_PL_SYSTEM) {
 68		ret = ttm_tt_bind(ttm, new_mem);
 69		if (unlikely(ret != 0))
 70			return ret;
 71	}
 72
 73	*old_mem = *new_mem;
 74	new_mem->mm_node = NULL;
 75
 76	return 0;
 77}
 78EXPORT_SYMBOL(ttm_bo_move_ttm);
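A hedged usage sketch (not part of the kernel file): drivers of the v4.6 era typically call ttm_bo_move_ttm() from their move path for transfers between system memory and a TT-backed domain. The wrapper name below is hypothetical; the call matches the signature exported above.

/* Hypothetical driver helper: move a BO into a TT-backed placement. */
static int my_drv_move_to_tt(struct ttm_buffer_object *bo, bool evict,
			     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	/* Rebind the TTM pages to the new placement and drop the old node. */
	return ttm_bo_move_ttm(bo, evict, no_wait_gpu, new_mem);
}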
 79
 80int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
 81{
 82	if (likely(man->io_reserve_fastpath))
 83		return 0;
 84
 85	if (interruptible)
 86		return mutex_lock_interruptible(&man->io_reserve_mutex);
 87
 88	mutex_lock(&man->io_reserve_mutex);
 89	return 0;
 90}
 91EXPORT_SYMBOL(ttm_mem_io_lock);
 92
 93void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
 94{
 95	if (likely(man->io_reserve_fastpath))
 96		return;
 97
 98	mutex_unlock(&man->io_reserve_mutex);
 99}
100EXPORT_SYMBOL(ttm_mem_io_unlock);
101
102static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
103{
104	struct ttm_buffer_object *bo;
105
106	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
107		return -EAGAIN;
108
109	bo = list_first_entry(&man->io_reserve_lru,
110			      struct ttm_buffer_object,
111			      io_reserve_lru);
112	list_del_init(&bo->io_reserve_lru);
113	ttm_bo_unmap_virtual_locked(bo);
114
115	return 0;
116}
117
118
119int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
120		       struct ttm_mem_reg *mem)
121{
122	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
123	int ret = 0;
124
125	if (!bdev->driver->io_mem_reserve)
126		return 0;
127	if (likely(man->io_reserve_fastpath))
128		return bdev->driver->io_mem_reserve(bdev, mem);
129
130	if (bdev->driver->io_mem_reserve &&
131	    mem->bus.io_reserved_count++ == 0) {
132retry:
133		ret = bdev->driver->io_mem_reserve(bdev, mem);
134		if (ret == -EAGAIN) {
135			ret = ttm_mem_io_evict(man);
136			if (ret == 0)
137				goto retry;
138		}
139	}
140	return ret;
141}
142EXPORT_SYMBOL(ttm_mem_io_reserve);
143
144void ttm_mem_io_free(struct ttm_bo_device *bdev,
145		     struct ttm_mem_reg *mem)
146{
147	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
148
149	if (likely(man->io_reserve_fastpath))
150		return;
151
152	if (bdev->driver->io_mem_reserve &&
153	    --mem->bus.io_reserved_count == 0 &&
154	    bdev->driver->io_mem_free)
155		bdev->driver->io_mem_free(bdev, mem);
156
157}
158EXPORT_SYMBOL(ttm_mem_io_free);
159
160int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
161{
162	struct ttm_mem_reg *mem = &bo->mem;
163	int ret;
164
165	if (!mem->bus.io_reserved_vm) {
166		struct ttm_mem_type_manager *man =
167			&bo->bdev->man[mem->mem_type];
168
169		ret = ttm_mem_io_reserve(bo->bdev, mem);
170		if (unlikely(ret != 0))
171			return ret;
172		mem->bus.io_reserved_vm = true;
173		if (man->use_io_reserve_lru)
174			list_add_tail(&bo->io_reserve_lru,
175				      &man->io_reserve_lru);
176	}
177	return 0;
178}
179
180void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
181{
182	struct ttm_mem_reg *mem = &bo->mem;
183
184	if (mem->bus.io_reserved_vm) {
185		mem->bus.io_reserved_vm = false;
186		list_del_init(&bo->io_reserve_lru);
187		ttm_mem_io_free(bo->bdev, mem);
188	}
189}
190
191static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
192			void **virtual)
193{
194	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
195	int ret;
196	void *addr;
197
198	*virtual = NULL;
199	(void) ttm_mem_io_lock(man, false);
200	ret = ttm_mem_io_reserve(bdev, mem);
201	ttm_mem_io_unlock(man);
202	if (ret || !mem->bus.is_iomem)
203		return ret;
204
205	if (mem->bus.addr) {
206		addr = mem->bus.addr;
207	} else {
208		if (mem->placement & TTM_PL_FLAG_WC)
209			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
210		else
211			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
212		if (!addr) {
213			(void) ttm_mem_io_lock(man, false);
214			ttm_mem_io_free(bdev, mem);
215			ttm_mem_io_unlock(man);
216			return -ENOMEM;
217		}
218	}
219	*virtual = addr;
220	return 0;
221}
222
223static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
224			 void *virtual)
225{
226	struct ttm_mem_type_manager *man;
227
228	man = &bdev->man[mem->mem_type];
229
230	if (virtual && mem->bus.addr == NULL)
231		iounmap(virtual);
232	(void) ttm_mem_io_lock(man, false);
233	ttm_mem_io_free(bdev, mem);
234	ttm_mem_io_unlock(man);
235}
236
237static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
238{
239	uint32_t *dstP =
240	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
241	uint32_t *srcP =
242	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
243
244	int i;
245	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
246		iowrite32(ioread32(srcP++), dstP++);
247	return 0;
248}
249
250static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
251				unsigned long page,
252				pgprot_t prot)
253{
254	struct page *d = ttm->pages[page];
255	void *dst;
256
257	if (!d)
258		return -ENOMEM;
259
260	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
261
262#ifdef CONFIG_X86
263	dst = kmap_atomic_prot(d, prot);
264#else
265	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
266		dst = vmap(&d, 1, 0, prot);
267	else
268		dst = kmap(d);
269#endif
270	if (!dst)
271		return -ENOMEM;
272
273	memcpy_fromio(dst, src, PAGE_SIZE);
274
275#ifdef CONFIG_X86
276	kunmap_atomic(dst);
277#else
278	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
279		vunmap(dst);
280	else
281		kunmap(d);
282#endif
283
284	return 0;
285}
286
287static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
288				unsigned long page,
289				pgprot_t prot)
290{
291	struct page *s = ttm->pages[page];
292	void *src;
293
294	if (!s)
295		return -ENOMEM;
296
297	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
298#ifdef CONFIG_X86
299	src = kmap_atomic_prot(s, prot);
300#else
301	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
302		src = vmap(&s, 1, 0, prot);
303	else
304		src = kmap(s);
305#endif
306	if (!src)
307		return -ENOMEM;
308
309	memcpy_toio(dst, src, PAGE_SIZE);
310
311#ifdef CONFIG_X86
312	kunmap_atomic(src);
313#else
314	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
315		vunmap(src);
316	else
317		kunmap(s);
318#endif
319
320	return 0;
321}
322
323int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
324		       bool evict, bool no_wait_gpu,
325		       struct ttm_mem_reg *new_mem)
326{
327	struct ttm_bo_device *bdev = bo->bdev;
328	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
329	struct ttm_tt *ttm = bo->ttm;
330	struct ttm_mem_reg *old_mem = &bo->mem;
331	struct ttm_mem_reg old_copy = *old_mem;
332	void *old_iomap;
333	void *new_iomap;
334	int ret;
335	unsigned long i;
336	unsigned long page;
337	unsigned long add = 0;
338	int dir;
339
340	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
341	if (ret)
342		return ret;
343	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
344	if (ret)
345		goto out;
346
347	/*
348	 * Single TTM move. NOP.
349	 */
350	if (old_iomap == NULL && new_iomap == NULL)
351		goto out2;
352
353	/*
354	 * Don't move nonexistent data. Clear destination instead.
355	 */
356	if (old_iomap == NULL &&
357	    (ttm == NULL || (ttm->state == tt_unpopulated &&
358			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
359		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
360		goto out2;
361	}
362
363	/*
364	 * TTM might be null for moves within the same region.
365	 */
366	if (ttm && ttm->state == tt_unpopulated) {
367		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
368		if (ret)
369			goto out1;
370	}
371
372	add = 0;
373	dir = 1;
374
375	if ((old_mem->mem_type == new_mem->mem_type) &&
376	    (new_mem->start < old_mem->start + old_mem->size)) {
377		dir = -1;
378		add = new_mem->num_pages - 1;
379	}
380
381	for (i = 0; i < new_mem->num_pages; ++i) {
382		page = i * dir + add;
383		if (old_iomap == NULL) {
384			pgprot_t prot = ttm_io_prot(old_mem->placement,
385						    PAGE_KERNEL);
386			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
387						   prot);
388		} else if (new_iomap == NULL) {
389			pgprot_t prot = ttm_io_prot(new_mem->placement,
390						    PAGE_KERNEL);
391			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
392						   prot);
393		} else
394			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
395		if (ret)
396			goto out1;
397	}
398	mb();
399out2:
400	old_copy = *old_mem;
401	*old_mem = *new_mem;
402	new_mem->mm_node = NULL;
403
404	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
405		ttm_tt_unbind(ttm);
406		ttm_tt_destroy(ttm);
407		bo->ttm = NULL;
408	}
409
410out1:
411	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
412out:
413	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
414
415	/*
416	 * On error, keep the mm node!
417	 */
418	if (!ret)
419		ttm_bo_mem_put(bo, &old_copy);
420	return ret;
421}
422EXPORT_SYMBOL(ttm_bo_move_memcpy);
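Another hedged sketch: a driver move handler that tries a hardware blit first and falls back to the CPU copy above when that fails. my_drv_copy_with_gpu() and the surrounding handler are hypothetical; only the ttm_bo_move_memcpy() call follows the v4.6 signature shown in this file.

/* Hypothetical move handler: try the GPU blit, else fall back to a CPU copy. */
static int my_drv_bo_move(struct ttm_buffer_object *bo, bool evict,
			  bool interruptible, bool no_wait_gpu,
			  struct ttm_mem_reg *new_mem)
{
	int ret;

	ret = my_drv_copy_with_gpu(bo, evict, no_wait_gpu, new_mem); /* hypothetical */
	if (ret)
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
	return ret;
}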
423
424static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
425{
426	kfree(bo);
427}
428
429/**
430 * ttm_buffer_object_transfer
431 *
432 * @bo: A pointer to a struct ttm_buffer_object.
433 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
434 * holding the data of @bo with the old placement.
435 *
436 * This is a utility function that may be called after an accelerated move
437 * has been scheduled. A new buffer object is created as a placeholder for
438 * the old data while it's being copied. When that buffer object is idle,
439 * it can be destroyed, releasing the space of the old placement.
440 * Returns:
441 * !0: Failure.
442 */
443
444static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
445				      struct ttm_buffer_object **new_obj)
446{
447	struct ttm_buffer_object *fbo;
448	int ret;
449
450	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
451	if (!fbo)
452		return -ENOMEM;
453
454	*fbo = *bo;
455
456	/**
457	 * Fix up members that we shouldn't copy directly:
458	 * TODO: Explicit member copy would probably be better here.
459	 */
460
461	INIT_LIST_HEAD(&fbo->ddestroy);
462	INIT_LIST_HEAD(&fbo->lru);
463	INIT_LIST_HEAD(&fbo->swap);
464	INIT_LIST_HEAD(&fbo->io_reserve_lru);
465	drm_vma_node_reset(&fbo->vma_node);
466	atomic_set(&fbo->cpu_writers, 0);
467
468	kref_init(&fbo->list_kref);
469	kref_init(&fbo->kref);
470	fbo->destroy = &ttm_transfered_destroy;
471	fbo->acc_size = 0;
472	fbo->resv = &fbo->ttm_resv;
473	reservation_object_init(fbo->resv);
474	ret = ww_mutex_trylock(&fbo->resv->lock);
475	WARN_ON(!ret);
476
477	*new_obj = fbo;
478	return 0;
479}
480
481pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
482{
483	/* Cached mappings need no adjustment */
484	if (caching_flags & TTM_PL_FLAG_CACHED)
485		return tmp;
486
487#if defined(__i386__) || defined(__x86_64__)
488	if (caching_flags & TTM_PL_FLAG_WC)
489		tmp = pgprot_writecombine(tmp);
490	else if (boot_cpu_data.x86 > 3)
491		tmp = pgprot_noncached(tmp);
492#endif
493#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
494    defined(__powerpc__)
495	if (caching_flags & TTM_PL_FLAG_WC)
496		tmp = pgprot_writecombine(tmp);
497	else
498		tmp = pgprot_noncached(tmp);
499#endif
500#if defined(__sparc__) || defined(__mips__)
501	tmp = pgprot_noncached(tmp);
502#endif
503	return tmp;
504}
505EXPORT_SYMBOL(ttm_io_prot);
506
507static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
508			  unsigned long offset,
509			  unsigned long size,
510			  struct ttm_bo_kmap_obj *map)
511{
512	struct ttm_mem_reg *mem = &bo->mem;
513
514	if (bo->mem.bus.addr) {
515		map->bo_kmap_type = ttm_bo_map_premapped;
516		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
517	} else {
518		map->bo_kmap_type = ttm_bo_map_iomap;
519		if (mem->placement & TTM_PL_FLAG_WC)
520			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
521						  size);
522		else
523			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
524						       size);
525	}
526	return (!map->virtual) ? -ENOMEM : 0;
527}
528
529static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
530			   unsigned long start_page,
531			   unsigned long num_pages,
532			   struct ttm_bo_kmap_obj *map)
533{
534	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
535	struct ttm_tt *ttm = bo->ttm;
536	int ret;
537
538	BUG_ON(!ttm);
539
540	if (ttm->state == tt_unpopulated) {
541		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
542		if (ret)
543			return ret;
544	}
545
546	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
547		/*
548		 * We're mapping a single page, and the desired
549		 * page protection is consistent with the bo.
550		 */
551
552		map->bo_kmap_type = ttm_bo_map_kmap;
553		map->page = ttm->pages[start_page];
554		map->virtual = kmap(map->page);
555	} else {
556		/*
557		 * We need to use vmap to get the desired page protection
558		 * or to make the buffer object look contiguous.
559		 */
560		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
561		map->bo_kmap_type = ttm_bo_map_vmap;
562		map->virtual = vmap(ttm->pages + start_page, num_pages,
563				    0, prot);
564	}
565	return (!map->virtual) ? -ENOMEM : 0;
566}
567
568int ttm_bo_kmap(struct ttm_buffer_object *bo,
569		unsigned long start_page, unsigned long num_pages,
570		struct ttm_bo_kmap_obj *map)
571{
572	struct ttm_mem_type_manager *man =
573		&bo->bdev->man[bo->mem.mem_type];
574	unsigned long offset, size;
575	int ret;
576
577	BUG_ON(!list_empty(&bo->swap));
578	map->virtual = NULL;
579	map->bo = bo;
580	if (num_pages > bo->num_pages)
581		return -EINVAL;
582	if (start_page > bo->num_pages)
583		return -EINVAL;
584#if 0
585	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
586		return -EPERM;
587#endif
588	(void) ttm_mem_io_lock(man, false);
589	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
590	ttm_mem_io_unlock(man);
591	if (ret)
592		return ret;
593	if (!bo->mem.bus.is_iomem) {
594		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
595	} else {
596		offset = start_page << PAGE_SHIFT;
597		size = num_pages << PAGE_SHIFT;
598		return ttm_bo_ioremap(bo, offset, size, map);
599	}
600}
601EXPORT_SYMBOL(ttm_bo_kmap);
602
603void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
604{
605	struct ttm_buffer_object *bo = map->bo;
606	struct ttm_mem_type_manager *man =
607		&bo->bdev->man[bo->mem.mem_type];
608
609	if (!map->virtual)
610		return;
611	switch (map->bo_kmap_type) {
612	case ttm_bo_map_iomap:
613		iounmap(map->virtual);
614		break;
615	case ttm_bo_map_vmap:
616		vunmap(map->virtual);
617		break;
618	case ttm_bo_map_kmap:
619		kunmap(map->page);
620		break;
621	case ttm_bo_map_premapped:
622		break;
623	default:
624		BUG();
625	}
626	(void) ttm_mem_io_lock(man, false);
627	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
628	ttm_mem_io_unlock(man);
629	map->virtual = NULL;
630	map->page = NULL;
631}
632EXPORT_SYMBOL(ttm_bo_kunmap);
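A hedged sketch of the kmap/kunmap pair: map the first pages of an already-reserved buffer object, write through the kernel address, and unmap. The helper name and payload are hypothetical; ttm_kmap_obj_virtual() (from ttm_bo_api.h) reports whether the mapping is I/O memory.

/* Hypothetical helper: copy a payload into the start of a reserved BO. */
static int my_drv_write_bo(struct ttm_buffer_object *bo,
			   const void *data, size_t size)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	ret = ttm_bo_kmap(bo, 0, DIV_ROUND_UP(size, PAGE_SIZE), &map);
	if (ret)
		return ret;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memcpy_toio((void __iomem *)virtual, data, size);
	else
		memcpy(virtual, data, size);

	ttm_bo_kunmap(&map);
	return 0;
}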
633
634int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
635			      struct fence *fence,
636			      bool evict,
637			      bool no_wait_gpu,
638			      struct ttm_mem_reg *new_mem)
639{
640	struct ttm_bo_device *bdev = bo->bdev;
641	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
642	struct ttm_mem_reg *old_mem = &bo->mem;
643	int ret;
644	struct ttm_buffer_object *ghost_obj;
645
646	reservation_object_add_excl_fence(bo->resv, fence);
647	if (evict) {
648		ret = ttm_bo_wait(bo, false, false, false);
649		if (ret)
650			return ret;
651
652		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
653		    (bo->ttm != NULL)) {
654			ttm_tt_unbind(bo->ttm);
655			ttm_tt_destroy(bo->ttm);
656			bo->ttm = NULL;
657		}
658		ttm_bo_free_old_node(bo);
659	} else {
660		/**
661		 * This should help pipeline ordinary buffer moves.
662		 *
663		 * Hang old buffer memory on a new buffer object,
664		 * and leave it to be released when the GPU
665		 * operation has completed.
666		 */
667
668		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
669
670		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
671		if (ret)
672			return ret;
673
674		reservation_object_add_excl_fence(ghost_obj->resv, fence);
675
676		/**
677		 * If we're not moving to fixed memory, the TTM object
678		 * needs to stay alive. Otherwise hang it on the ghost
679		 * bo to be unbound and destroyed.
680		 */
681
682		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
683			ghost_obj->ttm = NULL;
684		else
685			bo->ttm = NULL;
686
687		ttm_bo_unreserve(ghost_obj);
688		ttm_bo_unref(&ghost_obj);
689	}
690
691	*old_mem = *new_mem;
692	new_mem->mm_node = NULL;
693
694	return 0;
695}
696EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
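A hedged sketch of the accelerated-move pattern ttm_bo_move_accel_cleanup() supports in v4.6: queue a GPU copy, obtain the fence that signals its completion, and let the helper park the old placement on a ghost object (or free it on eviction). my_drv_emit_copy() is hypothetical.

/* Hypothetical accelerated move: GPU blit plus the cleanup helper above. */
static int my_drv_move_blit(struct ttm_buffer_object *bo, bool evict,
			    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct fence *fence;
	int ret;

	ret = my_drv_emit_copy(bo, &bo->mem, new_mem, &fence); /* hypothetical */
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, no_wait_gpu, new_mem);
	fence_put(fence);
	return ret;
}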
v6.2
  1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
  2/**************************************************************************
  3 *
  4 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
  5 * All Rights Reserved.
  6 *
  7 * Permission is hereby granted, free of charge, to any person obtaining a
  8 * copy of this software and associated documentation files (the
  9 * "Software"), to deal in the Software without restriction, including
 10 * without limitation the rights to use, copy, modify, merge, publish,
 11 * distribute, sub license, and/or sell copies of the Software, and to
 12 * permit persons to whom the Software is furnished to do so, subject to
 13 * the following conditions:
 14 *
 15 * The above copyright notice and this permission notice (including the
 16 * next paragraph) shall be included in all copies or substantial portions
 17 * of the Software.
 18 *
 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 26 *
 27 **************************************************************************/
 28/*
 29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 30 */
 31
 32#include <drm/ttm/ttm_bo_driver.h>
 33#include <drm/ttm/ttm_placement.h>
 34#include <drm/drm_cache.h>
 35#include <drm/drm_vma_manager.h>
 36#include <linux/iosys-map.h>
 37#include <linux/io.h>
 38#include <linux/highmem.h>
 39#include <linux/wait.h>
 40#include <linux/slab.h>
 41#include <linux/vmalloc.h>
 42#include <linux/module.h>
 43#include <linux/dma-resv.h>
 44
 45struct ttm_transfer_obj {
 46	struct ttm_buffer_object base;
 47	struct ttm_buffer_object *bo;
 48};
 49
 50int ttm_mem_io_reserve(struct ttm_device *bdev,
 51		       struct ttm_resource *mem)
 52{
 53	if (mem->bus.offset || mem->bus.addr)
 54		return 0;
 55
 56	mem->bus.is_iomem = false;
 57	if (!bdev->funcs->io_mem_reserve)
 58		return 0;
 59
 60	return bdev->funcs->io_mem_reserve(bdev, mem);
 61}
 62
 63void ttm_mem_io_free(struct ttm_device *bdev,
 64		     struct ttm_resource *mem)
 65{
 66	if (!mem)
 67		return;
 68
 69	if (!mem->bus.offset && !mem->bus.addr)
 70		return;
 71
 72	if (bdev->funcs->io_mem_free)
 73		bdev->funcs->io_mem_free(bdev, mem);
 74
 75	mem->bus.offset = 0;
 76	mem->bus.addr = NULL;
 77}
 78
 79/**
 80 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 81 * @clear: Whether to clear rather than copy.
 82 * @num_pages: Number of pages of the operation.
 83 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 84 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 85 *
 86 * This function is intended to be able to run asynchronously under a
 87 * dma-fence if desired.
 88 */
 89void ttm_move_memcpy(bool clear,
 90		     u32 num_pages,
 91		     struct ttm_kmap_iter *dst_iter,
 92		     struct ttm_kmap_iter *src_iter)
 93{
 94	const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
 95	const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
 96	struct iosys_map src_map, dst_map;
 97	pgoff_t i;
 98
 99	/* Single TTM move. NOP */
100	if (dst_ops->maps_tt && src_ops->maps_tt)
101		return;
102
103	/* Don't move nonexistent data. Clear destination instead. */
104	if (clear) {
105		for (i = 0; i < num_pages; ++i) {
106			dst_ops->map_local(dst_iter, &dst_map, i);
107			if (dst_map.is_iomem)
108				memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
109			else
110				memset(dst_map.vaddr, 0, PAGE_SIZE);
111			if (dst_ops->unmap_local)
112				dst_ops->unmap_local(dst_iter, &dst_map);
113		}
114		return;
115	}
116
117	for (i = 0; i < num_pages; ++i) {
118		dst_ops->map_local(dst_iter, &dst_map, i);
119		src_ops->map_local(src_iter, &src_map, i);
120
121		drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);
122
123		if (src_ops->unmap_local)
124			src_ops->unmap_local(src_iter, &src_map);
125		if (dst_ops->unmap_local)
126			dst_ops->unmap_local(dst_iter, &dst_map);
127	}
128}
129EXPORT_SYMBOL(ttm_move_memcpy);
130
131int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
132		       struct ttm_operation_ctx *ctx,
133		       struct ttm_resource *dst_mem)
134{
135	struct ttm_device *bdev = bo->bdev;
136	struct ttm_resource_manager *dst_man =
137		ttm_manager_type(bo->bdev, dst_mem->mem_type);
138	struct ttm_tt *ttm = bo->ttm;
139	struct ttm_resource *src_mem = bo->resource;
140	struct ttm_resource_manager *src_man;
141	union {
142		struct ttm_kmap_iter_tt tt;
143		struct ttm_kmap_iter_linear_io io;
144	} _dst_iter, _src_iter;
145	struct ttm_kmap_iter *dst_iter, *src_iter;
146	bool clear;
147	int ret = 0;
148
149	if (!src_mem)
150		return 0;
151
152	src_man = ttm_manager_type(bdev, src_mem->mem_type);
153	if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
154		    dst_man->use_tt)) {
155		ret = ttm_tt_populate(bdev, ttm, ctx);
156		if (ret)
157			return ret;
158	}
159
160	dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
161	if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
162		dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
163	if (IS_ERR(dst_iter))
164		return PTR_ERR(dst_iter);
165
166	src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
167	if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
168		src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
169	if (IS_ERR(src_iter)) {
170		ret = PTR_ERR(src_iter);
171		goto out_src_iter;
172	}
173
174	clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
175	if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
176		ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);
177
178	if (!src_iter->ops->maps_tt)
179		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
180	ttm_bo_move_sync_cleanup(bo, dst_mem);
181
182out_src_iter:
183	if (!dst_iter->ops->maps_tt)
184		ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);
185
186	return ret;
187}
188EXPORT_SYMBOL(ttm_bo_move_memcpy);
189
190static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
191{
192	struct ttm_transfer_obj *fbo;
193
194	fbo = container_of(bo, struct ttm_transfer_obj, base);
195	dma_resv_fini(&fbo->base.base._resv);
196	ttm_bo_put(fbo->bo);
197	kfree(fbo);
198}
199
200/**
201 * ttm_buffer_object_transfer
202 *
203 * @bo: A pointer to a struct ttm_buffer_object.
204 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
205 * holding the data of @bo with the old placement.
206 *
207 * This is a utility function that may be called after an accelerated move
208 * has been scheduled. A new buffer object is created as a placeholder for
209 * the old data while it's being copied. When that buffer object is idle,
210 * it can be destroyed, releasing the space of the old placement.
211 * Returns:
212 * !0: Failure.
213 */
214
215static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
216				      struct ttm_buffer_object **new_obj)
217{
218	struct ttm_transfer_obj *fbo;
219	int ret;
220
221	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
222	if (!fbo)
223		return -ENOMEM;
224
225	fbo->base = *bo;
226
227	/**
228	 * Fix up members that we shouldn't copy directly:
229	 * TODO: Explicit member copy would probably be better here.
230	 */
231
232	atomic_inc(&ttm_glob.bo_count);
233	INIT_LIST_HEAD(&fbo->base.ddestroy);
234	drm_vma_node_reset(&fbo->base.base.vma_node);
235
236	kref_init(&fbo->base.kref);
237	fbo->base.destroy = &ttm_transfered_destroy;
238	fbo->base.pin_count = 0;
239	if (bo->type != ttm_bo_type_sg)
240		fbo->base.base.resv = &fbo->base.base._resv;
241
242	dma_resv_init(&fbo->base.base._resv);
243	fbo->base.base.dev = NULL;
244	ret = dma_resv_trylock(&fbo->base.base._resv);
245	WARN_ON(!ret);
246
247	if (fbo->base.resource) {
248		ttm_resource_set_bo(fbo->base.resource, &fbo->base);
249		bo->resource = NULL;
250		ttm_bo_set_bulk_move(&fbo->base, NULL);
251	} else {
252		fbo->base.bulk_move = NULL;
253	}
254
255	ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
256	if (ret) {
257		kfree(fbo);
258		return ret;
259	}
260
261	ttm_bo_get(bo);
262	fbo->bo = bo;
263
264	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);
265
266	*new_obj = &fbo->base;
267	return 0;
268}
269
270pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
271		     pgprot_t tmp)
272{
273	struct ttm_resource_manager *man;
274	enum ttm_caching caching;
275
276	man = ttm_manager_type(bo->bdev, res->mem_type);
277	caching = man->use_tt ? bo->ttm->caching : res->bus.caching;
278
279	return ttm_prot_from_caching(caching, tmp);
280}
281EXPORT_SYMBOL(ttm_io_prot);
282
283static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
284			  unsigned long offset,
285			  unsigned long size,
286			  struct ttm_bo_kmap_obj *map)
287{
288	struct ttm_resource *mem = bo->resource;
289
290	if (bo->resource->bus.addr) {
291		map->bo_kmap_type = ttm_bo_map_premapped;
292		map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
293	} else {
294		resource_size_t res = bo->resource->bus.offset + offset;
295
296		map->bo_kmap_type = ttm_bo_map_iomap;
297		if (mem->bus.caching == ttm_write_combined)
298			map->virtual = ioremap_wc(res, size);
299#ifdef CONFIG_X86
300		else if (mem->bus.caching == ttm_cached)
301			map->virtual = ioremap_cache(res, size);
302#endif
303		else
304			map->virtual = ioremap(res, size);
305	}
306	return (!map->virtual) ? -ENOMEM : 0;
307}
308
309static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
310			   unsigned long start_page,
311			   unsigned long num_pages,
312			   struct ttm_bo_kmap_obj *map)
313{
314	struct ttm_resource *mem = bo->resource;
315	struct ttm_operation_ctx ctx = {
316		.interruptible = false,
317		.no_wait_gpu = false
318	};
319	struct ttm_tt *ttm = bo->ttm;
320	pgprot_t prot;
321	int ret;
322
323	BUG_ON(!ttm);
324
325	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
326	if (ret)
327		return ret;
328
329	if (num_pages == 1 && ttm->caching == ttm_cached) {
330		/*
331		 * We're mapping a single page, and the desired
332		 * page protection is consistent with the bo.
333		 */
334
335		map->bo_kmap_type = ttm_bo_map_kmap;
336		map->page = ttm->pages[start_page];
337		map->virtual = kmap(map->page);
338	} else {
339		/*
340		 * We need to use vmap to get the desired page protection
341		 * or to make the buffer object look contiguous.
342		 */
343		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
344		map->bo_kmap_type = ttm_bo_map_vmap;
345		map->virtual = vmap(ttm->pages + start_page, num_pages,
346				    0, prot);
347	}
348	return (!map->virtual) ? -ENOMEM : 0;
349}
350
351int ttm_bo_kmap(struct ttm_buffer_object *bo,
352		unsigned long start_page, unsigned long num_pages,
353		struct ttm_bo_kmap_obj *map)
354{
355	unsigned long offset, size;
356	int ret;
357
358	map->virtual = NULL;
359	map->bo = bo;
360	if (num_pages > PFN_UP(bo->resource->size))
361		return -EINVAL;
362	if ((start_page + num_pages) > PFN_UP(bo->resource->size))
363		return -EINVAL;
364
365	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
366	if (ret)
367		return ret;
368	if (!bo->resource->bus.is_iomem) {
369		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
370	} else {
371		offset = start_page << PAGE_SHIFT;
372		size = num_pages << PAGE_SHIFT;
373		return ttm_bo_ioremap(bo, offset, size, map);
374	}
375}
376EXPORT_SYMBOL(ttm_bo_kmap);
377
378void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
379{
380	if (!map->virtual)
381		return;
382	switch (map->bo_kmap_type) {
383	case ttm_bo_map_iomap:
384		iounmap(map->virtual);
385		break;
386	case ttm_bo_map_vmap:
387		vunmap(map->virtual);
388		break;
389	case ttm_bo_map_kmap:
390		kunmap(map->page);
391		break;
392	case ttm_bo_map_premapped:
393		break;
394	default:
395		BUG();
396	}
397	ttm_mem_io_free(map->bo->bdev, map->bo->resource);
398	map->virtual = NULL;
399	map->page = NULL;
400}
401EXPORT_SYMBOL(ttm_bo_kunmap);
402
403int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
404{
405	struct ttm_resource *mem = bo->resource;
406	int ret;
407
408	dma_resv_assert_held(bo->base.resv);
409
410	ret = ttm_mem_io_reserve(bo->bdev, mem);
411	if (ret)
412		return ret;
413
414	if (mem->bus.is_iomem) {
415		void __iomem *vaddr_iomem;
416
417		if (mem->bus.addr)
418			vaddr_iomem = (void __iomem *)mem->bus.addr;
419		else if (mem->bus.caching == ttm_write_combined)
420			vaddr_iomem = ioremap_wc(mem->bus.offset,
421						 bo->base.size);
422#ifdef CONFIG_X86
423		else if (mem->bus.caching == ttm_cached)
424			vaddr_iomem = ioremap_cache(mem->bus.offset,
425						  bo->base.size);
426#endif
427		else
428			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);
429
430		if (!vaddr_iomem)
431			return -ENOMEM;
432
433		iosys_map_set_vaddr_iomem(map, vaddr_iomem);
434
435	} else {
436		struct ttm_operation_ctx ctx = {
437			.interruptible = false,
438			.no_wait_gpu = false
439		};
440		struct ttm_tt *ttm = bo->ttm;
441		pgprot_t prot;
442		void *vaddr;
443
444		ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
445		if (ret)
446			return ret;
447
448		/*
449		 * We need to use vmap to get the desired page protection
450		 * or to make the buffer object look contiguous.
451		 */
452		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
453		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
454		if (!vaddr)
455			return -ENOMEM;
456
457		iosys_map_set_vaddr(map, vaddr);
458	}
459
460	return 0;
461}
462EXPORT_SYMBOL(ttm_bo_vmap);
463
464void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
465{
466	struct ttm_resource *mem = bo->resource;
467
468	dma_resv_assert_held(bo->base.resv);
469
470	if (iosys_map_is_null(map))
471		return;
472
473	if (!map->is_iomem)
474		vunmap(map->vaddr);
475	else if (!mem->bus.addr)
476		iounmap(map->vaddr_iomem);
477	iosys_map_clear(map);
478
479	ttm_mem_io_free(bo->bdev, bo->resource);
480}
481EXPORT_SYMBOL(ttm_bo_vunmap);
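A hedged sketch of the v6.2 vmap pair: with the reservation lock held, map the whole object into a struct iosys_map, copy through it, and unmap. The helper and payload are hypothetical; iosys_map_memcpy_to() handles both system and I/O mappings.

/* Hypothetical helper: copy a payload into a reserved BO via ttm_bo_vmap(). */
static int my_drv_fill_bo(struct ttm_buffer_object *bo,
			  const void *data, size_t size)
{
	struct iosys_map map;
	int ret;

	ret = ttm_bo_vmap(bo, &map);	/* caller holds bo->base.resv */
	if (ret)
		return ret;

	iosys_map_memcpy_to(&map, 0, data, size);

	ttm_bo_vunmap(bo, &map);
	return 0;
}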
482
483static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
484				 bool dst_use_tt)
485{
486	int ret;
487	ret = ttm_bo_wait(bo, false, false);
488	if (ret)
489		return ret;
490
491	if (!dst_use_tt)
492		ttm_bo_tt_destroy(bo);
493	ttm_resource_free(bo, &bo->resource);
494	return 0;
495}
496
497static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
498				struct dma_fence *fence,
499				bool dst_use_tt)
500{
501	struct ttm_buffer_object *ghost_obj;
502	int ret;
503
504	/**
505	 * This should help pipeline ordinary buffer moves.
506	 *
507	 * Hang old buffer memory on a new buffer object,
508	 * and leave it to be released when the GPU
509	 * operation has completed.
510	 */
511
512	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
513	if (ret)
514		return ret;
515
516	dma_resv_add_fence(&ghost_obj->base._resv, fence,
517			   DMA_RESV_USAGE_KERNEL);
518
519	/**
520	 * If we're not moving to fixed memory, the TTM object
521	 * needs to stay alive. Otherwise hang it on the ghost
522	 * bo to be unbound and destroyed.
523	 */
524
525	if (dst_use_tt)
526		ghost_obj->ttm = NULL;
527	else
528		bo->ttm = NULL;
529
530	dma_resv_unlock(&ghost_obj->base._resv);
531	ttm_bo_put(ghost_obj);
532	return 0;
533}
534
535static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
536				       struct dma_fence *fence)
537{
538	struct ttm_device *bdev = bo->bdev;
539	struct ttm_resource_manager *from;
540
541	from = ttm_manager_type(bdev, bo->resource->mem_type);
542
543	/**
544	 * BO doesn't have a TTM we need to bind/unbind. Just remember
545	 * this eviction and free up the allocation
546	 */
547	spin_lock(&from->move_lock);
548	if (!from->move || dma_fence_is_later(fence, from->move)) {
549		dma_fence_put(from->move);
550		from->move = dma_fence_get(fence);
551	}
552	spin_unlock(&from->move_lock);
553
554	ttm_resource_free(bo, &bo->resource);
555}
556
557int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
558			      struct dma_fence *fence,
559			      bool evict,
560			      bool pipeline,
561			      struct ttm_resource *new_mem)
562{
563	struct ttm_device *bdev = bo->bdev;
564	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
565	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
566	int ret = 0;
567
568	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
569	if (!evict)
570		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
571	else if (!from->use_tt && pipeline)
572		ttm_bo_move_pipeline_evict(bo, fence);
573	else
574		ret = ttm_bo_wait_free_node(bo, man->use_tt);
575
576	if (ret)
577		return ret;
578
579	ttm_bo_assign_mem(bo, new_mem);
580
581	return 0;
582}
583EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
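For comparison with the v4.6 sketch earlier, a hedged sketch against the v6.2 signature above: the fence is now a struct dma_fence and the final bool selects pipelined eviction. my_drv_emit_copy() remains hypothetical.

/* Hypothetical accelerated move using the v6.2 cleanup helper. */
static int my_drv_move_blit(struct ttm_buffer_object *bo, bool evict,
			    struct ttm_resource *new_mem)
{
	struct dma_fence *fence;
	int ret;

	ret = my_drv_emit_copy(bo, bo->resource, new_mem, &fence); /* hypothetical */
	if (ret)
		return ret;

	/* pipeline=true allows an eviction to complete asynchronously. */
	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
	dma_fence_put(fence);
	return ret;
}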
584
585void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
586			      struct ttm_resource *new_mem)
587{
588	struct ttm_device *bdev = bo->bdev;
589	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
590	int ret;
591
592	ret = ttm_bo_wait_free_node(bo, man->use_tt);
593	if (WARN_ON(ret))
594		return;
595
596	ttm_bo_assign_mem(bo, new_mem);
597}
598EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);
599
600/**
601 * ttm_bo_pipeline_gutting - purge the contents of a bo
602 * @bo: The buffer object
603 *
604 * Purge the contents of a bo, async if the bo is not idle.
605 * After a successful call, the bo is left unpopulated in
606 * system placement. The function may wait uninterruptibly
607 * for idle on OOM.
608 *
609 * Return: 0 if successful, negative error code on failure.
610 */
611int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
612{
613	static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
614	struct ttm_buffer_object *ghost;
615	struct ttm_resource *sys_res;
616	struct ttm_tt *ttm;
617	int ret;
618
619	ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
620	if (ret)
621		return ret;
622
623	/* If already idle, no need for ghost object dance. */
624	ret = ttm_bo_wait(bo, false, true);
625	if (ret != -EBUSY) {
626		if (!bo->ttm) {
627			/* See comment below about clearing. */
628			ret = ttm_tt_create(bo, true);
629			if (ret)
630				goto error_free_sys_mem;
631		} else {
632			ttm_tt_unpopulate(bo->bdev, bo->ttm);
633			if (bo->type == ttm_bo_type_device)
634				ttm_tt_mark_for_clear(bo->ttm);
635		}
636		ttm_resource_free(bo, &bo->resource);
637		ttm_bo_assign_mem(bo, sys_res);
638		return 0;
639	}
640
641	/*
642	 * We need an unpopulated ttm_tt after giving our current one,
643	 * if any, to the ghost object. And we can't afford to fail
644	 * creating one *after* the operation. If the bo subsequently gets
645	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
646	 * to avoid leaking sensitive information to user-space.
647	 */
648
649	ttm = bo->ttm;
650	bo->ttm = NULL;
651	ret = ttm_tt_create(bo, true);
652	swap(bo->ttm, ttm);
653	if (ret)
654		goto error_free_sys_mem;
655
656	ret = ttm_buffer_object_transfer(bo, &ghost);
657	if (ret)
658		goto error_destroy_tt;
659
660	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
661	/* Last resort, wait for the BO to be idle when we are OOM */
662	if (ret)
663		ttm_bo_wait(bo, false, false);
664
665	dma_resv_unlock(&ghost->base._resv);
666	ttm_bo_put(ghost);
667	bo->ttm = ttm;
668	ttm_bo_assign_mem(bo, sys_res);
669	return 0;
670
671error_destroy_tt:
672	ttm_tt_destroy(bo->bdev, ttm);
673
674error_free_sys_mem:
675	ttm_resource_free(bo, &sys_res);
676	return ret;
677}