v3.1
  1/**************************************************************************
  2 *
  3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
  4 * All Rights Reserved.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27/*
 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 29 */
 30
 31#include "ttm/ttm_bo_driver.h"
 32#include "ttm/ttm_placement.h"
 33#include <linux/io.h>
 34#include <linux/highmem.h>
 35#include <linux/wait.h>
 36#include <linux/slab.h>
 37#include <linux/vmalloc.h>
 38#include <linux/module.h>
 39
 40void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 41{
 42	ttm_bo_mem_put(bo, &bo->mem);
 43}
 44
 45int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 46		    bool evict, bool no_wait_reserve,
 47		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 48{
 49	struct ttm_tt *ttm = bo->ttm;
 50	struct ttm_mem_reg *old_mem = &bo->mem;
 51	int ret;
 52
 53	if (old_mem->mem_type != TTM_PL_SYSTEM) {
 54		ttm_tt_unbind(ttm);
 55		ttm_bo_free_old_node(bo);
 56		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
 57				TTM_PL_MASK_MEM);
 58		old_mem->mem_type = TTM_PL_SYSTEM;
 59	}
 60
 61	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
 62	if (unlikely(ret != 0))
 63		return ret;
 64
 65	if (new_mem->mem_type != TTM_PL_SYSTEM) {
 66		ret = ttm_tt_bind(ttm, new_mem);
 67		if (unlikely(ret != 0))
 68			return ret;
 69	}
 70
 71	*old_mem = *new_mem;
 72	new_mem->mm_node = NULL;
 73
 74	return 0;
 75}
 76EXPORT_SYMBOL(ttm_bo_move_ttm);
 77
 78int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
 79{
 80	if (likely(man->io_reserve_fastpath))
 81		return 0;
 82
 83	if (interruptible)
 84		return mutex_lock_interruptible(&man->io_reserve_mutex);
 85
 86	mutex_lock(&man->io_reserve_mutex);
 87	return 0;
 88}
 89
 90void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
 91{
 92	if (likely(man->io_reserve_fastpath))
 93		return;
 94
 95	mutex_unlock(&man->io_reserve_mutex);
 96}
 97
 98static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
 99{
100	struct ttm_buffer_object *bo;
101
102	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
103		return -EAGAIN;
104
105	bo = list_first_entry(&man->io_reserve_lru,
106			      struct ttm_buffer_object,
107			      io_reserve_lru);
108	list_del_init(&bo->io_reserve_lru);
109	ttm_bo_unmap_virtual_locked(bo);
110
111	return 0;
112}
113
114static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
115			      struct ttm_mem_reg *mem)
116{
117	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
118	int ret = 0;
119
120	if (!bdev->driver->io_mem_reserve)
121		return 0;
122	if (likely(man->io_reserve_fastpath))
123		return bdev->driver->io_mem_reserve(bdev, mem);
124
125	if (bdev->driver->io_mem_reserve &&
126	    mem->bus.io_reserved_count++ == 0) {
127retry:
128		ret = bdev->driver->io_mem_reserve(bdev, mem);
129		if (ret == -EAGAIN) {
130			ret = ttm_mem_io_evict(man);
131			if (ret == 0)
132				goto retry;
133		}
134	}
135	return ret;
136}
137
138static void ttm_mem_io_free(struct ttm_bo_device *bdev,
139			    struct ttm_mem_reg *mem)
140{
141	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
142
143	if (likely(man->io_reserve_fastpath))
144		return;
145
146	if (bdev->driver->io_mem_reserve &&
147	    --mem->bus.io_reserved_count == 0 &&
148	    bdev->driver->io_mem_free)
149		bdev->driver->io_mem_free(bdev, mem);
150
151}
152
153int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
154{
155	struct ttm_mem_reg *mem = &bo->mem;
156	int ret;
157
158	if (!mem->bus.io_reserved_vm) {
159		struct ttm_mem_type_manager *man =
160			&bo->bdev->man[mem->mem_type];
161
162		ret = ttm_mem_io_reserve(bo->bdev, mem);
163		if (unlikely(ret != 0))
164			return ret;
165		mem->bus.io_reserved_vm = true;
166		if (man->use_io_reserve_lru)
167			list_add_tail(&bo->io_reserve_lru,
168				      &man->io_reserve_lru);
169	}
170	return 0;
171}
172
173void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
174{
175	struct ttm_mem_reg *mem = &bo->mem;
176
177	if (mem->bus.io_reserved_vm) {
178		mem->bus.io_reserved_vm = false;
179		list_del_init(&bo->io_reserve_lru);
180		ttm_mem_io_free(bo->bdev, mem);
181	}
182}
183
184int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
185			void **virtual)
186{
187	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
188	int ret;
189	void *addr;
190
191	*virtual = NULL;
192	(void) ttm_mem_io_lock(man, false);
193	ret = ttm_mem_io_reserve(bdev, mem);
194	ttm_mem_io_unlock(man);
195	if (ret || !mem->bus.is_iomem)
196		return ret;
197
198	if (mem->bus.addr) {
199		addr = mem->bus.addr;
200	} else {
201		if (mem->placement & TTM_PL_FLAG_WC)
202			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
203		else
204			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
205		if (!addr) {
206			(void) ttm_mem_io_lock(man, false);
207			ttm_mem_io_free(bdev, mem);
208			ttm_mem_io_unlock(man);
209			return -ENOMEM;
210		}
211	}
212	*virtual = addr;
213	return 0;
214}
215
216void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
217			 void *virtual)
218{
219	struct ttm_mem_type_manager *man;
220
221	man = &bdev->man[mem->mem_type];
222
223	if (virtual && mem->bus.addr == NULL)
224		iounmap(virtual);
225	(void) ttm_mem_io_lock(man, false);
226	ttm_mem_io_free(bdev, mem);
227	ttm_mem_io_unlock(man);
228}
229
230static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
231{
232	uint32_t *dstP =
233	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
234	uint32_t *srcP =
235	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
236
237	int i;
238	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
239		iowrite32(ioread32(srcP++), dstP++);
240	return 0;
241}
242
243static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
244				unsigned long page,
245				pgprot_t prot)
246{
247	struct page *d = ttm_tt_get_page(ttm, page);
248	void *dst;
249
250	if (!d)
251		return -ENOMEM;
252
253	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
254
255#ifdef CONFIG_X86
256	dst = kmap_atomic_prot(d, prot);
257#else
258	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
259		dst = vmap(&d, 1, 0, prot);
260	else
261		dst = kmap(d);
262#endif
263	if (!dst)
264		return -ENOMEM;
265
266	memcpy_fromio(dst, src, PAGE_SIZE);
267
268#ifdef CONFIG_X86
269	kunmap_atomic(dst);
270#else
271	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
272		vunmap(dst);
273	else
274		kunmap(d);
275#endif
276
277	return 0;
278}
279
280static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
281				unsigned long page,
282				pgprot_t prot)
283{
284	struct page *s = ttm_tt_get_page(ttm, page);
285	void *src;
286
287	if (!s)
288		return -ENOMEM;
289
290	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
291#ifdef CONFIG_X86
292	src = kmap_atomic_prot(s, prot);
293#else
294	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
295		src = vmap(&s, 1, 0, prot);
296	else
297		src = kmap(s);
298#endif
299	if (!src)
300		return -ENOMEM;
301
302	memcpy_toio(dst, src, PAGE_SIZE);
303
304#ifdef CONFIG_X86
305	kunmap_atomic(src);
306#else
307	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
308		vunmap(src);
309	else
310		kunmap(s);
311#endif
312
313	return 0;
314}
315
316int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
317		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
318		       struct ttm_mem_reg *new_mem)
319{
320	struct ttm_bo_device *bdev = bo->bdev;
321	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
322	struct ttm_tt *ttm = bo->ttm;
323	struct ttm_mem_reg *old_mem = &bo->mem;
324	struct ttm_mem_reg old_copy = *old_mem;
325	void *old_iomap;
326	void *new_iomap;
327	int ret;
328	unsigned long i;
329	unsigned long page;
330	unsigned long add = 0;
331	int dir;
332
333	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
334	if (ret)
335		return ret;
336	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
337	if (ret)
338		goto out;
339
340	if (old_iomap == NULL && new_iomap == NULL)
341		goto out2;
342	if (old_iomap == NULL && ttm == NULL)
343		goto out2;
344
345	add = 0;
346	dir = 1;
347
348	if ((old_mem->mem_type == new_mem->mem_type) &&
349	    (new_mem->start < old_mem->start + old_mem->size)) {
350		dir = -1;
351		add = new_mem->num_pages - 1;
352	}
353
354	for (i = 0; i < new_mem->num_pages; ++i) {
355		page = i * dir + add;
356		if (old_iomap == NULL) {
357			pgprot_t prot = ttm_io_prot(old_mem->placement,
358						    PAGE_KERNEL);
359			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
360						   prot);
361		} else if (new_iomap == NULL) {
362			pgprot_t prot = ttm_io_prot(new_mem->placement,
363						    PAGE_KERNEL);
364			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
365						   prot);
366		} else
367			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
368		if (ret)
369			goto out1;
370	}
371	mb();
372out2:
373	old_copy = *old_mem;
374	*old_mem = *new_mem;
375	new_mem->mm_node = NULL;
376
377	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
378		ttm_tt_unbind(ttm);
379		ttm_tt_destroy(ttm);
380		bo->ttm = NULL;
381	}
382
383out1:
384	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
385out:
386	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
387	ttm_bo_mem_put(bo, &old_copy);
388	return ret;
389}
390EXPORT_SYMBOL(ttm_bo_move_memcpy);
391
392static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
393{
394	kfree(bo);
395}
396
397/**
398 * ttm_buffer_object_transfer
399 *
400 * @bo: A pointer to a struct ttm_buffer_object.
401 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
402 * holding the data of @bo with the old placement.
403 *
404 * This is a utility function that may be called after an accelerated move
405 * has been scheduled. A new buffer object is created as a placeholder for
406 * the old data while it's being copied. When that buffer object is idle,
407 * it can be destroyed, releasing the space of the old placement.
408 * Returns:
409 * !0: Failure.
410 */
411
412static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
413				      struct ttm_buffer_object **new_obj)
414{
415	struct ttm_buffer_object *fbo;
416	struct ttm_bo_device *bdev = bo->bdev;
417	struct ttm_bo_driver *driver = bdev->driver;
418
419	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
420	if (!fbo)
421		return -ENOMEM;
422
423	*fbo = *bo;
424
425	/**
426	 * Fix up members that we shouldn't copy directly:
427	 * TODO: Explicit member copy would probably be better here.
428	 */
429
430	init_waitqueue_head(&fbo->event_queue);
431	INIT_LIST_HEAD(&fbo->ddestroy);
432	INIT_LIST_HEAD(&fbo->lru);
433	INIT_LIST_HEAD(&fbo->swap);
434	INIT_LIST_HEAD(&fbo->io_reserve_lru);
435	fbo->vm_node = NULL;
436	atomic_set(&fbo->cpu_writers, 0);
437
438	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
439	kref_init(&fbo->list_kref);
440	kref_init(&fbo->kref);
441	fbo->destroy = &ttm_transfered_destroy;
442
443	*new_obj = fbo;
444	return 0;
445}
446
447pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
448{
449#if defined(__i386__) || defined(__x86_64__)
450	if (caching_flags & TTM_PL_FLAG_WC)
451		tmp = pgprot_writecombine(tmp);
452	else if (boot_cpu_data.x86 > 3)
453		tmp = pgprot_noncached(tmp);
454
455#elif defined(__powerpc__)
456	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
457		pgprot_val(tmp) |= _PAGE_NO_CACHE;
458		if (caching_flags & TTM_PL_FLAG_UNCACHED)
459			pgprot_val(tmp) |= _PAGE_GUARDED;
460	}
461#endif
462#if defined(__ia64__)
463	if (caching_flags & TTM_PL_FLAG_WC)
464		tmp = pgprot_writecombine(tmp);
465	else
466		tmp = pgprot_noncached(tmp);
467#endif
468#if defined(__sparc__)
469	if (!(caching_flags & TTM_PL_FLAG_CACHED))
470		tmp = pgprot_noncached(tmp);
471#endif
472	return tmp;
473}
474EXPORT_SYMBOL(ttm_io_prot);
475
476static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
477			  unsigned long offset,
478			  unsigned long size,
479			  struct ttm_bo_kmap_obj *map)
480{
481	struct ttm_mem_reg *mem = &bo->mem;
482
483	if (bo->mem.bus.addr) {
484		map->bo_kmap_type = ttm_bo_map_premapped;
485		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
486	} else {
487		map->bo_kmap_type = ttm_bo_map_iomap;
488		if (mem->placement & TTM_PL_FLAG_WC)
489			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
490						  size);
491		else
492			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
493						       size);
494	}
495	return (!map->virtual) ? -ENOMEM : 0;
496}
497
498static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
499			   unsigned long start_page,
500			   unsigned long num_pages,
501			   struct ttm_bo_kmap_obj *map)
502{
503	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
504	struct ttm_tt *ttm = bo->ttm;
505	struct page *d;
506	int i;
507
508	BUG_ON(!ttm);
509	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
510		/*
511		 * We're mapping a single page, and the desired
512		 * page protection is consistent with the bo.
513		 */
514
515		map->bo_kmap_type = ttm_bo_map_kmap;
516		map->page = ttm_tt_get_page(ttm, start_page);
517		map->virtual = kmap(map->page);
518	} else {
519	    /*
520	     * Populate the part we're mapping;
521	     */
522		for (i = start_page; i < start_page + num_pages; ++i) {
523			d = ttm_tt_get_page(ttm, i);
524			if (!d)
525				return -ENOMEM;
526		}
527
528		/*
529		 * We need to use vmap to get the desired page protection
530		 * or to make the buffer object look contiguous.
531		 */
532		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
533			PAGE_KERNEL :
534			ttm_io_prot(mem->placement, PAGE_KERNEL);
535		map->bo_kmap_type = ttm_bo_map_vmap;
536		map->virtual = vmap(ttm->pages + start_page, num_pages,
537				    0, prot);
538	}
539	return (!map->virtual) ? -ENOMEM : 0;
540}
541
542int ttm_bo_kmap(struct ttm_buffer_object *bo,
543		unsigned long start_page, unsigned long num_pages,
544		struct ttm_bo_kmap_obj *map)
545{
546	struct ttm_mem_type_manager *man =
547		&bo->bdev->man[bo->mem.mem_type];
548	unsigned long offset, size;
549	int ret;
550
551	BUG_ON(!list_empty(&bo->swap));
552	map->virtual = NULL;
553	map->bo = bo;
554	if (num_pages > bo->num_pages)
555		return -EINVAL;
556	if (start_page > bo->num_pages)
557		return -EINVAL;
558#if 0
559	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
560		return -EPERM;
561#endif
562	(void) ttm_mem_io_lock(man, false);
563	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
564	ttm_mem_io_unlock(man);
565	if (ret)
566		return ret;
567	if (!bo->mem.bus.is_iomem) {
568		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
569	} else {
570		offset = start_page << PAGE_SHIFT;
571		size = num_pages << PAGE_SHIFT;
572		return ttm_bo_ioremap(bo, offset, size, map);
573	}
574}
575EXPORT_SYMBOL(ttm_bo_kmap);
576
577void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
578{
579	struct ttm_buffer_object *bo = map->bo;
580	struct ttm_mem_type_manager *man =
581		&bo->bdev->man[bo->mem.mem_type];
582
583	if (!map->virtual)
584		return;
585	switch (map->bo_kmap_type) {
586	case ttm_bo_map_iomap:
587		iounmap(map->virtual);
588		break;
589	case ttm_bo_map_vmap:
590		vunmap(map->virtual);
591		break;
592	case ttm_bo_map_kmap:
593		kunmap(map->page);
594		break;
595	case ttm_bo_map_premapped:
596		break;
597	default:
598		BUG();
599	}
600	(void) ttm_mem_io_lock(man, false);
601	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
602	ttm_mem_io_unlock(man);
603	map->virtual = NULL;
604	map->page = NULL;
605}
606EXPORT_SYMBOL(ttm_bo_kunmap);
607
608int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
609			      void *sync_obj,
610			      void *sync_obj_arg,
611			      bool evict, bool no_wait_reserve,
612			      bool no_wait_gpu,
613			      struct ttm_mem_reg *new_mem)
614{
615	struct ttm_bo_device *bdev = bo->bdev;
616	struct ttm_bo_driver *driver = bdev->driver;
617	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
618	struct ttm_mem_reg *old_mem = &bo->mem;
619	int ret;
620	struct ttm_buffer_object *ghost_obj;
621	void *tmp_obj = NULL;
622
623	spin_lock(&bdev->fence_lock);
624	if (bo->sync_obj) {
625		tmp_obj = bo->sync_obj;
626		bo->sync_obj = NULL;
627	}
628	bo->sync_obj = driver->sync_obj_ref(sync_obj);
629	bo->sync_obj_arg = sync_obj_arg;
630	if (evict) {
631		ret = ttm_bo_wait(bo, false, false, false);
632		spin_unlock(&bdev->fence_lock);
633		if (tmp_obj)
634			driver->sync_obj_unref(&tmp_obj);
635		if (ret)
636			return ret;
637
638		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
639		    (bo->ttm != NULL)) {
640			ttm_tt_unbind(bo->ttm);
641			ttm_tt_destroy(bo->ttm);
642			bo->ttm = NULL;
643		}
644		ttm_bo_free_old_node(bo);
645	} else {
646		/**
647		 * This should help pipeline ordinary buffer moves.
648		 *
649		 * Hang old buffer memory on a new buffer object,
650		 * and leave it to be released when the GPU
651		 * operation has completed.
652		 */
653
654		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
655		spin_unlock(&bdev->fence_lock);
656		if (tmp_obj)
657			driver->sync_obj_unref(&tmp_obj);
658
659		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
660		if (ret)
661			return ret;
662
 
 
663		/**
664		 * If we're not moving to fixed memory, the TTM object
665		 * needs to stay alive. Otherwise hang it on the ghost
666		 * bo to be unbound and destroyed.
667		 */
668
669		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
670			ghost_obj->ttm = NULL;
671		else
672			bo->ttm = NULL;
673
674		ttm_bo_unreserve(ghost_obj);
675		ttm_bo_unref(&ghost_obj);
676	}
677
678	*old_mem = *new_mem;
679	new_mem->mm_node = NULL;
680
681	return 0;
682}
683EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
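
The v3.1 interface above takes driver-specific sync objects directly in ttm_bo_move_accel_cleanup(). Below is a minimal, hypothetical sketch (not kernel code) of how a driver's move callback might use it after scheduling a GPU copy; my_driver_blit() and its sync object are assumptions for illustration only.

/* Hypothetical driver helper, sketched against the v3.1 API above. */
static int example_move_blit(struct ttm_buffer_object *bo,
			     bool evict, bool no_wait_reserve,
			     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	void *fence;	/* driver-specific sync object (assumption) */
	int ret;

	/* Schedule the copy on the GPU; my_driver_blit() is made up here. */
	ret = my_driver_blit(bo, new_mem, &fence);
	if (ret)
		return ret;

	/*
	 * Let TTM hang the old memory on a ghost buffer object until the
	 * sync object signals, rather than stalling the CPU here.
	 */
	return ttm_bo_move_accel_cleanup(bo, fence, NULL, evict,
					 no_wait_reserve, no_wait_gpu,
					 new_mem);
}
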
v4.10.11
  1/**************************************************************************
  2 *
  3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
  4 * All Rights Reserved.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27/*
 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 29 */
 30
 31#include <drm/ttm/ttm_bo_driver.h>
 32#include <drm/ttm/ttm_placement.h>
 33#include <drm/drm_vma_manager.h>
 34#include <linux/io.h>
 35#include <linux/highmem.h>
 36#include <linux/wait.h>
 37#include <linux/slab.h>
 38#include <linux/vmalloc.h>
 39#include <linux/module.h>
 40#include <linux/reservation.h>
 41
 42void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 43{
 44	ttm_bo_mem_put(bo, &bo->mem);
 45}
 46
 47int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 48		    bool interruptible, bool no_wait_gpu,
 49		    struct ttm_mem_reg *new_mem)
 50{
 51	struct ttm_tt *ttm = bo->ttm;
 52	struct ttm_mem_reg *old_mem = &bo->mem;
 53	int ret;
 54
 55	if (old_mem->mem_type != TTM_PL_SYSTEM) {
 56		ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
 57
 58		if (unlikely(ret != 0)) {
 59			if (ret != -ERESTARTSYS)
 60				pr_err("Failed to expire sync object before unbinding TTM\n");
 61			return ret;
 62		}
 63
 64		ttm_tt_unbind(ttm);
 65		ttm_bo_free_old_node(bo);
 66		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
 67				TTM_PL_MASK_MEM);
 68		old_mem->mem_type = TTM_PL_SYSTEM;
 69	}
 70
 71	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
 72	if (unlikely(ret != 0))
 73		return ret;
 74
 75	if (new_mem->mem_type != TTM_PL_SYSTEM) {
 76		ret = ttm_tt_bind(ttm, new_mem);
 77		if (unlikely(ret != 0))
 78			return ret;
 79	}
 80
 81	*old_mem = *new_mem;
 82	new_mem->mm_node = NULL;
 83
 84	return 0;
 85}
 86EXPORT_SYMBOL(ttm_bo_move_ttm);
 87
 88int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
 89{
 90	if (likely(man->io_reserve_fastpath))
 91		return 0;
 92
 93	if (interruptible)
 94		return mutex_lock_interruptible(&man->io_reserve_mutex);
 95
 96	mutex_lock(&man->io_reserve_mutex);
 97	return 0;
 98}
 99EXPORT_SYMBOL(ttm_mem_io_lock);
100
101void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
102{
103	if (likely(man->io_reserve_fastpath))
104		return;
105
106	mutex_unlock(&man->io_reserve_mutex);
107}
108EXPORT_SYMBOL(ttm_mem_io_unlock);
109
110static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
111{
112	struct ttm_buffer_object *bo;
113
114	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
115		return -EAGAIN;
116
117	bo = list_first_entry(&man->io_reserve_lru,
118			      struct ttm_buffer_object,
119			      io_reserve_lru);
120	list_del_init(&bo->io_reserve_lru);
121	ttm_bo_unmap_virtual_locked(bo);
122
123	return 0;
124}
125
126
127int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
128		       struct ttm_mem_reg *mem)
129{
130	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
131	int ret = 0;
132
133	if (!bdev->driver->io_mem_reserve)
134		return 0;
135	if (likely(man->io_reserve_fastpath))
136		return bdev->driver->io_mem_reserve(bdev, mem);
137
138	if (bdev->driver->io_mem_reserve &&
139	    mem->bus.io_reserved_count++ == 0) {
140retry:
141		ret = bdev->driver->io_mem_reserve(bdev, mem);
142		if (ret == -EAGAIN) {
143			ret = ttm_mem_io_evict(man);
144			if (ret == 0)
145				goto retry;
146		}
147	}
148	return ret;
149}
150EXPORT_SYMBOL(ttm_mem_io_reserve);
151
152void ttm_mem_io_free(struct ttm_bo_device *bdev,
153		     struct ttm_mem_reg *mem)
154{
155	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
156
157	if (likely(man->io_reserve_fastpath))
158		return;
159
160	if (bdev->driver->io_mem_reserve &&
161	    --mem->bus.io_reserved_count == 0 &&
162	    bdev->driver->io_mem_free)
163		bdev->driver->io_mem_free(bdev, mem);
164
165}
166EXPORT_SYMBOL(ttm_mem_io_free);
167
168int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
169{
170	struct ttm_mem_reg *mem = &bo->mem;
171	int ret;
172
173	if (!mem->bus.io_reserved_vm) {
174		struct ttm_mem_type_manager *man =
175			&bo->bdev->man[mem->mem_type];
176
177		ret = ttm_mem_io_reserve(bo->bdev, mem);
178		if (unlikely(ret != 0))
179			return ret;
180		mem->bus.io_reserved_vm = true;
181		if (man->use_io_reserve_lru)
182			list_add_tail(&bo->io_reserve_lru,
183				      &man->io_reserve_lru);
184	}
185	return 0;
186}
187
188void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
189{
190	struct ttm_mem_reg *mem = &bo->mem;
191
192	if (mem->bus.io_reserved_vm) {
193		mem->bus.io_reserved_vm = false;
194		list_del_init(&bo->io_reserve_lru);
195		ttm_mem_io_free(bo->bdev, mem);
196	}
197}
198
199static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
200			void **virtual)
201{
202	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
203	int ret;
204	void *addr;
205
206	*virtual = NULL;
207	(void) ttm_mem_io_lock(man, false);
208	ret = ttm_mem_io_reserve(bdev, mem);
209	ttm_mem_io_unlock(man);
210	if (ret || !mem->bus.is_iomem)
211		return ret;
212
213	if (mem->bus.addr) {
214		addr = mem->bus.addr;
215	} else {
216		if (mem->placement & TTM_PL_FLAG_WC)
217			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
218		else
219			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
220		if (!addr) {
221			(void) ttm_mem_io_lock(man, false);
222			ttm_mem_io_free(bdev, mem);
223			ttm_mem_io_unlock(man);
224			return -ENOMEM;
225		}
226	}
227	*virtual = addr;
228	return 0;
229}
230
231static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
232			 void *virtual)
233{
234	struct ttm_mem_type_manager *man;
235
236	man = &bdev->man[mem->mem_type];
237
238	if (virtual && mem->bus.addr == NULL)
239		iounmap(virtual);
240	(void) ttm_mem_io_lock(man, false);
241	ttm_mem_io_free(bdev, mem);
242	ttm_mem_io_unlock(man);
243}
244
245static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
246{
247	uint32_t *dstP =
248	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
249	uint32_t *srcP =
250	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
251
252	int i;
253	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
254		iowrite32(ioread32(srcP++), dstP++);
255	return 0;
256}
257
258static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
259				unsigned long page,
260				pgprot_t prot)
261{
262	struct page *d = ttm->pages[page];
263	void *dst;
264
265	if (!d)
266		return -ENOMEM;
267
268	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
269
270#ifdef CONFIG_X86
271	dst = kmap_atomic_prot(d, prot);
272#else
273	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
274		dst = vmap(&d, 1, 0, prot);
275	else
276		dst = kmap(d);
277#endif
278	if (!dst)
279		return -ENOMEM;
280
281	memcpy_fromio(dst, src, PAGE_SIZE);
282
283#ifdef CONFIG_X86
284	kunmap_atomic(dst);
285#else
286	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
287		vunmap(dst);
288	else
289		kunmap(d);
290#endif
291
292	return 0;
293}
294
295static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
296				unsigned long page,
297				pgprot_t prot)
298{
299	struct page *s = ttm->pages[page];
300	void *src;
301
302	if (!s)
303		return -ENOMEM;
304
305	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
306#ifdef CONFIG_X86
307	src = kmap_atomic_prot(s, prot);
308#else
309	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
310		src = vmap(&s, 1, 0, prot);
311	else
312		src = kmap(s);
313#endif
314	if (!src)
315		return -ENOMEM;
316
317	memcpy_toio(dst, src, PAGE_SIZE);
318
319#ifdef CONFIG_X86
320	kunmap_atomic(src);
321#else
322	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
323		vunmap(src);
324	else
325		kunmap(s);
326#endif
327
328	return 0;
329}
330
331int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
332		       bool interruptible, bool no_wait_gpu,
333		       struct ttm_mem_reg *new_mem)
334{
335	struct ttm_bo_device *bdev = bo->bdev;
336	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
337	struct ttm_tt *ttm = bo->ttm;
338	struct ttm_mem_reg *old_mem = &bo->mem;
339	struct ttm_mem_reg old_copy = *old_mem;
340	void *old_iomap;
341	void *new_iomap;
342	int ret;
343	unsigned long i;
344	unsigned long page;
345	unsigned long add = 0;
346	int dir;
347
348	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
349	if (ret)
350		return ret;
351
352	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
353	if (ret)
354		return ret;
355	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
356	if (ret)
357		goto out;
358
359	/*
360	 * Single TTM move. NOP.
361	 */
362	if (old_iomap == NULL && new_iomap == NULL)
363		goto out2;
364
365	/*
366	 * Don't move nonexistent data. Clear destination instead.
367	 */
368	if (old_iomap == NULL &&
369	    (ttm == NULL || (ttm->state == tt_unpopulated &&
370			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
371		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
372		goto out2;
373	}
374
375	/*
376	 * TTM might be null for moves within the same region.
377	 */
378	if (ttm && ttm->state == tt_unpopulated) {
379		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
380		if (ret)
381			goto out1;
382	}
383
384	add = 0;
385	dir = 1;
386
387	if ((old_mem->mem_type == new_mem->mem_type) &&
388	    (new_mem->start < old_mem->start + old_mem->size)) {
389		dir = -1;
390		add = new_mem->num_pages - 1;
391	}
392
393	for (i = 0; i < new_mem->num_pages; ++i) {
394		page = i * dir + add;
395		if (old_iomap == NULL) {
396			pgprot_t prot = ttm_io_prot(old_mem->placement,
397						    PAGE_KERNEL);
398			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
399						   prot);
400		} else if (new_iomap == NULL) {
401			pgprot_t prot = ttm_io_prot(new_mem->placement,
402						    PAGE_KERNEL);
403			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
404						   prot);
405		} else
406			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
407		if (ret)
408			goto out1;
409	}
410	mb();
411out2:
412	old_copy = *old_mem;
413	*old_mem = *new_mem;
414	new_mem->mm_node = NULL;
415
416	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
417		ttm_tt_destroy(ttm);
418		bo->ttm = NULL;
419	}
420
421out1:
422	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
423out:
424	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
425
426	/*
427	 * On error, keep the mm node!
428	 */
429	if (!ret)
430		ttm_bo_mem_put(bo, &old_copy);
431	return ret;
432}
433EXPORT_SYMBOL(ttm_bo_move_memcpy);
434
435static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
436{
437	kfree(bo);
438}
439
440/**
441 * ttm_buffer_object_transfer
442 *
443 * @bo: A pointer to a struct ttm_buffer_object.
444 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
445 * holding the data of @bo with the old placement.
446 *
447 * This is a utility function that may be called after an accelerated move
448 * has been scheduled. A new buffer object is created as a placeholder for
449 * the old data while it's being copied. When that buffer object is idle,
450 * it can be destroyed, releasing the space of the old placement.
451 * Returns:
452 * !0: Failure.
453 */
454
455static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
456				      struct ttm_buffer_object **new_obj)
457{
458	struct ttm_buffer_object *fbo;
459	int ret;
460
461	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
462	if (!fbo)
463		return -ENOMEM;
464
465	*fbo = *bo;
466
467	/**
468	 * Fix up members that we shouldn't copy directly:
469	 * TODO: Explicit member copy would probably be better here.
470	 */
471
472	INIT_LIST_HEAD(&fbo->ddestroy);
473	INIT_LIST_HEAD(&fbo->lru);
474	INIT_LIST_HEAD(&fbo->swap);
475	INIT_LIST_HEAD(&fbo->io_reserve_lru);
476	fbo->moving = NULL;
477	drm_vma_node_reset(&fbo->vma_node);
478	atomic_set(&fbo->cpu_writers, 0);
479
480	kref_init(&fbo->list_kref);
481	kref_init(&fbo->kref);
482	fbo->destroy = &ttm_transfered_destroy;
483	fbo->acc_size = 0;
484	fbo->resv = &fbo->ttm_resv;
485	reservation_object_init(fbo->resv);
486	ret = ww_mutex_trylock(&fbo->resv->lock);
487	WARN_ON(!ret);
488
489	*new_obj = fbo;
490	return 0;
491}
492
493pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
494{
495	/* Cached mappings need no adjustment */
496	if (caching_flags & TTM_PL_FLAG_CACHED)
497		return tmp;
498
499#if defined(__i386__) || defined(__x86_64__)
500	if (caching_flags & TTM_PL_FLAG_WC)
501		tmp = pgprot_writecombine(tmp);
502	else if (boot_cpu_data.x86 > 3)
503		tmp = pgprot_noncached(tmp);
504#endif
505#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
506    defined(__powerpc__)
507	if (caching_flags & TTM_PL_FLAG_WC)
508		tmp = pgprot_writecombine(tmp);
509	else
510		tmp = pgprot_noncached(tmp);
511#endif
512#if defined(__sparc__) || defined(__mips__)
513	tmp = pgprot_noncached(tmp);
514#endif
515	return tmp;
516}
517EXPORT_SYMBOL(ttm_io_prot);
518
519static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
520			  unsigned long offset,
521			  unsigned long size,
522			  struct ttm_bo_kmap_obj *map)
523{
524	struct ttm_mem_reg *mem = &bo->mem;
525
526	if (bo->mem.bus.addr) {
527		map->bo_kmap_type = ttm_bo_map_premapped;
528		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
529	} else {
530		map->bo_kmap_type = ttm_bo_map_iomap;
531		if (mem->placement & TTM_PL_FLAG_WC)
532			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
533						  size);
534		else
535			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
536						       size);
537	}
538	return (!map->virtual) ? -ENOMEM : 0;
539}
540
541static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
542			   unsigned long start_page,
543			   unsigned long num_pages,
544			   struct ttm_bo_kmap_obj *map)
545{
546	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
547	struct ttm_tt *ttm = bo->ttm;
548	int ret;
549
550	BUG_ON(!ttm);
551
552	if (ttm->state == tt_unpopulated) {
553		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
554		if (ret)
555			return ret;
556	}
557
558	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
559		/*
560		 * We're mapping a single page, and the desired
561		 * page protection is consistent with the bo.
562		 */
563
564		map->bo_kmap_type = ttm_bo_map_kmap;
565		map->page = ttm->pages[start_page];
566		map->virtual = kmap(map->page);
567	} else {
568		/*
569		 * We need to use vmap to get the desired page protection
570		 * or to make the buffer object look contiguous.
571		 */
572		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
573		map->bo_kmap_type = ttm_bo_map_vmap;
574		map->virtual = vmap(ttm->pages + start_page, num_pages,
575				    0, prot);
576	}
577	return (!map->virtual) ? -ENOMEM : 0;
578}
579
580int ttm_bo_kmap(struct ttm_buffer_object *bo,
581		unsigned long start_page, unsigned long num_pages,
582		struct ttm_bo_kmap_obj *map)
583{
584	struct ttm_mem_type_manager *man =
585		&bo->bdev->man[bo->mem.mem_type];
586	unsigned long offset, size;
587	int ret;
588
589	BUG_ON(!list_empty(&bo->swap));
590	map->virtual = NULL;
591	map->bo = bo;
592	if (num_pages > bo->num_pages)
593		return -EINVAL;
594	if (start_page > bo->num_pages)
595		return -EINVAL;
596#if 0
597	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
598		return -EPERM;
599#endif
600	(void) ttm_mem_io_lock(man, false);
601	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
602	ttm_mem_io_unlock(man);
603	if (ret)
604		return ret;
605	if (!bo->mem.bus.is_iomem) {
606		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
607	} else {
608		offset = start_page << PAGE_SHIFT;
609		size = num_pages << PAGE_SHIFT;
610		return ttm_bo_ioremap(bo, offset, size, map);
611	}
612}
613EXPORT_SYMBOL(ttm_bo_kmap);
614
615void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
616{
617	struct ttm_buffer_object *bo = map->bo;
618	struct ttm_mem_type_manager *man =
619		&bo->bdev->man[bo->mem.mem_type];
620
621	if (!map->virtual)
622		return;
623	switch (map->bo_kmap_type) {
624	case ttm_bo_map_iomap:
625		iounmap(map->virtual);
626		break;
627	case ttm_bo_map_vmap:
628		vunmap(map->virtual);
629		break;
630	case ttm_bo_map_kmap:
631		kunmap(map->page);
632		break;
633	case ttm_bo_map_premapped:
634		break;
635	default:
636		BUG();
637	}
638	(void) ttm_mem_io_lock(man, false);
639	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
640	ttm_mem_io_unlock(man);
641	map->virtual = NULL;
642	map->page = NULL;
643}
644EXPORT_SYMBOL(ttm_bo_kunmap);
645
646int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
647			      struct dma_fence *fence,
648			      bool evict,
649			      struct ttm_mem_reg *new_mem)
650{
651	struct ttm_bo_device *bdev = bo->bdev;
652	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
653	struct ttm_mem_reg *old_mem = &bo->mem;
654	int ret;
655	struct ttm_buffer_object *ghost_obj;
656
657	reservation_object_add_excl_fence(bo->resv, fence);
658	if (evict) {
659		ret = ttm_bo_wait(bo, false, false);
660		if (ret)
661			return ret;
662
663		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
664			ttm_tt_destroy(bo->ttm);
665			bo->ttm = NULL;
666		}
667		ttm_bo_free_old_node(bo);
668	} else {
669		/**
670		 * This should help pipeline ordinary buffer moves.
671		 *
672		 * Hang old buffer memory on a new buffer object,
673		 * and leave it to be released when the GPU
674		 * operation has completed.
675		 */
676
677		dma_fence_put(bo->moving);
678		bo->moving = dma_fence_get(fence);
679
680		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
681		if (ret)
682			return ret;
683
684		reservation_object_add_excl_fence(ghost_obj->resv, fence);
685
686		/**
687		 * If we're not moving to fixed memory, the TTM object
688		 * needs to stay alive. Otherwise hang it on the ghost
689		 * bo to be unbound and destroyed.
690		 */
691
692		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
693			ghost_obj->ttm = NULL;
694		else
695			bo->ttm = NULL;
696
697		ttm_bo_unreserve(ghost_obj);
698		ttm_bo_unref(&ghost_obj);
699	}
700
701	*old_mem = *new_mem;
702	new_mem->mm_node = NULL;
703
704	return 0;
705}
706EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
707
708int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
709			 struct dma_fence *fence, bool evict,
710			 struct ttm_mem_reg *new_mem)
711{
712	struct ttm_bo_device *bdev = bo->bdev;
713	struct ttm_mem_reg *old_mem = &bo->mem;
714
715	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
716	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
717
718	int ret;
719
720	reservation_object_add_excl_fence(bo->resv, fence);
721
722	if (!evict) {
723		struct ttm_buffer_object *ghost_obj;
724
725		/**
726		 * This should help pipeline ordinary buffer moves.
727		 *
728		 * Hang old buffer memory on a new buffer object,
729		 * and leave it to be released when the GPU
730		 * operation has completed.
731		 */
732
733		dma_fence_put(bo->moving);
734		bo->moving = dma_fence_get(fence);
735
736		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
737		if (ret)
738			return ret;
739
740		reservation_object_add_excl_fence(ghost_obj->resv, fence);
741
742		/**
743		 * If we're not moving to fixed memory, the TTM object
744		 * needs to stay alive. Otherwise hang it on the ghost
745		 * bo to be unbound and destroyed.
746		 */
747
748		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
749			ghost_obj->ttm = NULL;
750		else
751			bo->ttm = NULL;
752
753		ttm_bo_unreserve(ghost_obj);
754		ttm_bo_unref(&ghost_obj);
755
756	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
757
758		/**
759		 * BO doesn't have a TTM we need to bind/unbind. Just remember
760		 * this eviction and free up the allocation
761		 */
762
763		spin_lock(&from->move_lock);
764		if (!from->move || dma_fence_is_later(fence, from->move)) {
765			dma_fence_put(from->move);
766			from->move = dma_fence_get(fence);
767		}
768		spin_unlock(&from->move_lock);
769
770		ttm_bo_free_old_node(bo);
771
772		dma_fence_put(bo->moving);
773		bo->moving = dma_fence_get(fence);
774
775	} else {
776		/**
777		 * Last resort, wait for the move to be completed.
778		 *
779		 * Should never happen in practice.
780		 */
781
782		ret = ttm_bo_wait(bo, false, false);
783		if (ret)
784			return ret;
785
786		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
787			ttm_tt_destroy(bo->ttm);
788			bo->ttm = NULL;
789		}
790		ttm_bo_free_old_node(bo);
791	}
792
793	*old_mem = *new_mem;
794	new_mem->mm_node = NULL;
795
796	return 0;
797}
798EXPORT_SYMBOL(ttm_bo_pipeline_move);
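
Both versions export ttm_bo_kmap() and ttm_bo_kunmap() for CPU access to buffer-object memory. A minimal usage sketch follows, assuming the caller already holds the required reservation on the buffer object; the helper name and the single-page copy are illustrative assumptions, not kernel code.

/* Hypothetical helper: copy up to one page of data into a reserved BO. */
static int example_write_first_page(struct ttm_buffer_object *bo,
				    const void *data, size_t len)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	if (len > PAGE_SIZE)
		return -EINVAL;

	/* Map a single page starting at page 0 of the buffer object. */
	ret = ttm_bo_kmap(bo, 0, 1, &map);
	if (ret)
		return ret;

	/* ttm_kmap_obj_virtual() also reports whether the mapping is iomem. */
	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memcpy_toio((void __iomem *)virtual, data, len);
	else
		memcpy(virtual, data, len);

	ttm_bo_kunmap(&map);
	return 0;
}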