v4.6: drivers/gpu/drm/ttm/ttm_bo_util.c
  1/**************************************************************************
  2 *
  3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
  4 * All Rights Reserved.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27/*
 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 29 */
 30
 31#include <drm/ttm/ttm_bo_driver.h>
 32#include <drm/ttm/ttm_placement.h>
 33#include <drm/drm_vma_manager.h>
 34#include <linux/io.h>
 35#include <linux/highmem.h>
 36#include <linux/wait.h>
 37#include <linux/slab.h>
 38#include <linux/vmalloc.h>
 39#include <linux/module.h>
 40#include <linux/reservation.h>
 41
 42void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 43{
 44	ttm_bo_mem_put(bo, &bo->mem);
 45}
 46
 47int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 48		    bool evict,
 49		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 50{
 51	struct ttm_tt *ttm = bo->ttm;
 52	struct ttm_mem_reg *old_mem = &bo->mem;
 53	int ret;
 54
 55	if (old_mem->mem_type != TTM_PL_SYSTEM) {
 56		ttm_tt_unbind(ttm);
 57		ttm_bo_free_old_node(bo);
 58		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
 59				TTM_PL_MASK_MEM);
 60		old_mem->mem_type = TTM_PL_SYSTEM;
 61	}
 62
 63	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
 64	if (unlikely(ret != 0))
 65		return ret;
 66
 67	if (new_mem->mem_type != TTM_PL_SYSTEM) {
 68		ret = ttm_tt_bind(ttm, new_mem);
 69		if (unlikely(ret != 0))
 70			return ret;
 71	}
 72
 73	*old_mem = *new_mem;
 74	new_mem->mm_node = NULL;
 75
 76	return 0;
 77}
 78EXPORT_SYMBOL(ttm_bo_move_ttm);
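
Drivers normally reach ttm_bo_move_ttm() from their ttm_bo_driver::move callback. Below is a minimal sketch (not part of this file; the driver name and placement policy are hypothetical) of the common pattern: use ttm_bo_move_ttm() for system/TT transitions, which only need a TTM (un)bind, and fall back to ttm_bo_move_memcpy() otherwise, using the v4.6 signatures shown in this listing.

/* Hypothetical v4.6-era move callback; mydrv_* names are invented. */
static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
			 bool interruptible, bool no_wait_gpu,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	/* System <-> TT transitions only (un)bind the TTM; no data copy. */
	if ((old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT) ||
	    (old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM))
		return ttm_bo_move_ttm(bo, evict, no_wait_gpu, new_mem);

	/* Last resort: CPU copy through the ioremapped apertures. */
	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}
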
 79
 80int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
 81{
 82	if (likely(man->io_reserve_fastpath))
 83		return 0;
 84
 85	if (interruptible)
 86		return mutex_lock_interruptible(&man->io_reserve_mutex);
 87
 88	mutex_lock(&man->io_reserve_mutex);
 89	return 0;
 90}
 91EXPORT_SYMBOL(ttm_mem_io_lock);
 92
 93void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
 94{
 95	if (likely(man->io_reserve_fastpath))
 96		return;
 97
 98	mutex_unlock(&man->io_reserve_mutex);
 99}
100EXPORT_SYMBOL(ttm_mem_io_unlock);
101
102static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
103{
104	struct ttm_buffer_object *bo;
105
106	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
107		return -EAGAIN;
108
109	bo = list_first_entry(&man->io_reserve_lru,
110			      struct ttm_buffer_object,
111			      io_reserve_lru);
112	list_del_init(&bo->io_reserve_lru);
113	ttm_bo_unmap_virtual_locked(bo);
114
115	return 0;
116}
117
118
119int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
120		       struct ttm_mem_reg *mem)
121{
122	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
123	int ret = 0;
124
125	if (!bdev->driver->io_mem_reserve)
126		return 0;
127	if (likely(man->io_reserve_fastpath))
128		return bdev->driver->io_mem_reserve(bdev, mem);
129
130	if (bdev->driver->io_mem_reserve &&
131	    mem->bus.io_reserved_count++ == 0) {
132retry:
133		ret = bdev->driver->io_mem_reserve(bdev, mem);
134		if (ret == -EAGAIN) {
135			ret = ttm_mem_io_evict(man);
136			if (ret == 0)
137				goto retry;
138		}
139	}
140	return ret;
141}
142EXPORT_SYMBOL(ttm_mem_io_reserve);
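
The -EAGAIN handshake above is what lets a driver with a small remappable aperture ask TTM to evict another buffer from the io LRU and retry. A sketch of a driver-side io_mem_reserve honouring that contract (the aperture bookkeeping helpers are invented for illustration):

/* Hypothetical io_mem_reserve for a device with a limited aperture. */
static int mydrv_io_mem_reserve(struct ttm_bo_device *bdev,
				struct ttm_mem_reg *mem)
{
	if (mem->mem_type != TTM_PL_VRAM)
		return 0;	/* system domains need no io mapping */

	mem->bus.is_iomem = true;
	mem->bus.base = mydrv_aperture_base(bdev);	/* invented helper */
	mem->bus.offset = mem->start << PAGE_SHIFT;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;

	/* Aperture full: -EAGAIN makes ttm_mem_io_reserve() evict and retry. */
	if (!mydrv_aperture_fits(bdev, mem))	/* invented helper */
		return -EAGAIN;

	return 0;
}
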
143
144void ttm_mem_io_free(struct ttm_bo_device *bdev,
145		     struct ttm_mem_reg *mem)
146{
147	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
148
149	if (likely(man->io_reserve_fastpath))
150		return;
151
152	if (bdev->driver->io_mem_reserve &&
153	    --mem->bus.io_reserved_count == 0 &&
154	    bdev->driver->io_mem_free)
155		bdev->driver->io_mem_free(bdev, mem);
156
157}
158EXPORT_SYMBOL(ttm_mem_io_free);
159
160int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
161{
162	struct ttm_mem_reg *mem = &bo->mem;
163	int ret;
164
165	if (!mem->bus.io_reserved_vm) {
166		struct ttm_mem_type_manager *man =
167			&bo->bdev->man[mem->mem_type];
168
169		ret = ttm_mem_io_reserve(bo->bdev, mem);
170		if (unlikely(ret != 0))
171			return ret;
172		mem->bus.io_reserved_vm = true;
173		if (man->use_io_reserve_lru)
174			list_add_tail(&bo->io_reserve_lru,
175				      &man->io_reserve_lru);
176	}
177	return 0;
178}
179
180void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
181{
182	struct ttm_mem_reg *mem = &bo->mem;
183
184	if (mem->bus.io_reserved_vm) {
185		mem->bus.io_reserved_vm = false;
186		list_del_init(&bo->io_reserve_lru);
187		ttm_mem_io_free(bo->bdev, mem);
188	}
189}
190
191static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
192			void **virtual)
193{
194	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
195	int ret;
196	void *addr;
197
198	*virtual = NULL;
199	(void) ttm_mem_io_lock(man, false);
200	ret = ttm_mem_io_reserve(bdev, mem);
201	ttm_mem_io_unlock(man);
202	if (ret || !mem->bus.is_iomem)
203		return ret;
204
205	if (mem->bus.addr) {
206		addr = mem->bus.addr;
207	} else {
208		if (mem->placement & TTM_PL_FLAG_WC)
209			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
210		else
211			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
212		if (!addr) {
213			(void) ttm_mem_io_lock(man, false);
214			ttm_mem_io_free(bdev, mem);
215			ttm_mem_io_unlock(man);
216			return -ENOMEM;
217		}
218	}
219	*virtual = addr;
220	return 0;
221}
222
223static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
224			 void *virtual)
225{
226	struct ttm_mem_type_manager *man;
227
228	man = &bdev->man[mem->mem_type];
229
230	if (virtual && mem->bus.addr == NULL)
231		iounmap(virtual);
232	(void) ttm_mem_io_lock(man, false);
233	ttm_mem_io_free(bdev, mem);
234	ttm_mem_io_unlock(man);
235}
236
237static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
238{
239	uint32_t *dstP =
240	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
241	uint32_t *srcP =
242	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
243
244	int i;
245	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
246		iowrite32(ioread32(srcP++), dstP++);
247	return 0;
248}
249
250static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
251				unsigned long page,
252				pgprot_t prot)
253{
254	struct page *d = ttm->pages[page];
255	void *dst;
256
257	if (!d)
258		return -ENOMEM;
259
260	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
261
262#ifdef CONFIG_X86
263	dst = kmap_atomic_prot(d, prot);
264#else
265	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
266		dst = vmap(&d, 1, 0, prot);
267	else
268		dst = kmap(d);
269#endif
270	if (!dst)
271		return -ENOMEM;
272
273	memcpy_fromio(dst, src, PAGE_SIZE);
274
275#ifdef CONFIG_X86
276	kunmap_atomic(dst);
277#else
278	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
279		vunmap(dst);
280	else
281		kunmap(d);
282#endif
283
284	return 0;
285}
286
287static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
288				unsigned long page,
289				pgprot_t prot)
290{
291	struct page *s = ttm->pages[page];
292	void *src;
293
294	if (!s)
295		return -ENOMEM;
296
297	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
298#ifdef CONFIG_X86
299	src = kmap_atomic_prot(s, prot);
300#else
301	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
302		src = vmap(&s, 1, 0, prot);
303	else
304		src = kmap(s);
305#endif
306	if (!src)
307		return -ENOMEM;
308
309	memcpy_toio(dst, src, PAGE_SIZE);
310
311#ifdef CONFIG_X86
312	kunmap_atomic(src);
313#else
314	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
315		vunmap(src);
316	else
317		kunmap(s);
318#endif
319
320	return 0;
321}
322
323int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
324		       bool evict, bool no_wait_gpu,
325		       struct ttm_mem_reg *new_mem)
326{
327	struct ttm_bo_device *bdev = bo->bdev;
328	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
329	struct ttm_tt *ttm = bo->ttm;
330	struct ttm_mem_reg *old_mem = &bo->mem;
331	struct ttm_mem_reg old_copy = *old_mem;
332	void *old_iomap;
333	void *new_iomap;
334	int ret;
335	unsigned long i;
336	unsigned long page;
337	unsigned long add = 0;
338	int dir;
339
340	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
341	if (ret)
342		return ret;
343	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
344	if (ret)
345		goto out;
346
347	/*
348	 * Single TTM move. NOP.
349	 */
350	if (old_iomap == NULL && new_iomap == NULL)
351		goto out2;
352
353	/*
354	 * Don't move nonexistent data. Clear destination instead.
355	 */
356	if (old_iomap == NULL &&
357	    (ttm == NULL || (ttm->state == tt_unpopulated &&
358			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
359		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
360		goto out2;
361	}
362
363	/*
364	 * TTM might be null for moves within the same region.
365	 */
366	if (ttm && ttm->state == tt_unpopulated) {
367		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
368		if (ret)
369			goto out1;
370	}
371
372	add = 0;
373	dir = 1;
374
375	if ((old_mem->mem_type == new_mem->mem_type) &&
376	    (new_mem->start < old_mem->start + old_mem->size)) {
377		dir = -1;
378		add = new_mem->num_pages - 1;
379	}
380
381	for (i = 0; i < new_mem->num_pages; ++i) {
382		page = i * dir + add;
383		if (old_iomap == NULL) {
384			pgprot_t prot = ttm_io_prot(old_mem->placement,
385						    PAGE_KERNEL);
386			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
387						   prot);
388		} else if (new_iomap == NULL) {
389			pgprot_t prot = ttm_io_prot(new_mem->placement,
390						    PAGE_KERNEL);
391			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
392						   prot);
393		} else
394			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
395		if (ret)
396			goto out1;
397	}
398	mb();
399out2:
400	old_copy = *old_mem;
401	*old_mem = *new_mem;
402	new_mem->mm_node = NULL;
403
404	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
405		ttm_tt_unbind(ttm);
406		ttm_tt_destroy(ttm);
407		bo->ttm = NULL;
408	}
409
410out1:
411	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
412out:
413	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
414
415	/*
416	 * On error, keep the mm node!
417	 */
418	if (!ret)
419		ttm_bo_mem_put(bo, &old_copy);
420	return ret;
421}
422EXPORT_SYMBOL(ttm_bo_move_memcpy);
423
424static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
425{
426	kfree(bo);
427}
428
429/**
430 * ttm_buffer_object_transfer
431 *
432 * @bo: A pointer to a struct ttm_buffer_object.
433 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
434 * holding the data of @bo with the old placement.
435 *
436 * This is a utility function that may be called after an accelerated move
437 * has been scheduled. A new buffer object is created as a placeholder for
438 * the old data while it's being copied. When that buffer object is idle,
439 * it can be destroyed, releasing the space of the old placement.
440 * Returns:
441 * !0: Failure.
442 */
443
444static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
445				      struct ttm_buffer_object **new_obj)
446{
447	struct ttm_buffer_object *fbo;
448	int ret;
449
450	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
451	if (!fbo)
452		return -ENOMEM;
453
454	*fbo = *bo;
455
456	/**
457	 * Fix up members that we shouldn't copy directly:
458	 * TODO: Explicit member copy would probably be better here.
459	 */
460
461	INIT_LIST_HEAD(&fbo->ddestroy);
462	INIT_LIST_HEAD(&fbo->lru);
463	INIT_LIST_HEAD(&fbo->swap);
464	INIT_LIST_HEAD(&fbo->io_reserve_lru);
465	drm_vma_node_reset(&fbo->vma_node);
466	atomic_set(&fbo->cpu_writers, 0);
467
468	kref_init(&fbo->list_kref);
469	kref_init(&fbo->kref);
470	fbo->destroy = &ttm_transfered_destroy;
471	fbo->acc_size = 0;
472	fbo->resv = &fbo->ttm_resv;
473	reservation_object_init(fbo->resv);
474	ret = ww_mutex_trylock(&fbo->resv->lock);
475	WARN_ON(!ret);
476
477	*new_obj = fbo;
478	return 0;
479}
480
481pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
482{
483	/* Cached mappings need no adjustment */
484	if (caching_flags & TTM_PL_FLAG_CACHED)
485		return tmp;
486
487#if defined(__i386__) || defined(__x86_64__)
488	if (caching_flags & TTM_PL_FLAG_WC)
489		tmp = pgprot_writecombine(tmp);
490	else if (boot_cpu_data.x86 > 3)
491		tmp = pgprot_noncached(tmp);
492#endif
493#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
494    defined(__powerpc__)
495	if (caching_flags & TTM_PL_FLAG_WC)
496		tmp = pgprot_writecombine(tmp);
497	else
498		tmp = pgprot_noncached(tmp);
499#endif
500#if defined(__sparc__) || defined(__mips__)
501	tmp = pgprot_noncached(tmp);
502#endif
503	return tmp;
504}
505EXPORT_SYMBOL(ttm_io_prot);
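
ttm_io_prot() is how CPU mappings inherit the caching mode of the buffer's current placement; TTM's own fault handler feeds a VMA's vm_page_prot through it. A one-call sketch of that pattern (the wrapper function is illustrative, not quoted from ttm_bo_vm.c):

/* Illustrative: make a VMA's caching match the bo's placement flags. */
static void mydrv_apply_bo_caching(struct ttm_buffer_object *bo,
				   struct vm_area_struct *vma)
{
	vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
					vma->vm_page_prot);
}
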
506
507static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
508			  unsigned long offset,
509			  unsigned long size,
510			  struct ttm_bo_kmap_obj *map)
511{
512	struct ttm_mem_reg *mem = &bo->mem;
513
514	if (bo->mem.bus.addr) {
515		map->bo_kmap_type = ttm_bo_map_premapped;
516		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
517	} else {
518		map->bo_kmap_type = ttm_bo_map_iomap;
519		if (mem->placement & TTM_PL_FLAG_WC)
520			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
521						  size);
522		else
523			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
524						       size);
525	}
526	return (!map->virtual) ? -ENOMEM : 0;
527}
528
529static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
530			   unsigned long start_page,
531			   unsigned long num_pages,
532			   struct ttm_bo_kmap_obj *map)
533{
534	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
535	struct ttm_tt *ttm = bo->ttm;
536	int ret;
537
538	BUG_ON(!ttm);
539
540	if (ttm->state == tt_unpopulated) {
541		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
542		if (ret)
543			return ret;
544	}
545
546	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
547		/*
548		 * We're mapping a single page, and the desired
549		 * page protection is consistent with the bo.
550		 */
551
552		map->bo_kmap_type = ttm_bo_map_kmap;
553		map->page = ttm->pages[start_page];
554		map->virtual = kmap(map->page);
555	} else {
556		/*
557		 * We need to use vmap to get the desired page protection
558		 * or to make the buffer object look contiguous.
559		 */
560		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
561		map->bo_kmap_type = ttm_bo_map_vmap;
562		map->virtual = vmap(ttm->pages + start_page, num_pages,
563				    0, prot);
564	}
565	return (!map->virtual) ? -ENOMEM : 0;
566}
567
568int ttm_bo_kmap(struct ttm_buffer_object *bo,
569		unsigned long start_page, unsigned long num_pages,
570		struct ttm_bo_kmap_obj *map)
571{
572	struct ttm_mem_type_manager *man =
573		&bo->bdev->man[bo->mem.mem_type];
574	unsigned long offset, size;
575	int ret;
576
577	BUG_ON(!list_empty(&bo->swap));
578	map->virtual = NULL;
579	map->bo = bo;
580	if (num_pages > bo->num_pages)
581		return -EINVAL;
582	if (start_page > bo->num_pages)
583		return -EINVAL;
584#if 0
585	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
586		return -EPERM;
587#endif
588	(void) ttm_mem_io_lock(man, false);
589	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
590	ttm_mem_io_unlock(man);
591	if (ret)
592		return ret;
593	if (!bo->mem.bus.is_iomem) {
594		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
595	} else {
596		offset = start_page << PAGE_SHIFT;
597		size = num_pages << PAGE_SHIFT;
598		return ttm_bo_ioremap(bo, offset, size, map);
599	}
600}
601EXPORT_SYMBOL(ttm_bo_kmap);
602
603void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
604{
605	struct ttm_buffer_object *bo = map->bo;
606	struct ttm_mem_type_manager *man =
607		&bo->bdev->man[bo->mem.mem_type];
608
609	if (!map->virtual)
610		return;
611	switch (map->bo_kmap_type) {
612	case ttm_bo_map_iomap:
613		iounmap(map->virtual);
614		break;
615	case ttm_bo_map_vmap:
616		vunmap(map->virtual);
617		break;
618	case ttm_bo_map_kmap:
619		kunmap(map->page);
620		break;
621	case ttm_bo_map_premapped:
622		break;
623	default:
624		BUG();
625	}
626	(void) ttm_mem_io_lock(man, false);
627	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
628	ttm_mem_io_unlock(man);
629	map->virtual = NULL;
630	map->page = NULL;
631}
632EXPORT_SYMBOL(ttm_bo_kunmap);
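
ttm_bo_kmap()/ttm_bo_kunmap() give short-lived CPU access to a reserved buffer whether it is currently backed by system pages or by an io aperture; ttm_kmap_obj_virtual() reports which case was set up. A minimal sketch, assuming the caller already reserved the bo (the helper and its zero-fill purpose are invented):

/* Illustrative: zero the first num_pages of an already-reserved bo. */
static int mydrv_clear_bo(struct ttm_buffer_object *bo,
			  unsigned long num_pages)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virtual;
	int ret;

	ret = ttm_bo_kmap(bo, 0, num_pages, &map);
	if (ret)
		return ret;

	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memset_io((void __iomem *)virtual, 0,
			  num_pages << PAGE_SHIFT);
	else
		memset(virtual, 0, num_pages << PAGE_SHIFT);

	ttm_bo_kunmap(&map);
	return 0;
}
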
633
634int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
635			      struct fence *fence,
636			      bool evict,
637			      bool no_wait_gpu,
638			      struct ttm_mem_reg *new_mem)
639{
640	struct ttm_bo_device *bdev = bo->bdev;
641	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
642	struct ttm_mem_reg *old_mem = &bo->mem;
643	int ret;
644	struct ttm_buffer_object *ghost_obj;
645
646	reservation_object_add_excl_fence(bo->resv, fence);
647	if (evict) {
648		ret = ttm_bo_wait(bo, false, false, false);
649		if (ret)
650			return ret;
651
652		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
653		    (bo->ttm != NULL)) {
654			ttm_tt_unbind(bo->ttm);
655			ttm_tt_destroy(bo->ttm);
656			bo->ttm = NULL;
657		}
658		ttm_bo_free_old_node(bo);
659	} else {
660		/**
661		 * This should help pipeline ordinary buffer moves.
662		 *
663		 * Hang old buffer memory on a new buffer object,
664		 * and leave it to be released when the GPU
665		 * operation has completed.
666		 */
667
668		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
669
670		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
671		if (ret)
672			return ret;
673
674		reservation_object_add_excl_fence(ghost_obj->resv, fence);
675
676		/**
677		 * If we're not moving to fixed memory, the TTM object
678		 * needs to stay alive. Otherwise hang it on the ghost
679		 * bo to be unbound and destroyed.
680		 */
681
682		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
683			ghost_obj->ttm = NULL;
684		else
685			bo->ttm = NULL;
686
687		ttm_bo_unreserve(ghost_obj);
688		ttm_bo_unref(&ghost_obj);
689	}
690
691	*old_mem = *new_mem;
692	new_mem->mm_node = NULL;
693
694	return 0;
695}
696EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
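
ttm_bo_move_accel_cleanup() is meant to be called right after the driver has queued its hardware blit and obtained a fence for it; on eviction it waits, otherwise it hands the old memory to a ghost object. A sketch of such a call site, with the submission abstracted behind an invented mydrv_copy_buffer():

/* Hypothetical accelerated move against the v4.6 struct fence API. */
static int mydrv_move_blit(struct ttm_buffer_object *bo, bool evict,
			   bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct fence *fence;
	int ret;

	ret = mydrv_copy_buffer(bo, &bo->mem, new_mem, &fence);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, no_wait_gpu,
					new_mem);
	fence_put(fence);	/* TTM took its own references */
	return ret;
}
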
v4.17: drivers/gpu/drm/ttm/ttm_bo_util.c
  1/**************************************************************************
  2 *
  3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
  4 * All Rights Reserved.
  5 *
  6 * Permission is hereby granted, free of charge, to any person obtaining a
  7 * copy of this software and associated documentation files (the
  8 * "Software"), to deal in the Software without restriction, including
  9 * without limitation the rights to use, copy, modify, merge, publish,
 10 * distribute, sub license, and/or sell copies of the Software, and to
 11 * permit persons to whom the Software is furnished to do so, subject to
 12 * the following conditions:
 13 *
 14 * The above copyright notice and this permission notice (including the
 15 * next paragraph) shall be included in all copies or substantial portions
 16 * of the Software.
 17 *
 18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 25 *
 26 **************************************************************************/
 27/*
 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 29 */
 30
 31#include <drm/ttm/ttm_bo_driver.h>
 32#include <drm/ttm/ttm_placement.h>
 33#include <drm/drm_vma_manager.h>
 34#include <linux/io.h>
 35#include <linux/highmem.h>
 36#include <linux/wait.h>
 37#include <linux/slab.h>
 38#include <linux/vmalloc.h>
 39#include <linux/module.h>
 40#include <linux/reservation.h>
 41
 42void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 43{
 44	ttm_bo_mem_put(bo, &bo->mem);
 45}
 46
 47int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 48		   struct ttm_operation_ctx *ctx,
 49		    struct ttm_mem_reg *new_mem)
 50{
 51	struct ttm_tt *ttm = bo->ttm;
 52	struct ttm_mem_reg *old_mem = &bo->mem;
 53	int ret;
 54
 55	if (old_mem->mem_type != TTM_PL_SYSTEM) {
 56		ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
 57
 58		if (unlikely(ret != 0)) {
 59			if (ret != -ERESTARTSYS)
 60				pr_err("Failed to expire sync object before unbinding TTM\n");
 61			return ret;
 62		}
 63
 64		ttm_tt_unbind(ttm);
 65		ttm_bo_free_old_node(bo);
 66		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
 67				TTM_PL_MASK_MEM);
 68		old_mem->mem_type = TTM_PL_SYSTEM;
 69	}
 70
 71	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
 72	if (unlikely(ret != 0))
 73		return ret;
 74
 75	if (new_mem->mem_type != TTM_PL_SYSTEM) {
 76		ret = ttm_tt_bind(ttm, new_mem, ctx);
 77		if (unlikely(ret != 0))
 78			return ret;
 79	}
 80
 81	*old_mem = *new_mem;
 82	new_mem->mm_node = NULL;
 83
 84	return 0;
 85}
 86EXPORT_SYMBOL(ttm_bo_move_ttm);
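
Note the interface change relative to v4.6 above: the interruptible/no_wait_gpu booleans now travel in a struct ttm_operation_ctx, and the wait moved into ttm_bo_move_ttm() itself. A minimal sketch of a v4.17-era caller (the wrapper is hypothetical):

/* Hypothetical caller showing the v4.17 ttm_operation_ctx plumbing. */
static int mydrv_move_to_system(struct ttm_buffer_object *bo,
				struct ttm_mem_reg *new_mem)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false
	};

	return ttm_bo_move_ttm(bo, &ctx, new_mem);
}
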
 87
 88int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
 89{
 90	if (likely(man->io_reserve_fastpath))
 91		return 0;
 92
 93	if (interruptible)
 94		return mutex_lock_interruptible(&man->io_reserve_mutex);
 95
 96	mutex_lock(&man->io_reserve_mutex);
 97	return 0;
 98}
 99EXPORT_SYMBOL(ttm_mem_io_lock);
100
101void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
102{
103	if (likely(man->io_reserve_fastpath))
104		return;
105
106	mutex_unlock(&man->io_reserve_mutex);
107}
108EXPORT_SYMBOL(ttm_mem_io_unlock);
109
110static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
111{
112	struct ttm_buffer_object *bo;
113
114	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
115		return -EAGAIN;
116
117	bo = list_first_entry(&man->io_reserve_lru,
118			      struct ttm_buffer_object,
119			      io_reserve_lru);
120	list_del_init(&bo->io_reserve_lru);
121	ttm_bo_unmap_virtual_locked(bo);
122
123	return 0;
124}
125
126
127int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
128		       struct ttm_mem_reg *mem)
129{
130	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
131	int ret = 0;
132
133	if (!bdev->driver->io_mem_reserve)
134		return 0;
135	if (likely(man->io_reserve_fastpath))
136		return bdev->driver->io_mem_reserve(bdev, mem);
137
138	if (bdev->driver->io_mem_reserve &&
139	    mem->bus.io_reserved_count++ == 0) {
140retry:
141		ret = bdev->driver->io_mem_reserve(bdev, mem);
142		if (ret == -EAGAIN) {
143			ret = ttm_mem_io_evict(man);
144			if (ret == 0)
145				goto retry;
146		}
147	}
148	return ret;
149}
150EXPORT_SYMBOL(ttm_mem_io_reserve);
151
152void ttm_mem_io_free(struct ttm_bo_device *bdev,
153		     struct ttm_mem_reg *mem)
154{
155	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
156
157	if (likely(man->io_reserve_fastpath))
158		return;
159
160	if (bdev->driver->io_mem_reserve &&
161	    --mem->bus.io_reserved_count == 0 &&
162	    bdev->driver->io_mem_free)
163		bdev->driver->io_mem_free(bdev, mem);
164
165}
166EXPORT_SYMBOL(ttm_mem_io_free);
167
168int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
169{
170	struct ttm_mem_reg *mem = &bo->mem;
171	int ret;
172
173	if (!mem->bus.io_reserved_vm) {
174		struct ttm_mem_type_manager *man =
175			&bo->bdev->man[mem->mem_type];
176
177		ret = ttm_mem_io_reserve(bo->bdev, mem);
178		if (unlikely(ret != 0))
179			return ret;
180		mem->bus.io_reserved_vm = true;
181		if (man->use_io_reserve_lru)
182			list_add_tail(&bo->io_reserve_lru,
183				      &man->io_reserve_lru);
184	}
185	return 0;
186}
187
188void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
189{
190	struct ttm_mem_reg *mem = &bo->mem;
191
192	if (mem->bus.io_reserved_vm) {
193		mem->bus.io_reserved_vm = false;
194		list_del_init(&bo->io_reserve_lru);
195		ttm_mem_io_free(bo->bdev, mem);
196	}
197}
198
199static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
200			void **virtual)
201{
202	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
203	int ret;
204	void *addr;
205
206	*virtual = NULL;
207	(void) ttm_mem_io_lock(man, false);
208	ret = ttm_mem_io_reserve(bdev, mem);
209	ttm_mem_io_unlock(man);
210	if (ret || !mem->bus.is_iomem)
211		return ret;
212
213	if (mem->bus.addr) {
214		addr = mem->bus.addr;
215	} else {
216		if (mem->placement & TTM_PL_FLAG_WC)
217			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
218		else
219			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
220		if (!addr) {
221			(void) ttm_mem_io_lock(man, false);
222			ttm_mem_io_free(bdev, mem);
223			ttm_mem_io_unlock(man);
224			return -ENOMEM;
225		}
226	}
227	*virtual = addr;
228	return 0;
229}
230
231static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
232			 void *virtual)
233{
234	struct ttm_mem_type_manager *man;
235
236	man = &bdev->man[mem->mem_type];
237
238	if (virtual && mem->bus.addr == NULL)
239		iounmap(virtual);
240	(void) ttm_mem_io_lock(man, false);
241	ttm_mem_io_free(bdev, mem);
242	ttm_mem_io_unlock(man);
243}
244
245static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
246{
247	uint32_t *dstP =
248	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
249	uint32_t *srcP =
250	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
251
252	int i;
253	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
254		iowrite32(ioread32(srcP++), dstP++);
255	return 0;
256}
257
258#ifdef CONFIG_X86
259#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
260#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
261#else
262#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0,  __prot)
263#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
264#endif
265
266
267/**
268 * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
269 * specified page protection.
270 *
271 * @page: The page to map.
272 * @prot: The page protection.
273 *
274 * This function maps a TTM page using the kmap_atomic api if available,
275 * otherwise falls back to vmap. The user must make sure that the
276 * specified page does not have an aliased mapping with a different caching
277 * policy unless the architecture explicitly allows it. Also mapping and
278 * unmapping using this api must be correctly nested. Unmapping should
279 * occur in the reverse order of mapping.
280 */
281void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
282{
283	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
284		return kmap_atomic(page);
285	else
286		return __ttm_kmap_atomic_prot(page, prot);
287}
288EXPORT_SYMBOL(ttm_kmap_atomic_prot);
289
290/**
291 * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
292 * ttm_kmap_atomic_prot.
293 *
294 * @addr: The virtual address from the map.
295 * @prot: The page protection.
296 */
297void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
298{
299	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
300		kunmap_atomic(addr);
301	else
302		__ttm_kunmap_atomic(addr);
303}
304EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
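
The nesting rule from the comment above matters because the non-x86 fallback is vmap()/vunmap(): unmaps must mirror maps in reverse order. A small illustrative sketch with two pages (the function is invented; error paths kept minimal):

/* Illustrative: correctly nested map/unmap of two pages. */
static void example_copy_with_prot(struct page *dpage, struct page *spage,
				   pgprot_t prot)
{
	void *dst, *src;

	dst = ttm_kmap_atomic_prot(dpage, prot);
	if (!dst)
		return;
	src = ttm_kmap_atomic_prot(spage, prot);
	if (!src) {
		ttm_kunmap_atomic_prot(dst, prot);
		return;
	}

	memcpy(dst, src, PAGE_SIZE);

	/* Unmap in reverse order of mapping. */
	ttm_kunmap_atomic_prot(src, prot);
	ttm_kunmap_atomic_prot(dst, prot);
}
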
305
306static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
307				unsigned long page,
308				pgprot_t prot)
309{
310	struct page *d = ttm->pages[page];
311	void *dst;
312
313	if (!d)
314		return -ENOMEM;
315
316	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
317	dst = ttm_kmap_atomic_prot(d, prot);
318	if (!dst)
319		return -ENOMEM;
320
321	memcpy_fromio(dst, src, PAGE_SIZE);
322
323	ttm_kunmap_atomic_prot(dst, prot);
324
325	return 0;
326}
327
328static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
329				unsigned long page,
330				pgprot_t prot)
331{
332	struct page *s = ttm->pages[page];
333	void *src;
334
335	if (!s)
336		return -ENOMEM;
337
338	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
339	src = ttm_kmap_atomic_prot(s, prot);
340	if (!src)
341		return -ENOMEM;
342
343	memcpy_toio(dst, src, PAGE_SIZE);
344
345	ttm_kunmap_atomic_prot(src, prot);
346
347	return 0;
348}
349
350int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
351		       struct ttm_operation_ctx *ctx,
352		       struct ttm_mem_reg *new_mem)
353{
354	struct ttm_bo_device *bdev = bo->bdev;
355	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
356	struct ttm_tt *ttm = bo->ttm;
357	struct ttm_mem_reg *old_mem = &bo->mem;
358	struct ttm_mem_reg old_copy = *old_mem;
359	void *old_iomap;
360	void *new_iomap;
361	int ret;
362	unsigned long i;
363	unsigned long page;
364	unsigned long add = 0;
365	int dir;
366
367	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
368	if (ret)
369		return ret;
370
371	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
372	if (ret)
373		return ret;
374	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
375	if (ret)
376		goto out;
377
378	/*
379	 * Single TTM move. NOP.
380	 */
381	if (old_iomap == NULL && new_iomap == NULL)
382		goto out2;
383
384	/*
385	 * Don't move nonexistent data. Clear destination instead.
386	 */
387	if (old_iomap == NULL &&
388	    (ttm == NULL || (ttm->state == tt_unpopulated &&
389			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
390		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
391		goto out2;
392	}
393
394	/*
395	 * TTM might be null for moves within the same region.
396	 */
397	if (ttm) {
398		ret = ttm_tt_populate(ttm, ctx);
399		if (ret)
400			goto out1;
401	}
402
403	add = 0;
404	dir = 1;
405
406	if ((old_mem->mem_type == new_mem->mem_type) &&
407	    (new_mem->start < old_mem->start + old_mem->size)) {
408		dir = -1;
409		add = new_mem->num_pages - 1;
410	}
411
412	for (i = 0; i < new_mem->num_pages; ++i) {
413		page = i * dir + add;
414		if (old_iomap == NULL) {
415			pgprot_t prot = ttm_io_prot(old_mem->placement,
416						    PAGE_KERNEL);
417			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
418						   prot);
419		} else if (new_iomap == NULL) {
420			pgprot_t prot = ttm_io_prot(new_mem->placement,
421						    PAGE_KERNEL);
422			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
423						   prot);
424		} else {
425			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
426		}
427		if (ret)
428			goto out1;
429	}
430	mb();
431out2:
432	old_copy = *old_mem;
433	*old_mem = *new_mem;
434	new_mem->mm_node = NULL;
435
436	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
437		ttm_tt_destroy(ttm);
438		bo->ttm = NULL;
439	}
440
441out1:
442	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
443out:
444	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
445
446	/*
447	 * On error, keep the mm node!
448	 */
449	if (!ret)
450		ttm_bo_mem_put(bo, &old_copy);
451	return ret;
452}
453EXPORT_SYMBOL(ttm_bo_move_memcpy);
454
455static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
456{
457	kfree(bo);
458}
459
460/**
461 * ttm_buffer_object_transfer
462 *
463 * @bo: A pointer to a struct ttm_buffer_object.
464 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
465 * holding the data of @bo with the old placement.
466 *
467 * This is a utility function that may be called after an accelerated move
468 * has been scheduled. A new buffer object is created as a placeholder for
469 * the old data while it's being copied. When that buffer object is idle,
470 * it can be destroyed, releasing the space of the old placement.
471 * Returns:
472 * !0: Failure.
473 */
474
475static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
476				      struct ttm_buffer_object **new_obj)
477{
478	struct ttm_buffer_object *fbo;
479	int ret;
480
481	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
482	if (!fbo)
483		return -ENOMEM;
484
485	*fbo = *bo;
486
487	/**
488	 * Fix up members that we shouldn't copy directly:
489	 * TODO: Explicit member copy would probably be better here.
490	 */
491
492	atomic_inc(&bo->bdev->glob->bo_count);
493	INIT_LIST_HEAD(&fbo->ddestroy);
494	INIT_LIST_HEAD(&fbo->lru);
495	INIT_LIST_HEAD(&fbo->swap);
496	INIT_LIST_HEAD(&fbo->io_reserve_lru);
497	mutex_init(&fbo->wu_mutex);
498	fbo->moving = NULL;
499	drm_vma_node_reset(&fbo->vma_node);
500	atomic_set(&fbo->cpu_writers, 0);
501
502	kref_init(&fbo->list_kref);
503	kref_init(&fbo->kref);
504	fbo->destroy = &ttm_transfered_destroy;
505	fbo->acc_size = 0;
506	fbo->resv = &fbo->ttm_resv;
507	reservation_object_init(fbo->resv);
508	ret = reservation_object_trylock(fbo->resv);
509	WARN_ON(!ret);
510
511	*new_obj = fbo;
512	return 0;
513}
514
515pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
516{
517	/* Cached mappings need no adjustment */
518	if (caching_flags & TTM_PL_FLAG_CACHED)
519		return tmp;
520
521#if defined(__i386__) || defined(__x86_64__)
522	if (caching_flags & TTM_PL_FLAG_WC)
523		tmp = pgprot_writecombine(tmp);
524	else if (boot_cpu_data.x86 > 3)
525		tmp = pgprot_noncached(tmp);
526#endif
527#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
528    defined(__powerpc__)
529	if (caching_flags & TTM_PL_FLAG_WC)
530		tmp = pgprot_writecombine(tmp);
531	else
532		tmp = pgprot_noncached(tmp);
533#endif
534#if defined(__sparc__) || defined(__mips__)
535	tmp = pgprot_noncached(tmp);
536#endif
537	return tmp;
538}
539EXPORT_SYMBOL(ttm_io_prot);
540
541static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
542			  unsigned long offset,
543			  unsigned long size,
544			  struct ttm_bo_kmap_obj *map)
545{
546	struct ttm_mem_reg *mem = &bo->mem;
547
548	if (bo->mem.bus.addr) {
549		map->bo_kmap_type = ttm_bo_map_premapped;
550		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
551	} else {
552		map->bo_kmap_type = ttm_bo_map_iomap;
553		if (mem->placement & TTM_PL_FLAG_WC)
554			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
555						  size);
556		else
557			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
558						       size);
559	}
560	return (!map->virtual) ? -ENOMEM : 0;
561}
562
563static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
564			   unsigned long start_page,
565			   unsigned long num_pages,
566			   struct ttm_bo_kmap_obj *map)
567{
568	struct ttm_mem_reg *mem = &bo->mem;
569	struct ttm_operation_ctx ctx = {
570		.interruptible = false,
571		.no_wait_gpu = false
572	};
573	struct ttm_tt *ttm = bo->ttm;
574	pgprot_t prot;
575	int ret;
576
577	BUG_ON(!ttm);
578
579	ret = ttm_tt_populate(ttm, &ctx);
580	if (ret)
581		return ret;
582
583	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
584		/*
585		 * We're mapping a single page, and the desired
586		 * page protection is consistent with the bo.
587		 */
588
589		map->bo_kmap_type = ttm_bo_map_kmap;
590		map->page = ttm->pages[start_page];
591		map->virtual = kmap(map->page);
592	} else {
593		/*
594		 * We need to use vmap to get the desired page protection
595		 * or to make the buffer object look contiguous.
596		 */
597		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
598		map->bo_kmap_type = ttm_bo_map_vmap;
599		map->virtual = vmap(ttm->pages + start_page, num_pages,
600				    0, prot);
601	}
602	return (!map->virtual) ? -ENOMEM : 0;
603}
604
605int ttm_bo_kmap(struct ttm_buffer_object *bo,
606		unsigned long start_page, unsigned long num_pages,
607		struct ttm_bo_kmap_obj *map)
608{
609	struct ttm_mem_type_manager *man =
610		&bo->bdev->man[bo->mem.mem_type];
611	unsigned long offset, size;
612	int ret;
613
614	map->virtual = NULL;
615	map->bo = bo;
616	if (num_pages > bo->num_pages)
617		return -EINVAL;
618	if (start_page > bo->num_pages)
619		return -EINVAL;
620#if 0
621	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
622		return -EPERM;
623#endif
624	(void) ttm_mem_io_lock(man, false);
625	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
626	ttm_mem_io_unlock(man);
627	if (ret)
628		return ret;
629	if (!bo->mem.bus.is_iomem) {
630		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
631	} else {
632		offset = start_page << PAGE_SHIFT;
633		size = num_pages << PAGE_SHIFT;
634		return ttm_bo_ioremap(bo, offset, size, map);
635	}
636}
637EXPORT_SYMBOL(ttm_bo_kmap);
638
639void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
640{
641	struct ttm_buffer_object *bo = map->bo;
642	struct ttm_mem_type_manager *man =
643		&bo->bdev->man[bo->mem.mem_type];
644
645	if (!map->virtual)
646		return;
647	switch (map->bo_kmap_type) {
648	case ttm_bo_map_iomap:
649		iounmap(map->virtual);
650		break;
651	case ttm_bo_map_vmap:
652		vunmap(map->virtual);
653		break;
654	case ttm_bo_map_kmap:
655		kunmap(map->page);
656		break;
657	case ttm_bo_map_premapped:
658		break;
659	default:
660		BUG();
661	}
662	(void) ttm_mem_io_lock(man, false);
663	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
664	ttm_mem_io_unlock(man);
665	map->virtual = NULL;
666	map->page = NULL;
667}
668EXPORT_SYMBOL(ttm_bo_kunmap);
669
670int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
671			      struct dma_fence *fence,
672			      bool evict,
673			      struct ttm_mem_reg *new_mem)
674{
675	struct ttm_bo_device *bdev = bo->bdev;
676	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
677	struct ttm_mem_reg *old_mem = &bo->mem;
678	int ret;
679	struct ttm_buffer_object *ghost_obj;
680
681	reservation_object_add_excl_fence(bo->resv, fence);
682	if (evict) {
683		ret = ttm_bo_wait(bo, false, false);
684		if (ret)
685			return ret;
686
687		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
688			ttm_tt_destroy(bo->ttm);
689			bo->ttm = NULL;
690		}
691		ttm_bo_free_old_node(bo);
692	} else {
693		/**
694		 * This should help pipeline ordinary buffer moves.
695		 *
696		 * Hang old buffer memory on a new buffer object,
697		 * and leave it to be released when the GPU
698		 * operation has completed.
699		 */
700
701		dma_fence_put(bo->moving);
702		bo->moving = dma_fence_get(fence);
703
704		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
705		if (ret)
706			return ret;
707
708		reservation_object_add_excl_fence(ghost_obj->resv, fence);
709
710		/**
711		 * If we're not moving to fixed memory, the TTM object
712		 * needs to stay alive. Otherwise hang it on the ghost
713		 * bo to be unbound and destroyed.
714		 */
715
716		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
717			ghost_obj->ttm = NULL;
718		else
719			bo->ttm = NULL;
720
721		ttm_bo_unreserve(ghost_obj);
722		ttm_bo_unref(&ghost_obj);
723	}
724
725	*old_mem = *new_mem;
726	new_mem->mm_node = NULL;
727
728	return 0;
729}
730EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
731
732int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
733			 struct dma_fence *fence, bool evict,
734			 struct ttm_mem_reg *new_mem)
735{
736	struct ttm_bo_device *bdev = bo->bdev;
737	struct ttm_mem_reg *old_mem = &bo->mem;
738
739	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
740	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
741
742	int ret;
743
744	reservation_object_add_excl_fence(bo->resv, fence);
745
746	if (!evict) {
747		struct ttm_buffer_object *ghost_obj;
748
749		/**
750		 * This should help pipeline ordinary buffer moves.
751		 *
752		 * Hang old buffer memory on a new buffer object,
753		 * and leave it to be released when the GPU
754		 * operation has completed.
755		 */
756
757		dma_fence_put(bo->moving);
758		bo->moving = dma_fence_get(fence);
759
760		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
761		if (ret)
762			return ret;
763
764		reservation_object_add_excl_fence(ghost_obj->resv, fence);
765
766		/**
767		 * If we're not moving to fixed memory, the TTM object
768		 * needs to stay alive. Otherwise hang it on the ghost
769		 * bo to be unbound and destroyed.
770		 */
771
772		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
773			ghost_obj->ttm = NULL;
774		else
775			bo->ttm = NULL;
776
777		ttm_bo_unreserve(ghost_obj);
778		ttm_bo_unref(&ghost_obj);
779
780	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
781
782		/**
783		 * BO doesn't have a TTM that we need to bind/unbind. Just
784		 * remember this eviction and free up the allocation.
785		 */
786
787		spin_lock(&from->move_lock);
788		if (!from->move || dma_fence_is_later(fence, from->move)) {
789			dma_fence_put(from->move);
790			from->move = dma_fence_get(fence);
791		}
792		spin_unlock(&from->move_lock);
793
794		ttm_bo_free_old_node(bo);
795
796		dma_fence_put(bo->moving);
797		bo->moving = dma_fence_get(fence);
798
799	} else {
800		/**
801		 * Last resort, wait for the move to be completed.
802		 *
803		 * Should never happen in practice.
804		 */
805
806		ret = ttm_bo_wait(bo, false, false);
807		if (ret)
808			return ret;
809
810		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
811			ttm_tt_destroy(bo->ttm);
812			bo->ttm = NULL;
813		}
814		ttm_bo_free_old_node(bo);
815	}
816
817	*old_mem = *new_mem;
818	new_mem->mm_node = NULL;
819
820	return 0;
821}
822EXPORT_SYMBOL(ttm_bo_pipeline_move);
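
ttm_bo_pipeline_move() is the fully pipelined variant of ttm_bo_move_accel_cleanup(): instead of waiting when evicting from fixed memory, it parks the fence on the source memory manager (from->move) so the space is only reused once the copy has finished. A sketch of a driver call site (the submission helper is invented):

/* Hypothetical pipelined move using a DMA-engine fence. */
static int mydrv_move_pipelined(struct ttm_buffer_object *bo, bool evict,
				struct ttm_mem_reg *new_mem)
{
	struct dma_fence *fence;
	int ret;

	ret = mydrv_copy_buffer(bo, &bo->mem, new_mem, &fence);
	if (ret)
		return ret;

	ret = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	dma_fence_put(fence);	/* TTM holds its own references now */
	return ret;
}
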
823
824int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
825{
826	struct ttm_buffer_object *ghost;
827	int ret;
828
829	ret = ttm_buffer_object_transfer(bo, &ghost);
830	if (ret)
831		return ret;
832
833	ret = reservation_object_copy_fences(ghost->resv, bo->resv);
834	/* Last resort, wait for the BO to be idle when we are OOM */
835	if (ret)
836		ttm_bo_wait(bo, false, false);
837
838	memset(&bo->mem, 0, sizeof(bo->mem));
839	bo->mem.mem_type = TTM_PL_SYSTEM;
840	bo->ttm = NULL;
841
842	ttm_bo_unreserve(ghost);
843	ttm_bo_unref(&ghost);
844
845	return 0;
846}
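
ttm_bo_pipeline_gutting() discards a buffer's backing store without blocking: the ghost object inherits the fences and keeps the old pages alive until they signal, while the original bo is left empty in TTM_PL_SYSTEM. Its in-tree caller is the eviction path for buffers whose driver reports no valid placements, roughly as sketched here (condensed; treat the surrounding details as approximate):

/* Approximate shape of the caller in ttm_bo_evict(): with no
 * placements at all, the content can simply be dropped. */
if (!placement.num_placement && !placement.num_busy_placement)
	return ttm_bo_pipeline_gutting(bo);
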