v5.4
  1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
  2/**************************************************************************
  3 *
  4 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
  5 * All Rights Reserved.
  6 *
  7 * Permission is hereby granted, free of charge, to any person obtaining a
  8 * copy of this software and associated documentation files (the
  9 * "Software"), to deal in the Software without restriction, including
 10 * without limitation the rights to use, copy, modify, merge, publish,
 11 * distribute, sub license, and/or sell copies of the Software, and to
 12 * permit persons to whom the Software is furnished to do so, subject to
 13 * the following conditions:
 14 *
 15 * The above copyright notice and this permission notice (including the
 16 * next paragraph) shall be included in all copies or substantial portions
 17 * of the Software.
 18 *
 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 26 *
 27 **************************************************************************/
 28/*
 29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 30 */
 31
 32#include <drm/ttm/ttm_bo_driver.h>
 33#include <drm/ttm/ttm_placement.h>
 34#include <drm/drm_vma_manager.h>
 35#include <linux/io.h>
 36#include <linux/highmem.h>
 37#include <linux/wait.h>
 38#include <linux/slab.h>
 39#include <linux/vmalloc.h>
 40#include <linux/module.h>
 41#include <linux/dma-resv.h>
 42
 43struct ttm_transfer_obj {
 44	struct ttm_buffer_object base;
 45	struct ttm_buffer_object *bo;
 46};
 47
 48void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 49{
 50	ttm_bo_mem_put(bo, &bo->mem);
 51}
 52
 53int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 54		    struct ttm_operation_ctx *ctx,
 55		    struct ttm_mem_reg *new_mem)
 56{
 57	struct ttm_tt *ttm = bo->ttm;
 58	struct ttm_mem_reg *old_mem = &bo->mem;
 59	int ret;
 60
 61	if (old_mem->mem_type != TTM_PL_SYSTEM) {
 62		ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
 63
 64		if (unlikely(ret != 0)) {
 65			if (ret != -ERESTARTSYS)
 66				pr_err("Failed to expire sync object before unbinding TTM\n");
 67			return ret;
 68		}
 69
 70		ttm_tt_unbind(ttm);
 71		ttm_bo_free_old_node(bo);
 72		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
 73				TTM_PL_MASK_MEM);
 74		old_mem->mem_type = TTM_PL_SYSTEM;
 75	}
 76
 77	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
 78	if (unlikely(ret != 0))
 79		return ret;
 80
 81	if (new_mem->mem_type != TTM_PL_SYSTEM) {
 82		ret = ttm_tt_bind(ttm, new_mem, ctx);
 83		if (unlikely(ret != 0))
 84			return ret;
 85	}
 86
 87	*old_mem = *new_mem;
 88	new_mem->mm_node = NULL;
 89
 90	return 0;
 91}
 92EXPORT_SYMBOL(ttm_bo_move_ttm);
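/*
 * A minimal sketch (not from the kernel tree) of how a driver's
 * ttm_bo_driver.move() callback might use the helpers in this file on
 * v5.4. "mydrv_bo_move" is a hypothetical name; the signature matches
 * struct ttm_bo_driver in this release.
 */
static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
			 struct ttm_operation_ctx *ctx,
			 struct ttm_mem_reg *new_mem)
{
	/* TT <-> system moves only bind/unbind the ttm_tt. */
	if (bo->mem.mem_type == TTM_PL_TT &&
	    new_mem->mem_type == TTM_PL_SYSTEM)
		return ttm_bo_move_ttm(bo, ctx, new_mem);

	/* Without a copy engine, fall back to the CPU copy below. */
	return ttm_bo_move_memcpy(bo, ctx, new_mem);
}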
 93
 94int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
 95{
 96	if (likely(man->io_reserve_fastpath))
 97		return 0;
 98
 99	if (interruptible)
100		return mutex_lock_interruptible(&man->io_reserve_mutex);
101
102	mutex_lock(&man->io_reserve_mutex);
103	return 0;
104}
105EXPORT_SYMBOL(ttm_mem_io_lock);
106
107void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
108{
109	if (likely(man->io_reserve_fastpath))
110		return;
111
112	mutex_unlock(&man->io_reserve_mutex);
113}
114EXPORT_SYMBOL(ttm_mem_io_unlock);
115
116static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
117{
118	struct ttm_buffer_object *bo;
119
120	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
121		return -EAGAIN;
122
123	bo = list_first_entry(&man->io_reserve_lru,
124			      struct ttm_buffer_object,
125			      io_reserve_lru);
126	list_del_init(&bo->io_reserve_lru);
127	ttm_bo_unmap_virtual_locked(bo);
128
129	return 0;
130}
131
132
133int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
134		       struct ttm_mem_reg *mem)
135{
136	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
137	int ret = 0;
138
139	if (!bdev->driver->io_mem_reserve)
140		return 0;
141	if (likely(man->io_reserve_fastpath))
142		return bdev->driver->io_mem_reserve(bdev, mem);
143
144	if (bdev->driver->io_mem_reserve &&
145	    mem->bus.io_reserved_count++ == 0) {
146retry:
147		ret = bdev->driver->io_mem_reserve(bdev, mem);
148		if (ret == -EAGAIN) {
149			ret = ttm_mem_io_evict(man);
150			if (ret == 0)
151				goto retry;
152		}
153	}
154	return ret;
155}
156EXPORT_SYMBOL(ttm_mem_io_reserve);
157
158void ttm_mem_io_free(struct ttm_bo_device *bdev,
159		     struct ttm_mem_reg *mem)
160{
161	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
162
163	if (likely(man->io_reserve_fastpath))
164		return;
165
166	if (bdev->driver->io_mem_reserve &&
167	    --mem->bus.io_reserved_count == 0 &&
168	    bdev->driver->io_mem_free)
169		bdev->driver->io_mem_free(bdev, mem);
170
171}
172EXPORT_SYMBOL(ttm_mem_io_free);
173
174int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
175{
176	struct ttm_mem_reg *mem = &bo->mem;
177	int ret;
178
179	if (!mem->bus.io_reserved_vm) {
180		struct ttm_mem_type_manager *man =
181			&bo->bdev->man[mem->mem_type];
182
183		ret = ttm_mem_io_reserve(bo->bdev, mem);
184		if (unlikely(ret != 0))
185			return ret;
186		mem->bus.io_reserved_vm = true;
187		if (man->use_io_reserve_lru)
188			list_add_tail(&bo->io_reserve_lru,
189				      &man->io_reserve_lru);
190	}
191	return 0;
192}
193
194void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
195{
196	struct ttm_mem_reg *mem = &bo->mem;
197
198	if (mem->bus.io_reserved_vm) {
199		mem->bus.io_reserved_vm = false;
200		list_del_init(&bo->io_reserve_lru);
201		ttm_mem_io_free(bo->bdev, mem);
202	}
203}
204
205static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
206			void **virtual)
207{
208	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
209	int ret;
210	void *addr;
211
212	*virtual = NULL;
213	(void) ttm_mem_io_lock(man, false);
214	ret = ttm_mem_io_reserve(bdev, mem);
215	ttm_mem_io_unlock(man);
216	if (ret || !mem->bus.is_iomem)
217		return ret;
218
219	if (mem->bus.addr) {
220		addr = mem->bus.addr;
221	} else {
222		if (mem->placement & TTM_PL_FLAG_WC)
223			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
224		else
225			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
226		if (!addr) {
227			(void) ttm_mem_io_lock(man, false);
228			ttm_mem_io_free(bdev, mem);
229			ttm_mem_io_unlock(man);
230			return -ENOMEM;
231		}
232	}
233	*virtual = addr;
234	return 0;
235}
236
237static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
238			 void *virtual)
239{
240	struct ttm_mem_type_manager *man;
241
242	man = &bdev->man[mem->mem_type];
243
244	if (virtual && mem->bus.addr == NULL)
245		iounmap(virtual);
246	(void) ttm_mem_io_lock(man, false);
247	ttm_mem_io_free(bdev, mem);
248	ttm_mem_io_unlock(man);
249}
250
251static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
252{
253	uint32_t *dstP =
254	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
255	uint32_t *srcP =
256	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
257
258	int i;
259	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
260		iowrite32(ioread32(srcP++), dstP++);
261	return 0;
262}
263
264#ifdef CONFIG_X86
265#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
266#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
267#else
268#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0,  __prot)
269#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
270#endif
271
272
273/**
274 * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
275 * specified page protection.
276 *
277 * @page: The page to map.
278 * @prot: The page protection.
279 *
280 * This function maps a TTM page using the kmap_atomic api if available,
281 * otherwise falls back to vmap. The user must make sure that the
282 * specified page does not have an aliased mapping with a different caching
283 * policy unless the architecture explicitly allows it. Also mapping and
284 * unmapping using this api must be correctly nested. Unmapping should
285 * occur in the reverse order of mapping.
286 */
287void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
288{
289	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
290		return kmap_atomic(page);
291	else
292		return __ttm_kmap_atomic_prot(page, prot);
293}
294EXPORT_SYMBOL(ttm_kmap_atomic_prot);
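/*
 * A small usage sketch, assuming a caller that owns both pages: the
 * doc-comment above requires map/unmap to nest, so the last page mapped
 * is the first unmapped. "example_copy_page_prot" is a hypothetical
 * helper; error handling for a failed vmap fallback is elided.
 */
static void example_copy_page_prot(struct page *dst, struct page *src,
				   pgprot_t prot)
{
	void *d = ttm_kmap_atomic_prot(dst, prot);
	void *s = ttm_kmap_atomic_prot(src, prot);

	memcpy(d, s, PAGE_SIZE);
	ttm_kunmap_atomic_prot(s, prot);	/* reverse order of mapping */
	ttm_kunmap_atomic_prot(d, prot);
}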
295
296/**
297 * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
298 * ttm_kmap_atomic_prot.
299 *
300 * @addr: The virtual address from the map.
301 * @prot: The page protection.
302 */
303void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
304{
305	if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
306		kunmap_atomic(addr);
307	else
308		__ttm_kunmap_atomic(addr);
309}
310EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
311
312static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
313				unsigned long page,
314				pgprot_t prot)
315{
316	struct page *d = ttm->pages[page];
317	void *dst;
318
319	if (!d)
320		return -ENOMEM;
321
322	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
323	dst = ttm_kmap_atomic_prot(d, prot);
324	if (!dst)
325		return -ENOMEM;
326
327	memcpy_fromio(dst, src, PAGE_SIZE);
328
329	ttm_kunmap_atomic_prot(dst, prot);
330
331	return 0;
332}
333
334static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
335				unsigned long page,
336				pgprot_t prot)
337{
338	struct page *s = ttm->pages[page];
339	void *src;
340
341	if (!s)
342		return -ENOMEM;
343
344	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
345	src = ttm_kmap_atomic_prot(s, prot);
346	if (!src)
347		return -ENOMEM;
348
349	memcpy_toio(dst, src, PAGE_SIZE);
350
351	ttm_kunmap_atomic_prot(src, prot);
352
353	return 0;
354}
355
356int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
357		       struct ttm_operation_ctx *ctx,
358		       struct ttm_mem_reg *new_mem)
359{
360	struct ttm_bo_device *bdev = bo->bdev;
361	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
362	struct ttm_tt *ttm = bo->ttm;
363	struct ttm_mem_reg *old_mem = &bo->mem;
364	struct ttm_mem_reg old_copy = *old_mem;
365	void *old_iomap;
366	void *new_iomap;
367	int ret;
368	unsigned long i;
369	unsigned long page;
370	unsigned long add = 0;
371	int dir;
372
373	ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
374	if (ret)
375		return ret;
376
377	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
378	if (ret)
379		return ret;
380	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
381	if (ret)
382		goto out;
383
384	/*
385	 * Single TTM move. NOP.
386	 */
387	if (old_iomap == NULL && new_iomap == NULL)
388		goto out2;
389
390	/*
391	 * Don't move nonexistent data. Clear destination instead.
392	 */
393	if (old_iomap == NULL &&
394	    (ttm == NULL || (ttm->state == tt_unpopulated &&
395			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
396		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
397		goto out2;
398	}
399
400	/*
401	 * TTM might be null for moves within the same region.
402	 */
403	if (ttm) {
404		ret = ttm_tt_populate(ttm, ctx);
405		if (ret)
406			goto out1;
407	}
408
409	add = 0;
410	dir = 1;
411
412	if ((old_mem->mem_type == new_mem->mem_type) &&
413	    (new_mem->start < old_mem->start + old_mem->size)) {
414		dir = -1;
415		add = new_mem->num_pages - 1;
416	}
417
418	for (i = 0; i < new_mem->num_pages; ++i) {
419		page = i * dir + add;
420		if (old_iomap == NULL) {
421			pgprot_t prot = ttm_io_prot(old_mem->placement,
422						    PAGE_KERNEL);
423			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
424						   prot);
425		} else if (new_iomap == NULL) {
426			pgprot_t prot = ttm_io_prot(new_mem->placement,
427						    PAGE_KERNEL);
428			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
429						   prot);
430		} else {
431			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
432		}
433		if (ret)
434			goto out1;
435	}
436	mb();
437out2:
438	old_copy = *old_mem;
439	*old_mem = *new_mem;
440	new_mem->mm_node = NULL;
441
442	if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
443		ttm_tt_destroy(ttm);
444		bo->ttm = NULL;
445	}
446
447out1:
448	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
449out:
450	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
451
452	/*
453	 * On error, keep the mm node!
454	 */
455	if (!ret)
456		ttm_bo_mem_put(bo, &old_copy);
457	return ret;
458}
459EXPORT_SYMBOL(ttm_bo_move_memcpy);
460
461static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
462{
463	struct ttm_transfer_obj *fbo;
464
465	fbo = container_of(bo, struct ttm_transfer_obj, base);
466	ttm_bo_put(fbo->bo);
467	kfree(fbo);
468}
469
470/**
471 * ttm_buffer_object_transfer
472 *
473 * @bo: A pointer to a struct ttm_buffer_object.
474 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
475 * holding the data of @bo with the old placement.
476 *
477 * This is a utility function that may be called after an accelerated move
478 * has been scheduled. A new buffer object is created as a placeholder for
479 * the old data while it's being copied. When that buffer object is idle,
480 * it can be destroyed, releasing the space of the old placement.
481 * Returns:
482 * !0: Failure.
483 */
484
485static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
486				      struct ttm_buffer_object **new_obj)
487{
488	struct ttm_transfer_obj *fbo;
489	int ret;
490
491	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
492	if (!fbo)
493		return -ENOMEM;
494
495	fbo->base = *bo;
496	fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
497
498	ttm_bo_get(bo);
499	fbo->bo = bo;
500
501	/**
502	 * Fix up members that we shouldn't copy directly:
503	 * TODO: Explicit member copy would probably be better here.
504	 */
505
506	atomic_inc(&bo->bdev->glob->bo_count);
507	INIT_LIST_HEAD(&fbo->base.ddestroy);
508	INIT_LIST_HEAD(&fbo->base.lru);
509	INIT_LIST_HEAD(&fbo->base.swap);
510	INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
511	mutex_init(&fbo->base.wu_mutex);
512	fbo->base.moving = NULL;
513	drm_vma_node_reset(&fbo->base.base.vma_node);
514	atomic_set(&fbo->base.cpu_writers, 0);
515
516	kref_init(&fbo->base.list_kref);
517	kref_init(&fbo->base.kref);
518	fbo->base.destroy = &ttm_transfered_destroy;
519	fbo->base.acc_size = 0;
520	fbo->base.base.resv = &fbo->base.base._resv;
521	dma_resv_init(fbo->base.base.resv);
522	ret = dma_resv_trylock(fbo->base.base.resv);
523	WARN_ON(!ret);
524
525	*new_obj = &fbo->base;
526	return 0;
527}
528
529pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
530{
531	/* Cached mappings need no adjustment */
532	if (caching_flags & TTM_PL_FLAG_CACHED)
533		return tmp;
534
535#if defined(__i386__) || defined(__x86_64__)
536	if (caching_flags & TTM_PL_FLAG_WC)
537		tmp = pgprot_writecombine(tmp);
538	else if (boot_cpu_data.x86 > 3)
539		tmp = pgprot_noncached(tmp);
540#endif
541#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
542    defined(__powerpc__) || defined(__mips__)
543	if (caching_flags & TTM_PL_FLAG_WC)
544		tmp = pgprot_writecombine(tmp);
545	else
546		tmp = pgprot_noncached(tmp);
547#endif
548#if defined(__sparc__)
549	tmp = pgprot_noncached(tmp);
550#endif
551	return tmp;
552}
553EXPORT_SYMBOL(ttm_io_prot);
554
555static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
556			  unsigned long offset,
557			  unsigned long size,
558			  struct ttm_bo_kmap_obj *map)
559{
560	struct ttm_mem_reg *mem = &bo->mem;
561
562	if (bo->mem.bus.addr) {
563		map->bo_kmap_type = ttm_bo_map_premapped;
564		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
565	} else {
566		map->bo_kmap_type = ttm_bo_map_iomap;
567		if (mem->placement & TTM_PL_FLAG_WC)
568			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
569						  size);
570		else
571			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
572						       size);
573	}
574	return (!map->virtual) ? -ENOMEM : 0;
575}
576
577static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
578			   unsigned long start_page,
579			   unsigned long num_pages,
580			   struct ttm_bo_kmap_obj *map)
581{
582	struct ttm_mem_reg *mem = &bo->mem;
583	struct ttm_operation_ctx ctx = {
584		.interruptible = false,
585		.no_wait_gpu = false
586	};
587	struct ttm_tt *ttm = bo->ttm;
588	pgprot_t prot;
589	int ret;
590
591	BUG_ON(!ttm);
592
593	ret = ttm_tt_populate(ttm, &ctx);
594	if (ret)
595		return ret;
596
597	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
598		/*
599		 * We're mapping a single page, and the desired
600		 * page protection is consistent with the bo.
601		 */
602
603		map->bo_kmap_type = ttm_bo_map_kmap;
604		map->page = ttm->pages[start_page];
605		map->virtual = kmap(map->page);
606	} else {
607		/*
608		 * We need to use vmap to get the desired page protection
609		 * or to make the buffer object look contiguous.
610		 */
611		prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
612		map->bo_kmap_type = ttm_bo_map_vmap;
613		map->virtual = vmap(ttm->pages + start_page, num_pages,
614				    0, prot);
615	}
616	return (!map->virtual) ? -ENOMEM : 0;
617}
618
619int ttm_bo_kmap(struct ttm_buffer_object *bo,
620		unsigned long start_page, unsigned long num_pages,
621		struct ttm_bo_kmap_obj *map)
622{
623	struct ttm_mem_type_manager *man =
624		&bo->bdev->man[bo->mem.mem_type];
625	unsigned long offset, size;
626	int ret;
627
628	map->virtual = NULL;
629	map->bo = bo;
630	if (num_pages > bo->num_pages)
631		return -EINVAL;
632	if (start_page > bo->num_pages)
633		return -EINVAL;
634
635	(void) ttm_mem_io_lock(man, false);
636	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
637	ttm_mem_io_unlock(man);
638	if (ret)
639		return ret;
640	if (!bo->mem.bus.is_iomem) {
641		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
642	} else {
643		offset = start_page << PAGE_SHIFT;
644		size = num_pages << PAGE_SHIFT;
645		return ttm_bo_ioremap(bo, offset, size, map);
646	}
647}
648EXPORT_SYMBOL(ttm_bo_kmap);
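/*
 * A usage sketch, assuming the caller holds the bo's reservation:
 * ttm_kmap_obj_virtual() (from ttm_bo_api.h) reports whether the mapping
 * is iomem, in which case the io accessors must be used.
 * "example_write_bo" is a hypothetical helper; bounds checks are elided.
 */
static int example_write_bo(struct ttm_buffer_object *bo,
			    const void *data, size_t size)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *virt;
	int ret;

	ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
	if (ret)
		return ret;

	virt = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (is_iomem)
		memcpy_toio((void __iomem *)virt, data, size);
	else
		memcpy(virt, data, size);

	ttm_bo_kunmap(&map);
	return 0;
}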
649
650void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
651{
652	struct ttm_buffer_object *bo = map->bo;
653	struct ttm_mem_type_manager *man =
654		&bo->bdev->man[bo->mem.mem_type];
655
656	if (!map->virtual)
657		return;
658	switch (map->bo_kmap_type) {
659	case ttm_bo_map_iomap:
660		iounmap(map->virtual);
661		break;
662	case ttm_bo_map_vmap:
663		vunmap(map->virtual);
664		break;
665	case ttm_bo_map_kmap:
666		kunmap(map->page);
667		break;
668	case ttm_bo_map_premapped:
669		break;
670	default:
671		BUG();
672	}
673	(void) ttm_mem_io_lock(man, false);
674	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
675	ttm_mem_io_unlock(man);
676	map->virtual = NULL;
677	map->page = NULL;
678}
679EXPORT_SYMBOL(ttm_bo_kunmap);
680
681int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
682			      struct dma_fence *fence,
683			      bool evict,
684			      struct ttm_mem_reg *new_mem)
685{
686	struct ttm_bo_device *bdev = bo->bdev;
687	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
688	struct ttm_mem_reg *old_mem = &bo->mem;
689	int ret;
690	struct ttm_buffer_object *ghost_obj;
691
692	dma_resv_add_excl_fence(bo->base.resv, fence);
693	if (evict) {
694		ret = ttm_bo_wait(bo, false, false);
695		if (ret)
696			return ret;
697
698		if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
699			ttm_tt_destroy(bo->ttm);
700			bo->ttm = NULL;
701		}
702		ttm_bo_free_old_node(bo);
703	} else {
704		/**
705		 * This should help pipeline ordinary buffer moves.
706		 *
707		 * Hang old buffer memory on a new buffer object,
708		 * and leave it to be released when the GPU
709		 * operation has completed.
710		 */
711
712		dma_fence_put(bo->moving);
713		bo->moving = dma_fence_get(fence);
714
715		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
716		if (ret)
717			return ret;
718
719		dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
720
721		/**
722		 * If we're not moving to fixed memory, the TTM object
723		 * needs to stay alive. Otherwise hang it on the ghost
724		 * bo to be unbound and destroyed.
725		 */
726
727		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
728			ghost_obj->ttm = NULL;
729		else
730			bo->ttm = NULL;
731
732		ttm_bo_unreserve(ghost_obj);
733		ttm_bo_put(ghost_obj);
734	}
735
736	*old_mem = *new_mem;
737	new_mem->mm_node = NULL;
738
739	return 0;
740}
741EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
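/*
 * A sketch of the intended calling pattern, assuming a hypothetical
 * mydrv_copy_buffer() that queues a GPU blit and returns the fence that
 * signals its completion. The helper above fences the reservation object
 * and ghosts the old memory; the driver only drops its fence reference.
 */
static int mydrv_move_blit(struct ttm_buffer_object *bo, bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct dma_fence *fence;
	int ret;

	fence = mydrv_copy_buffer(bo, &bo->mem, new_mem);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, new_mem);
	dma_fence_put(fence);
	return ret;
}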
742
743int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
744			 struct dma_fence *fence, bool evict,
745			 struct ttm_mem_reg *new_mem)
746{
747	struct ttm_bo_device *bdev = bo->bdev;
748	struct ttm_mem_reg *old_mem = &bo->mem;
749
750	struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
751	struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
752
753	int ret;
754
755	dma_resv_add_excl_fence(bo->base.resv, fence);
756
757	if (!evict) {
758		struct ttm_buffer_object *ghost_obj;
759
760		/**
761		 * This should help pipeline ordinary buffer moves.
762		 *
763		 * Hang old buffer memory on a new buffer object,
764		 * and leave it to be released when the GPU
765		 * operation has completed.
766		 */
767
768		dma_fence_put(bo->moving);
769		bo->moving = dma_fence_get(fence);
770
771		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
772		if (ret)
773			return ret;
774
775		dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
776
777		/**
778		 * If we're not moving to fixed memory, the TTM object
779		 * needs to stay alive. Otherwise hang it on the ghost
780		 * bo to be unbound and destroyed.
781		 */
782
783		if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
784			ghost_obj->ttm = NULL;
785		else
786			bo->ttm = NULL;
787
788		ttm_bo_unreserve(ghost_obj);
789		ttm_bo_put(ghost_obj);
790
791	} else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
792
793		/**
794		 * BO doesn't have a TTM we need to bind/unbind. Just remember
795		 * this eviction and free up the allocation
796		 */
797
798		spin_lock(&from->move_lock);
799		if (!from->move || dma_fence_is_later(fence, from->move)) {
800			dma_fence_put(from->move);
801			from->move = dma_fence_get(fence);
802		}
803		spin_unlock(&from->move_lock);
804
805		ttm_bo_free_old_node(bo);
806
807		dma_fence_put(bo->moving);
808		bo->moving = dma_fence_get(fence);
809
810	} else {
811		/**
812		 * Last resort, wait for the move to be completed.
813		 *
814		 * Should never happen in practice.
815		 */
816
817		ret = ttm_bo_wait(bo, false, false);
818		if (ret)
819			return ret;
820
821		if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
822			ttm_tt_destroy(bo->ttm);
823			bo->ttm = NULL;
824		}
825		ttm_bo_free_old_node(bo);
826	}
827
828	*old_mem = *new_mem;
829	new_mem->mm_node = NULL;
830
831	return 0;
832}
833EXPORT_SYMBOL(ttm_bo_pipeline_move);
834
835int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
836{
837	struct ttm_buffer_object *ghost;
838	int ret;
839
840	ret = ttm_buffer_object_transfer(bo, &ghost);
841	if (ret)
842		return ret;
843
844	ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv);
845	/* Last resort, wait for the BO to be idle when we are OOM */
846	if (ret)
847		ttm_bo_wait(bo, false, false);
848
849	memset(&bo->mem, 0, sizeof(bo->mem));
850	bo->mem.mem_type = TTM_PL_SYSTEM;
851	bo->ttm = NULL;
852
853	ttm_bo_unreserve(ghost);
854	ttm_bo_put(ghost);
855
856	return 0;
857}
v6.13.7
  1/* SPDX-License-Identifier: GPL-2.0 OR MIT */
  2/**************************************************************************
  3 *
  4 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
  5 * All Rights Reserved.
  6 *
  7 * Permission is hereby granted, free of charge, to any person obtaining a
  8 * copy of this software and associated documentation files (the
  9 * "Software"), to deal in the Software without restriction, including
 10 * without limitation the rights to use, copy, modify, merge, publish,
 11 * distribute, sub license, and/or sell copies of the Software, and to
 12 * permit persons to whom the Software is furnished to do so, subject to
 13 * the following conditions:
 14 *
 15 * The above copyright notice and this permission notice (including the
 16 * next paragraph) shall be included in all copies or substantial portions
 17 * of the Software.
 18 *
 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 21 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 22 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 23 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 24 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 25 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 26 *
 27 **************************************************************************/
 28/*
 29 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 30 */
 31
 32#include <linux/vmalloc.h>
 33
 34#include <drm/ttm/ttm_bo.h>
 35#include <drm/ttm/ttm_placement.h>
 36#include <drm/ttm/ttm_tt.h>
 37
 38#include <drm/drm_cache.h>
 39
 40struct ttm_transfer_obj {
 41	struct ttm_buffer_object base;
 42	struct ttm_buffer_object *bo;
 43};
 44
 45int ttm_mem_io_reserve(struct ttm_device *bdev,
 46		       struct ttm_resource *mem)
 47{
 48	if (mem->bus.offset || mem->bus.addr)
 49		return 0;
 50
 51	mem->bus.is_iomem = false;
 52	if (!bdev->funcs->io_mem_reserve)
 53		return 0;
 54
 55	return bdev->funcs->io_mem_reserve(bdev, mem);
 56}
 57
 58void ttm_mem_io_free(struct ttm_device *bdev,
 59		     struct ttm_resource *mem)
 60{
 61	if (!mem)
 62		return;
 63
 64	if (!mem->bus.offset && !mem->bus.addr)
 65		return;
 66
 67	if (bdev->funcs->io_mem_free)
 68		bdev->funcs->io_mem_free(bdev, mem);
 69
 70	mem->bus.offset = 0;
 71	mem->bus.addr = NULL;
 72}
 73
 74/**
 75 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 76 * @clear: Whether to clear rather than copy.
 77 * @num_pages: Number of pages of the operation.
 78 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 79 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 80 *
 81 * This function is intended to be usable asynchronously under a
 82 * dma-fence if desired.
 83 */
 84void ttm_move_memcpy(bool clear,
 85		     u32 num_pages,
 86		     struct ttm_kmap_iter *dst_iter,
 87		     struct ttm_kmap_iter *src_iter)
 88{
 89	const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
 90	const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
 91	struct iosys_map src_map, dst_map;
 92	pgoff_t i;
 93
 94	/* Single TTM move. NOP */
 95	if (dst_ops->maps_tt && src_ops->maps_tt)
 96		return;
 97
 98	/* Don't move nonexistent data. Clear destination instead. */
 99	if (clear) {
100		for (i = 0; i < num_pages; ++i) {
101			dst_ops->map_local(dst_iter, &dst_map, i);
102			if (dst_map.is_iomem)
103				memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
104			else
105				memset(dst_map.vaddr, 0, PAGE_SIZE);
106			if (dst_ops->unmap_local)
107				dst_ops->unmap_local(dst_iter, &dst_map);
108		}
109		return;
110	}
111
112	for (i = 0; i < num_pages; ++i) {
113		dst_ops->map_local(dst_iter, &dst_map, i);
114		src_ops->map_local(src_iter, &src_map, i);
115
116		drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);
117
118		if (src_ops->unmap_local)
119			src_ops->unmap_local(src_iter, &src_map);
120		if (dst_ops->unmap_local)
121			dst_ops->unmap_local(dst_iter, &dst_map);
122	}
123}
124EXPORT_SYMBOL(ttm_move_memcpy);
125
126/**
127 * ttm_bo_move_memcpy
128 *
129 * @bo: A pointer to a struct ttm_buffer_object.
130 * @ctx: operation context
131 * @dst_mem: struct ttm_resource indicating where to move.
132 *
133 * Fallback move function for a mappable buffer object in mappable memory.
134 * The function will, if successful,
135 * free any old aperture space, and set (@dst_mem)->mm_node to NULL,
136 * and update the (@bo)->mem placement flags. If unsuccessful, the old
137 * data remains untouched, and it's up to the caller to free the
138 * memory space indicated by @dst_mem.
139 * Returns:
140 * !0: Failure.
141 */
142int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
143		       struct ttm_operation_ctx *ctx,
144		       struct ttm_resource *dst_mem)
145{
146	struct ttm_device *bdev = bo->bdev;
147	struct ttm_resource_manager *dst_man =
148		ttm_manager_type(bo->bdev, dst_mem->mem_type);
149	struct ttm_tt *ttm = bo->ttm;
150	struct ttm_resource *src_mem = bo->resource;
151	struct ttm_resource_manager *src_man;
152	union {
153		struct ttm_kmap_iter_tt tt;
154		struct ttm_kmap_iter_linear_io io;
155	} _dst_iter, _src_iter;
156	struct ttm_kmap_iter *dst_iter, *src_iter;
157	bool clear;
158	int ret = 0;
159
160	if (WARN_ON(!src_mem))
161		return -EINVAL;
162
163	src_man = ttm_manager_type(bdev, src_mem->mem_type);
164	if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
165		    dst_man->use_tt)) {
166		ret = ttm_bo_populate(bo, ctx);
167		if (ret)
168			return ret;
169	}
170
171	dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
172	if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
173		dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
174	if (IS_ERR(dst_iter))
175		return PTR_ERR(dst_iter);
176
177	src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
178	if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
179		src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
180	if (IS_ERR(src_iter)) {
181		ret = PTR_ERR(src_iter);
182		goto out_src_iter;
183	}
184
185	clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
186	if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
187		ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);
188
189	if (!src_iter->ops->maps_tt)
190		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
191	ttm_bo_move_sync_cleanup(bo, dst_mem);
192
193out_src_iter:
194	if (!dst_iter->ops->maps_tt)
195		ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);
196
197	return ret;
198}
199EXPORT_SYMBOL(ttm_bo_move_memcpy);
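/*
 * A minimal sketch (hypothetical driver) of the modern
 * ttm_device_funcs.move() callback delegating to the memcpy fallback
 * above; a real driver would attempt an accelerated copy first and use
 * @hop for multihop placements.
 */
static int mydrv_bo_move(struct ttm_buffer_object *bo, bool evict,
			 struct ttm_operation_ctx *ctx,
			 struct ttm_resource *new_mem,
			 struct ttm_place *hop)
{
	return ttm_bo_move_memcpy(bo, ctx, new_mem);
}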
200
201static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
202{
203	struct ttm_transfer_obj *fbo;
204
205	fbo = container_of(bo, struct ttm_transfer_obj, base);
206	dma_resv_fini(&fbo->base.base._resv);
207	ttm_bo_put(fbo->bo);
208	kfree(fbo);
209}
210
211/**
212 * ttm_buffer_object_transfer
213 *
214 * @bo: A pointer to a struct ttm_buffer_object.
215 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
216 * holding the data of @bo with the old placement.
217 *
218 * This is a utility function that may be called after an accelerated move
219 * has been scheduled. A new buffer object is created as a placeholder for
220 * the old data while it's being copied. When that buffer object is idle,
221 * it can be destroyed, releasing the space of the old placement.
222 * Returns:
223 * !0: Failure.
224 */
225
226static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
227				      struct ttm_buffer_object **new_obj)
228{
229	struct ttm_transfer_obj *fbo;
230	int ret;
231
232	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
233	if (!fbo)
234		return -ENOMEM;
235
236	fbo->base = *bo;
237
238	/**
239	 * Fix up members that we shouldn't copy directly:
240	 * TODO: Explicit member copy would probably be better here.
241	 */
242
243	atomic_inc(&ttm_glob.bo_count);
244	drm_vma_node_reset(&fbo->base.base.vma_node);
245
246	kref_init(&fbo->base.kref);
247	fbo->base.destroy = &ttm_transfered_destroy;
248	fbo->base.pin_count = 0;
249	if (bo->type != ttm_bo_type_sg)
250		fbo->base.base.resv = &fbo->base.base._resv;
251
252	dma_resv_init(&fbo->base.base._resv);
253	fbo->base.base.dev = NULL;
254	ret = dma_resv_trylock(&fbo->base.base._resv);
255	WARN_ON(!ret);
256
257	if (fbo->base.resource) {
258		ttm_resource_set_bo(fbo->base.resource, &fbo->base);
259		bo->resource = NULL;
260		ttm_bo_set_bulk_move(&fbo->base, NULL);
261	} else {
262		fbo->base.bulk_move = NULL;
263	}
264
265	ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
266	if (ret) {
267		kfree(fbo);
268		return ret;
269	}
270
271	ttm_bo_get(bo);
272	fbo->bo = bo;
273
274	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);
275
276	*new_obj = &fbo->base;
277	return 0;
278}
279
280/**
281 * ttm_io_prot
282 *
283 * @bo: ttm buffer object
284 * @res: ttm resource object
285 * @tmp: Page protection flag for a normal, cached mapping.
286 *
287 * Utility function that returns the pgprot_t that should be used for
288 * setting up a PTE with the caching model indicated by @res.
289 */
290pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
291		     pgprot_t tmp)
292{
293	struct ttm_resource_manager *man;
294	enum ttm_caching caching;
295
296	man = ttm_manager_type(bo->bdev, res->mem_type);
297	if (man->use_tt) {
298		caching = bo->ttm->caching;
299		if (bo->ttm->page_flags & TTM_TT_FLAG_DECRYPTED)
300			tmp = pgprot_decrypted(tmp);
301	} else  {
302		caching = res->bus.caching;
303	}
304
305	return ttm_prot_from_caching(caching, tmp);
306}
307EXPORT_SYMBOL(ttm_io_prot);
308
309static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
310			  unsigned long offset,
311			  unsigned long size,
312			  struct ttm_bo_kmap_obj *map)
313{
314	struct ttm_resource *mem = bo->resource;
315
316	if (bo->resource->bus.addr) {
317		map->bo_kmap_type = ttm_bo_map_premapped;
318		map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
319	} else {
320		resource_size_t res = bo->resource->bus.offset + offset;
321
322		map->bo_kmap_type = ttm_bo_map_iomap;
323		if (mem->bus.caching == ttm_write_combined)
324			map->virtual = ioremap_wc(res, size);
325#ifdef CONFIG_X86
326		else if (mem->bus.caching == ttm_cached)
327			map->virtual = ioremap_cache(res, size);
328#endif
329		else
330			map->virtual = ioremap(res, size);
331	}
332	return (!map->virtual) ? -ENOMEM : 0;
333}
334
335static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
336			   unsigned long start_page,
337			   unsigned long num_pages,
338			   struct ttm_bo_kmap_obj *map)
339{
340	struct ttm_resource *mem = bo->resource;
341	struct ttm_operation_ctx ctx = {
342		.interruptible = false,
343		.no_wait_gpu = false
344	};
345	struct ttm_tt *ttm = bo->ttm;
346	struct ttm_resource_manager *man =
347			ttm_manager_type(bo->bdev, bo->resource->mem_type);
348	pgprot_t prot;
349	int ret;
350
351	BUG_ON(!ttm);
352
353	ret = ttm_bo_populate(bo, &ctx);
354	if (ret)
355		return ret;
356
357	if (num_pages == 1 && ttm->caching == ttm_cached &&
358	    !(man->use_tt && (ttm->page_flags & TTM_TT_FLAG_DECRYPTED))) {
359		/*
360		 * We're mapping a single page, and the desired
361		 * page protection is consistent with the bo.
362		 */
363
364		map->bo_kmap_type = ttm_bo_map_kmap;
365		map->page = ttm->pages[start_page];
366		map->virtual = kmap(map->page);
367	} else {
368		/*
369		 * We need to use vmap to get the desired page protection
370		 * or to make the buffer object look contiguous.
371		 */
372		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
373		map->bo_kmap_type = ttm_bo_map_vmap;
374		map->virtual = vmap(ttm->pages + start_page, num_pages,
375				    0, prot);
376	}
377	return (!map->virtual) ? -ENOMEM : 0;
378}
379
380/**
381 * ttm_bo_kmap
382 *
383 * @bo: The buffer object.
384 * @start_page: The first page to map.
385 * @num_pages: Number of pages to map.
386 * @map: pointer to a struct ttm_bo_kmap_obj representing the map.
387 *
388 * Sets up a kernel virtual mapping, using ioremap, vmap or kmap to the
389 * data in the buffer object. The ttm_kmap_obj_virtual function can then be
390 * used to obtain a virtual address to the data.
391 *
392 * Returns
393 * -ENOMEM: Out of memory.
394 * -EINVAL: Invalid range.
395 */
396int ttm_bo_kmap(struct ttm_buffer_object *bo,
397		unsigned long start_page, unsigned long num_pages,
398		struct ttm_bo_kmap_obj *map)
399{
400	unsigned long offset, size;
401	int ret;
402
403	map->virtual = NULL;
404	map->bo = bo;
405	if (num_pages > PFN_UP(bo->resource->size))
406		return -EINVAL;
407	if ((start_page + num_pages) > PFN_UP(bo->resource->size))
408		return -EINVAL;
409
410	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
411	if (ret)
412		return ret;
413	if (!bo->resource->bus.is_iomem) {
414		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
415	} else {
416		offset = start_page << PAGE_SHIFT;
417		size = num_pages << PAGE_SHIFT;
418		return ttm_bo_ioremap(bo, offset, size, map);
419	}
420}
421EXPORT_SYMBOL(ttm_bo_kmap);
422
423/**
424 * ttm_bo_kunmap
425 *
426 * @map: Object describing the map to unmap.
427 *
428 * Unmaps a kernel map set up by ttm_bo_kmap.
429 */
430void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
431{
432	if (!map->virtual)
433		return;
434	switch (map->bo_kmap_type) {
435	case ttm_bo_map_iomap:
436		iounmap(map->virtual);
437		break;
438	case ttm_bo_map_vmap:
439		vunmap(map->virtual);
440		break;
441	case ttm_bo_map_kmap:
442		kunmap(map->page);
443		break;
444	case ttm_bo_map_premapped:
445		break;
446	default:
447		BUG();
448	}
449	ttm_mem_io_free(map->bo->bdev, map->bo->resource);
450	map->virtual = NULL;
451	map->page = NULL;
452}
453EXPORT_SYMBOL(ttm_bo_kunmap);
454
455/**
456 * ttm_bo_vmap
457 *
458 * @bo: The buffer object.
459 * @map: pointer to a struct iosys_map representing the map.
460 *
461 * Sets up a kernel virtual mapping, using ioremap or vmap to the
462 * data in the buffer object. The parameter @map returns the virtual
463 * address as struct iosys_map. Unmap the buffer with ttm_bo_vunmap().
464 *
465 * Returns
466 * -ENOMEM: Out of memory.
467 * -EINVAL: Invalid range.
468 */
469int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map)
470{
471	struct ttm_resource *mem = bo->resource;
472	int ret;
473
474	dma_resv_assert_held(bo->base.resv);
475
476	ret = ttm_mem_io_reserve(bo->bdev, mem);
477	if (ret)
478		return ret;
479
480	if (mem->bus.is_iomem) {
481		void __iomem *vaddr_iomem;
482
483		if (mem->bus.addr)
484			vaddr_iomem = (void __iomem *)mem->bus.addr;
485		else if (mem->bus.caching == ttm_write_combined)
486			vaddr_iomem = ioremap_wc(mem->bus.offset,
487						 bo->base.size);
488#ifdef CONFIG_X86
489		else if (mem->bus.caching == ttm_cached)
490			vaddr_iomem = ioremap_cache(mem->bus.offset,
491						  bo->base.size);
492#endif
493		else
494			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);
495
496		if (!vaddr_iomem)
497			return -ENOMEM;
498
499		iosys_map_set_vaddr_iomem(map, vaddr_iomem);
500
501	} else {
502		struct ttm_operation_ctx ctx = {
503			.interruptible = false,
504			.no_wait_gpu = false
505		};
506		struct ttm_tt *ttm = bo->ttm;
507		pgprot_t prot;
508		void *vaddr;
509
510		ret = ttm_bo_populate(bo, &ctx);
511		if (ret)
512			return ret;
513
514		/*
515		 * We need to use vmap to get the desired page protection
516		 * or to make the buffer object look contiguous.
517		 */
518		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
519		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
520		if (!vaddr)
521			return -ENOMEM;
522
523		iosys_map_set_vaddr(map, vaddr);
524	}
525
526	return 0;
527}
528EXPORT_SYMBOL(ttm_bo_vmap);
529
530/**
531 * ttm_bo_vunmap
532 *
533 * @bo: The buffer object.
534 * @map: Object describing the map to unmap.
535 *
536 * Unmaps a kernel map set up by ttm_bo_vmap().
537 */
538void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map)
539{
540	struct ttm_resource *mem = bo->resource;
541
542	dma_resv_assert_held(bo->base.resv);
543
544	if (iosys_map_is_null(map))
545		return;
546
547	if (!map->is_iomem)
548		vunmap(map->vaddr);
549	else if (!mem->bus.addr)
550		iounmap(map->vaddr_iomem);
551	iosys_map_clear(map);
552
553	ttm_mem_io_free(bo->bdev, bo->resource);
554}
555EXPORT_SYMBOL(ttm_bo_vunmap);
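/*
 * A usage sketch for ttm_bo_vmap()/ttm_bo_vunmap(), assuming the caller
 * holds bo->base.resv (both helpers assert this). The iosys_map
 * accessors hide the iomem vs. system-memory distinction.
 * "example_fill_bo" is a hypothetical helper.
 */
static int example_fill_bo(struct ttm_buffer_object *bo, int value)
{
	struct iosys_map map;
	int ret;

	ret = ttm_bo_vmap(bo, &map);
	if (ret)
		return ret;

	iosys_map_memset(&map, 0, value, bo->base.size);
	ttm_bo_vunmap(bo, &map);
	return 0;
}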
556
557static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
558				 bool dst_use_tt)
559{
560	long ret;
561
562	ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
563				    false, 15 * HZ);
564	if (ret == 0)
565		return -EBUSY;
566	if (ret < 0)
567		return ret;
568
569	if (!dst_use_tt)
570		ttm_bo_tt_destroy(bo);
571	ttm_resource_free(bo, &bo->resource);
572	return 0;
573}
574
575static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
576				struct dma_fence *fence,
577				bool dst_use_tt)
578{
579	struct ttm_buffer_object *ghost_obj;
580	int ret;
581
582	/**
583	 * This should help pipeline ordinary buffer moves.
584	 *
585	 * Hang old buffer memory on a new buffer object,
586	 * and leave it to be released when the GPU
587	 * operation has completed.
588	 */
589
590	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
591	if (ret)
592		return ret;
593
594	dma_resv_add_fence(&ghost_obj->base._resv, fence,
595			   DMA_RESV_USAGE_KERNEL);
596
597	/**
598	 * If we're not moving to fixed memory, the TTM object
599	 * needs to stay alive. Otherwise hang it on the ghost
600	 * bo to be unbound and destroyed.
601	 */
602
603	if (dst_use_tt)
604		ghost_obj->ttm = NULL;
605	else
606		bo->ttm = NULL;
607
608	dma_resv_unlock(&ghost_obj->base._resv);
609	ttm_bo_put(ghost_obj);
610	return 0;
611}
612
613static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
614				       struct dma_fence *fence)
615{
616	struct ttm_device *bdev = bo->bdev;
617	struct ttm_resource_manager *from;
618
619	from = ttm_manager_type(bdev, bo->resource->mem_type);
620
621	/**
622	 * BO doesn't have a TTM we need to bind/unbind. Just remember
623	 * this eviction and free up the allocation
624	 */
625	spin_lock(&from->move_lock);
626	if (!from->move || dma_fence_is_later(fence, from->move)) {
627		dma_fence_put(from->move);
628		from->move = dma_fence_get(fence);
629	}
630	spin_unlock(&from->move_lock);
631
632	ttm_resource_free(bo, &bo->resource);
633}
634
635/**
636 * ttm_bo_move_accel_cleanup - cleanup helper for hw copies
637 *
638 * @bo: A pointer to a struct ttm_buffer_object.
639 * @fence: A fence object that signals when moving is complete.
640 * @evict: This is an evict move. Don't return until the buffer is idle.
641 * @pipeline: evictions are to be pipelined.
642 * @new_mem: struct ttm_resource indicating where to move.
643 *
644 * Accelerated move function to be called when an accelerated move
645 * has been scheduled. The function will create a new temporary buffer object
646 * representing the old placement, and put the sync object on both buffer
647 * objects. After that the newly created buffer object is unref'd to be
648 * destroyed when the move is complete. This will help pipeline
649 * buffer moves.
650 */
651int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
652			      struct dma_fence *fence,
653			      bool evict,
654			      bool pipeline,
655			      struct ttm_resource *new_mem)
656{
657	struct ttm_device *bdev = bo->bdev;
658	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
659	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
660	int ret = 0;
661
662	dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
663	if (!evict)
664		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
665	else if (!from->use_tt && pipeline)
666		ttm_bo_move_pipeline_evict(bo, fence);
667	else
668		ret = ttm_bo_wait_free_node(bo, man->use_tt);
669
670	if (ret)
671		return ret;
672
673	ttm_bo_assign_mem(bo, new_mem);
674
675	return 0;
676}
677EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
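/*
 * The 6.x variant adds the @pipeline argument. A sketch mirroring the
 * v5.4 example, again with a hypothetical mydrv_copy_buffer() that
 * queues the copy and returns its completion fence.
 */
static int mydrv_move_blit(struct ttm_buffer_object *bo, bool evict,
			   struct ttm_resource *new_mem)
{
	struct dma_fence *fence;
	int ret;

	fence = mydrv_copy_buffer(bo, bo->resource, new_mem);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	ret = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
	dma_fence_put(fence);
	return ret;
}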
678
679/**
680 * ttm_bo_move_sync_cleanup - cleanup by waiting for the move to finish
681 *
682 * @bo: A pointer to a struct ttm_buffer_object.
683 * @new_mem: struct ttm_resource indicating where to move.
684 *
685 * Special case of ttm_bo_move_accel_cleanup where the bo is guaranteed
686 * by the caller to be idle. Typically used after memcpy buffer moves.
687 */
688void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
689			      struct ttm_resource *new_mem)
690{
691	struct ttm_device *bdev = bo->bdev;
692	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
693	int ret;
694
695	ret = ttm_bo_wait_free_node(bo, man->use_tt);
696	if (WARN_ON(ret))
697		return;
698
699	ttm_bo_assign_mem(bo, new_mem);
700}
701EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);
702
703/**
704 * ttm_bo_pipeline_gutting - purge the contents of a bo
705 * @bo: The buffer object
706 *
707 * Purge the contents of a bo, async if the bo is not idle.
708 * After a successful call, the bo is left unpopulated in
709 * system placement. The function may wait uninterruptible
710 * for idle on OOM.
711 *
712 * Return: 0 if successful, negative error code on failure.
713 */
714int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
715{
716	struct ttm_buffer_object *ghost;
717	struct ttm_tt *ttm;
718	int ret;
719
720	/* If already idle, no need for ghost object dance. */
721	if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {
722		if (!bo->ttm) {
723			/* See comment below about clearing. */
724			ret = ttm_tt_create(bo, true);
725			if (ret)
726				return ret;
727		} else {
728			ttm_tt_unpopulate(bo->bdev, bo->ttm);
729			if (bo->type == ttm_bo_type_device)
730				ttm_tt_mark_for_clear(bo->ttm);
731		}
732		ttm_resource_free(bo, &bo->resource);
733		return 0;
734	}
735
736	/*
737	 * We need an unpopulated ttm_tt after giving our current one,
738	 * if any, to the ghost object. And we can't afford to fail
739	 * creating one *after* the operation. If the bo subsequently gets
740	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
741	 * to avoid leaking sensitive information to user-space.
742	 */
743
744	ttm = bo->ttm;
745	bo->ttm = NULL;
746	ret = ttm_tt_create(bo, true);
747	swap(bo->ttm, ttm);
748	if (ret)
749		return ret;
750
751	ret = ttm_buffer_object_transfer(bo, &ghost);
752	if (ret)
753		goto error_destroy_tt;
754
755	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
756	/* Last resort, wait for the BO to be idle when we are OOM */
757	if (ret) {
758		dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,
759				      false, MAX_SCHEDULE_TIMEOUT);
760	}
761
762	dma_resv_unlock(&ghost->base._resv);
763	ttm_bo_put(ghost);
764	bo->ttm = ttm;
765	return 0;
766
767error_destroy_tt:
768	ttm_tt_destroy(bo->bdev, ttm);
769	return ret;
770}
771
772static bool ttm_lru_walk_trylock(struct ttm_lru_walk *walk,
773				 struct ttm_buffer_object *bo,
774				 bool *needs_unlock)
775{
776	struct ttm_operation_ctx *ctx = walk->ctx;
777
778	*needs_unlock = false;
779
780	if (dma_resv_trylock(bo->base.resv)) {
781		*needs_unlock = true;
782		return true;
783	}
784
785	if (bo->base.resv == ctx->resv && ctx->allow_res_evict) {
786		dma_resv_assert_held(bo->base.resv);
787		return true;
788	}
789
790	return false;
791}
792
793static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk,
794				   struct ttm_buffer_object *bo,
795				   bool *needs_unlock)
796{
797	struct dma_resv *resv = bo->base.resv;
798	int ret;
799
800	if (walk->ctx->interruptible)
801		ret = dma_resv_lock_interruptible(resv, walk->ticket);
802	else
803		ret = dma_resv_lock(resv, walk->ticket);
804
805	if (!ret) {
806		*needs_unlock = true;
807		/*
808		 * Only a single ticketlock per loop. Ticketlocks are prone
809		 * to return -EDEADLK causing the eviction to fail, so
810		 * after waiting for the ticketlock, revert back to
811		 * trylocking for this walk.
812		 */
813		walk->ticket = NULL;
814	} else if (ret == -EDEADLK) {
815		/* Caller needs to exit the ww transaction. */
816		ret = -ENOSPC;
817	}
818
819	return ret;
820}
821
822static void ttm_lru_walk_unlock(struct ttm_buffer_object *bo, bool locked)
823{
824	if (locked)
825		dma_resv_unlock(bo->base.resv);
826}
827
828/**
829 * ttm_lru_walk_for_evict() - Perform a LRU list walk, with actions taken on
830 * valid items.
831 * @walk: describe the walks and actions taken
832 * @bdev: The TTM device.
833 * @man: The struct ttm_resource manager whose LRU lists we're walking.
834 * @target: The end condition for the walk.
835 *
836 * The LRU lists of @man are walked, and for each struct ttm_resource encountered,
837 * the corresponding ttm_buffer_object is locked and taken a reference on, and
838 * the LRU lock is dropped. The LRU lock may be dropped before locking and, in
839 * that case, it's verified that the item actually remains on the LRU list after
840 * the lock, and that the buffer object didn't switch resource in between.
841 *
842 * With a locked object, the actions indicated by @walk->process_bo are
843 * performed, and after that, the bo is unlocked, the refcount dropped and the
844 * next struct ttm_resource is processed. Here, the walker relies on
845 * TTM's restartable LRU list implementation.
846 *
847 * Typically @walk->process_bo() would return the number of pages evicted,
848 * swapped or shrunken, so that when the total exceeds @target, or when the
849 * LRU list has been walked in full, iteration is terminated. It's also terminated
850 * on error. Note that the definition of @target is done by the caller, it
851 * could have a different meaning than the number of pages.
852 *
853 * Note that the way dma_resv individualization is done, locking needs to be done
854 * either with the LRU lock held (trylocking only) or with a reference on the
855 * object.
856 *
857 * Return: The progress made towards target or negative error code on error.
858 */
859s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
860			   struct ttm_resource_manager *man, s64 target)
861{
862	struct ttm_resource_cursor cursor;
863	struct ttm_resource *res;
864	s64 progress = 0;
865	s64 lret;
866
867	spin_lock(&bdev->lru_lock);
868	ttm_resource_manager_for_each_res(man, &cursor, res) {
869		struct ttm_buffer_object *bo = res->bo;
870		bool bo_needs_unlock = false;
871		bool bo_locked = false;
872		int mem_type;
873
874		/*
875		 * Attempt a trylock before taking a reference on the bo,
876		 * since if we do it the other way around, and the trylock fails,
877		 * we need to drop the lru lock to put the bo.
878		 */
879		if (ttm_lru_walk_trylock(walk, bo, &bo_needs_unlock))
880			bo_locked = true;
881		else if (!walk->ticket || walk->ctx->no_wait_gpu ||
882			 walk->trylock_only)
883			continue;
884
885		if (!ttm_bo_get_unless_zero(bo)) {
886			ttm_lru_walk_unlock(bo, bo_needs_unlock);
887			continue;
888		}
889
890		mem_type = res->mem_type;
891		spin_unlock(&bdev->lru_lock);
892
893		lret = 0;
894		if (!bo_locked)
895			lret = ttm_lru_walk_ticketlock(walk, bo, &bo_needs_unlock);
896
897		/*
898		 * Note that in between the release of the lru lock and the
899		 * ticketlock, the bo may have switched resource,
900		 * and also memory type, since the resource may have been
901		 * freed and allocated again with a different memory type.
902		 * In that case, just skip it.
903		 */
904		if (!lret && bo->resource && bo->resource->mem_type == mem_type)
905			lret = walk->ops->process_bo(walk, bo);
906
907		ttm_lru_walk_unlock(bo, bo_needs_unlock);
908		ttm_bo_put(bo);
909		if (lret == -EBUSY || lret == -EALREADY)
910			lret = 0;
911		progress = (lret < 0) ? lret : progress + lret;
912
913		spin_lock(&bdev->lru_lock);
914		if (progress < 0 || progress >= target)
915			break;
916	}
917	ttm_resource_cursor_fini(&cursor);
918	spin_unlock(&bdev->lru_lock);
919
920	return progress;
921}
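/*
 * A sketch of wiring up a walk, under the assumption that the driver
 * supplies process_bo(), which runs with the bo locked and returns the
 * progress made (e.g. pages freed) or a negative error code. All
 * "mydrv" names are hypothetical.
 */
static s64 mydrv_process_bo(struct ttm_lru_walk *walk,
			    struct ttm_buffer_object *bo)
{
	/* Evict or shrink @bo here; report the pages made available. */
	return PFN_UP(bo->base.size);
}

static const struct ttm_lru_walk_ops mydrv_walk_ops = {
	.process_bo = mydrv_process_bo,
};

static s64 mydrv_evict_pages(struct ttm_device *bdev,
			     struct ttm_resource_manager *man,
			     struct ttm_operation_ctx *ctx, s64 target)
{
	struct ttm_lru_walk walk = {
		.ops = &mydrv_walk_ops,
		.ctx = ctx,
		.trylock_only = true,
	};

	return ttm_lru_walk_for_evict(&walk, bdev, man, target);
}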