/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait_reserve,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

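/*
 * Note: when a memory type manager sets io_reserve_fastpath, io space
 * reservations for it are assumed not to fail with -EAGAIN, so no LRU
 * bookkeeping or mutual exclusion is needed and the lock/unlock helpers
 * below are no-ops.
 */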
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

static void ttm_mem_io_free(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset,
					  mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset,
					       mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm_tt_get_page(ttm, page);
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

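	/*
	 * On x86, kmap_atomic_prot() maps the page with the requested
	 * protection directly; on other architectures a single-page vmap()
	 * is required whenever prot differs from PAGE_KERNEL.
	 */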
#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm_tt_get_page(ttm, page);
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	add = 0;
	dir = 1;
	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
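	/*
	 * Save the old placement before overwriting it; the saved copy is
	 * what gets unmapped and freed on the way out.
	 */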
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}

pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
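
/*
 * Illustrative use (not part of the original file): a driver fault
 * handler can derive the protection for a CPU mapping of a bo from its
 * placement flags, e.g.:
 *
 *	vma->vm_page_prot = ttm_io_prot(bo->mem.placement,
 *					vma->vm_page_prot);
 */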

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base +
						  bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base +
						       bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	struct page *d;
	int i;

	BUG_ON(!ttm);
	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm_tt_get_page(ttm, start_page);
		map->virtual = kmap(map->page);
	} else {
		/*
		 * Populate the part we're mapping.
		 */
		for (i = start_page; i < start_page + num_pages; ++i) {
			d = ttm_tt_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if ((start_page + num_pages) > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
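
/*
 * Illustrative use (not part of the original file): map a whole bo for
 * CPU access and release the mapping again. ttm_kmap_obj_virtual()
 * reports whether the returned pointer is iomem.
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	int ret = ttm_bo_kmap(bo, 0, bo->num_pages, &map);
 *
 *	if (ret)
 *		return ret;
 *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	... read or write through virtual, honoring is_iomem ...
 *	ttm_bo_kunmap(&map);
 */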

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait_reserve,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/*
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/*
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_cache.h>
#include <drm/drm_vma_manager.h>
#include <linux/dma-buf-map.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/dma-resv.h>

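/*
 * Ghost object used by ttm_buffer_object_transfer(); holds a reference
 * to the bo it was created from until the transfer object is destroyed.
 */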
struct ttm_transfer_obj {
	struct ttm_buffer_object base;
	struct ttm_buffer_object *bo;
};

int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem)
{
	if (mem->bus.offset || mem->bus.addr)
		return 0;

	mem->bus.is_iomem = false;
	if (!bdev->funcs->io_mem_reserve)
		return 0;

	return bdev->funcs->io_mem_reserve(bdev, mem);
}

void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem)
{
	if (!mem)
		return;

	if (!mem->bus.offset && !mem->bus.addr)
		return;

	if (bdev->funcs->io_mem_free)
		bdev->funcs->io_mem_free(bdev, mem);

	mem->bus.offset = 0;
	mem->bus.addr = NULL;
}

/**
 * ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
 * @bo: The struct ttm_buffer_object.
 * @num_pages: The number of pages to copy.
 * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
 * @src_iter: A struct ttm_kmap_iter representing the source resource.
 *
 * The kmap iterators abstract per-page access to either a struct ttm_tt
 * or a linear io resource, so the same copy loop serves all combinations
 * of source and destination. This function is intended to also be usable
 * asynchronously, under a dma-fence, if desired.
 */
void ttm_move_memcpy(struct ttm_buffer_object *bo,
		     u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter)
{
	const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
	const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
	struct ttm_tt *ttm = bo->ttm;
	struct dma_buf_map src_map, dst_map;
	pgoff_t i;

	/* Single TTM move. NOP */
	if (dst_ops->maps_tt && src_ops->maps_tt)
		return;

	/* Don't move nonexistent data. Clear destination instead. */
	if (src_ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm))) {
		if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
			return;

		for (i = 0; i < num_pages; ++i) {
			dst_ops->map_local(dst_iter, &dst_map, i);
			if (dst_map.is_iomem)
				memset_io(dst_map.vaddr_iomem, 0, PAGE_SIZE);
			else
				memset(dst_map.vaddr, 0, PAGE_SIZE);
			if (dst_ops->unmap_local)
				dst_ops->unmap_local(dst_iter, &dst_map);
		}
		return;
	}

	for (i = 0; i < num_pages; ++i) {
		dst_ops->map_local(dst_iter, &dst_map, i);
		src_ops->map_local(src_iter, &src_map, i);

		drm_memcpy_from_wc(&dst_map, &src_map, PAGE_SIZE);

		if (src_ops->unmap_local)
			src_ops->unmap_local(src_iter, &src_map);
		if (dst_ops->unmap_local)
			dst_ops->unmap_local(dst_iter, &dst_map);
	}
}
EXPORT_SYMBOL(ttm_move_memcpy);

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *dst_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *dst_man =
		ttm_manager_type(bo->bdev, dst_mem->mem_type);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_resource *src_mem = bo->resource;
	struct ttm_resource_manager *src_man =
		ttm_manager_type(bdev, src_mem->mem_type);
	union {
		struct ttm_kmap_iter_tt tt;
		struct ttm_kmap_iter_linear_io io;
	} _dst_iter, _src_iter;
	struct ttm_kmap_iter *dst_iter, *src_iter;
	int ret = 0;

	if (ttm && ((ttm->page_flags & TTM_PAGE_FLAG_SWAPPED) ||
		    dst_man->use_tt)) {
		ret = ttm_tt_populate(bdev, ttm, ctx);
		if (ret)
			return ret;
	}

	dst_iter = ttm_kmap_iter_linear_io_init(&_dst_iter.io, bdev, dst_mem);
	if (PTR_ERR(dst_iter) == -EINVAL && dst_man->use_tt)
		dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm);
	if (IS_ERR(dst_iter))
		return PTR_ERR(dst_iter);

	src_iter = ttm_kmap_iter_linear_io_init(&_src_iter.io, bdev, src_mem);
	if (PTR_ERR(src_iter) == -EINVAL && src_man->use_tt)
		src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm);
	if (IS_ERR(src_iter)) {
		ret = PTR_ERR(src_iter);
		goto out_src_iter;
	}

	ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);

	if (!src_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
	ttm_bo_move_sync_cleanup(bo, dst_mem);

out_src_iter:
	if (!dst_iter->ops->maps_tt)
		ttm_kmap_iter_linear_io_fini(&_dst_iter.io, bdev, dst_mem);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	struct ttm_transfer_obj *fbo;

	fbo = container_of(bo, struct ttm_transfer_obj, base);
	ttm_bo_put(fbo->bo);
	kfree(fbo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_transfer_obj *fbo;
	int ret;

	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	fbo->base = *bo;

	ttm_bo_get(bo);
	fbo->bo = bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	atomic_inc(&ttm_glob.bo_count);
	INIT_LIST_HEAD(&fbo->base.ddestroy);
	INIT_LIST_HEAD(&fbo->base.lru);
	fbo->base.moving = NULL;
	drm_vma_node_reset(&fbo->base.base.vma_node);

	kref_init(&fbo->base.kref);
	fbo->base.destroy = &ttm_transfered_destroy;
	fbo->base.pin_count = 0;
	if (bo->type != ttm_bo_type_sg)
		fbo->base.base.resv = &fbo->base.base._resv;

	dma_resv_init(&fbo->base.base._resv);
	fbo->base.base.dev = NULL;
	ret = dma_resv_trylock(&fbo->base.base._resv);
	WARN_ON(!ret);

	ttm_bo_move_to_lru_tail_unlocked(&fbo->base);

	*new_obj = &fbo->base;
	return 0;
}

pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp)
{
	struct ttm_resource_manager *man;
	enum ttm_caching caching;

	man = ttm_manager_type(bo->bdev, res->mem_type);
	caching = man->use_tt ? bo->ttm->caching : res->bus.caching;

	return ttm_prot_from_caching(caching, tmp);
}
EXPORT_SYMBOL(ttm_io_prot);

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;

	if (bo->resource->bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
	} else {
		resource_size_t res = bo->resource->bus.offset + offset;

		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->bus.caching == ttm_write_combined)
			map->virtual = ioremap_wc(res, size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			map->virtual = ioremap_cache(res, size);
#endif
		else
			map->virtual = ioremap(res, size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_resource *mem = bo->resource;
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	struct ttm_tt *ttm = bo->ttm;
	pgprot_t prot;
	int ret;

	BUG_ON(!ttm);

	ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
	if (ret)
		return ret;

	if (num_pages == 1 && ttm->caching == ttm_cached) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->resource->num_pages)
		return -EINVAL;
	if ((start_page + num_pages) > bo->resource->num_pages)
		return -EINVAL;

	ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
	if (ret)
		return ret;
	if (!bo->resource->bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	ttm_mem_io_free(map->bo->bdev, map->bo->resource);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
{
	struct ttm_resource *mem = bo->resource;
	int ret;

	ret = ttm_mem_io_reserve(bo->bdev, mem);
	if (ret)
		return ret;

	if (mem->bus.is_iomem) {
		void __iomem *vaddr_iomem;

		if (mem->bus.addr)
			vaddr_iomem = (void __iomem *)mem->bus.addr;
		else if (mem->bus.caching == ttm_write_combined)
			vaddr_iomem = ioremap_wc(mem->bus.offset,
						 bo->base.size);
#ifdef CONFIG_X86
		else if (mem->bus.caching == ttm_cached)
			vaddr_iomem = ioremap_cache(mem->bus.offset,
						    bo->base.size);
#endif
		else
			vaddr_iomem = ioremap(mem->bus.offset, bo->base.size);

		if (!vaddr_iomem)
			return -ENOMEM;

		dma_buf_map_set_vaddr_iomem(map, vaddr_iomem);

	} else {
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false
		};
		struct ttm_tt *ttm = bo->ttm;
		pgprot_t prot;
		void *vaddr;

		ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
		if (ret)
			return ret;

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
		vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot);
		if (!vaddr)
			return -ENOMEM;

		dma_buf_map_set_vaddr(map, vaddr);
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_vmap);

void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
{
	struct ttm_resource *mem = bo->resource;

	if (dma_buf_map_is_null(map))
		return;

	if (!map->is_iomem)
		vunmap(map->vaddr);
	else if (!mem->bus.addr)
		iounmap(map->vaddr_iomem);
	dma_buf_map_clear(map);

	ttm_mem_io_free(bo->bdev, bo->resource);
}
EXPORT_SYMBOL(ttm_bo_vunmap);
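
/*
 * Illustrative use (not part of the original file): map an entire bo
 * through a struct dma_buf_map, which tracks whether the mapping is in
 * system or io memory. "data" and "size" are assumed to come from the
 * caller.
 *
 *	struct dma_buf_map map;
 *	int ret = ttm_bo_vmap(bo, &map);
 *
 *	if (ret)
 *		return ret;
 *	if (map.is_iomem)
 *		memcpy_toio(map.vaddr_iomem, data, size);
 *	else
 *		memcpy(map.vaddr, data, size);
 *	ttm_bo_vunmap(bo, &map);
 */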

static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
				 bool dst_use_tt)
{
	int ret;

	ret = ttm_bo_wait(bo, false, false);
	if (ret)
		return ret;

	if (!dst_use_tt)
		ttm_bo_tt_destroy(bo);
	ttm_resource_free(bo, &bo->resource);
	return 0;
}

static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
				struct dma_fence *fence,
				bool dst_use_tt)
{
	struct ttm_buffer_object *ghost_obj;
	int ret;

	/*
	 * This should help pipeline ordinary buffer moves.
	 *
	 * Hang old buffer memory on a new buffer object,
	 * and leave it to be released when the GPU
	 * operation has completed.
	 */

	dma_fence_put(bo->moving);
	bo->moving = dma_fence_get(fence);

	ret = ttm_buffer_object_transfer(bo, &ghost_obj);
	if (ret)
		return ret;

	dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);

	/*
	 * If we're not moving to fixed memory, the TTM object
	 * needs to stay alive. Otherwise hang it on the ghost
	 * bo to be unbound and destroyed.
	 */

	if (dst_use_tt)
		ghost_obj->ttm = NULL;
	else
		bo->ttm = NULL;
	bo->resource = NULL;

	dma_resv_unlock(&ghost_obj->base._resv);
	ttm_bo_put(ghost_obj);
	return 0;
}

static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
				       struct dma_fence *fence)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from;

	from = ttm_manager_type(bdev, bo->resource->mem_type);

	/*
	 * The BO doesn't have a TTM that we need to bind/unbind. Just
	 * remember this eviction and free up the allocation.
	 */
	spin_lock(&from->move_lock);
	if (!from->move || dma_fence_is_later(fence, from->move)) {
		dma_fence_put(from->move);
		from->move = dma_fence_get(fence);
	}
	spin_unlock(&from->move_lock);

	ttm_resource_free(bo, &bo->resource);

	dma_fence_put(bo->moving);
	bo->moving = dma_fence_get(fence);
}

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence,
			      bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem)
{
	struct ttm_device *bdev = bo->bdev;
	struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
	struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
	int ret = 0;

	dma_resv_add_excl_fence(bo->base.resv, fence);
	if (!evict)
		ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
	else if (!from->use_tt && pipeline)
		ttm_bo_move_pipeline_evict(bo, fence);
	else
		ret = ttm_bo_wait_free_node(bo, man->use_tt);

	if (ret)
		return ret;

	ttm_bo_assign_mem(bo, new_mem);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
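
/*
 * Illustrative driver-side pattern (not part of the original file):
 * after scheduling a GPU copy into new_mem, hand the resulting fence to
 * ttm_bo_move_accel_cleanup(). my_driver_copy() is a hypothetical
 * driver helper that returns the fence for the scheduled blit.
 *
 *	fence = my_driver_copy(bo, new_mem);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	return ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
 */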

/**
 * ttm_bo_pipeline_gutting - purge the contents of a bo
 * @bo: The buffer object
 *
 * Purge the contents of a bo, async if the bo is not idle.
 * After a successful call, the bo is left unpopulated in
 * system placement. The function may wait uninterruptible
 * for idle on OOM.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
	static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
	struct ttm_buffer_object *ghost;
	struct ttm_resource *sys_res;
	struct ttm_tt *ttm;
	int ret;

	ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
	if (ret)
		return ret;

	/* If already idle, no need for ghost object dance. */
	ret = ttm_bo_wait(bo, false, true);
	if (ret != -EBUSY) {
		if (!bo->ttm) {
			/* See comment below about clearing. */
			ret = ttm_tt_create(bo, true);
			if (ret)
				goto error_free_sys_mem;
		} else {
			ttm_tt_unpopulate(bo->bdev, bo->ttm);
			if (bo->type == ttm_bo_type_device)
				ttm_tt_mark_for_clear(bo->ttm);
		}
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, sys_res);
		return 0;
	}

	/*
	 * We need an unpopulated ttm_tt after giving our current one,
	 * if any, to the ghost object. And we can't afford to fail
	 * creating one *after* the operation. If the bo subsequently gets
	 * resurrected, make sure it's cleared (if ttm_bo_type_device)
	 * to avoid leaking sensitive information to user-space.
	 */

	ttm = bo->ttm;
	bo->ttm = NULL;
	ret = ttm_tt_create(bo, true);
	swap(bo->ttm, ttm);
	if (ret)
		goto error_free_sys_mem;

	ret = ttm_buffer_object_transfer(bo, &ghost);
	if (ret)
		goto error_destroy_tt;

	ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
	/* Last resort, wait for the BO to be idle when we are OOM */
	if (ret)
		ttm_bo_wait(bo, false, false);

	dma_resv_unlock(&ghost->base._resv);
	ttm_bo_put(ghost);
	bo->ttm = ttm;
	bo->resource = NULL;
	ttm_bo_assign_mem(bo, sys_res);
	return 0;

error_destroy_tt:
	ttm_tt_destroy(bo->bdev, ttm);

error_free_sys_mem:
	ttm_resource_free(bo, &sys_res);
	return ret;
}