1/*
2 * Copyright 2007 Dave Airlied
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24/*
25 * Authors: Dave Airlied <airlied@linux.ie>
26 * Ben Skeggs <darktama@iinet.net.au>
27 * Jeremy Kolb <jkolb@brandeis.edu>
28 */
29
30#include <linux/dma-mapping.h>
31#include <linux/swiotlb.h>
32
33#include "nouveau_drm.h"
34#include "nouveau_dma.h"
35#include "nouveau_fence.h"
36
37#include "nouveau_bo.h"
38#include "nouveau_ttm.h"
39#include "nouveau_gem.h"
40
41/*
42 * NV10-NV40 tiling helpers
43 */
44
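/*
 * Pre-Tesla chips expose a small, fixed number of tile regions through the
 * framebuffer device.  The helpers below claim a free region, program its
 * address/size/pitch via nvkm_fb, and hand it back with a fence attached so
 * it is only reused once the hardware has finished with it.
 */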
45static void
46nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
47 u32 addr, u32 size, u32 pitch, u32 flags)
48{
49 struct nouveau_drm *drm = nouveau_drm(dev);
50 int i = reg - drm->tile.reg;
51 struct nvkm_device *device = nvxx_device(&drm->device);
52 struct nvkm_fb *fb = device->fb;
53 struct nvkm_fb_tile *tile = &fb->tile.region[i];
54
55	nouveau_fence_unref(&reg->fence);
56
57 if (tile->pitch)
58 nvkm_fb_tile_fini(fb, i, tile);
59
60 if (pitch)
61 nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
62
63 nvkm_fb_tile_prog(fb, i, tile);
64}
65
66static struct nouveau_drm_tile *
67nv10_bo_get_tile_region(struct drm_device *dev, int i)
68{
69 struct nouveau_drm *drm = nouveau_drm(dev);
70 struct nouveau_drm_tile *tile = &drm->tile.reg[i];
71
72 spin_lock(&drm->tile.lock);
73
74 if (!tile->used &&
75 (!tile->fence || nouveau_fence_done(tile->fence)))
76 tile->used = true;
77 else
78 tile = NULL;
79
80 spin_unlock(&drm->tile.lock);
81 return tile;
82}
83
84static void
85nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
86 struct fence *fence)
87{
88 struct nouveau_drm *drm = nouveau_drm(dev);
89
90 if (tile) {
91 spin_lock(&drm->tile.lock);
92 tile->fence = (struct nouveau_fence *)fence_get(fence);
93 tile->used = false;
94 spin_unlock(&drm->tile.lock);
95 }
96}
97
98static struct nouveau_drm_tile *
99nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
100 u32 size, u32 pitch, u32 flags)
101{
102 struct nouveau_drm *drm = nouveau_drm(dev);
103 struct nvkm_fb *fb = nvxx_fb(&drm->device);
104 struct nouveau_drm_tile *tile, *found = NULL;
105 int i;
106
107 for (i = 0; i < fb->tile.regions; i++) {
108 tile = nv10_bo_get_tile_region(dev, i);
109
110 if (pitch && !found) {
111 found = tile;
112 continue;
113
114 } else if (tile && fb->tile.region[i].pitch) {
115 /* Kill an unused tile region. */
116 nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
117 }
118
119 nv10_bo_put_tile_region(dev, tile, NULL);
120 }
121
122 if (found)
123 nv10_bo_update_tile_region(dev, found, addr, size,
124 pitch, flags);
125 return found;
126}
127
128static void
129nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
130{
131 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
132 struct drm_device *dev = drm->dev;
133 struct nouveau_bo *nvbo = nouveau_bo(bo);
134
135 if (unlikely(nvbo->gem.filp))
136 DRM_ERROR("bo %p still attached to GEM object\n", bo);
137 WARN_ON(nvbo->pin_refcnt > 0);
138 nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
139 kfree(nvbo);
140}
141
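/*
 * Round the requested size and alignment up to what the chipset requires.
 * Tiled buffers on pre-Tesla chips need chipset-specific alignment and a
 * size that is a multiple derived from the tile mode; Tesla and newer only
 * round up to the buffer's GPU page size.
 */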
142static void
143nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
144 int *align, int *size)
145{
146 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
147 struct nvif_device *device = &drm->device;
148
149 if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
150 if (nvbo->tile_mode) {
151 if (device->info.chipset >= 0x40) {
152 *align = 65536;
153 *size = roundup(*size, 64 * nvbo->tile_mode);
154
155 } else if (device->info.chipset >= 0x30) {
156 *align = 32768;
157 *size = roundup(*size, 64 * nvbo->tile_mode);
158
159 } else if (device->info.chipset >= 0x20) {
160 *align = 16384;
161 *size = roundup(*size, 64 * nvbo->tile_mode);
162
163 } else if (device->info.chipset >= 0x10) {
164 *align = 16384;
165 *size = roundup(*size, 32 * nvbo->tile_mode);
166 }
167 }
168 } else {
169 *size = roundup(*size, (1 << nvbo->page_shift));
170 *align = max((1 << nvbo->page_shift), *align);
171 }
172
173 *size = roundup(*size, PAGE_SIZE);
174}
175
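/*
 * Allocate and initialise a TTM-backed buffer object.  The requested size
 * is rejected if it is not positive or exceeds INT_MAX rounded down to the
 * VM's large page size; size and alignment are then fixed up for the chipset
 * and the object is handed to ttm_bo_init(), which owns cleanup through
 * nouveau_bo_del_ttm() from that point on.
 */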
176int
177nouveau_bo_new(struct drm_device *dev, int size, int align,
178 uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
179 struct sg_table *sg, struct reservation_object *robj,
180 struct nouveau_bo **pnvbo)
181{
182 struct nouveau_drm *drm = nouveau_drm(dev);
183 struct nouveau_bo *nvbo;
184 size_t acc_size;
185 int ret;
186 int type = ttm_bo_type_device;
187 int lpg_shift = 12;
188 int max_size;
189
190 if (drm->client.vm)
191 lpg_shift = drm->client.vm->mmu->lpg_shift;
192 max_size = INT_MAX & ~((1 << lpg_shift) - 1);
193
194 if (size <= 0 || size > max_size) {
195 NV_WARN(drm, "skipped size %x\n", (u32)size);
196 return -EINVAL;
197 }
198
199 if (sg)
200 type = ttm_bo_type_sg;
201
202 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
203 if (!nvbo)
204 return -ENOMEM;
205 INIT_LIST_HEAD(&nvbo->head);
206 INIT_LIST_HEAD(&nvbo->entry);
207 INIT_LIST_HEAD(&nvbo->vma_list);
208 nvbo->tile_mode = tile_mode;
209 nvbo->tile_flags = tile_flags;
210 nvbo->bo.bdev = &drm->ttm.bdev;
211
212 if (!nvxx_device(&drm->device)->func->cpu_coherent)
213 nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
214
215 nvbo->page_shift = 12;
216 if (drm->client.vm) {
217 if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
218 nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
219 }
220
221 nouveau_bo_fixup_align(nvbo, flags, &align, &size);
222 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
223 nouveau_bo_placement_set(nvbo, flags, 0);
224
225 acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
226 sizeof(struct nouveau_bo));
227
228 ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
229 type, &nvbo->placement,
230 align >> PAGE_SHIFT, false, NULL, acc_size, sg,
231 robj, nouveau_bo_del_ttm);
232 if (ret) {
233 /* ttm will call nouveau_bo_del_ttm if it fails.. */
234 return ret;
235 }
236
237 *pnvbo = nvbo;
238 return 0;
239}
240
241static void
242set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
243{
244 *n = 0;
245
246 if (type & TTM_PL_FLAG_VRAM)
247 pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
248 if (type & TTM_PL_FLAG_TT)
249 pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
250 if (type & TTM_PL_FLAG_SYSTEM)
251 pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
252}
253
254static void
255set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
256{
257 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
258 u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
259 unsigned i, fpfn, lpfn;
260
261 if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
262 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
263 nvbo->bo.mem.num_pages < vram_pages / 4) {
264 /*
265 * Make sure that the color and depth buffers are handled
266 * by independent memory controller units. Up to a 9x
267 * speed up when alpha-blending and depth-test are enabled
268 * at the same time.
269 */
270 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
271 fpfn = vram_pages / 2;
272 lpfn = ~0;
273 } else {
274 fpfn = 0;
275 lpfn = vram_pages / 2;
276 }
277 for (i = 0; i < nvbo->placement.num_placement; ++i) {
278 nvbo->placements[i].fpfn = fpfn;
279 nvbo->placements[i].lpfn = lpfn;
280 }
281 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
282 nvbo->busy_placements[i].fpfn = fpfn;
283 nvbo->busy_placements[i].lpfn = lpfn;
284 }
285 }
286}
287
288void
289nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
290{
291 struct ttm_placement *pl = &nvbo->placement;
292 uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
293 TTM_PL_MASK_CACHING) |
294 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
295
296 pl->placement = nvbo->placements;
297 set_placement_list(nvbo->placements, &pl->num_placement,
298 type, flags);
299
300 pl->busy_placement = nvbo->busy_placements;
301 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
302 type | busy, flags);
303
304 set_placement_range(nvbo, type);
305}
306
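/*
 * Pin a buffer into the requested memory type.  Nested pins only bump
 * pin_refcnt (and must ask for a compatible placement); the first pin
 * validates the buffer into place and adjusts the available VRAM/GART
 * accounting.
 */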
307int
308nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
309{
310 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
311 struct ttm_buffer_object *bo = &nvbo->bo;
312 bool force = false, evict = false;
313 int ret;
314
315 ret = ttm_bo_reserve(bo, false, false, false, NULL);
316 if (ret)
317 return ret;
318
319 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
320 memtype == TTM_PL_FLAG_VRAM && contig) {
321 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
322 if (bo->mem.mem_type == TTM_PL_VRAM) {
323 struct nvkm_mem *mem = bo->mem.mm_node;
324 if (!list_is_singular(&mem->regions))
325 evict = true;
326 }
327 nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
328 force = true;
329 }
330 }
331
332 if (nvbo->pin_refcnt) {
333 if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
334 NV_ERROR(drm, "bo %p pinned elsewhere: "
335 "0x%08x vs 0x%08x\n", bo,
336 1 << bo->mem.mem_type, memtype);
337 ret = -EBUSY;
338 }
339 nvbo->pin_refcnt++;
340 goto out;
341 }
342
343 if (evict) {
344 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
345 ret = nouveau_bo_validate(nvbo, false, false);
346 if (ret)
347 goto out;
348 }
349
350 nvbo->pin_refcnt++;
351 nouveau_bo_placement_set(nvbo, memtype, 0);
352
353	/* Drop pin_refcnt temporarily, so we don't trip the warning in
354	 * nouveau_bo_move() that makes sure we're not trying to move a
355	 * pinned buffer.
356 */
357 nvbo->pin_refcnt--;
358 ret = nouveau_bo_validate(nvbo, false, false);
359 if (ret)
360 goto out;
361 nvbo->pin_refcnt++;
362
363 switch (bo->mem.mem_type) {
364 case TTM_PL_VRAM:
365 drm->gem.vram_available -= bo->mem.size;
366 break;
367 case TTM_PL_TT:
368 drm->gem.gart_available -= bo->mem.size;
369 break;
370 default:
371 break;
372 }
373
374out:
375 if (force && ret)
376 nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
377 ttm_bo_unreserve(bo);
378 return ret;
379}
380
381int
382nouveau_bo_unpin(struct nouveau_bo *nvbo)
383{
384 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
385 struct ttm_buffer_object *bo = &nvbo->bo;
386 int ret, ref;
387
388 ret = ttm_bo_reserve(bo, false, false, false, NULL);
389 if (ret)
390 return ret;
391
392 ref = --nvbo->pin_refcnt;
393 WARN_ON_ONCE(ref < 0);
394 if (ref)
395 goto out;
396
397 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
398
399 ret = nouveau_bo_validate(nvbo, false, false);
400 if (ret == 0) {
401 switch (bo->mem.mem_type) {
402 case TTM_PL_VRAM:
403 drm->gem.vram_available += bo->mem.size;
404 break;
405 case TTM_PL_TT:
406 drm->gem.gart_available += bo->mem.size;
407 break;
408 default:
409 break;
410 }
411 }
412
413out:
414 ttm_bo_unreserve(bo);
415 return ret;
416}
417
418int
419nouveau_bo_map(struct nouveau_bo *nvbo)
420{
421 int ret;
422
423 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
424 if (ret)
425 return ret;
426
427 /*
428	 * TTM buffers allocated using the DMA API already have a mapping; use
429	 * it instead of creating a new one.
430 */
431 if (!nvbo->force_coherent)
432 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
433 &nvbo->kmap);
434
435 ttm_bo_unreserve(&nvbo->bo);
436 return ret;
437}
438
439void
440nouveau_bo_unmap(struct nouveau_bo *nvbo)
441{
442 if (!nvbo)
443 return;
444
445 /*
446	 * TTM buffers allocated using the DMA API already had a coherent
447	 * mapping which we used; there is no need to unmap it again.
448 */
449 if (!nvbo->force_coherent)
450 ttm_bo_kunmap(&nvbo->kmap);
451}
452
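/*
 * DMA cache maintenance: nouveau_bo_sync_for_device() flushes CPU writes to
 * the backing pages before GPU access, nouveau_bo_sync_for_cpu() makes GPU
 * writes visible to the CPU again.  Both are no-ops for coherent objects.
 */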
453void
454nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
455{
456 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
457 struct nvkm_device *device = nvxx_device(&drm->device);
458 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
459 int i;
460
461 if (!ttm_dma)
462 return;
463
464 /* Don't waste time looping if the object is coherent */
465 if (nvbo->force_coherent)
466 return;
467
468 for (i = 0; i < ttm_dma->ttm.num_pages; i++)
469 dma_sync_single_for_device(device->dev, ttm_dma->dma_address[i],
470 PAGE_SIZE, DMA_TO_DEVICE);
471}
472
473void
474nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
475{
476 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
477 struct nvkm_device *device = nvxx_device(&drm->device);
478 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
479 int i;
480
481 if (!ttm_dma)
482 return;
483
484 /* Don't waste time looping if the object is coherent */
485 if (nvbo->force_coherent)
486 return;
487
488 for (i = 0; i < ttm_dma->ttm.num_pages; i++)
489 dma_sync_single_for_cpu(device->dev, ttm_dma->dma_address[i],
490 PAGE_SIZE, DMA_FROM_DEVICE);
491}
492
493int
494nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
495 bool no_wait_gpu)
496{
497 int ret;
498
499 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
500 interruptible, no_wait_gpu);
501 if (ret)
502 return ret;
503
504 nouveau_bo_sync_for_device(nvbo);
505
506 return 0;
507}
508
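/*
 * Translate an element index into a CPU pointer, either as an offset into
 * the existing kmap or, for objects mapped through the DMA API, via the
 * per-page cpu_address array.
 */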
509static inline void *
510_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz)
511{
512 struct ttm_dma_tt *dma_tt;
513 u8 *m = mem;
514
515 index *= sz;
516
517 if (m) {
518 /* kmap'd address, return the corresponding offset */
519 m += index;
520 } else {
521 /* DMA-API mapping, lookup the right address */
522 dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm;
523 m = dma_tt->cpu_address[index / PAGE_SIZE];
524 m += index % PAGE_SIZE;
525 }
526
527 return m;
528}
529#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m))
530
531void
532nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
533{
534 bool is_iomem;
535 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
536
537 mem = nouveau_bo_mem_index(nvbo, index, mem);
538
539 if (is_iomem)
540 iowrite16_native(val, (void __force __iomem *)mem);
541 else
542 *mem = val;
543}
544
545u32
546nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
547{
548 bool is_iomem;
549 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
550
551 mem = nouveau_bo_mem_index(nvbo, index, mem);
552
553 if (is_iomem)
554 return ioread32_native((void __force __iomem *)mem);
555 else
556 return *mem;
557}
558
559void
560nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
561{
562 bool is_iomem;
563 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
564
565 mem = nouveau_bo_mem_index(nvbo, index, mem);
566
567 if (is_iomem)
568 iowrite32_native(val, (void __force __iomem *)mem);
569 else
570 *mem = val;
571}
572
573static struct ttm_tt *
574nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
575 uint32_t page_flags, struct page *dummy_read)
576{
577#if IS_ENABLED(CONFIG_AGP)
578 struct nouveau_drm *drm = nouveau_bdev(bdev);
579
580 if (drm->agp.bridge) {
581 return ttm_agp_tt_create(bdev, drm->agp.bridge, size,
582 page_flags, dummy_read);
583 }
584#endif
585
586 return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
587}
588
589static int
590nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
591{
592 /* We'll do this from user space. */
593 return 0;
594}
595
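/*
 * Describe each TTM memory type to the core: which manager handles it, how
 * it may be mapped and which caching modes are legal, depending on whether
 * the GPU is Tesla or newer and whether an AGP bridge is present.
 */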
596static int
597nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
598 struct ttm_mem_type_manager *man)
599{
600 struct nouveau_drm *drm = nouveau_bdev(bdev);
601
602 switch (type) {
603 case TTM_PL_SYSTEM:
604 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
605 man->available_caching = TTM_PL_MASK_CACHING;
606 man->default_caching = TTM_PL_FLAG_CACHED;
607 break;
608 case TTM_PL_VRAM:
609 man->flags = TTM_MEMTYPE_FLAG_FIXED |
610 TTM_MEMTYPE_FLAG_MAPPABLE;
611 man->available_caching = TTM_PL_FLAG_UNCACHED |
612 TTM_PL_FLAG_WC;
613 man->default_caching = TTM_PL_FLAG_WC;
614
615 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
616 /* Some BARs do not support being ioremapped WC */
617 if (nvxx_bar(&drm->device)->iomap_uncached) {
618 man->available_caching = TTM_PL_FLAG_UNCACHED;
619 man->default_caching = TTM_PL_FLAG_UNCACHED;
620 }
621
622 man->func = &nouveau_vram_manager;
623 man->io_reserve_fastpath = false;
624 man->use_io_reserve_lru = true;
625 } else {
626 man->func = &ttm_bo_manager_func;
627 }
628 break;
629 case TTM_PL_TT:
630 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
631 man->func = &nouveau_gart_manager;
632 else
633 if (!drm->agp.bridge)
634 man->func = &nv04_gart_manager;
635 else
636 man->func = &ttm_bo_manager_func;
637
638 if (drm->agp.bridge) {
639 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
640 man->available_caching = TTM_PL_FLAG_UNCACHED |
641 TTM_PL_FLAG_WC;
642 man->default_caching = TTM_PL_FLAG_WC;
643 } else {
644 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
645 TTM_MEMTYPE_FLAG_CMA;
646 man->available_caching = TTM_PL_MASK_CACHING;
647 man->default_caching = TTM_PL_FLAG_CACHED;
648 }
649
650 break;
651 default:
652 return -EINVAL;
653 }
654 return 0;
655}
656
657static void
658nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
659{
660 struct nouveau_bo *nvbo = nouveau_bo(bo);
661
662 switch (bo->mem.mem_type) {
663 case TTM_PL_VRAM:
664 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
665 TTM_PL_FLAG_SYSTEM);
666 break;
667 default:
668 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
669 break;
670 }
671
672 *pl = nvbo->placement;
673}
674
675
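/*
 * Per-generation copy paths used for buffer migration.  Each *_bo_move_init()
 * binds the copy object to a channel, and the matching copy/m2mf/exec hook
 * emits the methods that transfer old_mem to new_mem on that hardware.
 */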
676static int
677nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
678{
679 int ret = RING_SPACE(chan, 2);
680 if (ret == 0) {
681 BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
682 OUT_RING (chan, handle & 0x0000ffff);
683 FIRE_RING (chan);
684 }
685 return ret;
686}
687
688static int
689nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
690 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
691{
692 struct nvkm_mem *node = old_mem->mm_node;
693 int ret = RING_SPACE(chan, 10);
694 if (ret == 0) {
695 BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
696 OUT_RING (chan, upper_32_bits(node->vma[0].offset));
697 OUT_RING (chan, lower_32_bits(node->vma[0].offset));
698 OUT_RING (chan, upper_32_bits(node->vma[1].offset));
699 OUT_RING (chan, lower_32_bits(node->vma[1].offset));
700 OUT_RING (chan, PAGE_SIZE);
701 OUT_RING (chan, PAGE_SIZE);
702 OUT_RING (chan, PAGE_SIZE);
703 OUT_RING (chan, new_mem->num_pages);
704 BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
705 }
706 return ret;
707}
708
709static int
710nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
711{
712 int ret = RING_SPACE(chan, 2);
713 if (ret == 0) {
714 BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
715 OUT_RING (chan, handle);
716 }
717 return ret;
718}
719
720static int
721nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
722 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
723{
724 struct nvkm_mem *node = old_mem->mm_node;
725 u64 src_offset = node->vma[0].offset;
726 u64 dst_offset = node->vma[1].offset;
727 u32 page_count = new_mem->num_pages;
728 int ret;
729
730 page_count = new_mem->num_pages;
731 while (page_count) {
732 int line_count = (page_count > 8191) ? 8191 : page_count;
733
734 ret = RING_SPACE(chan, 11);
735 if (ret)
736 return ret;
737
738 BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
739 OUT_RING (chan, upper_32_bits(src_offset));
740 OUT_RING (chan, lower_32_bits(src_offset));
741 OUT_RING (chan, upper_32_bits(dst_offset));
742 OUT_RING (chan, lower_32_bits(dst_offset));
743 OUT_RING (chan, PAGE_SIZE);
744 OUT_RING (chan, PAGE_SIZE);
745 OUT_RING (chan, PAGE_SIZE);
746 OUT_RING (chan, line_count);
747 BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
748 OUT_RING (chan, 0x00000110);
749
750 page_count -= line_count;
751 src_offset += (PAGE_SIZE * line_count);
752 dst_offset += (PAGE_SIZE * line_count);
753 }
754
755 return 0;
756}
757
758static int
759nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
760 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
761{
762 struct nvkm_mem *node = old_mem->mm_node;
763 u64 src_offset = node->vma[0].offset;
764 u64 dst_offset = node->vma[1].offset;
765 u32 page_count = new_mem->num_pages;
766 int ret;
767
768 page_count = new_mem->num_pages;
769 while (page_count) {
770 int line_count = (page_count > 2047) ? 2047 : page_count;
771
772 ret = RING_SPACE(chan, 12);
773 if (ret)
774 return ret;
775
776 BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
777 OUT_RING (chan, upper_32_bits(dst_offset));
778 OUT_RING (chan, lower_32_bits(dst_offset));
779 BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
780 OUT_RING (chan, upper_32_bits(src_offset));
781 OUT_RING (chan, lower_32_bits(src_offset));
782 OUT_RING (chan, PAGE_SIZE); /* src_pitch */
783 OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
784 OUT_RING (chan, PAGE_SIZE); /* line_length */
785 OUT_RING (chan, line_count);
786 BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
787 OUT_RING (chan, 0x00100110);
788
789 page_count -= line_count;
790 src_offset += (PAGE_SIZE * line_count);
791 dst_offset += (PAGE_SIZE * line_count);
792 }
793
794 return 0;
795}
796
797static int
798nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
799 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
800{
801 struct nvkm_mem *node = old_mem->mm_node;
802 u64 src_offset = node->vma[0].offset;
803 u64 dst_offset = node->vma[1].offset;
804 u32 page_count = new_mem->num_pages;
805 int ret;
806
807 page_count = new_mem->num_pages;
808 while (page_count) {
809 int line_count = (page_count > 8191) ? 8191 : page_count;
810
811 ret = RING_SPACE(chan, 11);
812 if (ret)
813 return ret;
814
815 BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
816 OUT_RING (chan, upper_32_bits(src_offset));
817 OUT_RING (chan, lower_32_bits(src_offset));
818 OUT_RING (chan, upper_32_bits(dst_offset));
819 OUT_RING (chan, lower_32_bits(dst_offset));
820 OUT_RING (chan, PAGE_SIZE);
821 OUT_RING (chan, PAGE_SIZE);
822 OUT_RING (chan, PAGE_SIZE);
823 OUT_RING (chan, line_count);
824 BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
825 OUT_RING (chan, 0x00000110);
826
827 page_count -= line_count;
828 src_offset += (PAGE_SIZE * line_count);
829 dst_offset += (PAGE_SIZE * line_count);
830 }
831
832 return 0;
833}
834
835static int
836nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
837 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
838{
839 struct nvkm_mem *node = old_mem->mm_node;
840 int ret = RING_SPACE(chan, 7);
841 if (ret == 0) {
842 BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
843 OUT_RING (chan, upper_32_bits(node->vma[0].offset));
844 OUT_RING (chan, lower_32_bits(node->vma[0].offset));
845 OUT_RING (chan, upper_32_bits(node->vma[1].offset));
846 OUT_RING (chan, lower_32_bits(node->vma[1].offset));
847 OUT_RING (chan, 0x00000000 /* COPY */);
848 OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
849 }
850 return ret;
851}
852
853static int
854nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
855 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
856{
857 struct nvkm_mem *node = old_mem->mm_node;
858 int ret = RING_SPACE(chan, 7);
859 if (ret == 0) {
860 BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
861 OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
862 OUT_RING (chan, upper_32_bits(node->vma[0].offset));
863 OUT_RING (chan, lower_32_bits(node->vma[0].offset));
864 OUT_RING (chan, upper_32_bits(node->vma[1].offset));
865 OUT_RING (chan, lower_32_bits(node->vma[1].offset));
866 OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
867 }
868 return ret;
869}
870
871static int
872nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
873{
874 int ret = RING_SPACE(chan, 6);
875 if (ret == 0) {
876 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
877 OUT_RING (chan, handle);
878 BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
879 OUT_RING (chan, chan->drm->ntfy.handle);
880 OUT_RING (chan, chan->vram.handle);
881 OUT_RING (chan, chan->vram.handle);
882 }
883
884 return ret;
885}
886
887static int
888nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
889 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
890{
891 struct nvkm_mem *node = old_mem->mm_node;
892 u64 length = (new_mem->num_pages << PAGE_SHIFT);
893 u64 src_offset = node->vma[0].offset;
894 u64 dst_offset = node->vma[1].offset;
895 int src_tiled = !!node->memtype;
896 int dst_tiled = !!((struct nvkm_mem *)new_mem->mm_node)->memtype;
897 int ret;
898
899 while (length) {
900 u32 amount, stride, height;
901
902 ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
903 if (ret)
904 return ret;
905
906 amount = min(length, (u64)(4 * 1024 * 1024));
907 stride = 16 * 4;
908 height = amount / stride;
909
910 if (src_tiled) {
911 BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
912 OUT_RING (chan, 0);
913 OUT_RING (chan, 0);
914 OUT_RING (chan, stride);
915 OUT_RING (chan, height);
916 OUT_RING (chan, 1);
917 OUT_RING (chan, 0);
918 OUT_RING (chan, 0);
919 } else {
920 BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
921 OUT_RING (chan, 1);
922 }
923 if (dst_tiled) {
924 BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
925 OUT_RING (chan, 0);
926 OUT_RING (chan, 0);
927 OUT_RING (chan, stride);
928 OUT_RING (chan, height);
929 OUT_RING (chan, 1);
930 OUT_RING (chan, 0);
931 OUT_RING (chan, 0);
932 } else {
933 BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
934 OUT_RING (chan, 1);
935 }
936
937 BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
938 OUT_RING (chan, upper_32_bits(src_offset));
939 OUT_RING (chan, upper_32_bits(dst_offset));
940 BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
941 OUT_RING (chan, lower_32_bits(src_offset));
942 OUT_RING (chan, lower_32_bits(dst_offset));
943 OUT_RING (chan, stride);
944 OUT_RING (chan, stride);
945 OUT_RING (chan, stride);
946 OUT_RING (chan, height);
947 OUT_RING (chan, 0x00000101);
948 OUT_RING (chan, 0x00000000);
949 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
950 OUT_RING (chan, 0);
951
952 length -= amount;
953 src_offset += amount;
954 dst_offset += amount;
955 }
956
957 return 0;
958}
959
960static int
961nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
962{
963 int ret = RING_SPACE(chan, 4);
964 if (ret == 0) {
965 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
966 OUT_RING (chan, handle);
967 BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
968 OUT_RING (chan, chan->drm->ntfy.handle);
969 }
970
971 return ret;
972}
973
974static inline uint32_t
975nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
976 struct nouveau_channel *chan, struct ttm_mem_reg *mem)
977{
978 if (mem->mem_type == TTM_PL_TT)
979 return NvDmaTT;
980 return chan->vram.handle;
981}
982
983static int
984nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
985 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
986{
987 u32 src_offset = old_mem->start << PAGE_SHIFT;
988 u32 dst_offset = new_mem->start << PAGE_SHIFT;
989 u32 page_count = new_mem->num_pages;
990 int ret;
991
992 ret = RING_SPACE(chan, 3);
993 if (ret)
994 return ret;
995
996 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
997 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
998 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
999
1000 page_count = new_mem->num_pages;
1001 while (page_count) {
1002 int line_count = (page_count > 2047) ? 2047 : page_count;
1003
1004 ret = RING_SPACE(chan, 11);
1005 if (ret)
1006 return ret;
1007
1008 BEGIN_NV04(chan, NvSubCopy,
1009 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
1010 OUT_RING (chan, src_offset);
1011 OUT_RING (chan, dst_offset);
1012 OUT_RING (chan, PAGE_SIZE); /* src_pitch */
1013 OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
1014 OUT_RING (chan, PAGE_SIZE); /* line_length */
1015 OUT_RING (chan, line_count);
1016 OUT_RING (chan, 0x00000101);
1017 OUT_RING (chan, 0x00000000);
1018 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
1019 OUT_RING (chan, 0);
1020
1021 page_count -= line_count;
1022 src_offset += (PAGE_SIZE * line_count);
1023 dst_offset += (PAGE_SIZE * line_count);
1024 }
1025
1026 return 0;
1027}
1028
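/*
 * Map both the source and destination nodes into temporary GPU virtual
 * addresses (vma[0]/vma[1] on the old node) so the copy engine can address
 * them during the move.
 */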
1029static int
1030nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
1031 struct ttm_mem_reg *mem)
1032{
1033 struct nvkm_mem *old_node = bo->mem.mm_node;
1034 struct nvkm_mem *new_node = mem->mm_node;
1035 u64 size = (u64)mem->num_pages << PAGE_SHIFT;
1036 int ret;
1037
1038 ret = nvkm_vm_get(drm->client.vm, size, old_node->page_shift,
1039 NV_MEM_ACCESS_RW, &old_node->vma[0]);
1040 if (ret)
1041 return ret;
1042
1043 ret = nvkm_vm_get(drm->client.vm, size, new_node->page_shift,
1044 NV_MEM_ACCESS_RW, &old_node->vma[1]);
1045 if (ret) {
1046 nvkm_vm_put(&old_node->vma[0]);
1047 return ret;
1048 }
1049
1050 nvkm_vm_map(&old_node->vma[0], old_node);
1051 nvkm_vm_map(&old_node->vma[1], new_node);
1052 return 0;
1053}
1054
1055static int
1056nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
1057 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1058{
1059 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1060 struct nouveau_channel *chan = drm->ttm.chan;
1061 struct nouveau_cli *cli = (void *)chan->user.client;
1062 struct nouveau_fence *fence;
1063 int ret;
1064
1065	/* Create temporary VMAs for the transfer and attach them to the
1066	 * old nvkm_mem node; these will be cleaned up after TTM has
1067	 * destroyed the ttm_mem_reg.
1068 */
1069 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
1070 ret = nouveau_bo_move_prep(drm, bo, new_mem);
1071 if (ret)
1072 return ret;
1073 }
1074
1075 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
1076 ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
1077 if (ret == 0) {
1078 ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
1079 if (ret == 0) {
1080 ret = nouveau_fence_new(chan, false, &fence);
1081 if (ret == 0) {
1082 ret = ttm_bo_move_accel_cleanup(bo,
1083 &fence->base,
1084 evict,
1085 no_wait_gpu,
1086 new_mem);
1087 nouveau_fence_unref(&fence);
1088 }
1089 }
1090 }
1091 mutex_unlock(&cli->mutex);
1092 return ret;
1093}
1094
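/*
 * Probe the channel for the newest copy class it supports and remember the
 * matching move function; if nothing initialises, buffer moves fall back to
 * CPU copies.
 */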
1095void
1096nouveau_bo_move_init(struct nouveau_drm *drm)
1097{
1098 static const struct {
1099 const char *name;
1100 int engine;
1101 s32 oclass;
1102 int (*exec)(struct nouveau_channel *,
1103 struct ttm_buffer_object *,
1104 struct ttm_mem_reg *, struct ttm_mem_reg *);
1105 int (*init)(struct nouveau_channel *, u32 handle);
1106 } _methods[] = {
1107 { "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
1108 { "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1109 { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
1110 { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1111 { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
1112 { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
1113 { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
1114 { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
1115 { "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
1116 { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
1117 { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
1118 {},
1119 { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
1120 }, *mthd = _methods;
1121 const char *name = "CPU";
1122 int ret;
1123
1124 do {
1125 struct nouveau_channel *chan;
1126
1127 if (mthd->engine)
1128 chan = drm->cechan;
1129 else
1130 chan = drm->channel;
1131 if (chan == NULL)
1132 continue;
1133
1134 ret = nvif_object_init(&chan->user,
1135 mthd->oclass | (mthd->engine << 16),
1136 mthd->oclass, NULL, 0,
1137 &drm->ttm.copy);
1138 if (ret == 0) {
1139 ret = mthd->init(chan, drm->ttm.copy.handle);
1140 if (ret) {
1141 nvif_object_fini(&drm->ttm.copy);
1142 continue;
1143 }
1144
1145 drm->ttm.move = mthd->exec;
1146 drm->ttm.chan = chan;
1147 name = mthd->name;
1148 break;
1149 }
1150 } while ((++mthd)->exec);
1151
1152 NV_INFO(drm, "MM: using %s for buffer copies\n", name);
1153}
1154
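/*
 * VRAM <-> system moves are bounced through a GART-backed temporary:
 * _flipd copies to the temporary with the engine and lets TTM finish the
 * move to system memory, _flips does the same in the opposite direction.
 */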
1155static int
1156nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1157 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1158{
1159 struct ttm_place placement_memtype = {
1160 .fpfn = 0,
1161 .lpfn = 0,
1162 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1163 };
1164 struct ttm_placement placement;
1165 struct ttm_mem_reg tmp_mem;
1166 int ret;
1167
1168 placement.num_placement = placement.num_busy_placement = 1;
1169 placement.placement = placement.busy_placement = &placement_memtype;
1170
1171 tmp_mem = *new_mem;
1172 tmp_mem.mm_node = NULL;
1173 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
1174 if (ret)
1175 return ret;
1176
1177 ret = ttm_tt_bind(bo->ttm, &tmp_mem);
1178 if (ret)
1179 goto out;
1180
1181 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
1182 if (ret)
1183 goto out;
1184
1185 ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
1186out:
1187 ttm_bo_mem_put(bo, &tmp_mem);
1188 return ret;
1189}
1190
1191static int
1192nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1193 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1194{
1195 struct ttm_place placement_memtype = {
1196 .fpfn = 0,
1197 .lpfn = 0,
1198 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1199 };
1200 struct ttm_placement placement;
1201 struct ttm_mem_reg tmp_mem;
1202 int ret;
1203
1204 placement.num_placement = placement.num_busy_placement = 1;
1205 placement.placement = placement.busy_placement = &placement_memtype;
1206
1207 tmp_mem = *new_mem;
1208 tmp_mem.mm_node = NULL;
1209 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
1210 if (ret)
1211 return ret;
1212
1213 ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
1214 if (ret)
1215 goto out;
1216
1217 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
1218 if (ret)
1219 goto out;
1220
1221out:
1222 ttm_bo_mem_put(bo, &tmp_mem);
1223 return ret;
1224}
1225
1226static void
1227nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
1228{
1229 struct nouveau_bo *nvbo = nouveau_bo(bo);
1230 struct nvkm_vma *vma;
1231
1232 /* ttm can now (stupidly) pass the driver bos it didn't create... */
1233 if (bo->destroy != nouveau_bo_del_ttm)
1234 return;
1235
1236 list_for_each_entry(vma, &nvbo->vma_list, head) {
1237 if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
1238 (new_mem->mem_type == TTM_PL_VRAM ||
1239 nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
1240 nvkm_vm_map(vma, new_mem->mm_node);
1241 } else {
1242 nvkm_vm_unmap(vma);
1243 }
1244 }
1245}
1246
1247static int
1248nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
1249 struct nouveau_drm_tile **new_tile)
1250{
1251 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1252 struct drm_device *dev = drm->dev;
1253 struct nouveau_bo *nvbo = nouveau_bo(bo);
1254 u64 offset = new_mem->start << PAGE_SHIFT;
1255
1256 *new_tile = NULL;
1257 if (new_mem->mem_type != TTM_PL_VRAM)
1258 return 0;
1259
1260 if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
1261 *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
1262 nvbo->tile_mode,
1263 nvbo->tile_flags);
1264 }
1265
1266 return 0;
1267}
1268
1269static void
1270nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1271 struct nouveau_drm_tile *new_tile,
1272 struct nouveau_drm_tile **old_tile)
1273{
1274 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1275 struct drm_device *dev = drm->dev;
1276 struct fence *fence = reservation_object_get_excl(bo->resv);
1277
1278 nv10_bo_put_tile_region(dev, *old_tile, fence);
1279 *old_tile = new_tile;
1280}
1281
1282static int
1283nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1284 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
1285{
1286 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1287 struct nouveau_bo *nvbo = nouveau_bo(bo);
1288 struct ttm_mem_reg *old_mem = &bo->mem;
1289 struct nouveau_drm_tile *new_tile = NULL;
1290 int ret = 0;
1291
1292 if (nvbo->pin_refcnt)
1293 NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
1294
1295 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1296 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
1297 if (ret)
1298 return ret;
1299 }
1300
1301 /* Fake bo copy. */
1302 if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
1303 BUG_ON(bo->mem.mm_node != NULL);
1304 bo->mem = *new_mem;
1305 new_mem->mm_node = NULL;
1306 goto out;
1307 }
1308
1309 /* Hardware assisted copy. */
1310 if (drm->ttm.move) {
1311 if (new_mem->mem_type == TTM_PL_SYSTEM)
1312 ret = nouveau_bo_move_flipd(bo, evict, intr,
1313 no_wait_gpu, new_mem);
1314 else if (old_mem->mem_type == TTM_PL_SYSTEM)
1315 ret = nouveau_bo_move_flips(bo, evict, intr,
1316 no_wait_gpu, new_mem);
1317 else
1318 ret = nouveau_bo_move_m2mf(bo, evict, intr,
1319 no_wait_gpu, new_mem);
1320 if (!ret)
1321 goto out;
1322 }
1323
1324 /* Fallback to software copy. */
1325 ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
1326 if (ret == 0)
1327 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
1328
1329out:
1330 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1331 if (ret)
1332 nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1333 else
1334 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
1335 }
1336
1337 return ret;
1338}
1339
1340static int
1341nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1342{
1343 struct nouveau_bo *nvbo = nouveau_bo(bo);
1344
1345 return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
1346}
1347
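/*
 * Tell TTM how a memory region is reached from the CPU: system memory needs
 * nothing, AGP uses its aperture, and VRAM (or tiled GART memory on Tesla+)
 * goes through BAR1, with a BAR VMA set up on Tesla and newer.
 */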
1348static int
1349nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1350{
1351 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1352 struct nouveau_drm *drm = nouveau_bdev(bdev);
1353 struct nvkm_device *device = nvxx_device(&drm->device);
1354 struct nvkm_mem *node = mem->mm_node;
1355 int ret;
1356
1357 mem->bus.addr = NULL;
1358 mem->bus.offset = 0;
1359 mem->bus.size = mem->num_pages << PAGE_SHIFT;
1360 mem->bus.base = 0;
1361 mem->bus.is_iomem = false;
1362 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1363 return -EINVAL;
1364 switch (mem->mem_type) {
1365 case TTM_PL_SYSTEM:
1366 /* System memory */
1367 return 0;
1368 case TTM_PL_TT:
1369#if IS_ENABLED(CONFIG_AGP)
1370 if (drm->agp.bridge) {
1371 mem->bus.offset = mem->start << PAGE_SHIFT;
1372 mem->bus.base = drm->agp.base;
1373 mem->bus.is_iomem = !drm->agp.cma;
1374 }
1375#endif
1376 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
1377 /* untiled */
1378 break;
1379 /* fallthrough, tiled memory */
1380 case TTM_PL_VRAM:
1381 mem->bus.offset = mem->start << PAGE_SHIFT;
1382 mem->bus.base = device->func->resource_addr(device, 1);
1383 mem->bus.is_iomem = true;
1384 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
1385 struct nvkm_bar *bar = nvxx_bar(&drm->device);
1386 int page_shift = 12;
1387 if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
1388 page_shift = node->page_shift;
1389
1390 ret = nvkm_bar_umap(bar, node->size << 12, page_shift,
1391 &node->bar_vma);
1392 if (ret)
1393 return ret;
1394
1395 nvkm_vm_map(&node->bar_vma, node);
1396 mem->bus.offset = node->bar_vma.offset;
1397 }
1398 break;
1399 default:
1400 return -EINVAL;
1401 }
1402 return 0;
1403}
1404
1405static void
1406nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1407{
1408 struct nvkm_mem *node = mem->mm_node;
1409
1410 if (!node->bar_vma.node)
1411 return;
1412
1413 nvkm_vm_unmap(&node->bar_vma);
1414 nvkm_vm_put(&node->bar_vma);
1415}
1416
1417static int
1418nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1419{
1420 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1421 struct nouveau_bo *nvbo = nouveau_bo(bo);
1422 struct nvkm_device *device = nvxx_device(&drm->device);
1423 u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
1424 int i, ret;
1425
1426 /* as long as the bo isn't in vram, and isn't tiled, we've got
1427 * nothing to do here.
1428 */
1429 if (bo->mem.mem_type != TTM_PL_VRAM) {
1430 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
1431 !nouveau_bo_tile_layout(nvbo))
1432 return 0;
1433
1434 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
1435 nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
1436
1437 ret = nouveau_bo_validate(nvbo, false, false);
1438 if (ret)
1439 return ret;
1440 }
1441 return 0;
1442 }
1443
1444 /* make sure bo is in mappable vram */
1445 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
1446 bo->mem.start + bo->mem.num_pages < mappable)
1447 return 0;
1448
1449 for (i = 0; i < nvbo->placement.num_placement; ++i) {
1450 nvbo->placements[i].fpfn = 0;
1451 nvbo->placements[i].lpfn = mappable;
1452 }
1453
1454 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
1455 nvbo->busy_placements[i].fpfn = 0;
1456 nvbo->busy_placements[i].lpfn = mappable;
1457 }
1458
1459 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1460 return nouveau_bo_validate(nvbo, false, false);
1461}
1462
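/*
 * Allocate the backing pages for a ttm_tt and set up their DMA mappings,
 * choosing between the DMA API, AGP, swiotlb and the plain page pool
 * depending on how the object and the platform are configured.
 */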
1463static int
1464nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1465{
1466 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1467 struct nouveau_drm *drm;
1468 struct nvkm_device *device;
1469 struct drm_device *dev;
1470 struct device *pdev;
1471 unsigned i;
1472 int r;
1473 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1474
1475 if (ttm->state != tt_unpopulated)
1476 return 0;
1477
1478 if (slave && ttm->sg) {
1479 /* make userspace faulting work */
1480 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1481 ttm_dma->dma_address, ttm->num_pages);
1482 ttm->state = tt_unbound;
1483 return 0;
1484 }
1485
1486 drm = nouveau_bdev(ttm->bdev);
1487 device = nvxx_device(&drm->device);
1488 dev = drm->dev;
1489 pdev = device->dev;
1490
1491 /*
1492 * Objects matching this condition have been marked as force_coherent,
1493 * so use the DMA API for them.
1494 */
1495 if (!nvxx_device(&drm->device)->func->cpu_coherent &&
1496 ttm->caching_state == tt_uncached)
1497 return ttm_dma_populate(ttm_dma, dev->dev);
1498
1499#if IS_ENABLED(CONFIG_AGP)
1500 if (drm->agp.bridge) {
1501 return ttm_agp_tt_populate(ttm);
1502 }
1503#endif
1504
1505#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
1506 if (swiotlb_nr_tbl()) {
1507 return ttm_dma_populate((void *)ttm, dev->dev);
1508 }
1509#endif
1510
1511 r = ttm_pool_populate(ttm);
1512 if (r) {
1513 return r;
1514 }
1515
1516 for (i = 0; i < ttm->num_pages; i++) {
1517 dma_addr_t addr;
1518
1519 addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
1520 DMA_BIDIRECTIONAL);
1521
1522 if (dma_mapping_error(pdev, addr)) {
1523 while (i--) {
1524 dma_unmap_page(pdev, ttm_dma->dma_address[i],
1525 PAGE_SIZE, DMA_BIDIRECTIONAL);
1526 ttm_dma->dma_address[i] = 0;
1527 }
1528 ttm_pool_unpopulate(ttm);
1529 return -EFAULT;
1530 }
1531
1532 ttm_dma->dma_address[i] = addr;
1533 }
1534 return 0;
1535}
1536
1537static void
1538nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1539{
1540 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1541 struct nouveau_drm *drm;
1542 struct nvkm_device *device;
1543 struct drm_device *dev;
1544 struct device *pdev;
1545 unsigned i;
1546 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1547
1548 if (slave)
1549 return;
1550
1551 drm = nouveau_bdev(ttm->bdev);
1552 device = nvxx_device(&drm->device);
1553 dev = drm->dev;
1554 pdev = device->dev;
1555
1556 /*
1557 * Objects matching this condition have been marked as force_coherent,
1558 * so use the DMA API for them.
1559 */
1560 if (!nvxx_device(&drm->device)->func->cpu_coherent &&
1561 ttm->caching_state == tt_uncached) {
1562 ttm_dma_unpopulate(ttm_dma, dev->dev);
1563 return;
1564 }
1565
1566#if IS_ENABLED(CONFIG_AGP)
1567 if (drm->agp.bridge) {
1568 ttm_agp_tt_unpopulate(ttm);
1569 return;
1570 }
1571#endif
1572
1573#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
1574 if (swiotlb_nr_tbl()) {
1575 ttm_dma_unpopulate((void *)ttm, dev->dev);
1576 return;
1577 }
1578#endif
1579
1580 for (i = 0; i < ttm->num_pages; i++) {
1581 if (ttm_dma->dma_address[i]) {
1582 dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
1583 DMA_BIDIRECTIONAL);
1584 }
1585 }
1586
1587 ttm_pool_unpopulate(ttm);
1588}
1589
1590void
1591nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
1592{
1593 struct reservation_object *resv = nvbo->bo.resv;
1594
1595 if (exclusive)
1596 reservation_object_add_excl_fence(resv, &fence->base);
1597 else if (fence)
1598 reservation_object_add_shared_fence(resv, &fence->base);
1599}
1600
1601struct ttm_bo_driver nouveau_bo_driver = {
1602 .ttm_tt_create = &nouveau_ttm_tt_create,
1603 .ttm_tt_populate = &nouveau_ttm_tt_populate,
1604 .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
1605 .invalidate_caches = nouveau_bo_invalidate_caches,
1606 .init_mem_type = nouveau_bo_init_mem_type,
1607 .evict_flags = nouveau_bo_evict_flags,
1608 .move_notify = nouveau_bo_move_ntfy,
1609 .move = nouveau_bo_move,
1610 .verify_access = nouveau_bo_verify_access,
1611 .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
1612 .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
1613 .io_mem_free = &nouveau_ttm_io_mem_free,
1614};
1615
1616struct nvkm_vma *
1617nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
1618{
1619 struct nvkm_vma *vma;
1620 list_for_each_entry(vma, &nvbo->vma_list, head) {
1621 if (vma->vm == vm)
1622 return vma;
1623 }
1624
1625 return NULL;
1626}
1627
1628int
1629nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
1630 struct nvkm_vma *vma)
1631{
1632 const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
1633 int ret;
1634
1635 ret = nvkm_vm_get(vm, size, nvbo->page_shift,
1636 NV_MEM_ACCESS_RW, vma);
1637 if (ret)
1638 return ret;
1639
1640 if ( nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
1641 (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
1642 nvbo->page_shift != vma->vm->mmu->lpg_shift))
1643 nvkm_vm_map(vma, nvbo->bo.mem.mm_node);
1644
1645 list_add_tail(&vma->head, &nvbo->vma_list);
1646 vma->refcount = 1;
1647 return 0;
1648}
1649
1650void
1651nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
1652{
1653 if (vma->node) {
1654 if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
1655 nvkm_vm_unmap(vma);
1656 nvkm_vm_put(vma);
1657 list_del(&vma->head);
1658 }
1659}
1/*
2 * Copyright 2007 Dave Airlied
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24/*
25 * Authors: Dave Airlied <airlied@linux.ie>
26 * Ben Skeggs <darktama@iinet.net.au>
27 * Jeremy Kolb <jkolb@brandeis.edu>
28 */
29
30#include <linux/dma-mapping.h>
31#include <linux/swiotlb.h>
32
33#include "nouveau_drv.h"
34#include "nouveau_dma.h"
35#include "nouveau_fence.h"
36
37#include "nouveau_bo.h"
38#include "nouveau_ttm.h"
39#include "nouveau_gem.h"
40#include "nouveau_mem.h"
41#include "nouveau_vmm.h"
42
43#include <nvif/class.h>
44#include <nvif/if500b.h>
45#include <nvif/if900b.h>
46
47/*
48 * NV10-NV40 tiling helpers
49 */
50
51static void
52nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
53 u32 addr, u32 size, u32 pitch, u32 flags)
54{
55 struct nouveau_drm *drm = nouveau_drm(dev);
56 int i = reg - drm->tile.reg;
57 struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
58 struct nvkm_fb_tile *tile = &fb->tile.region[i];
59
60	nouveau_fence_unref(&reg->fence);
61
62 if (tile->pitch)
63 nvkm_fb_tile_fini(fb, i, tile);
64
65 if (pitch)
66 nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);
67
68 nvkm_fb_tile_prog(fb, i, tile);
69}
70
71static struct nouveau_drm_tile *
72nv10_bo_get_tile_region(struct drm_device *dev, int i)
73{
74 struct nouveau_drm *drm = nouveau_drm(dev);
75 struct nouveau_drm_tile *tile = &drm->tile.reg[i];
76
77 spin_lock(&drm->tile.lock);
78
79 if (!tile->used &&
80 (!tile->fence || nouveau_fence_done(tile->fence)))
81 tile->used = true;
82 else
83 tile = NULL;
84
85 spin_unlock(&drm->tile.lock);
86 return tile;
87}
88
89static void
90nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
91 struct dma_fence *fence)
92{
93 struct nouveau_drm *drm = nouveau_drm(dev);
94
95 if (tile) {
96 spin_lock(&drm->tile.lock);
97 tile->fence = (struct nouveau_fence *)dma_fence_get(fence);
98 tile->used = false;
99 spin_unlock(&drm->tile.lock);
100 }
101}
102
103static struct nouveau_drm_tile *
104nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
105 u32 size, u32 pitch, u32 zeta)
106{
107 struct nouveau_drm *drm = nouveau_drm(dev);
108 struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
109 struct nouveau_drm_tile *tile, *found = NULL;
110 int i;
111
112 for (i = 0; i < fb->tile.regions; i++) {
113 tile = nv10_bo_get_tile_region(dev, i);
114
115 if (pitch && !found) {
116 found = tile;
117 continue;
118
119 } else if (tile && fb->tile.region[i].pitch) {
120 /* Kill an unused tile region. */
121 nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
122 }
123
124 nv10_bo_put_tile_region(dev, tile, NULL);
125 }
126
127 if (found)
128 nv10_bo_update_tile_region(dev, found, addr, size, pitch, zeta);
129 return found;
130}
131
132static void
133nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
134{
135 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
136 struct drm_device *dev = drm->dev;
137 struct nouveau_bo *nvbo = nouveau_bo(bo);
138
139 WARN_ON(nvbo->pin_refcnt > 0);
140 nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
141
142 /*
143 * If nouveau_bo_new() allocated this buffer, the GEM object was never
144 * initialized, so don't attempt to release it.
145 */
146 if (bo->base.dev)
147 drm_gem_object_release(&bo->base);
148
149 kfree(nvbo);
150}
151
152static inline u64
153roundup_64(u64 x, u32 y)
154{
155 x += y - 1;
156 do_div(x, y);
157 return x * y;
158}
159
160static void
161nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
162 int *align, u64 *size)
163{
164 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
165 struct nvif_device *device = &drm->client.device;
166
167 if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
168 if (nvbo->mode) {
169 if (device->info.chipset >= 0x40) {
170 *align = 65536;
171 *size = roundup_64(*size, 64 * nvbo->mode);
172
173 } else if (device->info.chipset >= 0x30) {
174 *align = 32768;
175 *size = roundup_64(*size, 64 * nvbo->mode);
176
177 } else if (device->info.chipset >= 0x20) {
178 *align = 16384;
179 *size = roundup_64(*size, 64 * nvbo->mode);
180
181 } else if (device->info.chipset >= 0x10) {
182 *align = 16384;
183 *size = roundup_64(*size, 32 * nvbo->mode);
184 }
185 }
186 } else {
187 *size = roundup_64(*size, (1 << nvbo->page));
188 *align = max((1 << nvbo->page), *align);
189 }
190
191 *size = roundup_64(*size, PAGE_SIZE);
192}
193
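/*
 * Allocate a nouveau_bo and derive its memory attributes (kind, compression,
 * zeta, contiguity) from the tile flags, then pick a GPU page size that all
 * possible placements support before fixing up size and alignment.
 */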
194struct nouveau_bo *
195nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 flags,
196 u32 tile_mode, u32 tile_flags)
197{
198 struct nouveau_drm *drm = cli->drm;
199 struct nouveau_bo *nvbo;
200 struct nvif_mmu *mmu = &cli->mmu;
201 struct nvif_vmm *vmm = cli->svm.cli ? &cli->svm.vmm : &cli->vmm.vmm;
202 int i, pi = -1;
203
204 if (!*size) {
205 NV_WARN(drm, "skipped size %016llx\n", *size);
206 return ERR_PTR(-EINVAL);
207 }
208
209 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
210 if (!nvbo)
211 return ERR_PTR(-ENOMEM);
212 INIT_LIST_HEAD(&nvbo->head);
213 INIT_LIST_HEAD(&nvbo->entry);
214 INIT_LIST_HEAD(&nvbo->vma_list);
215 nvbo->bo.bdev = &drm->ttm.bdev;
216
217 /* This is confusing, and doesn't actually mean we want an uncached
218 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
219 * into in nouveau_gem_new().
220 */
221 if (flags & TTM_PL_FLAG_UNCACHED) {
222 /* Determine if we can get a cache-coherent map, forcing
223 * uncached mapping if we can't.
224 */
225 if (!nouveau_drm_use_coherent_gpu_mapping(drm))
226 nvbo->force_coherent = true;
227 }
228
229 if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
230 nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
231 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
232 kfree(nvbo);
233 return ERR_PTR(-EINVAL);
234 }
235
236 nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
237 } else
238 if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
239 nvbo->kind = (tile_flags & 0x00007f00) >> 8;
240 nvbo->comp = (tile_flags & 0x00030000) >> 16;
241 if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
242 kfree(nvbo);
243 return ERR_PTR(-EINVAL);
244 }
245 } else {
246 nvbo->zeta = (tile_flags & 0x00000007);
247 }
248 nvbo->mode = tile_mode;
249 nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
250
251 /* Determine the desirable target GPU page size for the buffer. */
252 for (i = 0; i < vmm->page_nr; i++) {
253 /* Because we cannot currently allow VMM maps to fail
254 * during buffer migration, we need to determine page
255 * size for the buffer up-front, and pre-allocate its
256 * page tables.
257 *
258 * Skip page sizes that can't support needed domains.
259 */
260 if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
261 (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
262 continue;
263 if ((flags & TTM_PL_FLAG_TT) &&
264 (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
265 continue;
266
267 /* Select this page size if it's the first that supports
268 * the potential memory domains, or when it's compatible
269 * with the requested compression settings.
270 */
271 if (pi < 0 || !nvbo->comp || vmm->page[i].comp)
272 pi = i;
273
274 /* Stop once the buffer is larger than the current page size. */
275 if (*size >= 1ULL << vmm->page[i].shift)
276 break;
277 }
278
279 if (WARN_ON(pi < 0))
280 return ERR_PTR(-EINVAL);
281
282 /* Disable compression if suitable settings couldn't be found. */
283 if (nvbo->comp && !vmm->page[pi].comp) {
284 if (mmu->object.oclass >= NVIF_CLASS_MMU_GF100)
285 nvbo->kind = mmu->kind[nvbo->kind];
286 nvbo->comp = 0;
287 }
288 nvbo->page = vmm->page[pi].shift;
289
290 nouveau_bo_fixup_align(nvbo, flags, align, size);
291
292 return nvbo;
293}
294
295int
296nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 flags,
297 struct sg_table *sg, struct dma_resv *robj)
298{
299 int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
300 size_t acc_size;
301 int ret;
302
303 acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
304
305 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
306 nouveau_bo_placement_set(nvbo, flags, 0);
307
308 ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
309 &nvbo->placement, align >> PAGE_SHIFT, false,
310 acc_size, sg, robj, nouveau_bo_del_ttm);
311 if (ret) {
312 /* ttm will call nouveau_bo_del_ttm if it fails.. */
313 return ret;
314 }
315
316 return 0;
317}
318
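/*
 * Convenience wrapper: allocate a buffer with nouveau_bo_alloc() and
 * immediately initialise it with nouveau_bo_init().
 */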
319int
320nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
321 uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
322 struct sg_table *sg, struct dma_resv *robj,
323 struct nouveau_bo **pnvbo)
324{
325 struct nouveau_bo *nvbo;
326 int ret;
327
328 nvbo = nouveau_bo_alloc(cli, &size, &align, flags, tile_mode,
329 tile_flags);
330 if (IS_ERR(nvbo))
331 return PTR_ERR(nvbo);
332
333 ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
334 if (ret)
335 return ret;
336
337 *pnvbo = nvbo;
338 return 0;
339}
340
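/* Expand a memory-domain mask into an array of TTM placements, most
 * preferred (VRAM) first, applying the same caching/eviction flags to
 * every entry.
 */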
341static void
342set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
343{
344 *n = 0;
345
346 if (type & TTM_PL_FLAG_VRAM)
347 pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
348 if (type & TTM_PL_FLAG_TT)
349 pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
350 if (type & TTM_PL_FLAG_SYSTEM)
351 pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
352}
353
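/* On CELSIUS-class boards, constrain small tiled buffers (< 1/4 of VRAM)
 * to either the lower or the upper half of VRAM, depending on whether
 * they are depth (zeta) or colour buffers; see the comment in the body.
 */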
354static void
355set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
356{
357 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
358 u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
359 unsigned i, fpfn, lpfn;
360
361 if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
362 nvbo->mode && (type & TTM_PL_FLAG_VRAM) &&
363 nvbo->bo.mem.num_pages < vram_pages / 4) {
364 /*
365 * Make sure that the color and depth buffers are handled
366 * by independent memory controller units. Up to a 9x
367 * speed up when alpha-blending and depth-test are enabled
368 * at the same time.
369 */
370 if (nvbo->zeta) {
371 fpfn = vram_pages / 2;
372 lpfn = ~0;
373 } else {
374 fpfn = 0;
375 lpfn = vram_pages / 2;
376 }
377 for (i = 0; i < nvbo->placement.num_placement; ++i) {
378 nvbo->placements[i].fpfn = fpfn;
379 nvbo->placements[i].lpfn = lpfn;
380 }
381 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
382 nvbo->busy_placements[i].fpfn = fpfn;
383 nvbo->busy_placements[i].lpfn = lpfn;
384 }
385 }
386}
387
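/* Build the TTM placement lists for a buffer from the requested memory
 * domains; the "busy" list adds the extra domains to fall back to under
 * memory pressure. Coherent buffers are forced to an uncached mapping
 * and pinned buffers are marked non-evictable.
 */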
388void
389nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
390{
391 struct ttm_placement *pl = &nvbo->placement;
392 uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
393 TTM_PL_MASK_CACHING) |
394 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
395
396 pl->placement = nvbo->placements;
397 set_placement_list(nvbo->placements, &pl->num_placement,
398 type, flags);
399
400 pl->busy_placement = nvbo->busy_placements;
401 set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
402 type | busy, flags);
403
404 set_placement_range(nvbo, type);
405}
406
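/* Pin a buffer into the given memory type. Pinning is reference counted,
 * and an already-pinned buffer must be requested in the same memory type.
 * On TESLA and newer, pinning into VRAM with "contig" set may first evict
 * the buffer so it can be re-validated as physically contiguous.
 */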
407int
408nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
409{
410 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
411 struct ttm_buffer_object *bo = &nvbo->bo;
412 bool force = false, evict = false;
413 int ret;
414
415 ret = ttm_bo_reserve(bo, false, false, NULL);
416 if (ret)
417 return ret;
418
419 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
420 memtype == TTM_PL_FLAG_VRAM && contig) {
421 if (!nvbo->contig) {
422 nvbo->contig = true;
423 force = true;
424 evict = true;
425 }
426 }
427
428 if (nvbo->pin_refcnt) {
429 if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
430 NV_ERROR(drm, "bo %p pinned elsewhere: "
431 "0x%08x vs 0x%08x\n", bo,
432 1 << bo->mem.mem_type, memtype);
433 ret = -EBUSY;
434 }
435 nvbo->pin_refcnt++;
436 goto out;
437 }
438
439 if (evict) {
440 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
441 ret = nouveau_bo_validate(nvbo, false, false);
442 if (ret)
443 goto out;
444 }
445
446 nvbo->pin_refcnt++;
447 nouveau_bo_placement_set(nvbo, memtype, 0);
448
449 /* drop pin_refcnt temporarily, so we don't trip the assertion
450 * in nouveau_bo_move() that makes sure we're not trying to
451 * move a pinned buffer
452 */
453 nvbo->pin_refcnt--;
454 ret = nouveau_bo_validate(nvbo, false, false);
455 if (ret)
456 goto out;
457 nvbo->pin_refcnt++;
458
459 switch (bo->mem.mem_type) {
460 case TTM_PL_VRAM:
461 drm->gem.vram_available -= bo->mem.size;
462 break;
463 case TTM_PL_TT:
464 drm->gem.gart_available -= bo->mem.size;
465 break;
466 default:
467 break;
468 }
469
470out:
471 if (force && ret)
472 nvbo->contig = false;
473 ttm_bo_unreserve(bo);
474 return ret;
475}
476
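/* Drop one pin reference. Only when the last reference goes away is the
 * buffer made evictable again and the VRAM/GART accounting updated.
 */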
477int
478nouveau_bo_unpin(struct nouveau_bo *nvbo)
479{
480 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
481 struct ttm_buffer_object *bo = &nvbo->bo;
482 int ret, ref;
483
484 ret = ttm_bo_reserve(bo, false, false, NULL);
485 if (ret)
486 return ret;
487
488 ref = --nvbo->pin_refcnt;
489 WARN_ON_ONCE(ref < 0);
490 if (ref)
491 goto out;
492
493 nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
494
495 ret = nouveau_bo_validate(nvbo, false, false);
496 if (ret == 0) {
497 switch (bo->mem.mem_type) {
498 case TTM_PL_VRAM:
499 drm->gem.vram_available += bo->mem.size;
500 break;
501 case TTM_PL_TT:
502 drm->gem.gart_available += bo->mem.size;
503 break;
504 default:
505 break;
506 }
507 }
508
509out:
510 ttm_bo_unreserve(bo);
511 return ret;
512}
513
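/* Map the whole buffer into kernel address space via TTM's kmap helper.
 * The mapping is cached in nvbo->kmap for the rd/wr accessors below.
 */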
514int
515nouveau_bo_map(struct nouveau_bo *nvbo)
516{
517 int ret;
518
519 ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
520 if (ret)
521 return ret;
522
523 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
524
525 ttm_bo_unreserve(&nvbo->bo);
526 return ret;
527}
528
529void
530nouveau_bo_unmap(struct nouveau_bo *nvbo)
531{
532 if (!nvbo)
533 return;
534
535 ttm_bo_kunmap(&nvbo->kmap);
536}
537
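/* DMA-sync the backing pages towards the device before GPU access; the
 * _for_cpu variant below is the mirror for CPU access after GPU writes.
 * Skipped for coherent buffers and for buffers without a DMA-capable ttm.
 */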
538void
539nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
540{
541 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
542 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
543 int i;
544
545 if (!ttm_dma)
546 return;
547
548 /* Don't waste time looping if the object is coherent */
549 if (nvbo->force_coherent)
550 return;
551
552 for (i = 0; i < ttm_dma->ttm.num_pages; i++)
553 dma_sync_single_for_device(drm->dev->dev,
554 ttm_dma->dma_address[i],
555 PAGE_SIZE, DMA_TO_DEVICE);
556}
557
558void
559nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
560{
561 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
562 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
563 int i;
564
565 if (!ttm_dma)
566 return;
567
568 /* Don't waste time looping if the object is coherent */
569 if (nvbo->force_coherent)
570 return;
571
572 for (i = 0; i < ttm_dma->ttm.num_pages; i++)
573 dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
574 PAGE_SIZE, DMA_FROM_DEVICE);
575}
576
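/* Validate the buffer against its current placement list and make sure
 * any CPU writes are visible to the device afterwards.
 */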
577int
578nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
579 bool no_wait_gpu)
580{
581 struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
582 int ret;
583
584 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
585 if (ret)
586 return ret;
587
588 nouveau_bo_sync_for_device(nvbo);
589
590 return 0;
591}
592
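/* CPU accessors for a kmapped buffer (see nouveau_bo_map()); they switch
 * to the iowrite/ioread variants when the mapping is I/O memory.
 */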
593void
594nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
595{
596 bool is_iomem;
597 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
598
599 mem += index;
600
601 if (is_iomem)
602 iowrite16_native(val, (void __force __iomem *)mem);
603 else
604 *mem = val;
605}
606
607u32
608nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
609{
610 bool is_iomem;
611 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
612
613 mem += index;
614
615 if (is_iomem)
616 return ioread32_native((void __force __iomem *)mem);
617 else
618 return *mem;
619}
620
621void
622nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
623{
624 bool is_iomem;
625 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
626
627 mem += index;
628
629 if (is_iomem)
630 iowrite32_native(val, (void __force __iomem *)mem);
631 else
632 *mem = val;
633}
634
635static struct ttm_tt *
636nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
637{
638#if IS_ENABLED(CONFIG_AGP)
639 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
640
641 if (drm->agp.bridge) {
642 return ttm_agp_tt_create(bo, drm->agp.bridge, page_flags);
643 }
644#endif
645
646 return nouveau_sgdma_create_ttm(bo, page_flags);
647}
648
649static int
650nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
651{
652 /* We'll do this from user space. */
653 return 0;
654}
655
656static int
657nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
658 struct ttm_mem_type_manager *man)
659{
660 struct nouveau_drm *drm = nouveau_bdev(bdev);
661 struct nvif_mmu *mmu = &drm->client.mmu;
662
663 switch (type) {
664 case TTM_PL_SYSTEM:
665 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
666 man->available_caching = TTM_PL_MASK_CACHING;
667 man->default_caching = TTM_PL_FLAG_CACHED;
668 break;
669 case TTM_PL_VRAM:
670 man->flags = TTM_MEMTYPE_FLAG_FIXED |
671 TTM_MEMTYPE_FLAG_MAPPABLE;
672 man->available_caching = TTM_PL_FLAG_UNCACHED |
673 TTM_PL_FLAG_WC;
674 man->default_caching = TTM_PL_FLAG_WC;
675
676 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
677 /* Some BARs do not support being ioremapped WC */
678 const u8 type = mmu->type[drm->ttm.type_vram].type;
679 if (type & NVIF_MEM_UNCACHED) {
680 man->available_caching = TTM_PL_FLAG_UNCACHED;
681 man->default_caching = TTM_PL_FLAG_UNCACHED;
682 }
683
684 man->func = &nouveau_vram_manager;
685 man->io_reserve_fastpath = false;
686 man->use_io_reserve_lru = true;
687 } else {
688 man->func = &ttm_bo_manager_func;
689 }
690 break;
691 case TTM_PL_TT:
692 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
693 man->func = &nouveau_gart_manager;
694 else
695 if (!drm->agp.bridge)
696 man->func = &nv04_gart_manager;
697 else
698 man->func = &ttm_bo_manager_func;
699
700 if (drm->agp.bridge) {
701 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
702 man->available_caching = TTM_PL_FLAG_UNCACHED |
703 TTM_PL_FLAG_WC;
704 man->default_caching = TTM_PL_FLAG_WC;
705 } else {
706 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
707 TTM_MEMTYPE_FLAG_CMA;
708 man->available_caching = TTM_PL_MASK_CACHING;
709 man->default_caching = TTM_PL_FLAG_CACHED;
710 }
711
712 break;
713 default:
714 return -EINVAL;
715 }
716 return 0;
717}
718
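/* Tell TTM where to evict a buffer to: VRAM spills to GART (with system
 * memory as the busy fallback), everything else goes straight to system
 * memory.
 */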
719static void
720nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
721{
722 struct nouveau_bo *nvbo = nouveau_bo(bo);
723
724 switch (bo->mem.mem_type) {
725 case TTM_PL_VRAM:
726 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
727 TTM_PL_FLAG_SYSTEM);
728 break;
729 default:
730 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
731 break;
732 }
733
734 *pl = nvbo->placement;
735}
736
737
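/*
 * Hardware-assisted buffer copies.
 *
 * Each nvXX_bo_move_*() helper below emits the commands for one
 * copy-engine or M2MF class, newest generation first;
 * nouveau_bo_move_init() probes them in order and installs the first
 * method the channel supports.
 */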
738static int
739nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
740{
741 int ret = RING_SPACE(chan, 2);
742 if (ret == 0) {
743 BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
744 OUT_RING (chan, handle & 0x0000ffff);
745 FIRE_RING (chan);
746 }
747 return ret;
748}
749
750static int
751nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
752 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
753{
754 struct nouveau_mem *mem = nouveau_mem(old_reg);
755 int ret = RING_SPACE(chan, 10);
756 if (ret == 0) {
757 BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
758 OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
759 OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
760 OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
761 OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
762 OUT_RING (chan, PAGE_SIZE);
763 OUT_RING (chan, PAGE_SIZE);
764 OUT_RING (chan, PAGE_SIZE);
765 OUT_RING (chan, new_reg->num_pages);
766 BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
767 }
768 return ret;
769}
770
771static int
772nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
773{
774 int ret = RING_SPACE(chan, 2);
775 if (ret == 0) {
776 BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
777 OUT_RING (chan, handle);
778 }
779 return ret;
780}
781
782static int
783nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
784 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
785{
786 struct nouveau_mem *mem = nouveau_mem(old_reg);
787 u64 src_offset = mem->vma[0].addr;
788 u64 dst_offset = mem->vma[1].addr;
789 u32 page_count = new_reg->num_pages;
790 int ret;
791
792 page_count = new_reg->num_pages;
793 while (page_count) {
794 int line_count = (page_count > 8191) ? 8191 : page_count;
795
796 ret = RING_SPACE(chan, 11);
797 if (ret)
798 return ret;
799
800 BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
801 OUT_RING (chan, upper_32_bits(src_offset));
802 OUT_RING (chan, lower_32_bits(src_offset));
803 OUT_RING (chan, upper_32_bits(dst_offset));
804 OUT_RING (chan, lower_32_bits(dst_offset));
805 OUT_RING (chan, PAGE_SIZE);
806 OUT_RING (chan, PAGE_SIZE);
807 OUT_RING (chan, PAGE_SIZE);
808 OUT_RING (chan, line_count);
809 BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
810 OUT_RING (chan, 0x00000110);
811
812 page_count -= line_count;
813 src_offset += (PAGE_SIZE * line_count);
814 dst_offset += (PAGE_SIZE * line_count);
815 }
816
817 return 0;
818}
819
820static int
821nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
822 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
823{
824 struct nouveau_mem *mem = nouveau_mem(old_reg);
825 u64 src_offset = mem->vma[0].addr;
826 u64 dst_offset = mem->vma[1].addr;
827 u32 page_count = new_reg->num_pages;
828 int ret;
829
830 page_count = new_reg->num_pages;
831 while (page_count) {
832 int line_count = (page_count > 2047) ? 2047 : page_count;
833
834 ret = RING_SPACE(chan, 12);
835 if (ret)
836 return ret;
837
838 BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
839 OUT_RING (chan, upper_32_bits(dst_offset));
840 OUT_RING (chan, lower_32_bits(dst_offset));
841 BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
842 OUT_RING (chan, upper_32_bits(src_offset));
843 OUT_RING (chan, lower_32_bits(src_offset));
844 OUT_RING (chan, PAGE_SIZE); /* src_pitch */
845 OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
846 OUT_RING (chan, PAGE_SIZE); /* line_length */
847 OUT_RING (chan, line_count);
848 BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
849 OUT_RING (chan, 0x00100110);
850
851 page_count -= line_count;
852 src_offset += (PAGE_SIZE * line_count);
853 dst_offset += (PAGE_SIZE * line_count);
854 }
855
856 return 0;
857}
858
859static int
860nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
861 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
862{
863 struct nouveau_mem *mem = nouveau_mem(old_reg);
864 u64 src_offset = mem->vma[0].addr;
865 u64 dst_offset = mem->vma[1].addr;
866 u32 page_count = new_reg->num_pages;
867 int ret;
868
869 page_count = new_reg->num_pages;
870 while (page_count) {
871 int line_count = (page_count > 8191) ? 8191 : page_count;
872
873 ret = RING_SPACE(chan, 11);
874 if (ret)
875 return ret;
876
877 BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
878 OUT_RING (chan, upper_32_bits(src_offset));
879 OUT_RING (chan, lower_32_bits(src_offset));
880 OUT_RING (chan, upper_32_bits(dst_offset));
881 OUT_RING (chan, lower_32_bits(dst_offset));
882 OUT_RING (chan, PAGE_SIZE);
883 OUT_RING (chan, PAGE_SIZE);
884 OUT_RING (chan, PAGE_SIZE);
885 OUT_RING (chan, line_count);
886 BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
887 OUT_RING (chan, 0x00000110);
888
889 page_count -= line_count;
890 src_offset += (PAGE_SIZE * line_count);
891 dst_offset += (PAGE_SIZE * line_count);
892 }
893
894 return 0;
895}
896
897static int
898nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
899 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
900{
901 struct nouveau_mem *mem = nouveau_mem(old_reg);
902 int ret = RING_SPACE(chan, 7);
903 if (ret == 0) {
904 BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
905 OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
906 OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
907 OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
908 OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
909 OUT_RING (chan, 0x00000000 /* COPY */);
910 OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
911 }
912 return ret;
913}
914
915static int
916nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
917 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
918{
919 struct nouveau_mem *mem = nouveau_mem(old_reg);
920 int ret = RING_SPACE(chan, 7);
921 if (ret == 0) {
922 BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
923 OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
924 OUT_RING (chan, upper_32_bits(mem->vma[0].addr));
925 OUT_RING (chan, lower_32_bits(mem->vma[0].addr));
926 OUT_RING (chan, upper_32_bits(mem->vma[1].addr));
927 OUT_RING (chan, lower_32_bits(mem->vma[1].addr));
928 OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
929 }
930 return ret;
931}
932
933static int
934nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
935{
936 int ret = RING_SPACE(chan, 6);
937 if (ret == 0) {
938 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
939 OUT_RING (chan, handle);
940 BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
941 OUT_RING (chan, chan->drm->ntfy.handle);
942 OUT_RING (chan, chan->vram.handle);
943 OUT_RING (chan, chan->vram.handle);
944 }
945
946 return ret;
947}
948
949static int
950nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
951 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
952{
953 struct nouveau_mem *mem = nouveau_mem(old_reg);
954 u64 length = (new_reg->num_pages << PAGE_SHIFT);
955 u64 src_offset = mem->vma[0].addr;
956 u64 dst_offset = mem->vma[1].addr;
957 int src_tiled = !!mem->kind;
958 int dst_tiled = !!nouveau_mem(new_reg)->kind;
959 int ret;
960
961 while (length) {
962 u32 amount, stride, height;
963
964 ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
965 if (ret)
966 return ret;
967
968 amount = min(length, (u64)(4 * 1024 * 1024));
969 stride = 16 * 4;
970 height = amount / stride;
971
972 if (src_tiled) {
973 BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
974 OUT_RING (chan, 0);
975 OUT_RING (chan, 0);
976 OUT_RING (chan, stride);
977 OUT_RING (chan, height);
978 OUT_RING (chan, 1);
979 OUT_RING (chan, 0);
980 OUT_RING (chan, 0);
981 } else {
982 BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
983 OUT_RING (chan, 1);
984 }
985 if (dst_tiled) {
986 BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
987 OUT_RING (chan, 0);
988 OUT_RING (chan, 0);
989 OUT_RING (chan, stride);
990 OUT_RING (chan, height);
991 OUT_RING (chan, 1);
992 OUT_RING (chan, 0);
993 OUT_RING (chan, 0);
994 } else {
995 BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
996 OUT_RING (chan, 1);
997 }
998
999 BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
1000 OUT_RING (chan, upper_32_bits(src_offset));
1001 OUT_RING (chan, upper_32_bits(dst_offset));
1002 BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
1003 OUT_RING (chan, lower_32_bits(src_offset));
1004 OUT_RING (chan, lower_32_bits(dst_offset));
1005 OUT_RING (chan, stride);
1006 OUT_RING (chan, stride);
1007 OUT_RING (chan, stride);
1008 OUT_RING (chan, height);
1009 OUT_RING (chan, 0x00000101);
1010 OUT_RING (chan, 0x00000000);
1011 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
1012 OUT_RING (chan, 0);
1013
1014 length -= amount;
1015 src_offset += amount;
1016 dst_offset += amount;
1017 }
1018
1019 return 0;
1020}
1021
1022static int
1023nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
1024{
1025 int ret = RING_SPACE(chan, 4);
1026 if (ret == 0) {
1027 BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
1028 OUT_RING (chan, handle);
1029 BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
1030 OUT_RING (chan, chan->drm->ntfy.handle);
1031 }
1032
1033 return ret;
1034}
1035
1036static inline uint32_t
1037nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
1038 struct nouveau_channel *chan, struct ttm_mem_reg *reg)
1039{
1040 if (reg->mem_type == TTM_PL_TT)
1041 return NvDmaTT;
1042 return chan->vram.handle;
1043}
1044
1045static int
1046nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
1047 struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
1048{
1049 u32 src_offset = old_reg->start << PAGE_SHIFT;
1050 u32 dst_offset = new_reg->start << PAGE_SHIFT;
1051 u32 page_count = new_reg->num_pages;
1052 int ret;
1053
1054 ret = RING_SPACE(chan, 3);
1055 if (ret)
1056 return ret;
1057
1058 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
1059 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg));
1060 OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg));
1061
1062 page_count = new_reg->num_pages;
1063 while (page_count) {
1064 int line_count = (page_count > 2047) ? 2047 : page_count;
1065
1066 ret = RING_SPACE(chan, 11);
1067 if (ret)
1068 return ret;
1069
1070 BEGIN_NV04(chan, NvSubCopy,
1071 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
1072 OUT_RING (chan, src_offset);
1073 OUT_RING (chan, dst_offset);
1074 OUT_RING (chan, PAGE_SIZE); /* src_pitch */
1075 OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
1076 OUT_RING (chan, PAGE_SIZE); /* line_length */
1077 OUT_RING (chan, line_count);
1078 OUT_RING (chan, 0x00000101);
1079 OUT_RING (chan, 0x00000000);
1080 BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
1081 OUT_RING (chan, 0);
1082
1083 page_count -= line_count;
1084 src_offset += (PAGE_SIZE * line_count);
1085 dst_offset += (PAGE_SIZE * line_count);
1086 }
1087
1088 return 0;
1089}
1090
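/* Allocate and map two temporary GPU virtual address ranges covering the
 * old and new backing storage, so the copy engine can address both sides
 * of the move. Both VMAs are attached to the old node and torn down once
 * TTM destroys it.
 */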
1091static int
1092nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
1093 struct ttm_mem_reg *reg)
1094{
1095 struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
1096 struct nouveau_mem *new_mem = nouveau_mem(reg);
1097 struct nvif_vmm *vmm = &drm->client.vmm.vmm;
1098 int ret;
1099
1100 ret = nvif_vmm_get(vmm, LAZY, false, old_mem->mem.page, 0,
1101 old_mem->mem.size, &old_mem->vma[0]);
1102 if (ret)
1103 return ret;
1104
1105 ret = nvif_vmm_get(vmm, LAZY, false, new_mem->mem.page, 0,
1106 new_mem->mem.size, &old_mem->vma[1]);
1107 if (ret)
1108 goto done;
1109
1110 ret = nouveau_mem_map(old_mem, vmm, &old_mem->vma[0]);
1111 if (ret)
1112 goto done;
1113
1114 ret = nouveau_mem_map(new_mem, vmm, &old_mem->vma[1]);
1115done:
1116 if (ret) {
1117 nvif_vmm_put(vmm, &old_mem->vma[1]);
1118 nvif_vmm_put(vmm, &old_mem->vma[0]);
1119 }
	return ret;
1121}
1122
1123static int
1124nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
1125 bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1126{
1127 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1128 struct nouveau_channel *chan = drm->ttm.chan;
1129 struct nouveau_cli *cli = (void *)chan->user.client;
1130 struct nouveau_fence *fence;
1131 int ret;
1132
1133 /* create temporary vmas for the transfer and attach them to the
1134 * old nvkm_mem node, these will get cleaned up after ttm has
1135 * destroyed the ttm_mem_reg
1136 */
1137 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
1138 ret = nouveau_bo_move_prep(drm, bo, new_reg);
1139 if (ret)
1140 return ret;
1141 }
1142
1143 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
1144 ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
1145 if (ret == 0) {
1146 ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
1147 if (ret == 0) {
1148 ret = nouveau_fence_new(chan, false, &fence);
1149 if (ret == 0) {
1150 ret = ttm_bo_move_accel_cleanup(bo,
1151 &fence->base,
1152 evict,
1153 new_reg);
1154 nouveau_fence_unref(&fence);
1155 }
1156 }
1157 }
1158 mutex_unlock(&cli->mutex);
1159 return ret;
1160}
1161
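/* Pick the copy method used for buffer migration: walk the class table
 * from newest to oldest, allocate the first object the channel accepts,
 * and fall back to CPU copies if none works.
 */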
1162void
1163nouveau_bo_move_init(struct nouveau_drm *drm)
1164{
1165 static const struct {
1166 const char *name;
1167 int engine;
1168 s32 oclass;
1169 int (*exec)(struct nouveau_channel *,
1170 struct ttm_buffer_object *,
1171 struct ttm_mem_reg *, struct ttm_mem_reg *);
1172 int (*init)(struct nouveau_channel *, u32 handle);
1173 } _methods[] = {
1174 { "COPY", 4, 0xc5b5, nve0_bo_move_copy, nve0_bo_move_init },
1175 { "GRCE", 0, 0xc5b5, nve0_bo_move_copy, nvc0_bo_move_init },
1176 { "COPY", 4, 0xc3b5, nve0_bo_move_copy, nve0_bo_move_init },
1177 { "GRCE", 0, 0xc3b5, nve0_bo_move_copy, nvc0_bo_move_init },
1178 { "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
1179 { "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
1180 { "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
1181 { "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1182 { "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
1183 { "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1184 { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
1185 { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1186 { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
1187 { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
1188 { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
1190 { "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
1191 { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
1192 { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
1195 }, *mthd = _methods;
1196 const char *name = "CPU";
1197 int ret;
1198
1199 do {
1200 struct nouveau_channel *chan;
1201
1202 if (mthd->engine)
1203 chan = drm->cechan;
1204 else
1205 chan = drm->channel;
1206 if (chan == NULL)
1207 continue;
1208
1209 ret = nvif_object_init(&chan->user,
1210 mthd->oclass | (mthd->engine << 16),
1211 mthd->oclass, NULL, 0,
1212 &drm->ttm.copy);
1213 if (ret == 0) {
1214 ret = mthd->init(chan, drm->ttm.copy.handle);
1215 if (ret) {
1216 nvif_object_fini(&drm->ttm.copy);
1217 continue;
1218 }
1219
1220 drm->ttm.move = mthd->exec;
1221 drm->ttm.chan = chan;
1222 name = mthd->name;
1223 break;
1224 }
1225 } while ((++mthd)->exec);
1226
1227 NV_INFO(drm, "MM: using %s for buffer copies\n", name);
1228}
1229
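/* VRAM -> system move: bounce the buffer through a GART placement first
 * so the hardware copy can be used, then let TTM finish the move.
 */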
1230static int
1231nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
1232 bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1233{
1234 struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
1235 struct ttm_place placement_memtype = {
1236 .fpfn = 0,
1237 .lpfn = 0,
1238 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1239 };
1240 struct ttm_placement placement;
1241 struct ttm_mem_reg tmp_reg;
1242 int ret;
1243
1244 placement.num_placement = placement.num_busy_placement = 1;
1245 placement.placement = placement.busy_placement = &placement_memtype;
1246
1247 tmp_reg = *new_reg;
1248 tmp_reg.mm_node = NULL;
1249 ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
1250 if (ret)
1251 return ret;
1252
1253 ret = ttm_tt_bind(bo->ttm, &tmp_reg, &ctx);
1254 if (ret)
1255 goto out;
1256
1257 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
1258 if (ret)
1259 goto out;
1260
1261 ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
1262out:
1263 ttm_bo_mem_put(bo, &tmp_reg);
1264 return ret;
1265}
1266
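/* System -> VRAM move: the reverse of the above, binding the buffer into
 * a GART placement with TTM first and then copying into VRAM with the
 * hardware.
 */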
1267static int
1268nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
1269 bool no_wait_gpu, struct ttm_mem_reg *new_reg)
1270{
1271 struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
1272 struct ttm_place placement_memtype = {
1273 .fpfn = 0,
1274 .lpfn = 0,
1275 .flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
1276 };
1277 struct ttm_placement placement;
1278 struct ttm_mem_reg tmp_reg;
1279 int ret;
1280
1281 placement.num_placement = placement.num_busy_placement = 1;
1282 placement.placement = placement.busy_placement = &placement_memtype;
1283
1284 tmp_reg = *new_reg;
1285 tmp_reg.mm_node = NULL;
1286 ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
1287 if (ret)
1288 return ret;
1289
1290 ret = ttm_bo_move_ttm(bo, &ctx, &tmp_reg);
1291 if (ret)
1292 goto out;
1293
1294 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);
1297
1298out:
1299 ttm_bo_mem_put(bo, &tmp_reg);
1300 return ret;
1301}
1302
1303static void
1304nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
1305 struct ttm_mem_reg *new_reg)
1306{
1307 struct nouveau_mem *mem = new_reg ? nouveau_mem(new_reg) : NULL;
1308 struct nouveau_bo *nvbo = nouveau_bo(bo);
1309 struct nouveau_vma *vma;
1310
1311 /* ttm can now (stupidly) pass the driver bos it didn't create... */
1312 if (bo->destroy != nouveau_bo_del_ttm)
1313 return;
1314
1315 if (mem && new_reg->mem_type != TTM_PL_SYSTEM &&
1316 mem->mem.page == nvbo->page) {
1317 list_for_each_entry(vma, &nvbo->vma_list, head) {
1318 nouveau_vma_map(vma, mem);
1319 }
1320 } else {
1321 list_for_each_entry(vma, &nvbo->vma_list, head) {
1322 WARN_ON(ttm_bo_wait(bo, false, false));
1323 nouveau_vma_unmap(vma);
1324 }
1325 }
1326}
1327
1328static int
1329nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
1330 struct nouveau_drm_tile **new_tile)
1331{
1332 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1333 struct drm_device *dev = drm->dev;
1334 struct nouveau_bo *nvbo = nouveau_bo(bo);
1335 u64 offset = new_reg->start << PAGE_SHIFT;
1336
1337 *new_tile = NULL;
1338 if (new_reg->mem_type != TTM_PL_VRAM)
1339 return 0;
1340
1341 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
1342 *new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
1343 nvbo->mode, nvbo->zeta);
1344 }
1345
1346 return 0;
1347}
1348
1349static void
1350nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
1351 struct nouveau_drm_tile *new_tile,
1352 struct nouveau_drm_tile **old_tile)
1353{
1354 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1355 struct drm_device *dev = drm->dev;
1356 struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
1357
1358 nv10_bo_put_tile_region(dev, *old_tile, fence);
1359 *old_tile = new_tile;
1360}
1361
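/* Main TTM move callback: handle pre-TESLA tiling state, try the
 * hardware copy paths, and fall back to a CPU memcpy if they fail.
 */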
1362static int
1363nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
1364 struct ttm_operation_ctx *ctx,
1365 struct ttm_mem_reg *new_reg)
1366{
1367 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1368 struct nouveau_bo *nvbo = nouveau_bo(bo);
1369 struct ttm_mem_reg *old_reg = &bo->mem;
1370 struct nouveau_drm_tile *new_tile = NULL;
1371 int ret = 0;
1372
1373 ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
1374 if (ret)
1375 return ret;
1376
1377 if (nvbo->pin_refcnt)
1378 NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
1379
1380 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1381 ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
1382 if (ret)
1383 return ret;
1384 }
1385
1386 /* Fake bo copy. */
1387 if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
1388 BUG_ON(bo->mem.mm_node != NULL);
1389 bo->mem = *new_reg;
1390 new_reg->mm_node = NULL;
1391 goto out;
1392 }
1393
1394 /* Hardware assisted copy. */
1395 if (drm->ttm.move) {
1396 if (new_reg->mem_type == TTM_PL_SYSTEM)
1397 ret = nouveau_bo_move_flipd(bo, evict,
1398 ctx->interruptible,
1399 ctx->no_wait_gpu, new_reg);
1400 else if (old_reg->mem_type == TTM_PL_SYSTEM)
1401 ret = nouveau_bo_move_flips(bo, evict,
1402 ctx->interruptible,
1403 ctx->no_wait_gpu, new_reg);
1404 else
1405 ret = nouveau_bo_move_m2mf(bo, evict,
1406 ctx->interruptible,
1407 ctx->no_wait_gpu, new_reg);
1408 if (!ret)
1409 goto out;
1410 }
1411
1412 /* Fallback to software copy. */
1413 ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
1414 if (ret == 0)
1415 ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
1416
1417out:
1418 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1419 if (ret)
1420 nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
1421 else
1422 nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
1423 }
1424
1425 return ret;
1426}
1427
1428static int
1429nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
1430{
1431 struct nouveau_bo *nvbo = nouveau_bo(bo);
1432
1433 return drm_vma_node_verify_access(&nvbo->bo.base.vma_node,
1434 filp->private_data);
1435}
1436
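/* Fill in the bus address information TTM needs to CPU-map a buffer.
 * Tiled memory on NV50 and newer is mapped through a handle obtained
 * from the nvif memory object rather than a fixed linear BAR offset.
 */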
1437static int
1438nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
1439{
1440 struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
1441 struct nouveau_drm *drm = nouveau_bdev(bdev);
1442 struct nvkm_device *device = nvxx_device(&drm->client.device);
1443 struct nouveau_mem *mem = nouveau_mem(reg);
1444
1445 reg->bus.addr = NULL;
1446 reg->bus.offset = 0;
1447 reg->bus.size = reg->num_pages << PAGE_SHIFT;
1448 reg->bus.base = 0;
1449 reg->bus.is_iomem = false;
1450 if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
1451 return -EINVAL;
1452 switch (reg->mem_type) {
1453 case TTM_PL_SYSTEM:
1454 /* System memory */
1455 return 0;
1456 case TTM_PL_TT:
1457#if IS_ENABLED(CONFIG_AGP)
1458 if (drm->agp.bridge) {
1459 reg->bus.offset = reg->start << PAGE_SHIFT;
1460 reg->bus.base = drm->agp.base;
1461 reg->bus.is_iomem = !drm->agp.cma;
1462 }
1463#endif
1464 if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || !mem->kind)
1465 /* untiled */
1466 break;
1467 /* fall through - tiled memory */
1468 case TTM_PL_VRAM:
1469 reg->bus.offset = reg->start << PAGE_SHIFT;
1470 reg->bus.base = device->func->resource_addr(device, 1);
1471 reg->bus.is_iomem = true;
1472 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
1473 union {
1474 struct nv50_mem_map_v0 nv50;
1475 struct gf100_mem_map_v0 gf100;
1476 } args;
1477 u64 handle, length;
1478 u32 argc = 0;
1479 int ret;
1480
1481 switch (mem->mem.object.oclass) {
1482 case NVIF_CLASS_MEM_NV50:
1483 args.nv50.version = 0;
1484 args.nv50.ro = 0;
1485 args.nv50.kind = mem->kind;
1486 args.nv50.comp = mem->comp;
1487 argc = sizeof(args.nv50);
1488 break;
1489 case NVIF_CLASS_MEM_GF100:
1490 args.gf100.version = 0;
1491 args.gf100.ro = 0;
1492 args.gf100.kind = mem->kind;
1493 argc = sizeof(args.gf100);
1494 break;
1495 default:
1496 WARN_ON(1);
1497 break;
1498 }
1499
1500 ret = nvif_object_map_handle(&mem->mem.object,
1501 &args, argc,
1502 &handle, &length);
1503 if (ret != 1)
1504 return ret ? ret : -EINVAL;
1505
1506 reg->bus.base = 0;
1507 reg->bus.offset = handle;
1508 }
1509 break;
1510 default:
1511 return -EINVAL;
1512 }
1513 return 0;
1514}
1515
1516static void
1517nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
1518{
1519 struct nouveau_drm *drm = nouveau_bdev(bdev);
1520 struct nouveau_mem *mem = nouveau_mem(reg);
1521
1522 if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
1523 switch (reg->mem_type) {
1524 case TTM_PL_TT:
1525 if (mem->kind)
1526 nvif_object_unmap_handle(&mem->mem.object);
1527 break;
1528 case TTM_PL_VRAM:
1529 nvif_object_unmap_handle(&mem->mem.object);
1530 break;
1531 default:
1532 break;
1533 }
1534 }
1535}
1536
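/* CPU fault handler hook: make sure the buffer is somewhere the CPU can
 * actually reach, i.e. move tiled buffers on TESLA and newer out of plain
 * system memory into GART, and restrict VRAM buffers on older boards to
 * the mappable BAR window.
 */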
1537static int
1538nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
1539{
1540 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
1541 struct nouveau_bo *nvbo = nouveau_bo(bo);
1542 struct nvkm_device *device = nvxx_device(&drm->client.device);
1543 u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
1544 int i, ret;
1545
1546 /* as long as the bo isn't in vram, and isn't tiled, we've got
1547 * nothing to do here.
1548 */
1549 if (bo->mem.mem_type != TTM_PL_VRAM) {
1550 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
1551 !nvbo->kind)
1552 return 0;
1553
1554 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
1555 nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
1556
1557 ret = nouveau_bo_validate(nvbo, false, false);
1558 if (ret)
1559 return ret;
1560 }
1561 return 0;
1562 }
1563
1564 /* make sure bo is in mappable vram */
1565 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
1566 bo->mem.start + bo->mem.num_pages < mappable)
1567 return 0;
1568
1569 for (i = 0; i < nvbo->placement.num_placement; ++i) {
1570 nvbo->placements[i].fpfn = 0;
1571 nvbo->placements[i].lpfn = mappable;
1572 }
1573
1574 for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
1575 nvbo->busy_placements[i].fpfn = 0;
1576 nvbo->busy_placements[i].lpfn = mappable;
1577 }
1578
1579 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
1580 return nouveau_bo_validate(nvbo, false, false);
1581}
1582
1583static int
1584nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
1585{
1586 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1587 struct nouveau_drm *drm;
1588 struct device *dev;
1589 unsigned i;
1590 int r;
1591 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1592
1593 if (ttm->state != tt_unpopulated)
1594 return 0;
1595
1596 if (slave && ttm->sg) {
1597 /* make userspace faulting work */
1598 drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
1599 ttm_dma->dma_address, ttm->num_pages);
1600 ttm->state = tt_unbound;
1601 return 0;
1602 }
1603
1604 drm = nouveau_bdev(ttm->bdev);
1605 dev = drm->dev->dev;
1606
1607#if IS_ENABLED(CONFIG_AGP)
1608 if (drm->agp.bridge) {
1609 return ttm_agp_tt_populate(ttm, ctx);
1610 }
1611#endif
1612
1613#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
1614 if (swiotlb_nr_tbl()) {
1615 return ttm_dma_populate((void *)ttm, dev, ctx);
1616 }
1617#endif
1618
1619 r = ttm_pool_populate(ttm, ctx);
1620 if (r) {
1621 return r;
1622 }
1623
1624 for (i = 0; i < ttm->num_pages; i++) {
1625 dma_addr_t addr;
1626
1627 addr = dma_map_page(dev, ttm->pages[i], 0, PAGE_SIZE,
1628 DMA_BIDIRECTIONAL);
1629
1630 if (dma_mapping_error(dev, addr)) {
1631 while (i--) {
1632 dma_unmap_page(dev, ttm_dma->dma_address[i],
1633 PAGE_SIZE, DMA_BIDIRECTIONAL);
1634 ttm_dma->dma_address[i] = 0;
1635 }
1636 ttm_pool_unpopulate(ttm);
1637 return -EFAULT;
1638 }
1639
1640 ttm_dma->dma_address[i] = addr;
1641 }
1642 return 0;
1643}
1644
1645static void
1646nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1647{
1648 struct ttm_dma_tt *ttm_dma = (void *)ttm;
1649 struct nouveau_drm *drm;
1650 struct device *dev;
1651 unsigned i;
1652 bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
1653
1654 if (slave)
1655 return;
1656
1657 drm = nouveau_bdev(ttm->bdev);
1658 dev = drm->dev->dev;
1659
1660#if IS_ENABLED(CONFIG_AGP)
1661 if (drm->agp.bridge) {
1662 ttm_agp_tt_unpopulate(ttm);
1663 return;
1664 }
1665#endif
1666
1667#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
1668 if (swiotlb_nr_tbl()) {
1669 ttm_dma_unpopulate((void *)ttm, dev);
1670 return;
1671 }
1672#endif
1673
1674 for (i = 0; i < ttm->num_pages; i++) {
1675 if (ttm_dma->dma_address[i]) {
1676 dma_unmap_page(dev, ttm_dma->dma_address[i], PAGE_SIZE,
1677 DMA_BIDIRECTIONAL);
1678 }
1679 }
1680
1681 ttm_pool_unpopulate(ttm);
1682}
1683
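/* Attach a fence to the buffer's reservation object, exclusively for
 * writes or shared for reads.
 */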
1684void
1685nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
1686{
1687 struct dma_resv *resv = nvbo->bo.base.resv;
1688
1689 if (exclusive)
1690 dma_resv_add_excl_fence(resv, &fence->base);
1691 else if (fence)
1692 dma_resv_add_shared_fence(resv, &fence->base);
1693}
1694
1695struct ttm_bo_driver nouveau_bo_driver = {
1696 .ttm_tt_create = &nouveau_ttm_tt_create,
1697 .ttm_tt_populate = &nouveau_ttm_tt_populate,
1698 .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
1699 .invalidate_caches = nouveau_bo_invalidate_caches,
1700 .init_mem_type = nouveau_bo_init_mem_type,
1701 .eviction_valuable = ttm_bo_eviction_valuable,
1702 .evict_flags = nouveau_bo_evict_flags,
1703 .move_notify = nouveau_bo_move_ntfy,
1704 .move = nouveau_bo_move,
1705 .verify_access = nouveau_bo_verify_access,
1706 .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
1707 .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
1708 .io_mem_free = &nouveau_ttm_io_mem_free,
1709};