#include <linux/pagemap.h>
#include <linux/slab.h>

#include "nouveau_drm.h"
#include "nouveau_ttm.h"

struct nouveau_sgdma_be {
        /* this has to be the first field so populate/unpopulate in
         * nouveau_bo.c work properly, otherwise they would have to move here
         */
        struct ttm_dma_tt ttm;
        struct nvkm_mem *node;
};

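/*
 * TTM backend destroy hook: finalise the DMA-aware ttm_tt and free the
 * backend object allocated in nouveau_sgdma_create_ttm().
 */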
static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

        if (ttm) {
                ttm_dma_tt_fini(&nvbe->ttm);
                kfree(nvbe);
        }
}

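/*
 * Pre-Tesla (NV04-NV4x) bind: record where the backing pages live (the
 * sg table for imported buffers, otherwise the ttm_dma_tt DMA address
 * array) and map them into the GPU virtual address space immediately.
 */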
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct nvkm_mem *node = mem->mm_node;

        if (ttm->sg) {
                node->sg = ttm->sg;
                node->pages = NULL;
        } else {
                node->sg = NULL;
                node->pages = nvbe->ttm.dma_address;
        }
        /* convert the region size from CPU pages to 4 KiB GPU pages */
        node->size = (mem->num_pages << PAGE_SHIFT) >> 12;

        nvkm_vm_map(&node->vma[0], node);
        nvbe->node = node;
        return 0;
}

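/*
 * Pre-Tesla unbind: tear down the GPU virtual mapping created by
 * nv04_sgdma_bind().
 */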
static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        nvkm_vm_unmap(&nvbe->node->vma[0]);
        return 0;
}

static struct ttm_backend_func nv04_sgdma_backend = {
        .bind = nv04_sgdma_bind,
        .unbind = nv04_sgdma_unbind,
        .destroy = nouveau_sgdma_destroy
};

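/*
 * Tesla-and-later (NV50+) bind: only record the location of the backing
 * pages here; the actual GPU mapping and unmapping are done from
 * move_notify().
 */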
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct nvkm_mem *node = mem->mm_node;

        /* noop: bound in move_notify() */
        if (ttm->sg) {
                node->sg = ttm->sg;
                node->pages = NULL;
        } else {
                node->sg = NULL;
                node->pages = nvbe->ttm.dma_address;
        }
        /* convert the region size from CPU pages to 4 KiB GPU pages */
        node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
        return 0;
}

static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
        /* noop: unbound in move_notify() */
        return 0;
}

static struct ttm_backend_func nv50_sgdma_backend = {
        .bind = nv50_sgdma_bind,
        .unbind = nv50_sgdma_unbind,
        .destroy = nouveau_sgdma_destroy
};

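/*
 * Allocate the per-object backend and select the bind/unbind
 * implementation by chipset generation: pre-Tesla parts map at bind
 * time, Tesla and later defer mapping to move_notify().
 */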
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
                         unsigned long size, uint32_t page_flags,
                         struct page *dummy_read_page)
{
        struct nouveau_drm *drm = nouveau_bdev(bdev);
        struct nouveau_sgdma_be *nvbe;

        nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
        if (!nvbe)
                return NULL;

        if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA)
                nvbe->ttm.ttm.func = &nv04_sgdma_backend;
        else
                nvbe->ttm.ttm.func = &nv50_sgdma_backend;

        if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page))
                /*
                 * A failing ttm_dma_tt_init() will call ttm_tt_destroy()
                 * and thus our nouveau_sgdma_destroy() hook, so we don't need
                 * to free nvbe here.
                 */
                return NULL;
        return &nvbe->ttm.ttm;
}
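
/*
 * Updated revision of the same file, after TTM's rework removed
 * struct ttm_backend_func: bind/unbind/destroy become functions the
 * driver calls directly, ttm_dma_tt is replaced by a plain ttm_tt,
 * and nvkm_mem by nouveau_mem.
 */
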
// SPDX-License-Identifier: MIT
#include <linux/pagemap.h>
#include <linux/slab.h>

#include "nouveau_drv.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"
#include "nouveau_bo.h"

struct nouveau_sgdma_be {
        /* this has to be the first field so populate/unpopulate in
         * nouveau_bo.c work properly, otherwise they would have to move here
         */
        struct ttm_tt ttm;
        struct nouveau_mem *mem;
};

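/*
 * Destroy hook for the ttm_device API: unbind in case a mapping is
 * still live, run the common TTM teardown, then free the backend.
 */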
void
nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

        if (ttm) {
                nouveau_sgdma_unbind(bdev, ttm);
                ttm_tt_destroy_common(bdev, ttm);
                ttm_tt_fini(&nvbe->ttm);
                kfree(nvbe);
        }
}

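/*
 * Bind is idempotent: a second call while nvbe->mem is set is a no-op.
 * The TT's pages are turned into a nouveau_mem host allocation; on
 * pre-Tesla chipsets the memory is also mapped into the client VMM
 * right away.
 */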
int
nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
        struct nouveau_drm *drm = nouveau_bdev(bdev);
        struct nouveau_mem *mem = nouveau_mem(reg);
        int ret;

        if (nvbe->mem)
                return 0;

        ret = nouveau_mem_host(reg, &nvbe->ttm);
        if (ret)
                return ret;

        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
                ret = nouveau_mem_map(mem, &mem->cli->vmm.vmm, &mem->vma[0]);
                if (ret) {
                        nouveau_mem_fini(mem);
                        return ret;
                }
        }

        nvbe->mem = mem;
        return 0;
}

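/*
 * Release the host allocation (and any VMM mapping) taken at bind time;
 * harmless when nothing is currently bound.
 */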
void
nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

        if (nvbe->mem) {
                nouveau_mem_fini(nvbe->mem);
                nvbe->mem = NULL;
        }
}

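/*
 * Allocate a scatter/gather ttm_tt for the BO, picking a CPU caching
 * mode to suit it: uncached when coherency is forced, write-combined
 * behind an AGP bridge, cached otherwise.
 */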
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_sgdma_be *nvbe;
        enum ttm_caching caching;

        if (nvbo->force_coherent)
                caching = ttm_uncached;
        else if (drm->agp.bridge)
                caching = ttm_write_combined;
        else
                caching = ttm_cached;

        nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
        if (!nvbe)
                return NULL;

        if (ttm_sg_tt_init(&nvbe->ttm, bo, page_flags, caching)) {
                kfree(nvbe);
                return NULL;
        }
        return &nvbe->ttm;
}
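
/*
 * Usage sketch (not part of this file): these hooks are plugged into
 * TTM through the driver's ttm_device_funcs table, roughly as below;
 * the other fields are elided and details vary by kernel version, so
 * treat this as an illustration only.
 *
 *      struct ttm_device_funcs nouveau_bo_driver = {
 *              .ttm_tt_create = &nouveau_sgdma_create_ttm,
 *              ...
 *      };
 *
 * nouveau_sgdma_bind()/unbind()/destroy() are then invoked from the
 * driver's populate, move and destroy paths in nouveau_bo.c.
 */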