/* SPDX-License-Identifier: MIT */
#ifndef __NVKM_MMU_H__
#define __NVKM_MMU_H__
#include <core/subdev.h>
#include <subdev/gsp.h>

struct nvkm_vma {
	struct list_head head;
	struct rb_node tree;
	u64 addr; /* Address of region within the VMM's address space. */
	u64 size:50; /* Size of region, in bytes. */
	bool mapref:1; /* PTs (de)referenced on (un)map (vs pre-allocated). */
	bool sparse:1; /* Unmapped PDEs/PTEs will not trigger MMU faults. */
#define NVKM_VMA_PAGE_NONE 7
	u8 page:3; /* Requested page type (index, or NONE for automatic). */
	u8 refd:3; /* Current page type (index, or NONE for unreferenced). */
	bool used:1; /* Region allocated. */
	bool part:1; /* Region was split from an allocated region by map(). */
	bool busy:1; /* Region busy (for temporarily preventing user access). */
	bool mapped:1; /* Region contains valid pages. */
	bool no_comp:1; /* Force no memory compression. */
	struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
	struct nvkm_tags *tags; /* Compression tag reference. */
};
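
/*
 * Illustrative sketch (not part of the original header): "page" and "refd"
 * index the MMU's page-size table, with NVKM_VMA_PAGE_NONE as the sentinel
 * value.  A hypothetical helper to test whether a VMA currently holds page
 * table references might look like this:
 *
 *	static inline bool
 *	nvkm_vma_is_referenced(const struct nvkm_vma *vma)
 *	{
 *		return vma->refd != NVKM_VMA_PAGE_NONE;
 *	}
 */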

struct nvkm_vmm {
	const struct nvkm_vmm_func *func;
	struct nvkm_mmu *mmu;
	const char *name;
	u32 debug;
	struct kref kref;

	struct {
		struct mutex vmm;
		struct mutex ref;
		struct mutex map;
	} mutex;

	u64 start; /* First address managed by this VMM. */
	u64 limit; /* Upper bound of the address space. */
	struct {
		struct {
			u64 addr;
			u64 size;
		} p;
		struct {
			u64 addr;
			u64 size;
		} n;
		bool raw;
	} managed;

	struct nvkm_vmm_pt *pd; /* Root page directory. */
	struct list_head join; /* Instance blocks joined to this VMM. */

	struct list_head list; /* All regions, in address order. */
	struct rb_root free; /* Free regions, sorted by size. */
	struct rb_root root; /* Regions indexed by address, for lookup. */

	bool bootstrapped; /* Set once nvkm_vmm_boot() has completed. */
	atomic_t engref[NVKM_SUBDEV_NR];

	dma_addr_t null;
	void *nullp;

	bool replay;

	struct {
		u64 bar2_pdb;

		struct nvkm_gsp_client client;
		struct nvkm_gsp_device device;
		struct nvkm_gsp_object object;

		struct nvkm_vma *rsvd;
	} rm;
};
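
/*
 * Lifetime sketch (illustrative): a VMM is reference-counted through the
 * "kref" member above, using the nvkm_vmm_ref()/nvkm_vmm_unref() pair
 * declared below.  Assumes "vmm" is a valid pointer:
 *
 *	struct nvkm_vmm *ref = nvkm_vmm_ref(vmm);	// take a reference
 *	...
 *	nvkm_vmm_unref(&ref);	// drop it; "ref" becomes NULL
 */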

int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
		 struct lock_class_key *, const char *name, struct nvkm_vmm **);
struct nvkm_vmm *nvkm_vmm_ref(struct nvkm_vmm *);
void nvkm_vmm_unref(struct nvkm_vmm **);
int nvkm_vmm_boot(struct nvkm_vmm *);
int nvkm_vmm_join(struct nvkm_vmm *, struct nvkm_memory *inst);
void nvkm_vmm_part(struct nvkm_vmm *, struct nvkm_memory *inst);
int nvkm_vmm_get(struct nvkm_vmm *, u8 page, u64 size, struct nvkm_vma **);
void nvkm_vmm_put(struct nvkm_vmm *, struct nvkm_vma **);
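
/*
 * Allocation sketch (illustrative): reserve a region of the VMM's address
 * space with nvkm_vmm_get(), release it with nvkm_vmm_put().  Assumes a
 * valid "vmm"; the page argument (0) and 64KiB size are arbitrary example
 * values.
 *
 *	struct nvkm_vma *vma = NULL;
 *	int ret = nvkm_vmm_get(vmm, 0, 0x10000, &vma);
 *	if (ret)
 *		return ret;
 *	...
 *	nvkm_vmm_put(vmm, &vma);	// also resets "vma" to NULL
 */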

struct nvkm_vmm_map {
	struct nvkm_memory *memory; /* Memory being mapped. */
	u64 offset;

	/* One of the following describes the backing pages. */
	struct nvkm_mm_node *mem;
	struct scatterlist *sgl;
	dma_addr_t *dma;
	u64 *pfn;
	u64 off;

	const struct nvkm_vmm_page *page;

	bool no_comp;
	struct nvkm_tags *tags;
	/* PTE encoding state, filled by the backend. */
	u64 next;
	u64 type;
	u64 ctag;
};

int nvkm_vmm_map(struct nvkm_vmm *, struct nvkm_vma *, void *argv, u32 argc,
		 struct nvkm_vmm_map *);
void nvkm_vmm_unmap(struct nvkm_vmm *, struct nvkm_vma *);
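
/*
 * Mapping sketch (illustrative): bind backing memory into a previously
 * allocated VMA.  Assumes valid "vmm", "vma" and "memory" pointers and a
 * DMA-address array "dma_addrs" (hypothetical) describing the backing
 * pages; NULL/0 stand in for the backend-specific argument buffer a real
 * caller would pass through as argv/argc.
 *
 *	struct nvkm_vmm_map map = {
 *		.memory = memory,
 *		.offset = 0,
 *		.dma = dma_addrs,
 *	};
 *	int ret = nvkm_vmm_map(vmm, vma, NULL, 0, &map);
 *	...
 *	nvkm_vmm_unmap(vmm, vma);
 */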

struct nvkm_memory *nvkm_umem_search(struct nvkm_client *, u64);
struct nvkm_vmm *nvkm_uvmm_search(struct nvkm_client *, u64 handle);

struct nvkm_mmu {
	const struct nvkm_mmu_func *func;
	struct nvkm_subdev subdev;

	u8 dma_bits;

	int heap_nr;
	struct {
#define NVKM_MEM_VRAM 0x01
#define NVKM_MEM_HOST 0x02
#define NVKM_MEM_COMP 0x04
#define NVKM_MEM_DISP 0x08
		u8 type;
		u64 size;
	} heap[4];

	int type_nr;
	struct {
#define NVKM_MEM_KIND 0x10
#define NVKM_MEM_MAPPABLE 0x20
#define NVKM_MEM_COHERENT 0x40
#define NVKM_MEM_UNCACHED 0x80
		u8 type;
		u8 heap;
	} type[16];

	struct nvkm_vmm *vmm;

	struct {
		struct mutex mutex;
		struct list_head list;
	} ptc, ptp;

	struct mutex mutex; /* serialises mmu invalidations */

	struct nvkm_device_oclass user;
};
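
/*
 * Flag sketch (illustrative): the NVKM_MEM_* bits above are OR'd together
 * in mmu->type[i].type.  A hypothetical lookup for a host-coherent memory
 * type index might look like this ("mmu" assumed valid):
 *
 *	int i;
 *	for (i = 0; i < mmu->type_nr; i++) {
 *		const u8 type = mmu->type[i].type;
 *		if ((type & NVKM_MEM_HOST) && (type & NVKM_MEM_COHERENT))
 *			return i;
 *	}
 *	return -EINVAL;
 */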

int nv04_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int nv41_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int nv44_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int nv50_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int g84_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int mcp77_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gf100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gk104_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gk20a_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gm200_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gm20b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gp100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gp10b_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int gv100_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
int tu102_mmu_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_mmu **);
#endif
168#endif