/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_res_cursor.h"

#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_CREATE_USERPTR_BO	(1ULL << 63)

#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)

struct amdgpu_bo_param {
	unsigned long			size;
	int				byte_align;
	u32				bo_ptr_size;
	u32				domain;
	u32				preferred_domain;
	u64				flags;
	enum ttm_bo_type		type;
	bool				no_wait_gpu;
	struct dma_resv			*resv;
	void				(*destroy)(struct ttm_buffer_object *bo);
};
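
/*
 * Example: a minimal amdgpu_bo_param setup for amdgpu_bo_create(),
 * declared further below. An illustrative sketch only; the size,
 * alignment and domain values here are assumptions, not recommended
 * defaults:
 *
 *	struct amdgpu_bo_param bp = {};
 *	struct amdgpu_bo *bo = NULL;
 *	int r;
 *
 *	bp.size = PAGE_SIZE;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.type = ttm_bo_type_kernel;
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 *	if (r)
 *		return r;
 */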

/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;

	bool				is_xgmi;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	/* per VM structure for page tables and with virtual addresses */
	struct amdgpu_vm_bo_base	*vm_bo;
	/* Constant after initialization */
	struct amdgpu_bo		*parent;

#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier	notifier;
#endif
	struct kgd_mem			*kfd_bo;
};

struct amdgpu_bo_user {
	struct amdgpu_bo		bo;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
};

struct amdgpu_bo_vm {
	struct amdgpu_bo		bo;
	struct amdgpu_bo		*shadow;
	struct list_head		shadow_list;
	struct amdgpu_vm_bo_base	entries[];
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type: ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo: bo structure
 * @no_intr: don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
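
/*
 * Typical reserve/unreserve pattern (a sketch; state protected by the
 * reservation may only be touched between the two calls):
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	... read or modify reservation-protected state ...
 *	amdgpu_bo_unreserve(bo);
 */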

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}

/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_res_cursor cursor;

	if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
		return false;

	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
	while (cursor.remaining) {
		if (cursor.start < adev->gmc.visible_vram_size)
			return true;

		amdgpu_res_next(&cursor, cursor.size);
	}

	return false;
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

/**
 * amdgpu_bo_encrypted - test if the BO is encrypted
 * @bo: pointer to a buffer object
 *
 * Return true if the buffer object is encrypted, false otherwise.
 */
static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}

/**
 * amdgpu_bo_shadowed - check if the BO is shadowed
 *
 * @bo: BO to be tested.
 *
 * Returns:
 * NULL if not shadowed or else return a BO pointer.
 */
static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo)
{
	if (bo->tbo.type == ttm_bo_type_kernel)
		return to_amdgpu_bo_vm(bo)->shadow;

	return NULL;
}

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr);
int amdgpu_bo_create_vm(struct amdgpu_device *adev,
			struct amdgpu_bo_param *bp,
			struct amdgpu_bo_vm **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
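
/*
 * Example: lifecycle of a pinned, CPU-mapped kernel BO via
 * amdgpu_bo_create_kernel()/amdgpu_bo_free_kernel() (a sketch; the
 * size, alignment and domain values are assumptions):
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *	... use gpu_addr / cpu_addr ...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
 */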
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
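
/*
 * Example: temporary CPU mapping of a BO (a sketch; assumes the caller
 * holds the reservation and the BO is CPU-mappable):
 *
 *	void *ptr;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (r)
 *		return r;
 *	memset(ptr, 0, amdgpu_bo_size(bo));
 *	amdgpu_bo_kunmap(bo);
 */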
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
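
/*
 * Example: pinning a BO so it cannot be moved (a sketch; assumes the
 * BO is reserved by the caller, and every pin must be balanced by an
 * unpin):
 *
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	if (r)
 *		return r;
 *	... BO stays resident in VRAM ...
 *	amdgpu_bo_unpin(bo);
 */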
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
			  uint64_t *gtt_mem, uint64_t *cpu_mem);
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
			     struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
					uint32_t domain);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);
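
/*
 * Example: sub-allocating from an amdgpu_sa_manager (a sketch; assumes
 * sa_manager was set up with amdgpu_sa_bo_manager_init(), and the
 * size/alignment values here are arbitrary):
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *
 *	r = amdgpu_sa_bo_new(sa_manager, &sa_bo, 256, 16);
 *	if (r)
 *		return r;
 *	... write at amdgpu_sa_bo_cpu_addr(sa_bo), point the GPU at
 *	    amdgpu_sa_bo_gpu_addr(sa_bo) ...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */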
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m);
#endif
void amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

bool amdgpu_bo_support_uswc(u64 bo_flags);

#endif