/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

struct amdgpu_bo_param {
	unsigned long			size;
	int				byte_align;
	u32				domain;
	u32				preferred_domain;
	u64				flags;
	enum ttm_bo_type		type;
	bool				no_wait_gpu;
	struct dma_resv			*resv;
};
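
/*
 * Usage sketch (editorial illustration, not part of the driver): fill an
 * amdgpu_bo_param and hand it to amdgpu_bo_create(), declared below.  The
 * values here are assumptions chosen for the example, not requirements.
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = PAGE_SIZE;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */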

/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;

	bool				is_xgmi;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			pin_count;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
	unsigned			prime_shared_count;
	/* per-VM structures linking this BO to its virtual addresses and page tables */
	struct amdgpu_vm_bo_base	*vm_bo;
	/* Constant after initialization */
	struct amdgpu_bo		*parent;
	struct amdgpu_bo		*shadow;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	struct amdgpu_mn		*mn;

#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier	notifier;
#endif

	struct list_head		shadow_list;

	struct kgd_mem			*kfd_bo;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type: ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}
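
/*
 * Editorial example: translating the placement TTM reports for a BO back
 * into a GEM domain flag ("bo" is assumed to be a valid amdgpu_bo):
 *
 *	u32 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 *
 *	if (domain == AMDGPU_GEM_DOMAIN_VRAM)
 *		... the buffer currently lives in VRAM ...
 */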

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo: bo structure
 * @no_intr: don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
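
/*
 * Typical calling pattern (editorial sketch): most of the helpers below
 * require the BO to be reserved first.  "cpu_ptr" is an assumed local.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &cpu_ptr);
 *	amdgpu_bo_unreserve(bo);
 */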

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.num_pages << PAGE_SHIFT;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}
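
/*
 * Editorial note: the helpers above convert between CPU pages (PAGE_SIZE,
 * possibly larger than 4 KiB) and GPU pages (AMDGPU_GPU_PAGE_SIZE, 4 KiB).
 * With 64 KiB CPU pages, a one-page BO gives:
 *
 *	amdgpu_bo_size(bo)       == 65536
 *	amdgpu_bo_ngpu_pages(bo) == 65536 / 4096 == 16
 */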

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
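
/*
 * Editorial example: this offset is what user space passes to mmap() on
 * the DRM file descriptor, e.g. from a GEM mmap ioctl handler ("args" is
 * an assumed ioctl argument struct):
 *
 *	args->addr_ptr = amdgpu_bo_mmap_offset(bo);
 */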

/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 * @bo: BO to check
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
	struct drm_mm_node *node = bo->tbo.mem.mm_node;
	unsigned long pages_left;

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
		return false;

	for (pages_left = bo->tbo.mem.num_pages; pages_left;
	     pages_left -= node->size, node++)
		if (node->start < fpfn)
			return true;

	return false;
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 * @bo: BO to check
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

/**
 * amdgpu_bo_encrypted - test if the BO is encrypted
 * @bo: pointer to a buffer object
 *
 * Return true if the buffer object is encrypted, false otherwise.
 */
static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
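
/*
 * Lifecycle sketch for a kernel-owned BO (editorial; values are
 * illustrative).  amdgpu_bo_create_kernel() allocates, pins and CPU-maps
 * the buffer in one call; amdgpu_bo_free_kernel() undoes all three.
 *
 *	struct amdgpu_bo *bo;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM,
 *				    &bo, &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */
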
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
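
/*
 * Pinning sketch (editorial): pinning requires the BO to be reserved, and
 * since amdgpu_bo_pin() does not return the GPU address, it is queried
 * separately with amdgpu_bo_gpu_offset(), declared below:
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	if (!r)
 *		gpu_addr = amdgpu_bo_gpu_offset(bo);
 *	amdgpu_bo_unreserve(bo);
 */
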
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
int amdgpu_bo_late_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
			 struct vm_area_struct *vma);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
			     struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);
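
/*
 * Sub-allocation sketch (editorial; "sa_manager" is assumed to be an
 * already-initialized struct amdgpu_sa_manager *).  A small sub-BO is
 * carved out of the manager's backing buffer and handed back once the
 * given fence signals.
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(sa_manager, &sa_bo, 256, 16);
 *	if (r)
 *		return r;
 *	... fill memory at amdgpu_sa_bo_cpu_addr(sa_bo), submit work ...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */
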
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
#endif
int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

bool amdgpu_bo_support_uswc(u64 bo_flags);

#endif