/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_BO_H_
#define _XE_BO_H_

#include <drm/ttm/ttm_tt.h>

#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_vm_types.h"
#include "xe_vm.h"

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))
#define XE_DEFAULT_GTT_SIZE_MB          3072ULL /* 3GB by default */

#define XE_BO_CREATE_USER_BIT		BIT(0)
/* The bits below need to be contiguous, or things break */
#define XE_BO_CREATE_SYSTEM_BIT		BIT(1)
#define XE_BO_CREATE_VRAM0_BIT		BIT(2)
#define XE_BO_CREATE_VRAM1_BIT		BIT(3)
#define XE_BO_CREATE_VRAM_MASK		(XE_BO_CREATE_VRAM0_BIT | \
					 XE_BO_CREATE_VRAM1_BIT)
/* -- */
#define XE_BO_CREATE_STOLEN_BIT		BIT(4)
#define XE_BO_CREATE_VRAM_IF_DGFX(tile) \
	(IS_DGFX(tile_to_xe(tile)) ? XE_BO_CREATE_VRAM0_BIT << (tile)->id : \
	 XE_BO_CREATE_SYSTEM_BIT)
#define XE_BO_CREATE_GGTT_BIT		BIT(5)
#define XE_BO_CREATE_IGNORE_MIN_PAGE_SIZE_BIT BIT(6)
#define XE_BO_CREATE_PINNED_BIT		BIT(7)
#define XE_BO_CREATE_NO_RESV_EVICT	BIT(8)
#define XE_BO_DEFER_BACKING		BIT(9)
#define XE_BO_SCANOUT_BIT		BIT(10)
#define XE_BO_FIXED_PLACEMENT_BIT	BIT(11)
#define XE_BO_PAGETABLE			BIT(12)
#define XE_BO_NEEDS_CPU_ACCESS		BIT(13)
/* this one is triggered internally only */
#define XE_BO_INTERNAL_TEST		BIT(30)
#define XE_BO_INTERNAL_64K		BIT(31)

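/*
 * Page Attribute Table (PAT) index bits in the PPGTT PTE/PDE layout.
 * The XELPG_/XE2_ prefixes presumably mark the platform generation that
 * introduced the bit; unprefixed bits are common across Xe platforms.
 */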
#define XELPG_PPGTT_PTE_PAT3		BIT_ULL(62)
#define XE2_PPGTT_PTE_PAT4		BIT_ULL(61)
#define XE_PPGTT_PDE_PDPE_PAT2		BIT_ULL(12)
#define XE_PPGTT_PTE_PAT2		BIT_ULL(7)
#define XE_PPGTT_PTE_PAT1		BIT_ULL(4)
#define XE_PPGTT_PTE_PAT0		BIT_ULL(3)

#define XE_PTE_SHIFT			12
#define XE_PAGE_SIZE			(1 << XE_PTE_SHIFT)
#define XE_PTE_MASK			(XE_PAGE_SIZE - 1)
#define XE_PDE_SHIFT			(XE_PTE_SHIFT - 3)
#define XE_PDES				(1 << XE_PDE_SHIFT)
#define XE_PDE_MASK			(XE_PDES - 1)
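/*
 * For reference: XE_PTE_SHIFT == 12 gives a 4KiB GPU page, and since a
 * 4KiB page holds 512 eight-byte entries, each page-table level resolves
 * XE_PDE_SHIFT == 9 address bits (XE_PDES == 512 entries per level).
 */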

#define XE_64K_PTE_SHIFT		16
#define XE_64K_PAGE_SIZE		(1 << XE_64K_PTE_SHIFT)
#define XE_64K_PTE_MASK			(XE_64K_PAGE_SIZE - 1)
#define XE_64K_PDE_MASK			(XE_PDE_MASK >> 4)

#define XE_PDE_PS_2M			BIT_ULL(7)
#define XE_PDPE_PS_1G			BIT_ULL(7)
#define XE_PDE_IPS_64K			BIT_ULL(11)

#define XE_GGTT_PTE_DM			BIT_ULL(1)
#define XE_USM_PPGTT_PTE_AE		BIT_ULL(10)
#define XE_PPGTT_PTE_DM			BIT_ULL(11)
#define XE_PDE_64K			BIT_ULL(6)
#define XE_PTE_PS64			BIT_ULL(8)
#define XE_PTE_NULL			BIT_ULL(9)

#define XE_PAGE_PRESENT			BIT_ULL(0)
#define XE_PAGE_RW			BIT_ULL(1)

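/*
 * Xe buffer placements alias the core TTM memory types; stolen memory
 * reuses the last available TTM memory type slot.
 */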
#define XE_PL_SYSTEM		TTM_PL_SYSTEM
#define XE_PL_TT		TTM_PL_TT
#define XE_PL_VRAM0		TTM_PL_VRAM
#define XE_PL_VRAM1		(XE_PL_VRAM0 + 1)
#define XE_PL_STOLEN		(TTM_NUM_MEM_TYPES - 1)

#define XE_BO_PROPS_INVALID	(-1)

struct sg_table;

struct xe_bo *xe_bo_alloc(void);
void xe_bo_free(struct xe_bo *bo);

struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
				     struct xe_tile *tile, struct dma_resv *resv,
				     struct ttm_lru_bulk_move *bulk, size_t size,
				     u16 cpu_caching, enum ttm_bo_type type,
				     u32 flags);
struct xe_bo *
xe_bo_create_locked_range(struct xe_device *xe,
			  struct xe_tile *tile, struct xe_vm *vm,
			  size_t size, u64 start, u64 end,
			  enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile,
				  struct xe_vm *vm, size_t size,
				  enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create(struct xe_device *xe, struct xe_tile *tile,
			   struct xe_vm *vm, size_t size,
			   enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile,
				struct xe_vm *vm, size_t size,
				u16 cpu_caching,
				enum ttm_bo_type type,
				u32 flags);
struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
				   struct xe_vm *vm, size_t size,
				   enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile,
				      struct xe_vm *vm, size_t size, u64 offset,
				      enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
				     const void *data, size_t size,
				     enum ttm_bo_type type, u32 flags);
struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile,
					   size_t size, u32 flags);
struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
					     const void *data, size_t size, u32 flags);
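
/*
 * Illustrative sketch (hypothetical helper, not part of the API above):
 * create a small kernel-internal bo, pinned and mapped, placed in VRAM on
 * discrete devices and in system memory otherwise, and bound into the GGTT.
 * Assumes the xe_device helpers used by XE_BO_CREATE_VRAM_IF_DGFX()
 * (IS_DGFX(), tile_to_xe()) are visible to users of this header. The
 * create functions return an ERR_PTR() value on failure.
 */
static inline struct xe_bo *
xe_bo_example_create_kernel(struct xe_device *xe, struct xe_tile *tile)
{
	return xe_bo_create_pin_map(xe, tile, NULL, XE_PAGE_SIZE,
				    ttm_bo_type_kernel,
				    XE_BO_CREATE_VRAM_IF_DGFX(tile) |
				    XE_BO_CREATE_GGTT_BIT);
}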

int xe_bo_placement_for_flags(struct xe_device *xe, struct xe_bo *bo,
			      u32 bo_flags);

static inline struct xe_bo *ttm_to_xe_bo(const struct ttm_buffer_object *bo)
{
	return container_of(bo, struct xe_bo, ttm);
}

static inline struct xe_bo *gem_to_xe_bo(const struct drm_gem_object *obj)
{
	return container_of(obj, struct xe_bo, ttm.base);
}

#define xe_bo_device(bo) ttm_to_xe_device((bo)->ttm.bdev)

static inline struct xe_bo *xe_bo_get(struct xe_bo *bo)
{
	if (bo)
		drm_gem_object_get(&bo->ttm.base);

	return bo;
}

static inline void xe_bo_put(struct xe_bo *bo)
{
	if (bo)
		drm_gem_object_put(&bo->ttm.base);
}

static inline void __xe_bo_unset_bulk_move(struct xe_bo *bo)
{
	if (bo)
		ttm_bo_set_bulk_move(&bo->ttm, NULL);
}

static inline void xe_bo_assert_held(struct xe_bo *bo)
{
	if (bo)
		dma_resv_assert_held((bo)->ttm.base.resv);
}

int xe_bo_lock(struct xe_bo *bo, bool intr);

void xe_bo_unlock(struct xe_bo *bo);

static inline void xe_bo_unlock_vm_held(struct xe_bo *bo)
{
	if (bo) {
		XE_WARN_ON(bo->vm && bo->ttm.base.resv != xe_vm_resv(bo->vm));
		if (bo->vm)
			xe_vm_assert_held(bo->vm);
		else
			dma_resv_unlock(bo->ttm.base.resv);
	}
}

int xe_bo_pin_external(struct xe_bo *bo);
int xe_bo_pin(struct xe_bo *bo);
void xe_bo_unpin_external(struct xe_bo *bo);
void xe_bo_unpin(struct xe_bo *bo);
int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict);

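/*
 * Illustrative sketch (hypothetical helper, not part of the API above):
 * the usual pattern of taking the bo's dma-resv lock around a validate
 * call. With @intr == true, xe_bo_lock() may fail with a signal-related
 * errno, which the caller must handle.
 */
static inline int xe_bo_example_lock_and_validate(struct xe_bo *bo)
{
	int err;

	err = xe_bo_lock(bo, true);
	if (err)
		return err;

	err = xe_bo_validate(bo, NULL, false);
	xe_bo_unlock(bo);

	return err;
}
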
static inline bool xe_bo_is_pinned(struct xe_bo *bo)
{
	return bo->ttm.pin_count;
}

static inline void xe_bo_unpin_map_no_vm(struct xe_bo *bo)
{
	if (likely(bo)) {
		xe_bo_lock(bo, false);
		xe_bo_unpin(bo);
		xe_bo_unlock(bo);

		xe_bo_put(bo);
	}
}

bool xe_bo_is_xe_bo(struct ttm_buffer_object *bo);
dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size);
dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size);

static inline dma_addr_t
xe_bo_main_addr(struct xe_bo *bo, size_t page_size)
{
	return xe_bo_addr(bo, 0, page_size);
}

static inline u32
xe_bo_ggtt_addr(struct xe_bo *bo)
{
	XE_WARN_ON(bo->ggtt_node.size > bo->size);
	XE_WARN_ON(bo->ggtt_node.start + bo->ggtt_node.size > (1ull << 32));
	return bo->ggtt_node.start;
}

int xe_bo_vmap(struct xe_bo *bo);
void xe_bo_vunmap(struct xe_bo *bo);

bool mem_type_is_vram(u32 mem_type);
bool xe_bo_is_vram(struct xe_bo *bo);
bool xe_bo_is_stolen(struct xe_bo *bo);
bool xe_bo_is_stolen_devmem(struct xe_bo *bo);
uint64_t vram_region_gpu_offset(struct ttm_resource *res);

bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);

int xe_bo_migrate(struct xe_bo *bo, u32 mem_type);
int xe_bo_evict(struct xe_bo *bo, bool force_alloc);

int xe_bo_evict_pinned(struct xe_bo *bo);
int xe_bo_restore_pinned(struct xe_bo *bo);

extern struct ttm_device_funcs xe_ttm_funcs;
extern const char *const xe_mem_type_to_name[];

int xe_gem_create_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file);
int xe_bo_dumb_create(struct drm_file *file_priv,
		      struct drm_device *dev,
		      struct drm_mode_create_dumb *args);

bool xe_bo_needs_ccs_pages(struct xe_bo *bo);

static inline size_t xe_bo_ccs_pages_start(struct xe_bo *bo)
{
	return PAGE_ALIGN(bo->ttm.base.size);
}

static inline bool xe_bo_has_pages(struct xe_bo *bo)
{
	if ((bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm)) ||
	    xe_bo_is_vram(bo))
		return true;

	return false;
}

void __xe_bo_release_dummy(struct kref *kref);

/**
 * xe_bo_put_deferred() - Put a buffer object with delayed final freeing
 * @bo: The bo to put.
 * @deferred: List to which to add the buffer object if we cannot put, or
 * NULL if the function is to put unconditionally.
 *
 * Since the final freeing of an object includes both sleeping and (!)
 * memory allocation in the dma_resv individualization, it's not ok
 * to put an object from atomic context nor from within a held lock
 * tainted by reclaim. In such situations we want to defer the final
 * freeing until we've exited the restricting context, or in the worst
 * case to a workqueue.
 * This function either puts the object if possible without the refcount
 * reaching zero, or adds it to the @deferred list if that was not possible.
 * The caller needs to follow up with a call to xe_bo_put_commit() to actually
 * put the bo iff this function returns true. It's safe to always
 * follow up with a call to xe_bo_put_commit().
 * TODO: It's TTM that is the villain here. Perhaps TTM should add an
 * interface like this.
 *
 * Return: true if @bo was the first object put on the @deferred list,
 * false otherwise.
 */
static inline bool
xe_bo_put_deferred(struct xe_bo *bo, struct llist_head *deferred)
{
	if (!deferred) {
		xe_bo_put(bo);
		return false;
	}

	if (!kref_put(&bo->ttm.base.refcount, __xe_bo_release_dummy))
		return false;

	return llist_add(&bo->freed, deferred);
}

void xe_bo_put_commit(struct llist_head *deferred);

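/*
 * Illustrative sketch (hypothetical helper, not part of the API above):
 * deferred puts from a context where the final free must not run, with
 * the commit issued later from a context that may sleep and allocate.
 * Assumes <linux/llist.h> is available via xe_bo_types.h for LLIST_HEAD().
 */
static inline void xe_bo_example_put_two_deferred(struct xe_bo *bo1,
						  struct xe_bo *bo2)
{
	LLIST_HEAD(deferred);

	xe_bo_put_deferred(bo1, &deferred);
	xe_bo_put_deferred(bo2, &deferred);

	/* Later, once outside the restricting context: */
	xe_bo_put_commit(&deferred);
}
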
struct sg_table *xe_bo_sg(struct xe_bo *bo);

/**
 * xe_sg_segment_size() - Provides upper limit for sg segment size.
 * @dev: device pointer
 *
 * Return: the maximum segment size for the 'struct scatterlist'
 * elements.
 */
static inline unsigned int xe_sg_segment_size(struct device *dev)
{
	struct scatterlist __maybe_unused sg;
	size_t max = BIT_ULL(sizeof(sg.length) * 8) - 1;

	max = min_t(size_t, max, dma_max_mapping_size(dev));

	/*
	 * The iommu_dma_map_sg() function ensures iova allocation doesn't
	 * cross dma segment boundary. It does so by padding some sg elements.
	 * This can cause overflow, ending up with sg->length being set to 0.
	 * Avoid this by ensuring maximum segment size is half of 'max'
	 * rounded down to PAGE_SIZE.
	 */
	return round_down(max / 2, PAGE_SIZE);
}
338
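/*
 * Typical use (illustrative, an assumption rather than a documented
 * contract): pass the returned value as the max_segment argument when
 * building an sg_table, e.g. via sg_alloc_table_from_pages_segment().
 */
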
#define i915_gem_object_flush_if_display(obj)		((void)(obj))

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
/**
 * xe_bo_is_mem_type - Whether the bo currently resides in the given
 * TTM memory type
 * @bo: The bo to check.
 * @mem_type: The TTM memory type.
 *
 * Return: true iff the bo resides in @mem_type, false otherwise.
 */
static inline bool xe_bo_is_mem_type(struct xe_bo *bo, u32 mem_type)
{
	xe_bo_assert_held(bo);
	return bo->ttm.resource->mem_type == mem_type;
}
#endif
#endif