/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_assert.h"
#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_printer;
struct drm_file;

struct ttm_buffer_object;
struct ttm_validate_buffer;

struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
struct drm_exec;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}
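
/*
 * Usage sketch (illustrative, not taken from the driver): hold a
 * reference across any deferred use of the vm, dropping it when done.
 *
 *	struct xe_vm *vm = xe_vm_get(some_vm);
 *
 *	... use vm, possibly after the original reference is gone ...
 *
 *	xe_vm_put(vm);
 */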

int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);
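
/*
 * Usage sketch (illustrative): xe_vm_lock() takes the vm's reservation
 * object; with @intr set the wait is interruptible and the call may
 * fail, so the return value must be checked.
 *
 *	err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;
 *	... access state protected by the vm's dma_resv ...
 *	xe_vm_unlock(vm);
 */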

static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->lock is held */
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}
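
/*
 * Usage sketch (illustrative; assumes the caller holds vm->lock, as
 * the lockdep assertion above requires): bail out before queuing new
 * work on a vm that can no longer execute.
 *
 *	if (xe_vm_is_closed_or_banned(vm))
 *		return -ENOENT;
 */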

struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

/**
 * xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
 * @vm: The vm
 *
 * Return: whether the vm populates unmapped areas with scratch PTEs
 */
static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}

/**
 * gpuvm_to_vm() - Return the embedding xe_vm from a struct drm_gpuvm pointer
 * @gpuvm: The struct drm_gpuvm pointer
 *
 * Return: Pointer to the embedding struct xe_vm.
 */
static inline struct xe_vm *gpuvm_to_vm(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct xe_vm, gpuvm);
}

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}

static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
{
	return container_of(op, struct xe_vma_op, base);
}

/**
 * DOC: Provide accessors for vma members to facilitate easy change of
 * implementation.
 */
static inline u64 xe_vma_start(struct xe_vma *vma)
{
	return vma->gpuva.va.addr;
}

static inline u64 xe_vma_size(struct xe_vma *vma)
{
	return vma->gpuva.va.range;
}

static inline u64 xe_vma_end(struct xe_vma *vma)
{
	return xe_vma_start(vma) + xe_vma_size(vma);
}
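
/*
 * Example (illustrative helper, not part of the driver): the accessors
 * keep range checks independent of the underlying drm_gpuva layout.
 *
 *	static bool vma_contains(struct xe_vma *vma, u64 addr)
 *	{
 *		return addr >= xe_vma_start(vma) && addr < xe_vma_end(vma);
 *	}
 */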

static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
	return !vma->gpuva.gem.obj ? NULL :
		container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

static inline bool xe_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}

static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline bool xe_vma_is_null(struct xe_vma *vma)
{
	return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}

static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
	return !xe_vma_bo(vma);
}

static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
}
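
/*
 * Note on the predicates above: a vma is backed by exactly one of a
 * GEM buffer object (xe_vma_bo() != NULL), userptr pages
 * (xe_vma_is_userptr()) or nothing at all (xe_vma_is_null(), a sparse
 * mapping). For userptr vmas the gem.offset field is reused by
 * xe_vma_userptr() to hold the userptr address.
 */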

/**
 * to_userptr_vma() - Return a pointer to an embedding userptr vma
 * @vma: Pointer to the embedded struct xe_vma
 *
 * Return: Pointer to the embedding userptr vma
 */
static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
{
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
	return container_of(vma, struct xe_userptr_vma, vma);
}
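
/*
 * Usage sketch (illustrative): classify the vma before downcasting,
 * since to_userptr_vma() asserts on non-userptr vmas.
 *
 *	if (xe_vma_is_userptr(vma)) {
 *		struct xe_userptr_vma *uvma = to_userptr_vma(vma);
 *
 *		... operate on uvma ...
 *	}
 */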

u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);

void xe_vm_close_and_put(struct xe_vm *vm);

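/*
 * Execution-mode predicates. A vm runs either in default dma-fence
 * mode or in long-running (LR) mode, where preemption is driven
 * either by page faults (fault mode) or by preempt fences. As encoded
 * below, preempt-fence mode is LR mode without fault mode.
 */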
static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_userptr_pin(struct xe_vm *vm);

int __xe_vm_userptr_needs_repin(struct xe_vm *vm);

int xe_vm_userptr_check_repin(struct xe_vm *vm);

int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
				u8 tile_mask);

int xe_vm_invalidate_vma(struct xe_vma *vma);

static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}

/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
 * vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there was
 * nothing to execute, reactivate it and run the rebind worker. This function
 * should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}

int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);

int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);

bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);

int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);

int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
			  unsigned int num_fences);

/**
 * xe_vm_resv() - Return the vm's reservation object
 * @vm: The vm
 *
 * Return: Pointer to the vm's reservation object.
 */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

void xe_vm_kill(struct xe_vm *vm, bool unlocked);

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))
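
/*
 * Usage sketch (illustrative): annotate functions whose callers must
 * already hold the vm's reservation object.
 *
 *	static void update_vm_state(struct xe_vm *vm)
 *	{
 *		xe_vm_assert_held(vm);
 *		... modify state protected by the vm's dma_resv ...
 *	}
 */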

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif
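
/*
 * Usage sketch (illustrative): vm_dbg() follows the drm_dbg()
 * signature and compiles to a no-op unless CONFIG_DRM_XE_DEBUG_VM is
 * enabled.
 *
 *	vm_dbg(&vm->xe->drm, "BIND: addr=0x%016llx, range=0x%016llx",
 *	       xe_vma_start(vma), xe_vma_size(vma));
 */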

struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
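
/*
 * Usage sketch (illustrative flow inferred from the names above, with
 * @snap and @p assumed to be a captured snapshot and a drm_printer):
 * take a cheap snapshot first, complete the heavy copying later from
 * a context where that is safe, then print and free it.
 *
 *	snap = xe_vm_snapshot_capture(vm);
 *	...
 *	xe_vm_snapshot_capture_delayed(snap);
 *	xe_vm_snapshot_print(snap, p);
 *	xe_vm_snapshot_free(snap);
 */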

#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
#else
static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
{
}
#endif
#endif