/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_printer;
struct drm_file;

struct ttm_buffer_object;
struct ttm_validate_buffer;

struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
struct drm_exec;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);

struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);

static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}

static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}

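/*
 * Illustrative sketch (not part of this header): xe_vm_get()/xe_vm_put()
 * wrap the underlying drm_gpuvm reference count, so a caller that needs
 * the vm to stay alive beyond its current context takes and drops a
 * reference. The surrounding code here is hypothetical.
 *
 *	struct xe_vm *vm = xe_vm_get(some_vm);
 *
 *	... vm cannot be freed while this reference is held ...
 *
 *	xe_vm_put(vm);
 */
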
int xe_vm_lock(struct xe_vm *vm, bool intr);

void xe_vm_unlock(struct xe_vm *vm);

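/*
 * Illustrative sketch (hypothetical caller): with @intr set, xe_vm_lock()
 * is assumed to wait interruptibly and return a negative errno on signal,
 * so callers would check the return value before proceeding.
 *
 *	int err = xe_vm_lock(vm, true);
 *
 *	if (err)
 *		return err;
 *	... touch state protected by the vm's reservation object ...
 *	xe_vm_unlock(vm);
 */
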
static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	/* Only guaranteed not to change when vm->lock is held */
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}

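/*
 * Illustrative sketch: xe_vm_is_closed_or_banned() asserts vm->lock, so a
 * hypothetical caller takes the lock around the check (assuming vm->lock
 * is a rwsem), e.g.:
 *
 *	down_read(&vm->lock);
 *	if (xe_vm_is_closed_or_banned(vm)) {
 *		up_read(&vm->lock);
 *		return -ENOENT;
 *	}
 */
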
struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);

/**
 * xe_vm_has_scratch() - Whether the vm is configured for scratch PTEs
 * @vm: The vm
 *
 * Return: whether the vm populates unmapped areas with scratch PTEs
 */
static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}

/**
 * gpuvm_to_vm() - Return the embedding xe_vm from a struct drm_gpuvm pointer
 * @gpuvm: The struct drm_gpuvm pointer
 *
 * Return: Pointer to the embedding struct xe_vm.
 */
static inline struct xe_vm *gpuvm_to_vm(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct xe_vm, gpuvm);
}

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}

static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

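/*
 * Illustrative sketch: the container_of() helpers above compose, so a
 * hypothetical caller holding only a struct drm_gpuva can recover both
 * the embedding vma and the owning vm.
 *
 *	struct xe_vma *vma = gpuva_to_vma(gpuva);
 *	struct xe_vm *vm = gpuva_to_vm(gpuva);
 */
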
static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
{
	return container_of(op, struct xe_vma_op, base);
}

/**
 * DOC: Provide accessors for vma members to facilitate easy change of
 * implementation.
 */
static inline u64 xe_vma_start(struct xe_vma *vma)
{
	return vma->gpuva.va.addr;
}

static inline u64 xe_vma_size(struct xe_vma *vma)
{
	return vma->gpuva.va.range;
}

static inline u64 xe_vma_end(struct xe_vma *vma)
{
	return xe_vma_start(vma) + xe_vma_size(vma);
}

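/*
 * Illustrative sketch (assumed helper, not part of this header): the
 * accessors above keep callers away from the raw drm_gpuva fields, e.g.
 * for a range-containment test. Note that xe_vma_end() is exclusive.
 *
 *	static inline bool example_vma_contains(struct xe_vma *vma, u64 addr)
 *	{
 *		return addr >= xe_vma_start(vma) && addr < xe_vma_end(vma);
 *	}
 */
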
static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
	return !vma->gpuva.gem.obj ? NULL :
		container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

static inline bool xe_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}

static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline bool xe_vma_is_null(struct xe_vma *vma)
{
	return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}

static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
	return !xe_vma_bo(vma);
}

static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma);
}

/**
 * to_userptr_vma() - Return a pointer to an embedding userptr vma
 * @vma: Pointer to the embedded struct xe_vma
 *
 * Return: Pointer to the embedding userptr vma
 */
static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
{
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
	return container_of(vma, struct xe_userptr_vma, vma);
}

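/*
 * Illustrative sketch (hypothetical call site): since to_userptr_vma()
 * asserts that the vma really is a userptr vma, callers are expected to
 * check first.
 *
 *	if (xe_vma_is_userptr(vma)) {
 *		struct xe_userptr_vma *uvma = to_userptr_vma(vma);
 *
 *		err = xe_vma_userptr_pin_pages(uvma);
 *	}
 */
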
u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);

void xe_vm_close_and_put(struct xe_vm *vm);

static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);

int xe_vm_userptr_pin(struct xe_vm *vm);

int __xe_vm_userptr_needs_repin(struct xe_vm *vm);

int xe_vm_userptr_check_repin(struct xe_vm *vm);

struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);

int xe_vm_invalidate_vma(struct xe_vma *vma);

extern struct ttm_device_funcs xe_ttm_funcs;

static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}

/**
 * xe_vm_reactivate_rebind() - Reactivate the rebind functionality on compute
 * vms.
 * @vm: The vm.
 *
 * If the rebind functionality on a compute vm was disabled because there was
 * nothing to execute, reactivate it and run the rebind worker.
 * This function should be called after submitting a batch to a compute vm.
 */
static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}

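/*
 * Illustrative sketch (hypothetical submission path): per the comment
 * above, a caller would reactivate rebinding right after queuing work on
 * a compute vm.
 *
 *	... submit batch on a compute vm ...
 *	xe_vm_reactivate_rebind(vm);
 */
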
int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma);

int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma);

bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end);

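/*
 * Illustrative sketch (assumed usage): xe_vm_validate_should_retry() is
 * shaped for retry loops where @end tracks a deadline across iterations;
 * the validation step here is hypothetical.
 *
 *	ktime_t end = 0;
 *	int err;
 *
 * retry:
 *	err = some_validation_step(exec, vm);
 *	if (err && xe_vm_validate_should_retry(exec, err, &end))
 *		goto retry;
 */
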
int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id);

int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma,
		      unsigned int num_shared);

/**
 * xe_vm_resv() - Return the vm's reservation object
 * @vm: The vm
 *
 * Return: Pointer to the vm's reservation object.
 */
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

/**
 * xe_vm_assert_held(vm) - Assert that the vm's reservation object is held.
 * @vm: The vm
 */
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))

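/*
 * Illustrative sketch (hypothetical function): internal helpers that rely
 * on the caller holding the vm's reservation object can document and
 * enforce that with xe_vm_assert_held().
 *
 *	static void example_update_vm_state(struct xe_vm *vm)
 *	{
 *		xe_vm_assert_held(vm);
 *		... manipulate state protected by xe_vm_resv(vm) ...
 *	}
 */
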
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ /* noop */ }
#endif
#endif