/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef __I915_VMA_RESOURCE_H__
#define __I915_VMA_RESOURCE_H__

#include <linux/dma-fence.h>
#include <linux/refcount.h>

#include "i915_gem.h"
#include "i915_scatterlist.h"
#include "i915_sw_fence.h"
#include "intel_runtime_pm.h"

struct intel_memory_region;

struct i915_page_sizes {
	/**
	 * The sg mask of the pages' sg_table, i.e. the mask of
	 * the lengths for each sg entry.
	 */
	unsigned int phys;

	/**
	 * The gtt page sizes we are allowed to use given the
	 * sg mask and the supported page sizes. This will
	 * express the smallest unit we can use for the whole
	 * object, as well as the larger sizes we may be able
	 * to use opportunistically.
	 */
	unsigned int sg;
};

/**
 * struct i915_vma_resource - Snapshotted unbind information.
 * @unbind_fence: Fence to mark unbinding complete. Note that this fence
 * is not considered published until unbind is scheduled, and as such it
 * is illegal to access this fence before scheduled unbind other than
 * for refcounting.
 * @lock: The @unbind_fence lock.
 * @hold_count: Number of holders blocking the fence from finishing.
 * The vma itself is keeping a hold, which is released when unbind
 * is scheduled.
 * @work: Work struct for deferred unbind work.
 * @chain: Struct i915_sw_fence used to await dependencies.
 * @rb: Rb node for the vm's pending unbind interval tree.
 * @__subtree_last: Interval tree private member.
 * @vm: non-refcounted pointer to the vm. This is for internal use only and
 * this member is cleared after vm_resource unbind.
 * @wakeref: The wakeref taken when scheduling an unbind that needs one,
 * see @needs_wakeref.
 * @mr: The memory region of the object pointed to by the vma.
 * @ops: Pointer to the backend i915_vma_ops.
 * @private: Bind backend private info.
 * @start: Offset into the address space of bind range start.
 * @node_size: Size of the allocated range manager node.
 * @vma_size: Bind size.
 * @page_sizes_gtt: Resulting page sizes from the bind operation.
 * @bound_flags: Flags indicating binding status.
 * @allocated: Backend private data. TODO: Should move into @private.
 * @immediate_unbind: Unbind can be done immediately and doesn't need to be
 * deferred to a work item awaiting unsignaled fences. This is a hack.
 * (dma_fence_work uses a fence flag for this, but this seems slightly
 * cleaner).
 * @needs_wakeref: Whether a wakeref is needed during unbind. Since we can't
 * take a wakeref in the dma-fence signalling critical path, it needs to be
 * taken when the unbind is scheduled.
 * @skip_pte_rewrite: During ggtt suspend and vm takedown pte rewriting
 * needs to be skipped for unbind.
 * @tlb: Pointer to obj->mm.tlb if the unbind is async; otherwise NULL.
 *
 * The lifetime of a struct i915_vma_resource spans from a binding request
 * until the actual, possibly asynchronous, unbind has completed.
 */
struct i915_vma_resource {
	struct dma_fence unbind_fence;
	/* See above for description of the lock. */
	spinlock_t lock;
	refcount_t hold_count;
	struct work_struct work;
	struct i915_sw_fence chain;
	struct rb_node rb;
	u64 __subtree_last;
	struct i915_address_space *vm;
	intel_wakeref_t wakeref;

	/**
	 * struct i915_vma_bindinfo - Information needed for async bind
	 * only but that can be dropped after the bind has taken place.
	 * Consider making this a separate argument to the bind_vma
	 * op, coalescing with other arguments like vm, stash, cache_level
	 * and flags.
	 * @pages: The pages sg-table.
	 * @page_sizes: Page sizes of the pages.
	 * @pages_rsgt: Refcounted sg-table when delayed object destruction
	 * is supported. May be NULL.
	 * @readonly: Whether the vma should be bound read-only.
	 * @lmem: Whether the vma points to lmem.
	 */
	struct i915_vma_bindinfo {
		struct sg_table *pages;
		struct i915_page_sizes page_sizes;
		struct i915_refct_sgt *pages_rsgt;
		bool readonly:1;
		bool lmem:1;
	} bi;

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	struct intel_memory_region *mr;
#endif
	const struct i915_vma_ops *ops;
	void *private;
	u64 start;
	u64 node_size;
	u64 vma_size;
	u32 page_sizes_gtt;

	u32 bound_flags;
	bool allocated:1;
	bool immediate_unbind:1;
	bool needs_wakeref:1;
	bool skip_pte_rewrite:1;

	u32 *tlb;
};

bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
			    bool *lockdep_cookie);

void i915_vma_resource_unhold(struct i915_vma_resource *vma_res,
			      bool lockdep_cookie);
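
/*
 * Usage sketch (illustrative only, not part of the original interface docs):
 * a caller that needs to block the unbind fence from signaling while it
 * inspects the resource can take a hold. This assumes the bool return value
 * indicates whether the hold was obtained; a failed hold needs no unhold:
 *
 *	bool lockdep_cookie;
 *
 *	if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
 *		... safely access vma_res, e.g. vma_res->bi.pages ...
 *		i915_vma_resource_unhold(vma_res, lockdep_cookie);
 *	}
 */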

struct i915_vma_resource *i915_vma_resource_alloc(void);

void i915_vma_resource_free(struct i915_vma_resource *vma_res);

struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
					   u32 *tlb);

void __i915_vma_resource_init(struct i915_vma_resource *vma_res);

/**
 * i915_vma_resource_get - Take a reference on a vma resource
 * @vma_res: The vma resource on which to take a reference.
 *
 * Return: The @vma_res pointer
 */
static inline struct i915_vma_resource
*i915_vma_resource_get(struct i915_vma_resource *vma_res)
{
	dma_fence_get(&vma_res->unbind_fence);
	return vma_res;
}

/**
 * i915_vma_resource_put - Release a reference to a struct i915_vma_resource
 * @vma_res: The resource
 */
static inline void i915_vma_resource_put(struct i915_vma_resource *vma_res)
{
	dma_fence_put(&vma_res->unbind_fence);
}
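
/*
 * Usage sketch (illustrative only, not part of the original interface docs):
 * a reference taken with i915_vma_resource_get() takes a dma-fence reference
 * on the embedded @unbind_fence and must be balanced by
 * i915_vma_resource_put():
 *
 *	struct i915_vma_resource *ref = i915_vma_resource_get(vma_res);
 *
 *	... use ref ...
 *	i915_vma_resource_put(ref);
 */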

/**
 * i915_vma_resource_init - Initialize a vma resource.
 * @vma_res: The vma resource to initialize
 * @vm: Pointer to the vm.
 * @pages: The pages sg-table.
 * @page_sizes: Page sizes of the pages.
 * @pages_rsgt: Pointer to a struct i915_refct_sgt of an object with
 * delayed destruction.
 * @readonly: Whether the vma should be bound read-only.
 * @lmem: Whether the vma points to lmem.
 * @mr: The memory region of the object the vma points to.
 * @ops: The backend ops.
 * @private: Bind backend private info.
 * @start: Offset into the address space of bind range start.
 * @node_size: Size of the allocated range manager node.
 * @size: Bind size.
 *
 * Initializes a vma resource allocated using i915_vma_resource_alloc().
 * The reason for having separate allocate and initialize functions is that
 * initialization may need to be performed from under a lock where
 * allocation is not allowed.
 */
static inline void i915_vma_resource_init(struct i915_vma_resource *vma_res,
					  struct i915_address_space *vm,
					  struct sg_table *pages,
					  const struct i915_page_sizes *page_sizes,
					  struct i915_refct_sgt *pages_rsgt,
					  bool readonly,
					  bool lmem,
					  struct intel_memory_region *mr,
					  const struct i915_vma_ops *ops,
					  void *private,
					  u64 start,
					  u64 node_size,
					  u64 size)
{
	__i915_vma_resource_init(vma_res);
	vma_res->vm = vm;
	vma_res->bi.pages = pages;
	vma_res->bi.page_sizes = *page_sizes;
	if (pages_rsgt)
		vma_res->bi.pages_rsgt = i915_refct_sgt_get(pages_rsgt);
	vma_res->bi.readonly = readonly;
	vma_res->bi.lmem = lmem;
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
	vma_res->mr = mr;
#endif
	vma_res->ops = ops;
	vma_res->private = private;
	vma_res->start = start;
	vma_res->node_size = node_size;
	vma_res->vma_size = size;
}
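
/*
 * Usage sketch (illustrative only, not part of the original interface docs):
 * allocation and initialization are split so that initialization can be done
 * under a lock where allocation is not allowed. The names vm, pages,
 * page_sizes, mr, ops, start, node_size and size below are placeholders for
 * whatever the caller has at hand:
 *
 *	struct i915_vma_resource *vma_res;
 *
 *	vma_res = i915_vma_resource_alloc();
 *	... check the returned pointer for errors ...
 *
 *	... later, possibly while holding the relevant lock ...
 *	i915_vma_resource_init(vma_res, vm, pages, &page_sizes, NULL,
 *			       false, false, mr, ops, NULL, start,
 *			       node_size, size);
 */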

/**
 * i915_vma_resource_fini - Finalize an initialized vma resource.
 * @vma_res: The vma resource to finalize.
 *
 * Drops the reference to the refcounted sg-table, if present, and
 * finalizes the dependency chain fence.
 */
static inline void i915_vma_resource_fini(struct i915_vma_resource *vma_res)
{
	GEM_BUG_ON(refcount_read(&vma_res->hold_count) != 1);
	if (vma_res->bi.pages_rsgt)
		i915_refct_sgt_put(vma_res->bi.pages_rsgt);
	i915_sw_fence_fini(&vma_res->chain);
}

int i915_vma_resource_bind_dep_sync(struct i915_address_space *vm,
				    u64 first,
				    u64 last,
				    bool intr);

int i915_vma_resource_bind_dep_await(struct i915_address_space *vm,
				     struct i915_sw_fence *sw_fence,
				     u64 first,
				     u64 last,
				     bool intr,
				     gfp_t gfp);

void i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm);

void i915_vma_resource_module_exit(void);

int i915_vma_resource_module_init(void);

#endif