/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef I915_SCATTERLIST_H
#define I915_SCATTERLIST_H

#include <linux/pfn.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#include "i915_gem.h"

struct drm_mm_node;
struct ttm_resource;

/*
 * Optimised SGL iterator for GEM objects.
 *
 * @curr and @max track the byte range covered by the current scatterlist
 * entry, while @pfn or @dma (depending on the mode) holds its start
 * address. The for_each_sgt_* macros below drive the iteration.
 */
static __always_inline struct sgt_iter {
	struct scatterlist *sgp;
	union {
		unsigned long pfn;
		dma_addr_t dma;
	};
	unsigned int curr;
	unsigned int max;
} __sgt_iter(struct scatterlist *sgl, bool dma) {
	struct sgt_iter s = { .sgp = sgl };

	if (dma && s.sgp && sg_dma_len(s.sgp) == 0) {
		s.sgp = NULL;
	} else if (s.sgp) {
		s.max = s.curr = s.sgp->offset;
		if (dma) {
			s.dma = sg_dma_address(s.sgp);
			s.max += sg_dma_len(s.sgp);
		} else {
			s.pfn = page_to_pfn(sg_page(s.sgp));
			s.max += s.sgp->length;
		}
	}

	return s;
}

static inline int __sg_page_count(const struct scatterlist *sg)
{
	return sg->length >> PAGE_SHIFT;
}

static inline int __sg_dma_page_count(const struct scatterlist *sg)
{
	return sg_dma_len(sg) >> PAGE_SHIFT;
}

static inline struct scatterlist *____sg_next(struct scatterlist *sg)
{
	++sg;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);
	return sg;
}

/**
 * __sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   If the entry is the last, return NULL; otherwise, step to the next
 *   element in the array (@sg@+1). If that's a chain pointer, follow it;
 *   otherwise just return the pointer to that next element.
 **/
static inline struct scatterlist *__sg_next(struct scatterlist *sg)
{
	return sg_is_last(sg) ? NULL : ____sg_next(sg);
}

/**
 * __for_each_sgt_daddr - iterate over the device addresses of the given sg_table
 * @__dp:	Device address (output)
 * @__iter:	'struct sgt_iter' (iterator state, internal)
 * @__sgt:	sg_table to iterate over (input)
 * @__step:	step size in bytes
 */
#define __for_each_sgt_daddr(__dp, __iter, __sgt, __step)		\
	for ((__iter) = __sgt_iter((__sgt)->sgl, true);			\
	     ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp;	\
	     (((__iter).curr += (__step)) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
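
/*
 * Example usage (illustrative sketch, not part of the upstream header):
 * walk the DMA addresses of a mapped sg_table 'st' in page-sized steps
 * and feed each address to a hypothetical PTE writer. i915's GTT code
 * wraps this macro with a fixed, GTT-specific step size.
 *
 *	struct sgt_iter iter;
 *	dma_addr_t addr;
 *
 *	__for_each_sgt_daddr(addr, iter, st, PAGE_SIZE)
 *		write_pte(addr);
 */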
/**
 * __for_each_daddr_next - iterate over the device addresses with a pre-initialized iterator
 * @__dp:	Device address (output)
 * @__iter:	'struct sgt_iter' (iterator state, external)
 * @__step:	step size in bytes
 */
#define __for_each_daddr_next(__dp, __iter, __step)                  \
	for (; ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp;   \
	     (((__iter).curr += (__step)) >= (__iter).max) ?            \
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
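
/*
 * Example (sketch): equivalent to __for_each_sgt_daddr() when the
 * iterator is primed at the start of the table, but the caller owns the
 * iterator state and can carry it across loops. 'st' and 'write_pte'
 * are assumptions, as above.
 *
 *	struct sgt_iter iter = __sgt_iter(st->sgl, true);
 *	dma_addr_t addr;
 *
 *	__for_each_daddr_next(addr, iter, PAGE_SIZE)
 *		write_pte(addr);
 */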

/**
 * for_each_sgt_page - iterate over the pages of the given sg_table
 * @__pp:	page pointer (output)
 * @__iter:	'struct sgt_iter' (iterator state, internal)
 * @__sgt:	sg_table to iterate over (input)
 */
#define for_each_sgt_page(__pp, __iter, __sgt)				\
	for ((__iter) = __sgt_iter((__sgt)->sgl, false);		\
	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
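
/*
 * Example usage (illustrative sketch): mark every backing page of an
 * object's sg_table dirty, the way i915's put_pages paths walk backing
 * storage before releasing it.
 *
 *	struct sgt_iter iter;
 *	struct page *page;
 *
 *	for_each_sgt_page(page, iter, st)
 *		set_page_dirty(page);
 */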

/**
 * i915_sg_dma_sizes - Record the dma segment sizes of a scatterlist
 * @sg: The scatterlist
 *
 * Return: An unsigned int with segment sizes logically or'ed together.
 * A caller can use this information to determine what hardware page table
 * entry sizes can be used to map the memory represented by the scatterlist.
 */
static inline unsigned int i915_sg_dma_sizes(struct scatterlist *sg)
{
	unsigned int page_sizes;

	page_sizes = 0;
	while (sg && sg_dma_len(sg)) {
		GEM_BUG_ON(sg->offset);
		GEM_BUG_ON(!IS_ALIGNED(sg_dma_len(sg), PAGE_SIZE));
		page_sizes |= sg_dma_len(sg);
		sg = __sg_next(sg);
	}

	return page_sizes;
}
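
/*
 * Example (sketch): since segment lengths are or'ed together, and each
 * segment starts at offset 0 and is page aligned (see the GEM_BUG_ONs
 * above), a set 2M bit implies at least one contiguous run of 2M or
 * more. I915_GTT_PAGE_SIZE_2M is assumed from i915's GTT headers.
 *
 *	unsigned int sizes = i915_sg_dma_sizes(st->sgl);
 *	bool has_2M_chunk = sizes & I915_GTT_PAGE_SIZE_2M;
 */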

static inline unsigned int i915_sg_segment_size(struct device *dev)
{
	size_t max = min_t(size_t, UINT_MAX, dma_max_mapping_size(dev));

	/*
	 * For Xen PV guests pages aren't contiguous in DMA (machine) address
	 * space.  The DMA API takes care of that both in dma_alloc_* (by
	 * calling into the hypervisor to make the pages contiguous) and in
	 * dma_map_* (by bounce buffering).  But i915 ignores the coherency
	 * aspects of the DMA API and thus can't cope with bounce buffering
	 * actually happening, so add a hack here to force small allocations
	 * and mappings when running in PV mode on Xen.
	 *
	 * Note this will still break if bounce buffering is required for other
	 * reasons, like confidential computing hypervisors or PCIe root ports
	 * with addressing limitations.
	 */
	if (xen_pv_domain())
		max = PAGE_SIZE;
	return round_down(max, PAGE_SIZE);
}
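
/*
 * Example usage (illustrative sketch): cap segment lengths when building
 * a scatterlist from an array of pages, as i915's shmem and userptr
 * backends do; 'st', 'pages', 'num_pages' and 'size' are assumptions.
 *
 *	unsigned int max_segment = i915_sg_segment_size(dev);
 *
 *	err = sg_alloc_table_from_pages_segment(st, pages, num_pages, 0,
 *						size, max_segment,
 *						GFP_KERNEL);
 */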

bool i915_sg_trim(struct sg_table *orig_st);

/**
 * struct i915_refct_sgt_ops - Operations structure for struct i915_refct_sgt
 */
struct i915_refct_sgt_ops {
	/**
	 * @release: Free the memory of the struct i915_refct_sgt
	 */
	void (*release)(struct kref *ref);
};

/**
 * struct i915_refct_sgt - A refcounted scatter-gather table
 * @kref: struct kref for refcounting
 * @table: struct sg_table holding the scatter-gather table itself. Note that
 * @table->sgl = NULL can be used to determine whether a scatter-gather table
 * is present or not.
 * @size: The size in bytes of the underlying memory buffer
 * @ops: The operations structure.
 */
struct i915_refct_sgt {
	struct kref kref;
	struct sg_table table;
	size_t size;
	const struct i915_refct_sgt_ops *ops;
};

/**
 * i915_refct_sgt_put - Put a refcounted sg-table
 * @rsgt: the struct i915_refct_sgt to put.
 */
static inline void i915_refct_sgt_put(struct i915_refct_sgt *rsgt)
{
	if (rsgt)
		kref_put(&rsgt->kref, rsgt->ops->release);
}

/**
 * i915_refct_sgt_get - Get a refcounted sg-table
 * @rsgt: the struct i915_refct_sgt to get.
 */
static inline struct i915_refct_sgt *
i915_refct_sgt_get(struct i915_refct_sgt *rsgt)
{
	kref_get(&rsgt->kref);
	return rsgt;
}
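
/*
 * Example (sketch): a second user takes its own reference and drops it
 * when done; ops->release runs on the final put. 'other' is a
 * hypothetical holder.
 *
 *	other->rsgt = i915_refct_sgt_get(rsgt);
 *	...
 *	i915_refct_sgt_put(other->rsgt);
 */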

/**
 * __i915_refct_sgt_init - Initialize a refcounted sg-table with a custom
 * operations structure
 * @rsgt: The struct i915_refct_sgt to initialize.
 * @size: Size in bytes of the underlying memory buffer.
 * @ops: A customized operations structure in case the refcounted sg-table
 * is embedded into another structure.
 */
static inline void __i915_refct_sgt_init(struct i915_refct_sgt *rsgt,
					 size_t size,
					 const struct i915_refct_sgt_ops *ops)
{
	kref_init(&rsgt->kref);
	rsgt->table.sgl = NULL;
	rsgt->size = size;
	rsgt->ops = ops;
}
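
/*
 * Example (illustrative sketch, hypothetical names): embed the
 * refcounted sg-table in a larger object and free the whole container
 * from a custom release callback, similar to how i915's TTM backend
 * embeds one.
 *
 *	struct my_backing_store {
 *		struct i915_refct_sgt rsgt;
 *	};
 *
 *	static void my_release(struct kref *ref)
 *	{
 *		struct my_backing_store *bs =
 *			container_of(ref, typeof(*bs), rsgt.kref);
 *
 *		sg_free_table(&bs->rsgt.table);
 *		kfree(bs);
 *	}
 *
 *	static const struct i915_refct_sgt_ops my_ops = {
 *		.release = my_release,
 *	};
 *
 *	__i915_refct_sgt_init(&bs->rsgt, size, &my_ops);
 */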

void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size);

struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
					      u64 region_start,
					      u32 page_alignment);

struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
						     u64 region_start,
						     u32 page_alignment);

#endif