Linux Audio

Check our new training course

Loading...
  1/*
  2 * SPDX-License-Identifier: MIT
  3 *
  4 * Copyright © 2016 Intel Corporation
  5 */
  6
  7#include "i915_scatterlist.h"
  8#include "i915_ttm_buddy_manager.h"
  9
 10#include <drm/drm_buddy.h>
 11#include <drm/drm_mm.h>
 12
 13#include <linux/slab.h>
 14
 15bool i915_sg_trim(struct sg_table *orig_st)
 16{
 17	struct sg_table new_st;
 18	struct scatterlist *sg, *new_sg;
 19	unsigned int i;
 20
 21	if (orig_st->nents == orig_st->orig_nents)
 22		return false;
 23
 24	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
 25		return false;
 26
 27	new_sg = new_st.sgl;
 28	for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
 29		sg_set_page(new_sg, sg_page(sg), sg->length, 0);
 30		sg_dma_address(new_sg) = sg_dma_address(sg);
 31		sg_dma_len(new_sg) = sg_dma_len(sg);
 32
 33		new_sg = sg_next(new_sg);
 34	}
 35	GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
 36
 37	sg_free_table(orig_st);
 38
 39	*orig_st = new_st;
 40	return true;
 41}
 42
 43static void i915_refct_sgt_release(struct kref *ref)
 44{
 45	struct i915_refct_sgt *rsgt =
 46		container_of(ref, typeof(*rsgt), kref);
 47
 48	sg_free_table(&rsgt->table);
 49	kfree(rsgt);
 50}
 51
/* Default ops: free the sg_table and the rsgt itself on final kref put. */
static const struct i915_refct_sgt_ops rsgt_ops = {
	.release = i915_refct_sgt_release
};
 55
/**
 * i915_refct_sgt_init - Initialize a struct i915_refct_sgt with default ops
 * @rsgt: The struct i915_refct_sgt to initialize.
 * @size: The size of the underlying memory buffer.
 *
 * The default ops release the embedded sg_table and kfree() the rsgt
 * allocation itself when the last reference is dropped, so @rsgt must
 * have been allocated with kmalloc() (see i915_refct_sgt_release()).
 */
void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)
{
	__i915_refct_sgt_init(rsgt, size, &rsgt_ops);
}
 65
/**
 * i915_rsgt_from_mm_node - Create a refcounted sg_table from a struct
 * drm_mm_node
 * @node: The drm_mm_node. @node->start and @node->size are in pages.
 * @region_start: An offset to add to the dma addresses of the sg list.
 * @page_alignment: Required page alignment for each sg entry. Power of two.
 *
 * Create a struct sg_table, initializing it from a struct drm_mm_node,
 * taking a maximum segment length into account, splitting into segments
 * if necessary.
 *
 * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative
 * error code cast to an error pointer on failure.
 */
struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,
					      u64 region_start,
					      u32 page_alignment)
{
	/* Largest per-entry length that still honours @page_alignment. */
	const u32 max_segment = round_down(UINT_MAX, page_alignment);
	const u32 segment_pages = max_segment >> PAGE_SHIFT;
	u64 block_size, offset, prev_end;
	struct i915_refct_sgt *rsgt;
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(!max_segment);

	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
	if (!rsgt)
		return ERR_PTR(-ENOMEM);

	/* node->size is in pages, the rsgt size is tracked in bytes. */
	i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT);
	st = &rsgt->table;
	/* restricted by sg_alloc_table */
	if (WARN_ON(overflows_type(DIV_ROUND_UP_ULL(node->size, segment_pages),
				   unsigned int))) {
		i915_refct_sgt_put(rsgt);
		return ERR_PTR(-E2BIG);
	}

	/* Worst case: one entry per full max_segment-sized chunk. */
	if (sg_alloc_table(st, DIV_ROUND_UP_ULL(node->size, segment_pages),
			   GFP_KERNEL)) {
		i915_refct_sgt_put(rsgt);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	st->nents = 0;
	/*
	 * Sentinel that forces the first iteration to open a new entry:
	 * offset is page-shifted and hence can never equal all-ones.
	 */
	prev_end = (resource_size_t)-1;
	block_size = node->size << PAGE_SHIFT;
	offset = node->start << PAGE_SHIFT;

	while (block_size) {
		u64 len;

		/* Start a new entry on a discontinuity or a full segment. */
		if (offset != prev_end || sg->length >= max_segment) {
			if (st->nents)
				sg = __sg_next(sg);

			sg_dma_address(sg) = region_start + offset;
			GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),
					       page_alignment));
			sg_dma_len(sg) = 0;
			sg->length = 0;
			st->nents++;
		}

		/* Grow the current entry up to the segment limit. */
		len = min_t(u64, block_size, max_segment - sg->length);
		sg->length += len;
		sg_dma_len(sg) += len;

		offset += len;
		block_size -= len;

		prev_end = offset;
	}

	sg_mark_end(sg);
	/* Release the unused tail entries; failure here is harmless. */
	i915_sg_trim(st);

	return rsgt;
}
148
/**
 * i915_rsgt_from_buddy_resource - Create a refcounted sg_table from a struct
 * i915_buddy_block list
 * @res: The struct i915_ttm_buddy_resource.
 * @region_start: An offset to add to the dma addresses of the sg list.
 * @page_alignment: Required page alignment for each sg entry. Power of two.
 *
 * Create a struct sg_table, initializing it from struct i915_buddy_block list,
 * taking a maximum segment length into account, splitting into segments
 * if necessary.
 *
 * Return: A pointer to a kmalloced struct i915_refct_sgts on success, negative
 * error code cast to an error pointer on failure.
 */
struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,
						     u64 region_start,
						     u32 page_alignment)
{
	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
	const u64 size = res->size;
	/* Largest per-entry length that still honours @page_alignment. */
	const u32 max_segment = round_down(UINT_MAX, page_alignment);
	struct drm_buddy *mm = bman_res->mm;
	struct list_head *blocks = &bman_res->blocks;
	struct drm_buddy_block *block;
	struct i915_refct_sgt *rsgt;
	struct scatterlist *sg;
	struct sg_table *st;
	resource_size_t prev_end;

	GEM_BUG_ON(list_empty(blocks));
	GEM_BUG_ON(!max_segment);

	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);
	if (!rsgt)
		return ERR_PTR(-ENOMEM);

	i915_refct_sgt_init(rsgt, size);
	st = &rsgt->table;
	/* restricted by sg_alloc_table */
	if (WARN_ON(overflows_type(PFN_UP(res->size), unsigned int))) {
		i915_refct_sgt_put(rsgt);
		return ERR_PTR(-E2BIG);
	}

	/* Worst case: one entry per page; trimmed after the walk below. */
	if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL)) {
		i915_refct_sgt_put(rsgt);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	st->nents = 0;
	/* Sentinel guaranteeing the first iteration opens a new entry. */
	prev_end = (resource_size_t)-1;

	list_for_each_entry(block, blocks, link) {
		u64 block_size, offset;

		/* Clamp the final block to the remaining resource size. */
		block_size = min_t(u64, size, drm_buddy_block_size(mm, block));
		offset = drm_buddy_block_offset(block);

		while (block_size) {
			u64 len;

			/*
			 * Start a new entry when this block is not contiguous
			 * with the previous one, or the entry is full.
			 */
			if (offset != prev_end || sg->length >= max_segment) {
				if (st->nents)
					sg = __sg_next(sg);

				sg_dma_address(sg) = region_start + offset;
				GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg),
						       page_alignment));
				sg_dma_len(sg) = 0;
				sg->length = 0;
				st->nents++;
			}

			/* Grow the current entry up to the segment limit. */
			len = min_t(u64, block_size, max_segment - sg->length);
			sg->length += len;
			sg_dma_len(sg) += len;

			offset += len;
			block_size -= len;

			prev_end = offset;
		}
	}

	sg_mark_end(sg);
	/* Release the unused tail entries; failure here is harmless. */
	i915_sg_trim(st);

	return rsgt;
}
239
240#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
241#include "selftests/scatterlist.c"
242#endif
1