/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

#define AMDGPU_BO_LIST_MAX_PRIORITY	32u
#define AMDGPU_BO_LIST_NUM_BUCKETS	(AMDGPU_BO_LIST_MAX_PRIORITY + 1)

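/*
 * amdgpu_bo_list_create - allocate a new BO list and register it
 *
 * Allocates an empty amdgpu_bo_list, assigns it a handle in the per-file
 * fpriv->bo_list_handles IDR and returns with the list's mutex held so the
 * caller can fill it in. Returns 0 on success or a negative error code.
 */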
static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
				 struct amdgpu_bo_list **result,
				 int *id)
{
	int r;

	*result = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
	if (!*result)
		return -ENOMEM;

	mutex_lock(&fpriv->bo_list_lock);
	r = idr_alloc(&fpriv->bo_list_handles, *result,
		      1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&fpriv->bo_list_lock);
		kfree(*result);
		return r;
	}
	*id = r;

	mutex_init(&(*result)->lock);
	(*result)->num_entries = 0;
	(*result)->array = NULL;

	mutex_lock(&(*result)->lock);
	mutex_unlock(&fpriv->bo_list_lock);

	return 0;
}

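/*
 * amdgpu_bo_list_destroy - remove a BO list handle and free the list
 *
 * Looks the list up by its handle under fpriv->bo_list_lock, drops it from
 * the per-file IDR and releases all buffer references held by the list.
 */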
static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_find(&fpriv->bo_list_handles, id);
	if (list) {
		mutex_lock(&list->lock);
		idr_remove(&fpriv->bo_list_handles, id);
		mutex_unlock(&list->lock);
		amdgpu_bo_list_free(list);
	}
	mutex_unlock(&fpriv->bo_list_lock);
}

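/*
 * amdgpu_bo_list_set - (re)build the entry array of a BO list
 *
 * Resolves every GEM handle supplied by userspace, takes a reference on the
 * underlying BO and fills the entry array. Regular BOs are placed at the
 * front of the array and userptr BOs at the back so they can be handled
 * separately later; userptr BOs belonging to another process are rejected
 * with -EPERM. GDS, GWS and OA buffers are remembered separately. On success
 * the old entries are released and the array is replaced by the new one.
 */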
static int amdgpu_bo_list_set(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     struct amdgpu_bo_list *list,
				     struct drm_amdgpu_bo_list_entry *info,
				     unsigned num_entries)
{
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;

	unsigned last_entry = 0, first_userptr = num_entries;
	unsigned i;
	int r;

	array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
	if (!array)
		return -ENOMEM;
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *bo;
		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
		if (!gobj) {
			r = -ENOENT;
			goto error_free;
		}

		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_unreference_unlocked(gobj);

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm) {
			if (usermm != current->mm) {
				amdgpu_bo_unref(&bo);
				r = -EPERM;
				goto error_free;
			}
			entry = &array[--first_userptr];
		} else {
			entry = &array[last_entry++];
		}

		entry->robj = bo;
		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = true;

		if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
			gds_obj = entry->robj;
		if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
			gws_obj = entry->robj;
		if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
			oa_obj = entry->robj;

		trace_amdgpu_bo_list_set(list, entry->robj);
	}

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	drm_free_large(list->array);

	list->gds_obj = gds_obj;
	list->gws_obj = gws_obj;
	list->oa_obj = oa_obj;
	list->first_userptr = first_userptr;
	list->array = array;
	list->num_entries = num_entries;

	return 0;

error_free:
	while (i--)
		amdgpu_bo_unref(&array[i].robj);
	drm_free_large(array);
	return r;
}

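/*
 * amdgpu_bo_list_get - look up a BO list by handle
 *
 * Returns the list with its mutex held, or NULL if the handle is unknown.
 * The caller releases the lock again with amdgpu_bo_list_put().
 */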
struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *result;

	mutex_lock(&fpriv->bo_list_lock);
	result = idr_find(&fpriv->bo_list_handles, id);
	if (result)
		mutex_lock(&result->lock);
	mutex_unlock(&fpriv->bo_list_lock);
	return result;
}

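/*
 * amdgpu_bo_list_get_list - add the list's entries to a validation list
 *
 * Sorts the entries by priority with a stable bucket sort, splices the
 * buckets into the validated list provided by the caller and resets each
 * entry's user_pages pointer.
 */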
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated)
{
	/* This is based on the bucket sort with O(n) time complexity.
	 * An item with priority "i" is added to bucket[i]. The lists are then
	 * concatenated in descending order.
	 */
	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
	unsigned i;

	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&bucket[i]);

	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	for (i = 0; i < list->num_entries; i++) {
		unsigned priority = list->array[i].priority;

		list_add_tail(&list->array[i].tv.head,
			      &bucket[priority]);
		list->array[i].user_pages = NULL;
	}

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		list_splice(&bucket[i], validated);
}

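/*
 * amdgpu_bo_list_put - release the list lock taken by amdgpu_bo_list_get()
 * or amdgpu_bo_list_create()
 */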
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	mutex_unlock(&list->lock);
}

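/*
 * amdgpu_bo_list_free - drop all buffer references, destroy the lock and
 * free the entry array and the list itself
 */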
void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
{
	unsigned i;

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	mutex_destroy(&list->lock);
	drm_free_large(list->array);
	kfree(list);
}

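/*
 * amdgpu_bo_list_ioctl - handle the AMDGPU_BO_LIST ioctl
 *
 * Copies the per-BO info entries from userspace, coping with a bo_info_size
 * that differs from the kernel's structure size, then creates, updates or
 * destroys the BO list according to args->in.operation. On success the
 * resulting handle is returned in args->out.list_handle.
 */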
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);

	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	const void __user *uptr = (const void*)(long)args->in.bo_info_ptr;

	struct drm_amdgpu_bo_list_entry *info;
	struct amdgpu_bo_list *list;

	int r;

	info = drm_malloc_ab(args->in.bo_number,
			     sizeof(struct drm_amdgpu_bo_list_entry));
	if (!info)
		return -ENOMEM;

	/* copy the handle array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == args->in.bo_info_size)) {
		unsigned long bytes = args->in.bo_number *
			args->in.bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;

	} else {
		unsigned long bytes = min(args->in.bo_info_size, info_size);
		unsigned i;

		memset(info, 0, args->in.bo_number * info_size);
		for (i = 0; i < args->in.bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;

			uptr += args->in.bo_info_size;
		}
	}

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(fpriv, &list, &handle);
		if (r)
			goto error_free;

		r = amdgpu_bo_list_set(adev, filp, list, info,
					      args->in.bo_number);
		amdgpu_bo_list_put(list);
		if (r)
			goto error_free;

		break;

	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = -ENOENT;
		list = amdgpu_bo_list_get(fpriv, handle);
		if (!list)
			goto error_free;

		r = amdgpu_bo_list_set(adev, filp, list, info,
					      args->in.bo_number);
		amdgpu_bo_list_put(list);
		if (r)
			goto error_free;

		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	drm_free_large(info);

	return 0;

error_free:
	drm_free_large(info);
	return r;
}