/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/uaccess.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

#define AMDGPU_BO_LIST_MAX_PRIORITY	32u
#define AMDGPU_BO_LIST_NUM_BUCKETS	(AMDGPU_BO_LIST_MAX_PRIORITY + 1)

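/* RCU callback that actually frees the BO list. Lookups in
 * amdgpu_bo_list_get() walk the IDR under rcu_read_lock(), so the
 * backing memory must only be released after a grace period.
 */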
static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
{
	struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
						   rhead);

	kvfree(list);
}

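/* Final kref release: drop the reference each entry holds on its BO,
 * then hand the list itself to RCU for deferred freeing.
 */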
static void amdgpu_bo_list_free(struct kref *ref)
{
	struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
						   refcount);
	struct amdgpu_bo_list_entry *e;

	amdgpu_bo_list_for_each_entry(e, list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		amdgpu_bo_unref(&bo);
	}

	call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
}

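/* Create a BO list from an array of userspace-provided entries.
 *
 * The list header and its entry array are allocated as a single block,
 * so the entry count is checked up front against SIZE_MAX to avoid an
 * integer overflow in the size calculation. Regular BOs are filled in
 * from the front of the array and userptr BOs from the back, which
 * keeps all userptr entries contiguous starting at first_userptr.
 */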
int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
			  struct drm_amdgpu_bo_list_entry *info,
			  unsigned num_entries, struct amdgpu_bo_list **result)
{
	unsigned last_entry = 0, first_userptr = num_entries;
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo_list *list;
	uint64_t total_size = 0;
	size_t size;
	unsigned i;
	int r;

	if (num_entries > (SIZE_MAX - sizeof(struct amdgpu_bo_list))
				/ sizeof(struct amdgpu_bo_list_entry))
		return -EINVAL;

	size = sizeof(struct amdgpu_bo_list);
	size += num_entries * sizeof(struct amdgpu_bo_list_entry);
	list = kvmalloc(size, GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	kref_init(&list->refcount);
	list->gds_obj = NULL;
	list->gws_obj = NULL;
	list->oa_obj = NULL;

	array = amdgpu_bo_list_array_entry(list, 0);
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *bo;
		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
		if (!gobj) {
			r = -ENOENT;
			goto error_free;
		}

		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_put_unlocked(gobj);

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm) {
			if (usermm != current->mm) {
				amdgpu_bo_unref(&bo);
				r = -EPERM;
				goto error_free;
			}
			entry = &array[--first_userptr];
		} else {
			entry = &array[last_entry++];
		}

		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->tv.bo = &bo->tbo;

		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
			list->gds_obj = bo;
		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
			list->gws_obj = bo;
		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
			list->oa_obj = bo;

		total_size += amdgpu_bo_size(bo);
		trace_amdgpu_bo_list_set(list, bo);
	}

	list->first_userptr = first_userptr;
	list->num_entries = num_entries;

	trace_amdgpu_cs_bo_status(list->num_entries, total_size);

	*result = list;
	return 0;

error_free:
	for (i = 0; i < last_entry; ++i) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);

		amdgpu_bo_unref(&bo);
	}
	for (i = first_userptr; i < num_entries; ++i) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);

		amdgpu_bo_unref(&bo);
	}
	kvfree(list);
	return r;
}

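/* Remove the list from the per-file IDR and drop the reference the
 * IDR held; the list is freed once any concurrent users drop theirs.
 */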
static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_remove(&fpriv->bo_list_handles, id);
	mutex_unlock(&fpriv->bo_list_lock);
	if (list)
		kref_put(&list->refcount, amdgpu_bo_list_free);
}

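/* Look up a BO list by handle and acquire a reference to it. The
 * lookup runs under RCU, so kref_get_unless_zero() is required to
 * avoid racing with a concurrent destroy that has already dropped
 * the last reference.
 */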
int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
		       struct amdgpu_bo_list **result)
{
	rcu_read_lock();
	*result = idr_find(&fpriv->bo_list_handles, id);

	if (*result && kref_get_unless_zero(&(*result)->refcount)) {
		rcu_read_unlock();
		return 0;
	}

	rcu_read_unlock();
	return -ENOENT;
}

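/* Build the list of BOs to validate for command submission, ordered by
 * priority. BOs that have a parent are not added to the validated list
 * here; they are handled through their parent instead.
 */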
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated)
{
	/* This is based on bucket sort with O(n) time complexity.
	 * An item with priority "i" is added to bucket[i]. The lists are then
	 * concatenated in descending order.
	 */
	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
	struct amdgpu_bo_list_entry *e;
	unsigned i;

	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&bucket[i]);

	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	amdgpu_bo_list_for_each_entry(e, list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		unsigned priority = e->priority;

		if (!bo->parent)
			list_add_tail(&e->tv.head, &bucket[priority]);

		e->user_pages = NULL;
	}

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		list_splice(&bucket[i], validated);
}

void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	kref_put(&list->refcount, amdgpu_bo_list_free);
}

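/* Copy the userspace array of BO list entries into a kernel buffer.
 *
 * When the userspace entry size matches the kernel's, the whole array
 * is copied in one go. Otherwise the entries are copied one at a time,
 * truncating or zero-extending each to the kernel layout, so differing
 * userspace entry sizes keep working.
 */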
int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
				      struct drm_amdgpu_bo_list_entry **info_param)
{
	const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	struct drm_amdgpu_bo_list_entry *info;
	int r;

	info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	/* copy the handle array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == in->bo_info_size)) {
		unsigned long bytes = in->bo_number *
			in->bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;

	} else {
		unsigned long bytes = min(in->bo_info_size, info_size);
		unsigned i;

		memset(info, 0, in->bo_number * info_size);
		for (i = 0; i < in->bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;

			uptr += in->bo_info_size;
		}
	}

	*info_param = info;
	return 0;

error_free:
	kvfree(info);
	return r;
}

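/* Handler for the AMDGPU_BO_LIST ioctl: dispatches the CREATE, DESTROY
 * and UPDATE operations on the per-file BO list handles. CREATE
 * allocates a new list and publishes it in the IDR; UPDATE builds a
 * replacement list and swaps it in, dropping the old one.
 */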
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	struct drm_amdgpu_bo_list_entry *info = NULL;
	struct amdgpu_bo_list *list, *old;
	int r;

	r = amdgpu_bo_create_list_entry_array(&args->in, &info);
	if (r)
		return r;

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &list);
		if (r)
			goto error_free;

		mutex_lock(&fpriv->bo_list_lock);
		r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
		mutex_unlock(&fpriv->bo_list_lock);
		if (r < 0)
			goto error_put_list;

		handle = r;
		break;

	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &list);
		if (r)
			goto error_free;

		mutex_lock(&fpriv->bo_list_lock);
		old = idr_replace(&fpriv->bo_list_handles, list, handle);
		mutex_unlock(&fpriv->bo_list_lock);

		if (IS_ERR(old)) {
			r = PTR_ERR(old);
			goto error_put_list;
		}

		amdgpu_bo_list_put(old);
		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	kvfree(info);

	return 0;

error_put_list:
	amdgpu_bo_list_put(list);

error_free:
	kvfree(info);
	return r;
}