v4.17 — drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

#define AMDGPU_BO_LIST_MAX_PRIORITY	32u
#define AMDGPU_BO_LIST_NUM_BUCKETS	(AMDGPU_BO_LIST_MAX_PRIORITY + 1)
 37
 38static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 39				     struct drm_file *filp,
 40				     struct amdgpu_bo_list *list,
 41				     struct drm_amdgpu_bo_list_entry *info,
 42				     unsigned num_entries);
 43
 44static void amdgpu_bo_list_release_rcu(struct kref *ref)
 45{
 46	unsigned i;
 47	struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
 48						   refcount);
 49
 50	for (i = 0; i < list->num_entries; ++i)
 51		amdgpu_bo_unref(&list->array[i].robj);
 52
 53	mutex_destroy(&list->lock);
 54	kvfree(list->array);
 55	kfree_rcu(list, rhead);
 56}
 57
 58static int amdgpu_bo_list_create(struct amdgpu_device *adev,
 59				 struct drm_file *filp,
 60				 struct drm_amdgpu_bo_list_entry *info,
 61				 unsigned num_entries,
 62				 int *id)
 63{
 64	int r;
 65	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 66	struct amdgpu_bo_list *list;
 67
 68	list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
 69	if (!list)
 70		return -ENOMEM;
 71
 72	/* initialize bo list*/
 73	mutex_init(&list->lock);
 74	kref_init(&list->refcount);
 75	r = amdgpu_bo_list_set(adev, filp, list, info, num_entries);
 76	if (r) {
 77		kfree(list);
 78		return r;
 79	}
 80
 81	/* idr alloc should be called only after initialization of bo list. */
 82	mutex_lock(&fpriv->bo_list_lock);
 83	r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
 84	mutex_unlock(&fpriv->bo_list_lock);
 85	if (r < 0) {
 86		amdgpu_bo_list_free(list);
 87		return r;
 88	}
 89	*id = r;
 90
 91	return 0;
 92}

static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_remove(&fpriv->bo_list_handles, id);
	mutex_unlock(&fpriv->bo_list_lock);
	if (list)
		kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
}

static int amdgpu_bo_list_set(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     struct amdgpu_bo_list *list,
				     struct drm_amdgpu_bo_list_entry *info,
				     unsigned num_entries)
{
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;

	unsigned last_entry = 0, first_userptr = num_entries;
	unsigned i;
	int r;
	unsigned long total_size = 0;

	array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry), GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *bo;
		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
		if (!gobj) {
			r = -ENOENT;
			goto error_free;
		}

		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_put_unlocked(gobj);

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm) {
			if (usermm != current->mm) {
				amdgpu_bo_unref(&bo);
				r = -EPERM;
				goto error_free;
			}
			entry = &array[--first_userptr];
		} else {
			entry = &array[last_entry++];
		}

		entry->robj = bo;
		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = !entry->robj->prime_shared_count;

		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
			gds_obj = entry->robj;
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
			gws_obj = entry->robj;
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
			oa_obj = entry->robj;

		total_size += amdgpu_bo_size(entry->robj);
		trace_amdgpu_bo_list_set(list, entry->robj);
	}

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	kvfree(list->array);

	list->gds_obj = gds_obj;
	list->gws_obj = gws_obj;
	list->oa_obj = oa_obj;
	list->first_userptr = first_userptr;
	list->array = array;
	list->num_entries = num_entries;

	trace_amdgpu_cs_bo_status(list->num_entries, total_size);
	return 0;

error_free:
	/* Note: userptr entries are stored from the tail of the array
	 * (indices >= first_userptr), so this backwards walk over the
	 * first i slots does not always reach all of them and their
	 * references can leak; the v6.2 version below unwinds both ends
	 * of the array explicitly. Unused middle slots are zeroed, so
	 * unreffing them is harmless. */
	while (i--)
		amdgpu_bo_unref(&array[i].robj);
	kvfree(array);
	return r;
}

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *result;

	rcu_read_lock();
	result = idr_find(&fpriv->bo_list_handles, id);

	if (result) {
		if (kref_get_unless_zero(&result->refcount)) {
			rcu_read_unlock();
			mutex_lock(&result->lock);
		} else {
			rcu_read_unlock();
			result = NULL;
		}
	} else {
		rcu_read_unlock();
	}

	return result;
}

void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated)
{
	/* This is based on the bucket sort with O(n) time complexity.
	 * An item with priority "i" is added to bucket[i]. The lists are then
	 * concatenated in descending order.
	 */
	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
	unsigned i;

	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&bucket[i]);

	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	for (i = 0; i < list->num_entries; i++) {
		unsigned priority = list->array[i].priority;

		if (!list->array[i].robj->parent)
			list_add_tail(&list->array[i].tv.head,
				      &bucket[priority]);

		list->array[i].user_pages = NULL;
	}

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		list_splice(&bucket[i], validated);
}

void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	mutex_unlock(&list->lock);
	kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
}

void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
{
	unsigned i;

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	mutex_destroy(&list->lock);
	kvfree(list->array);
	kfree(list);
}

int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);

	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	const void __user *uptr = u64_to_user_ptr(args->in.bo_info_ptr);

	struct drm_amdgpu_bo_list_entry *info;
	struct amdgpu_bo_list *list;

	int r;

	info = kvmalloc_array(args->in.bo_number,
			     sizeof(struct drm_amdgpu_bo_list_entry), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	/* copy the handle array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == args->in.bo_info_size)) {
		unsigned long bytes = args->in.bo_number *
			args->in.bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;

	} else {
		unsigned long bytes = min(args->in.bo_info_size, info_size);
		unsigned i;

		memset(info, 0, args->in.bo_number * info_size);
		for (i = 0; i < args->in.bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;

			uptr += args->in.bo_info_size;
		}
	}

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &handle);
		if (r)
			goto error_free;
		break;

	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = -ENOENT;
		list = amdgpu_bo_list_get(fpriv, handle);
		if (!list)
			goto error_free;

		r = amdgpu_bo_list_set(adev, filp, list, info,
					      args->in.bo_number);
		amdgpu_bo_list_put(list);
		if (r)
			goto error_free;

		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	kvfree(info);

	return 0;

error_free:
	kvfree(info);
	return r;
}
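
For reference, this ioctl is driven from userspace through DRM_IOCTL_AMDGPU_BO_LIST and the structures in the amdgpu_drm.h UAPI header. Below is a minimal userspace sketch of a CREATE call, assuming an open DRM fd and two existing GEM handles; the include path for amdgpu_drm.h varies with how libdrm is installed, and real applications normally go through libdrm's amdgpu_bo_list_create() wrapper instead:

#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <amdgpu_drm.h>		/* union drm_amdgpu_bo_list, op codes */

static int create_bo_list(int drm_fd, uint32_t bo_a, uint32_t bo_b,
			  uint32_t *list_handle)
{
	struct drm_amdgpu_bo_list_entry entries[2] = {
		{ .bo_handle = bo_a, .bo_priority = 0 },
		{ .bo_handle = bo_b, .bo_priority = 8 },
	};
	union drm_amdgpu_bo_list args;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_CREATE;
	args.in.bo_number = 2;
	args.in.bo_info_size = sizeof(entries[0]);
	args.in.bo_info_ptr = (uintptr_t)entries;

	if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_BO_LIST, &args))
		return -1;			/* errno carries the kernel error */

	*list_handle = args.out.list_handle;	/* IDR handle, >= 1 */
	return 0;
}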
v6.2 — drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/uaccess.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

#define AMDGPU_BO_LIST_MAX_PRIORITY	32u
#define AMDGPU_BO_LIST_NUM_BUCKETS	(AMDGPU_BO_LIST_MAX_PRIORITY + 1)

static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
{
	struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
						   rhead);
	mutex_destroy(&list->bo_list_mutex);
	kvfree(list);
}

static void amdgpu_bo_list_free(struct kref *ref)
{
	struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
						   refcount);
	struct amdgpu_bo_list_entry *e;

	amdgpu_bo_list_for_each_entry(e, list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		amdgpu_bo_unref(&bo);
	}

	/* Defer the actual free by one RCU grace period so that lookups
	 * still walking the IDR under rcu_read_lock() cannot touch freed
	 * memory. */
	call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
}

int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
			  struct drm_amdgpu_bo_list_entry *info,
			  size_t num_entries, struct amdgpu_bo_list **result)
{
	unsigned last_entry = 0, first_userptr = num_entries;
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo_list *list;
	uint64_t total_size = 0;
	size_t size;
	unsigned i;
	int r;

	if (num_entries > (SIZE_MAX - sizeof(struct amdgpu_bo_list))
				/ sizeof(struct amdgpu_bo_list_entry))
		return -EINVAL;

	size = sizeof(struct amdgpu_bo_list);
	size += num_entries * sizeof(struct amdgpu_bo_list_entry);
	list = kvmalloc(size, GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	kref_init(&list->refcount);
	list->gds_obj = NULL;
	list->gws_obj = NULL;
	list->oa_obj = NULL;

	array = amdgpu_bo_list_array_entry(list, 0);
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *bo;
		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
		if (!gobj) {
			r = -ENOENT;
			goto error_free;
		}

		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_put(gobj);

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm) {
			if (usermm != current->mm) {
				amdgpu_bo_unref(&bo);
				r = -EPERM;
				goto error_free;
			}
			entry = &array[--first_userptr];
		} else {
			entry = &array[last_entry++];
		}

		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->tv.bo = &bo->tbo;

		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
			list->gds_obj = bo;
		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
			list->gws_obj = bo;
		if (bo->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
			list->oa_obj = bo;

		total_size += amdgpu_bo_size(bo);
		trace_amdgpu_bo_list_set(list, bo);
	}

	list->first_userptr = first_userptr;
	list->num_entries = num_entries;

	trace_amdgpu_cs_bo_status(list->num_entries, total_size);

	mutex_init(&list->bo_list_mutex);
	*result = list;
	return 0;

error_free:
	for (i = 0; i < last_entry; ++i) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);

		amdgpu_bo_unref(&bo);
	}
	for (i = first_userptr; i < num_entries; ++i) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);

		amdgpu_bo_unref(&bo);
	}
	kvfree(list);
	return r;
}

static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_remove(&fpriv->bo_list_handles, id);
	mutex_unlock(&fpriv->bo_list_lock);
	if (list)
		kref_put(&list->refcount, amdgpu_bo_list_free);
}

int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
		       struct amdgpu_bo_list **result)
{
	rcu_read_lock();
	*result = idr_find(&fpriv->bo_list_handles, id);

	if (*result && kref_get_unless_zero(&(*result)->refcount)) {
		rcu_read_unlock();
		return 0;
	}

	rcu_read_unlock();
	return -ENOENT;
}
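
The lookup now returns an error code and, unlike the v4.17 version, takes no lock on the caller's behalf; serializing access to the list contents is left to the caller via the per-list bo_list_mutex initialized in amdgpu_bo_list_create(). A hypothetical caller therefore looks like:

	struct amdgpu_bo_list *list;
	int r;

	r = amdgpu_bo_list_get(fpriv, handle, &list);	/* reference only */
	if (r)
		return r;

	/* ... use the list, taking list->bo_list_mutex where needed ... */

	amdgpu_bo_list_put(list);			/* drop the reference */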

void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated)
{
	/* This is based on the bucket sort with O(n) time complexity.
	 * An item with priority "i" is added to bucket[i]. The lists are then
	 * concatenated in descending order.
	 */
	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
	struct amdgpu_bo_list_entry *e;
	unsigned i;

	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&bucket[i]);

	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	amdgpu_bo_list_for_each_entry(e, list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		unsigned priority = e->priority;

		if (!bo->parent)
			list_add_tail(&e->tv.head, &bucket[priority]);

		e->user_pages = NULL;
		e->range = NULL;
	}

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		list_splice(&bucket[i], validated);
}
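
Each list_splice() call inserts a bucket at the head of validated, so the bucket spliced last (highest priority) ends up first in the final list, while list_add_tail() preserves submission order within a bucket, which is what makes the sort stable. A toy trace with hypothetical entries A(priority 2), B(0), C(2), D(1):

	bucket[0] = {B}, bucket[1] = {D}, bucket[2] = {A, C}
	splice bucket[0]: validated = B
	splice bucket[1]: validated = D, B
	splice bucket[2]: validated = A, C, D, B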

void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	kref_put(&list->refcount, amdgpu_bo_list_free);
}

int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
				      struct drm_amdgpu_bo_list_entry **info_param)
{
	const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	struct drm_amdgpu_bo_list_entry *info;
	int r;

	info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	/* copy the handle array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == in->bo_info_size)) {
		unsigned long bytes = in->bo_number *
			in->bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;

	} else {
		/* Userspace was built against a different UAPI revision:
		 * copy min(both sizes) per element, stride by the size
		 * userspace declared, and leave the remainder zero-filled. */
		unsigned long bytes = min(in->bo_info_size, info_size);
		unsigned i;

		memset(info, 0, in->bo_number * info_size);
		for (i = 0; i < in->bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;

			uptr += in->bo_info_size;
		}
	}

	*info_param = info;
	return 0;

error_free:
	kvfree(info);
	return r;
}

int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	struct drm_amdgpu_bo_list_entry *info = NULL;
	struct amdgpu_bo_list *list, *old;
	int r;

	r = amdgpu_bo_create_list_entry_array(&args->in, &info);
	if (r)
		return r;

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &list);
		if (r)
			goto error_free;

		mutex_lock(&fpriv->bo_list_lock);
		r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
		mutex_unlock(&fpriv->bo_list_lock);
		if (r < 0)
			goto error_put_list;

		handle = r;
		break;

	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
					  &list);
		if (r)
			goto error_free;

		mutex_lock(&fpriv->bo_list_lock);
		old = idr_replace(&fpriv->bo_list_handles, list, handle);
		mutex_unlock(&fpriv->bo_list_lock);

		if (IS_ERR(old)) {
			r = PTR_ERR(old);
			goto error_put_list;
		}

		amdgpu_bo_list_put(old);
		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	kvfree(info);

	return 0;

error_put_list:
	amdgpu_bo_list_put(list);

error_free:
	kvfree(info);
	return r;
}
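
Note that OP_UPDATE no longer edits the existing list in place as it did in v4.17: a fresh list is built, idr_replace() swaps it in under bo_list_lock, and the old list is released through amdgpu_bo_list_put(), so concurrent readers keep using the old list until they drop their reference. From userspace the call looks the same as CREATE except for the operation and handle; a sketch reusing the hypothetical entries array from the v4.17 example above:

	union drm_amdgpu_bo_list args;

	memset(&args, 0, sizeof(args));
	args.in.operation = AMDGPU_BO_LIST_OP_UPDATE;
	args.in.list_handle = list_handle;	/* handle from a prior CREATE */
	args.in.bo_number = n;
	args.in.bo_info_size = sizeof(struct drm_amdgpu_bo_list_entry);
	args.in.bo_info_ptr = (uintptr_t)entries;

	if (ioctl(drm_fd, DRM_IOCTL_AMDGPU_BO_LIST, &args))
		return -1;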