drivers/gpu/drm/drm_context.c (v5.9)
  1/*
  2 * Legacy: Generic DRM Contexts
  3 *
  4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
  5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  6 * All Rights Reserved.
  7 *
  8 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
  9 * Author: Gareth Hughes <gareth@valinux.com>
 10 *
 11 * Permission is hereby granted, free of charge, to any person obtaining a
 12 * copy of this software and associated documentation files (the "Software"),
 13 * to deal in the Software without restriction, including without limitation
 14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 15 * and/or sell copies of the Software, and to permit persons to whom the
 16 * Software is furnished to do so, subject to the following conditions:
 17 *
 18 * The above copyright notice and this permission notice (including the next
 19 * paragraph) shall be included in all copies or substantial portions of the
 20 * Software.
 21 *
 22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 25 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 26 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 27 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 28 * OTHER DEALINGS IN THE SOFTWARE.
 29 */
 30
 31#include <linux/slab.h>
 32#include <linux/uaccess.h>
 33
 34#include <drm/drm_drv.h>
 35#include <drm/drm_file.h>
 36#include <drm/drm_print.h>
 37
 38#include "drm_legacy.h"
 39
 40struct drm_ctx_list {
 41	struct list_head head;
 42	drm_context_t handle;
 43	struct drm_file *tag;
 44};
 45
 46/******************************************************************/
 47/** \name Context bitmap support */
 48/*@{*/
 49
 50/*
 51 * Free a handle from the context bitmap.
 52 *
 53 * \param dev DRM device.
 54 * \param ctx_handle context handle.
 55 *
 56 * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
 57 * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
 58 * lock.
 59 */
 60void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
 61{
 62	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
 63	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 64		return;
 65
 66	mutex_lock(&dev->struct_mutex);
 67	idr_remove(&dev->ctx_idr, ctx_handle);
 68	mutex_unlock(&dev->struct_mutex);
 69}
 70
 71/*
 72 * Context bitmap allocation.
 73 *
 74 * \param dev DRM device.
 75 * \return (non-negative) context handle on success or a negative number on failure.
 76 *
 77 * Allocate a new idr from drm_device::ctx_idr while holding the
 78 * drm_device::struct_mutex lock.
 79 */
 80static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
 81{
 82	int ret;
 83
 84	mutex_lock(&dev->struct_mutex);
 85	ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0,
 86			GFP_KERNEL);
 87	mutex_unlock(&dev->struct_mutex);
 88	return ret;
 89}
 90
 91/*
 92 * Context bitmap initialization.
 93 *
 94 * \param dev DRM device.
 95 *
 96 * Initialise the drm_device::ctx_idr
 97 */
 98void drm_legacy_ctxbitmap_init(struct drm_device * dev)
 99{
100	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
101	    !drm_core_check_feature(dev, DRIVER_LEGACY))
102		return;
103
104	idr_init(&dev->ctx_idr);
105}
106
107/*
108 * Context bitmap cleanup.
109 *
110 * \param dev DRM device.
111 *
112 * Free all idr members using drm_ctx_sarea_free helper function
113 * while holding the drm_device::struct_mutex lock.
114 */
115void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
116{
117	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
118	    !drm_core_check_feature(dev, DRIVER_LEGACY))
119		return;
120
121	mutex_lock(&dev->struct_mutex);
122	idr_destroy(&dev->ctx_idr);
123	mutex_unlock(&dev->struct_mutex);
124}
125
126/**
127 * drm_legacy_ctxbitmap_flush() - Flush all contexts owned by a file
128 * @dev: DRM device to operate on
129 * @file: Open file to flush contexts for
130 *
131 * This iterates over all contexts on @dev and drops them if they're owned by
132 * @file. Note that after this call returns, new contexts might be added if
133 * the file is still alive.
134 */
135void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
136{
137	struct drm_ctx_list *pos, *tmp;
138
139	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
140	    !drm_core_check_feature(dev, DRIVER_LEGACY))
141		return;
142
143	mutex_lock(&dev->ctxlist_mutex);
144
145	list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
146		if (pos->tag == file &&
147		    pos->handle != DRM_KERNEL_CONTEXT) {
148			if (dev->driver->context_dtor)
149				dev->driver->context_dtor(dev, pos->handle);
150
151			drm_legacy_ctxbitmap_free(dev, pos->handle);
152			list_del(&pos->head);
153			kfree(pos);
154		}
155	}
156
157	mutex_unlock(&dev->ctxlist_mutex);
158}
159
160/*@}*/
161
162/******************************************************************/
163/** \name Per Context SAREA Support */
164/*@{*/
165
166/*
167 * Get per-context SAREA.
168 *
169 * \param inode device inode.
170 * \param file_priv DRM file private.
171 * \param cmd command.
172 * \param arg user argument pointing to a drm_ctx_priv_map structure.
173 * \return zero on success or a negative number on failure.
174 *
175 * Gets the map from drm_device::ctx_idr with the handle specified and
176 * returns its handle.
177 */
178int drm_legacy_getsareactx(struct drm_device *dev, void *data,
179			   struct drm_file *file_priv)
180{
181	struct drm_ctx_priv_map *request = data;
182	struct drm_local_map *map;
183	struct drm_map_list *_entry;
184
185	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
186	    !drm_core_check_feature(dev, DRIVER_LEGACY))
187		return -EOPNOTSUPP;
188
189	mutex_lock(&dev->struct_mutex);
190
191	map = idr_find(&dev->ctx_idr, request->ctx_id);
192	if (!map) {
193		mutex_unlock(&dev->struct_mutex);
194		return -EINVAL;
195	}
196
197	request->handle = NULL;
198	list_for_each_entry(_entry, &dev->maplist, head) {
199		if (_entry->map == map) {
200			request->handle =
201			    (void *)(unsigned long)_entry->user_token;
202			break;
203		}
204	}
205
206	mutex_unlock(&dev->struct_mutex);
207
208	if (request->handle == NULL)
209		return -EINVAL;
210
211	return 0;
212}
213
214/*
215 * Set per-context SAREA.
216 *
217 * \param inode device inode.
218 * \param file_priv DRM file private.
219 * \param cmd command.
220 * \param arg user argument pointing to a drm_ctx_priv_map structure.
221 * \return zero on success or a negative number on failure.
222 *
223 * Searches the mapping specified in \p arg and update the entry in
224 * drm_device::ctx_idr with it.
225 */
226int drm_legacy_setsareactx(struct drm_device *dev, void *data,
227			   struct drm_file *file_priv)
228{
229	struct drm_ctx_priv_map *request = data;
230	struct drm_local_map *map = NULL;
231	struct drm_map_list *r_list = NULL;
232
233	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
234	    !drm_core_check_feature(dev, DRIVER_LEGACY))
235		return -EOPNOTSUPP;
236
237	mutex_lock(&dev->struct_mutex);
238	list_for_each_entry(r_list, &dev->maplist, head) {
239		if (r_list->map
240		    && r_list->user_token == (unsigned long) request->handle)
241			goto found;
242	}
243      bad:
244	mutex_unlock(&dev->struct_mutex);
245	return -EINVAL;
246
247      found:
248	map = r_list->map;
249	if (!map)
250		goto bad;
251
252	if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
253		goto bad;
254
255	mutex_unlock(&dev->struct_mutex);
256
257	return 0;
258}
259
260/*@}*/
261
262/******************************************************************/
263/** \name The actual DRM context handling routines */
264/*@{*/
265
266/*
267 * Switch context.
268 *
269 * \param dev DRM device.
270 * \param old old context handle.
271 * \param new new context handle.
272 * \return zero on success or a negative number on failure.
273 *
274 * Attempt to set drm_device::context_flag.
275 */
276static int drm_context_switch(struct drm_device * dev, int old, int new)
277{
278	if (test_and_set_bit(0, &dev->context_flag)) {
279		DRM_ERROR("Reentering -- FIXME\n");
280		return -EBUSY;
281	}
282
283	DRM_DEBUG("Context switch from %d to %d\n", old, new);
284
285	if (new == dev->last_context) {
286		clear_bit(0, &dev->context_flag);
287		return 0;
288	}
289
290	return 0;
291}
292
293/*
294 * Complete context switch.
295 *
296 * \param dev DRM device.
297 * \param new new context handle.
298 * \return zero on success or a negative number on failure.
299 *
300 * Updates drm_device::last_context and drm_device::last_switch. Verifies the
301 * hardware lock is held, clears the drm_device::context_flag and wakes up
302 * drm_device::context_wait.
303 */
304static int drm_context_switch_complete(struct drm_device *dev,
305				       struct drm_file *file_priv, int new)
306{
307	dev->last_context = new;	/* PRE/POST: This is the _only_ writer. */
308
309	if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
310		DRM_ERROR("Lock isn't held after context switch\n");
311	}
312
313	/* If a context switch is ever initiated
314	   when the kernel holds the lock, release
315	   that lock here. */
316	clear_bit(0, &dev->context_flag);
317
318	return 0;
319}
320
321/*
322 * Reserve contexts.
323 *
324 * \param inode device inode.
325 * \param file_priv DRM file private.
326 * \param cmd command.
327 * \param arg user argument pointing to a drm_ctx_res structure.
328 * \return zero on success or a negative number on failure.
329 */
330int drm_legacy_resctx(struct drm_device *dev, void *data,
331		      struct drm_file *file_priv)
332{
333	struct drm_ctx_res *res = data;
334	struct drm_ctx ctx;
335	int i;
336
337	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
338	    !drm_core_check_feature(dev, DRIVER_LEGACY))
339		return -EOPNOTSUPP;
340
341	if (res->count >= DRM_RESERVED_CONTEXTS) {
342		memset(&ctx, 0, sizeof(ctx));
343		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
344			ctx.handle = i;
345			if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
346				return -EFAULT;
347		}
348	}
349	res->count = DRM_RESERVED_CONTEXTS;
350
351	return 0;
352}
353
354/*
355 * Add context.
356 *
357 * \param inode device inode.
358 * \param file_priv DRM file private.
359 * \param cmd command.
360 * \param arg user argument pointing to a drm_ctx structure.
361 * \return zero on success or a negative number on failure.
362 *
363 * Get a new handle for the context and copy to userspace.
364 */
365int drm_legacy_addctx(struct drm_device *dev, void *data,
366		      struct drm_file *file_priv)
367{
368	struct drm_ctx_list *ctx_entry;
369	struct drm_ctx *ctx = data;
370	int tmp_handle;
371
372	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
373	    !drm_core_check_feature(dev, DRIVER_LEGACY))
374		return -EOPNOTSUPP;
375
376	tmp_handle = drm_legacy_ctxbitmap_next(dev);
377	if (tmp_handle == DRM_KERNEL_CONTEXT) {
378		/* Skip kernel's context and get a new one. */
379		tmp_handle = drm_legacy_ctxbitmap_next(dev);
380	}
381	DRM_DEBUG("%d\n", tmp_handle);
382	if (tmp_handle < 0) {
383		DRM_DEBUG("Not enough free contexts.\n");
384		/* Should this return -EBUSY instead? */
385		return tmp_handle;
386	}
387
388	ctx->handle = tmp_handle;
389
390	ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
391	if (!ctx_entry) {
392		DRM_DEBUG("out of memory\n");
393		return -ENOMEM;
394	}
395
396	INIT_LIST_HEAD(&ctx_entry->head);
397	ctx_entry->handle = ctx->handle;
398	ctx_entry->tag = file_priv;
399
400	mutex_lock(&dev->ctxlist_mutex);
401	list_add(&ctx_entry->head, &dev->ctxlist);
402	mutex_unlock(&dev->ctxlist_mutex);
403
404	return 0;
405}
406
407/*
408 * Get context.
409 *
410 * \param inode device inode.
411 * \param file_priv DRM file private.
412 * \param cmd command.
413 * \param arg user argument pointing to a drm_ctx structure.
414 * \return zero on success or a negative number on failure.
415 */
416int drm_legacy_getctx(struct drm_device *dev, void *data,
417		      struct drm_file *file_priv)
418{
419	struct drm_ctx *ctx = data;
420
421	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
422	    !drm_core_check_feature(dev, DRIVER_LEGACY))
423		return -EOPNOTSUPP;
424
425	/* This is 0, because we don't handle any context flags */
426	ctx->flags = 0;
427
428	return 0;
429}
430
431/*
432 * Switch context.
433 *
434 * \param inode device inode.
435 * \param file_priv DRM file private.
436 * \param cmd command.
437 * \param arg user argument pointing to a drm_ctx structure.
438 * \return zero on success or a negative number on failure.
439 *
440 * Calls context_switch().
441 */
442int drm_legacy_switchctx(struct drm_device *dev, void *data,
443			 struct drm_file *file_priv)
444{
445	struct drm_ctx *ctx = data;
446
447	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
448	    !drm_core_check_feature(dev, DRIVER_LEGACY))
449		return -EOPNOTSUPP;
450
451	DRM_DEBUG("%d\n", ctx->handle);
452	return drm_context_switch(dev, dev->last_context, ctx->handle);
453}
454
455/*
456 * New context.
457 *
458 * \param inode device inode.
459 * \param file_priv DRM file private.
460 * \param cmd command.
461 * \param arg user argument pointing to a drm_ctx structure.
462 * \return zero on success or a negative number on failure.
463 *
464 * Calls context_switch_complete().
465 */
466int drm_legacy_newctx(struct drm_device *dev, void *data,
467		      struct drm_file *file_priv)
468{
469	struct drm_ctx *ctx = data;
470
471	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
472	    !drm_core_check_feature(dev, DRIVER_LEGACY))
473		return -EOPNOTSUPP;
474
475	DRM_DEBUG("%d\n", ctx->handle);
476	drm_context_switch_complete(dev, file_priv, ctx->handle);
477
478	return 0;
479}
480
481/*
482 * Remove context.
483 *
484 * \param inode device inode.
485 * \param file_priv DRM file private.
486 * \param cmd command.
487 * \param arg user argument pointing to a drm_ctx structure.
488 * \return zero on success or a negative number on failure.
489 *
490 * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
491 */
492int drm_legacy_rmctx(struct drm_device *dev, void *data,
493		     struct drm_file *file_priv)
494{
495	struct drm_ctx *ctx = data;
496
497	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
498	    !drm_core_check_feature(dev, DRIVER_LEGACY))
499		return -EOPNOTSUPP;
500
501	DRM_DEBUG("%d\n", ctx->handle);
502	if (ctx->handle != DRM_KERNEL_CONTEXT) {
503		if (dev->driver->context_dtor)
504			dev->driver->context_dtor(dev, ctx->handle);
505		drm_legacy_ctxbitmap_free(dev, ctx->handle);
506	}
507
508	mutex_lock(&dev->ctxlist_mutex);
509	if (!list_empty(&dev->ctxlist)) {
510		struct drm_ctx_list *pos, *n;
511
512		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
513			if (pos->handle == ctx->handle) {
514				list_del(&pos->head);
515				kfree(pos);
516			}
517		}
518	}
519	mutex_unlock(&dev->ctxlist_mutex);
520
521	return 0;
522}
523
524/*@}*/
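
For orientation, the context-handling ioctls in the listing above are reached from userspace through DRM_IOCTL_ADD_CTX, DRM_IOCTL_NEW_CTX, DRM_IOCTL_SWITCH_CTX and DRM_IOCTL_RM_CTX, which the DRM core routes to drm_legacy_addctx(), drm_legacy_newctx(), drm_legacy_switchctx() and drm_legacy_rmctx(). The snippet below is a minimal, hypothetical sketch of that lifecycle, not code taken from libdrm: the device node path is an assumption, real clients normally go through libdrm's drmCreateContext()/drmSwitchToContext()/drmDestroyContext() wrappers, and on drivers without DRIVER_LEGACY or DRIVER_KMS_LEGACY_CONTEXT these calls fail with EOPNOTSUPP.

/*
 * Hypothetical userspace sketch (not from libdrm): create a legacy DRM
 * context, complete a switch to it, and remove it again.  Error handling
 * is minimal and the device node path is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>		/* may be <libdrm/drm.h> depending on the distro */

int main(void)
{
	struct drm_ctx ctx;
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;

	memset(&ctx, 0, sizeof(ctx));
	if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx) == 0) {	/* drm_legacy_addctx() */
		printf("context handle %u\n", ctx.handle);

		/*
		 * drm_context_switch_complete() expects the heavyweight
		 * hardware lock to be held; it only warns if it is not.
		 */
		ioctl(fd, DRM_IOCTL_NEW_CTX, &ctx);	/* drm_legacy_newctx() */

		ioctl(fd, DRM_IOCTL_RM_CTX, &ctx);	/* drm_legacy_rmctx() */
	}

	close(fd);
	return 0;
}
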
drivers/gpu/drm/drm_context.c (v4.6)
  1/*
  2 * Legacy: Generic DRM Contexts
  3 *
  4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
  5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  6 * All Rights Reserved.
  7 *
  8 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
  9 * Author: Gareth Hughes <gareth@valinux.com>
 10 *
 11 * Permission is hereby granted, free of charge, to any person obtaining a
 12 * copy of this software and associated documentation files (the "Software"),
 13 * to deal in the Software without restriction, including without limitation
 14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 15 * and/or sell copies of the Software, and to permit persons to whom the
 16 * Software is furnished to do so, subject to the following conditions:
 17 *
 18 * The above copyright notice and this permission notice (including the next
 19 * paragraph) shall be included in all copies or substantial portions of the
 20 * Software.
 21 *
 22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 25 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 26 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 27 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 28 * OTHER DEALINGS IN THE SOFTWARE.
 29 */
 30
 31#include <drm/drmP.h>
 32#include "drm_legacy.h"
 33
 34struct drm_ctx_list {
 35	struct list_head head;
 36	drm_context_t handle;
 37	struct drm_file *tag;
 38};
 39
 40/******************************************************************/
 41/** \name Context bitmap support */
 42/*@{*/
 43
 44/**
 45 * Free a handle from the context bitmap.
 46 *
 47 * \param dev DRM device.
 48 * \param ctx_handle context handle.
 49 *
 50 * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
 51 * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
 52 * lock.
 53 */
 54void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
 55{
 56	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
 57	    drm_core_check_feature(dev, DRIVER_MODESET))
 58		return;
 59
 60	mutex_lock(&dev->struct_mutex);
 61	idr_remove(&dev->ctx_idr, ctx_handle);
 62	mutex_unlock(&dev->struct_mutex);
 63}
 64
 65/**
 66 * Context bitmap allocation.
 67 *
 68 * \param dev DRM device.
 69 * \return (non-negative) context handle on success or a negative number on failure.
 70 *
 71 * Allocate a new idr from drm_device::ctx_idr while holding the
 72 * drm_device::struct_mutex lock.
 73 */
 74static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
 75{
 76	int ret;
 77
 78	mutex_lock(&dev->struct_mutex);
 79	ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0,
 80			GFP_KERNEL);
 81	mutex_unlock(&dev->struct_mutex);
 82	return ret;
 83}
 84
 85/**
 86 * Context bitmap initialization.
 87 *
 88 * \param dev DRM device.
 89 *
 90 * Initialise the drm_device::ctx_idr
 91 */
 92void drm_legacy_ctxbitmap_init(struct drm_device * dev)
 93{
 94	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
 95	    drm_core_check_feature(dev, DRIVER_MODESET))
 96		return;
 97
 98	idr_init(&dev->ctx_idr);
 99}
100
101/**
102 * Context bitmap cleanup.
103 *
104 * \param dev DRM device.
105 *
106 * Free all idr members using drm_ctx_sarea_free helper function
107 * while holding the drm_device::struct_mutex lock.
108 */
109void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
110{
111	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
112	    drm_core_check_feature(dev, DRIVER_MODESET))
113		return;
114
115	mutex_lock(&dev->struct_mutex);
116	idr_destroy(&dev->ctx_idr);
117	mutex_unlock(&dev->struct_mutex);
118}
119
120/**
121 * drm_legacy_ctxbitmap_flush() - Flush all contexts owned by a file
122 * @dev: DRM device to operate on
123 * @file: Open file to flush contexts for
124 *
125 * This iterates over all contexts on @dev and drops them if they're owned by
126 * @file. Note that after this call returns, new contexts might be added if
127 * the file is still alive.
128 */
129void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
130{
131	struct drm_ctx_list *pos, *tmp;
132
133	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
134	    drm_core_check_feature(dev, DRIVER_MODESET))
135		return;
136
137	mutex_lock(&dev->ctxlist_mutex);
138
139	list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) {
140		if (pos->tag == file &&
141		    pos->handle != DRM_KERNEL_CONTEXT) {
142			if (dev->driver->context_dtor)
143				dev->driver->context_dtor(dev, pos->handle);
144
145			drm_legacy_ctxbitmap_free(dev, pos->handle);
146			list_del(&pos->head);
147			kfree(pos);
148		}
149	}
150
151	mutex_unlock(&dev->ctxlist_mutex);
152}
153
154/*@}*/
155
156/******************************************************************/
157/** \name Per Context SAREA Support */
158/*@{*/
159
160/**
161 * Get per-context SAREA.
162 *
163 * \param inode device inode.
164 * \param file_priv DRM file private.
165 * \param cmd command.
166 * \param arg user argument pointing to a drm_ctx_priv_map structure.
167 * \return zero on success or a negative number on failure.
168 *
169 * Gets the map from drm_device::ctx_idr with the handle specified and
170 * returns its handle.
171 */
172int drm_legacy_getsareactx(struct drm_device *dev, void *data,
173			   struct drm_file *file_priv)
174{
175	struct drm_ctx_priv_map *request = data;
176	struct drm_local_map *map;
177	struct drm_map_list *_entry;
178
179	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
180	    drm_core_check_feature(dev, DRIVER_MODESET))
181		return -EINVAL;
182
183	mutex_lock(&dev->struct_mutex);
184
185	map = idr_find(&dev->ctx_idr, request->ctx_id);
186	if (!map) {
187		mutex_unlock(&dev->struct_mutex);
188		return -EINVAL;
189	}
190
191	request->handle = NULL;
192	list_for_each_entry(_entry, &dev->maplist, head) {
193		if (_entry->map == map) {
194			request->handle =
195			    (void *)(unsigned long)_entry->user_token;
196			break;
197		}
198	}
199
200	mutex_unlock(&dev->struct_mutex);
201
202	if (request->handle == NULL)
203		return -EINVAL;
204
205	return 0;
206}
207
208/**
209 * Set per-context SAREA.
210 *
211 * \param inode device inode.
212 * \param file_priv DRM file private.
213 * \param cmd command.
214 * \param arg user argument pointing to a drm_ctx_priv_map structure.
215 * \return zero on success or a negative number on failure.
216 *
217 * Searches the mapping specified in \p arg and update the entry in
218 * drm_device::ctx_idr with it.
219 */
220int drm_legacy_setsareactx(struct drm_device *dev, void *data,
221			   struct drm_file *file_priv)
222{
223	struct drm_ctx_priv_map *request = data;
224	struct drm_local_map *map = NULL;
225	struct drm_map_list *r_list = NULL;
226
227	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
228	    drm_core_check_feature(dev, DRIVER_MODESET))
229		return -EINVAL;
230
231	mutex_lock(&dev->struct_mutex);
232	list_for_each_entry(r_list, &dev->maplist, head) {
233		if (r_list->map
234		    && r_list->user_token == (unsigned long) request->handle)
235			goto found;
236	}
237      bad:
238	mutex_unlock(&dev->struct_mutex);
239	return -EINVAL;
240
241      found:
242	map = r_list->map;
243	if (!map)
244		goto bad;
245
246	if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
247		goto bad;
248
249	mutex_unlock(&dev->struct_mutex);
250
251	return 0;
252}
253
254/*@}*/
255
256/******************************************************************/
257/** \name The actual DRM context handling routines */
258/*@{*/
259
260/**
261 * Switch context.
262 *
263 * \param dev DRM device.
264 * \param old old context handle.
265 * \param new new context handle.
266 * \return zero on success or a negative number on failure.
267 *
268 * Attempt to set drm_device::context_flag.
269 */
270static int drm_context_switch(struct drm_device * dev, int old, int new)
271{
272	if (test_and_set_bit(0, &dev->context_flag)) {
273		DRM_ERROR("Reentering -- FIXME\n");
274		return -EBUSY;
275	}
276
277	DRM_DEBUG("Context switch from %d to %d\n", old, new);
278
279	if (new == dev->last_context) {
280		clear_bit(0, &dev->context_flag);
281		return 0;
282	}
283
284	return 0;
285}
286
287/**
288 * Complete context switch.
289 *
290 * \param dev DRM device.
291 * \param new new context handle.
292 * \return zero on success or a negative number on failure.
293 *
294 * Updates drm_device::last_context and drm_device::last_switch. Verifies the
295 * hardware lock is held, clears the drm_device::context_flag and wakes up
296 * drm_device::context_wait.
297 */
298static int drm_context_switch_complete(struct drm_device *dev,
299				       struct drm_file *file_priv, int new)
300{
301	dev->last_context = new;	/* PRE/POST: This is the _only_ writer. */
302
303	if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
304		DRM_ERROR("Lock isn't held after context switch\n");
305	}
306
307	/* If a context switch is ever initiated
308	   when the kernel holds the lock, release
309	   that lock here. */
310	clear_bit(0, &dev->context_flag);
311
312	return 0;
313}
314
315/**
316 * Reserve contexts.
317 *
318 * \param inode device inode.
319 * \param file_priv DRM file private.
320 * \param cmd command.
321 * \param arg user argument pointing to a drm_ctx_res structure.
322 * \return zero on success or a negative number on failure.
323 */
324int drm_legacy_resctx(struct drm_device *dev, void *data,
325		      struct drm_file *file_priv)
326{
327	struct drm_ctx_res *res = data;
328	struct drm_ctx ctx;
329	int i;
330
331	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
332	    drm_core_check_feature(dev, DRIVER_MODESET))
333		return -EINVAL;
334
335	if (res->count >= DRM_RESERVED_CONTEXTS) {
336		memset(&ctx, 0, sizeof(ctx));
337		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
338			ctx.handle = i;
339			if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
340				return -EFAULT;
341		}
342	}
343	res->count = DRM_RESERVED_CONTEXTS;
344
345	return 0;
346}
347
348/**
349 * Add context.
350 *
351 * \param inode device inode.
352 * \param file_priv DRM file private.
353 * \param cmd command.
354 * \param arg user argument pointing to a drm_ctx structure.
355 * \return zero on success or a negative number on failure.
356 *
357 * Get a new handle for the context and copy to userspace.
358 */
359int drm_legacy_addctx(struct drm_device *dev, void *data,
360		      struct drm_file *file_priv)
361{
362	struct drm_ctx_list *ctx_entry;
363	struct drm_ctx *ctx = data;
364
365	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
366	    drm_core_check_feature(dev, DRIVER_MODESET))
367		return -EINVAL;
368
369	ctx->handle = drm_legacy_ctxbitmap_next(dev);
370	if (ctx->handle == DRM_KERNEL_CONTEXT) {
371		/* Skip kernel's context and get a new one. */
372		ctx->handle = drm_legacy_ctxbitmap_next(dev);
373	}
374	DRM_DEBUG("%d\n", ctx->handle);
375	if (ctx->handle == -1) {
376		DRM_DEBUG("Not enough free contexts.\n");
377		/* Should this return -EBUSY instead? */
378		return -ENOMEM;
379	}
380
381	ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
382	if (!ctx_entry) {
383		DRM_DEBUG("out of memory\n");
384		return -ENOMEM;
385	}
386
387	INIT_LIST_HEAD(&ctx_entry->head);
388	ctx_entry->handle = ctx->handle;
389	ctx_entry->tag = file_priv;
390
391	mutex_lock(&dev->ctxlist_mutex);
392	list_add(&ctx_entry->head, &dev->ctxlist);
393	mutex_unlock(&dev->ctxlist_mutex);
394
395	return 0;
396}
397
398/**
399 * Get context.
400 *
401 * \param inode device inode.
402 * \param file_priv DRM file private.
403 * \param cmd command.
404 * \param arg user argument pointing to a drm_ctx structure.
405 * \return zero on success or a negative number on failure.
406 */
407int drm_legacy_getctx(struct drm_device *dev, void *data,
408		      struct drm_file *file_priv)
409{
410	struct drm_ctx *ctx = data;
411
412	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
413	    drm_core_check_feature(dev, DRIVER_MODESET))
414		return -EINVAL;
415
416	/* This is 0, because we don't handle any context flags */
417	ctx->flags = 0;
418
419	return 0;
420}
421
422/**
423 * Switch context.
424 *
425 * \param inode device inode.
426 * \param file_priv DRM file private.
427 * \param cmd command.
428 * \param arg user argument pointing to a drm_ctx structure.
429 * \return zero on success or a negative number on failure.
430 *
431 * Calls context_switch().
432 */
433int drm_legacy_switchctx(struct drm_device *dev, void *data,
434			 struct drm_file *file_priv)
435{
436	struct drm_ctx *ctx = data;
437
438	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
439	    drm_core_check_feature(dev, DRIVER_MODESET))
440		return -EINVAL;
441
442	DRM_DEBUG("%d\n", ctx->handle);
443	return drm_context_switch(dev, dev->last_context, ctx->handle);
444}
445
446/**
447 * New context.
448 *
449 * \param inode device inode.
450 * \param file_priv DRM file private.
451 * \param cmd command.
452 * \param arg user argument pointing to a drm_ctx structure.
453 * \return zero on success or a negative number on failure.
454 *
455 * Calls context_switch_complete().
456 */
457int drm_legacy_newctx(struct drm_device *dev, void *data,
458		      struct drm_file *file_priv)
459{
460	struct drm_ctx *ctx = data;
461
462	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
463	    drm_core_check_feature(dev, DRIVER_MODESET))
464		return -EINVAL;
465
466	DRM_DEBUG("%d\n", ctx->handle);
467	drm_context_switch_complete(dev, file_priv, ctx->handle);
468
469	return 0;
470}
471
472/**
473 * Remove context.
474 *
475 * \param inode device inode.
476 * \param file_priv DRM file private.
477 * \param cmd command.
478 * \param arg user argument pointing to a drm_ctx structure.
479 * \return zero on success or a negative number on failure.
480 *
481 * If not the special kernel context, calls ctxbitmap_free() to free the specified context.
482 */
483int drm_legacy_rmctx(struct drm_device *dev, void *data,
484		     struct drm_file *file_priv)
485{
486	struct drm_ctx *ctx = data;
487
488	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
489	    drm_core_check_feature(dev, DRIVER_MODESET))
490		return -EINVAL;
491
492	DRM_DEBUG("%d\n", ctx->handle);
493	if (ctx->handle != DRM_KERNEL_CONTEXT) {
494		if (dev->driver->context_dtor)
495			dev->driver->context_dtor(dev, ctx->handle);
496		drm_legacy_ctxbitmap_free(dev, ctx->handle);
497	}
498
499	mutex_lock(&dev->ctxlist_mutex);
500	if (!list_empty(&dev->ctxlist)) {
501		struct drm_ctx_list *pos, *n;
502
503		list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
504			if (pos->handle == ctx->handle) {
505				list_del(&pos->head);
506				kfree(pos);
507			}
508		}
509	}
510	mutex_unlock(&dev->ctxlist_mutex);
511
512	return 0;
513}
514
515/*@}*/
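
Similarly, the per-context SAREA handlers in both listings (drm_legacy_setsareactx() and drm_legacy_getsareactx()) sit behind DRM_IOCTL_SET_SAREA_CTX and DRM_IOCTL_GET_SAREA_CTX and translate between a context id and the user token of a previously added mapping. A hedged sketch of the lookup direction, with a placeholder context id and helper name, might look like this:

/*
 * Hypothetical sketch: query which mapping is bound to a legacy context id.
 * On success request.handle holds the map's user_token, which a legacy
 * client would then use as the mmap() offset on the DRM device node.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>		/* may be <libdrm/drm.h> depending on the distro */

static int get_sarea_token(int fd, unsigned int ctx_id, void **token)
{
	struct drm_ctx_priv_map request;

	memset(&request, 0, sizeof(request));
	request.ctx_id = ctx_id;	/* context created earlier with DRM_IOCTL_ADD_CTX */

	/* Served by drm_legacy_getsareactx(); fails with EINVAL if no map is bound. */
	if (ioctl(fd, DRM_IOCTL_GET_SAREA_CTX, &request))
		return -1;

	*token = request.handle;
	return 0;
}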