v4.6 (drivers/gpu/drm/drm_lock.c)
  1/**
  2 * \file drm_lock.c
  3 * IOCTLs for locking
  4 *
  5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
  6 * \author Gareth Hughes <gareth@valinux.com>
  7 */
  8
  9/*
 10 * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
 11 *
 12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 14 * All Rights Reserved.
 15 *
 16 * Permission is hereby granted, free of charge, to any person obtaining a
 17 * copy of this software and associated documentation files (the "Software"),
 18 * to deal in the Software without restriction, including without limitation
 19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 20 * and/or sell copies of the Software, and to permit persons to whom the
 21 * Software is furnished to do so, subject to the following conditions:
 22 *
 23 * The above copyright notice and this permission notice (including the next
 24 * paragraph) shall be included in all copies or substantial portions of the
 25 * Software.
 26 *
 27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 33 * OTHER DEALINGS IN THE SOFTWARE.
 34 */
 35
 36#include <linux/export.h>
 37#include <drm/drmP.h>
 38#include "drm_legacy.h"
 39#include "drm_internal.h"
 40
 41static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
 42
 43/**
 44 * Lock ioctl.
 45 *
 46 * \param inode device inode.
 47 * \param file_priv DRM file private.
 48 * \param cmd command.
 49 * \param arg user argument, pointing to a drm_lock structure.
 50 * \return zero on success or negative number on failure.
 51 *
 52 * Add the current task to the lock wait queue, and attempt to take the lock.
 53 */
 54int drm_legacy_lock(struct drm_device *dev, void *data,
 55		    struct drm_file *file_priv)
 56{
 57	DECLARE_WAITQUEUE(entry, current);
 58	struct drm_lock *lock = data;
 59	struct drm_master *master = file_priv->master;
 60	int ret = 0;
 61
 62	if (drm_core_check_feature(dev, DRIVER_MODESET))
 63		return -EINVAL;
 64
 65	++file_priv->lock_count;
 66
 67	if (lock->context == DRM_KERNEL_CONTEXT) {
 68		DRM_ERROR("Process %d using kernel context %d\n",
 69			  task_pid_nr(current), lock->context);
 70		return -EINVAL;
 71	}
 72
 73	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
 74		  lock->context, task_pid_nr(current),
 75		  master->lock.hw_lock->lock, lock->flags);
 76
 77	add_wait_queue(&master->lock.lock_queue, &entry);
 78	spin_lock_bh(&master->lock.spinlock);
 79	master->lock.user_waiters++;
 80	spin_unlock_bh(&master->lock.spinlock);
 81
 82	for (;;) {
 83		__set_current_state(TASK_INTERRUPTIBLE);
 84		if (!master->lock.hw_lock) {
 85			/* Device has been unregistered */
 86			send_sig(SIGTERM, current, 0);
 87			ret = -EINTR;
 88			break;
 89		}
 90		if (drm_lock_take(&master->lock, lock->context)) {
 91			master->lock.file_priv = file_priv;
 92			master->lock.lock_time = jiffies;
 93			break;	/* Got lock */
 94		}
 95
 96		/* Contention */
 97		mutex_unlock(&drm_global_mutex);
 98		schedule();
 99		mutex_lock(&drm_global_mutex);
100		if (signal_pending(current)) {
101			ret = -EINTR;
102			break;
103		}
104	}
105	spin_lock_bh(&master->lock.spinlock);
106	master->lock.user_waiters--;
107	spin_unlock_bh(&master->lock.spinlock);
108	__set_current_state(TASK_RUNNING);
109	remove_wait_queue(&master->lock.lock_queue, &entry);
110
111	DRM_DEBUG("%d %s\n", lock->context,
112		  ret ? "interrupted" : "has lock");
113	if (ret) return ret;
114
115	/* Don't set up the block-all-signals data on the master process for
116	 * now. This probably isn't the correct answer, but it lets us debug
117	 * the xkb X server for now. */
118	if (!file_priv->is_master) {
119		dev->sigdata.context = lock->context;
120		dev->sigdata.lock = master->lock.hw_lock;
121	}
122
123	if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
124	{
125		if (dev->driver->dma_quiescent(dev)) {
126			DRM_DEBUG("%d waiting for DMA quiescent\n",
127				  lock->context);
128			return -EBUSY;
129		}
130	}
131
132	return 0;
133}
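For reference, userspace reaches drm_legacy_lock() and drm_legacy_unlock() through the DRM_IOCTL_LOCK and DRM_IOCTL_UNLOCK ioctls on the device node. Below is a minimal, illustrative sketch of a legacy (non-KMS) client taking and dropping the heavyweight lock; the context value is assumed to have been created earlier (for example with DRM_IOCTL_ADD_CTX), the header path may differ depending on how libdrm or the kernel headers are installed, and error handling is trimmed:

#include <sys/ioctl.h>
#include <drm/drm.h>

/* Illustrative only: take and release the legacy heavyweight lock.
 * "fd" is an open DRM device node, "ctx" a context number obtained
 * earlier; passing _DRM_LOCK_QUIESCENT in flags would additionally
 * wait for DMA to drain (see the dma_quiescent check above). */
static int with_hw_lock(int fd, unsigned int ctx)
{
	struct drm_lock lock = { .context = ctx, .flags = 0 };

	if (ioctl(fd, DRM_IOCTL_LOCK, &lock))	/* blocks in drm_legacy_lock() */
		return -1;

	/* ... program hardware that the lock protects ... */

	return ioctl(fd, DRM_IOCTL_UNLOCK, &lock);	/* drm_legacy_unlock() */
}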
134
135/**
136 * Unlock ioctl.
137 *
138 * \param inode device inode.
139 * \param file_priv DRM file private.
140 * \param cmd command.
141 * \param arg user argument, pointing to a drm_lock structure.
142 * \return zero on success or negative number on failure.
143 *
144 * Transfer and free the lock.
145 */
146int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
147{
148	struct drm_lock *lock = data;
149	struct drm_master *master = file_priv->master;
150
151	if (drm_core_check_feature(dev, DRIVER_MODESET))
152		return -EINVAL;
153
154	if (lock->context == DRM_KERNEL_CONTEXT) {
155		DRM_ERROR("Process %d using kernel context %d\n",
156			  task_pid_nr(current), lock->context);
157		return -EINVAL;
158	}
159
160	if (drm_legacy_lock_free(&master->lock, lock->context)) {
161		/* FIXME: Should really bail out here. */
162	}
163
164	return 0;
165}
166
167/**
168 * Take the heavyweight lock.
169 *
170 * \param lock lock pointer.
171 * \param context locking context.
172 * \return one if the lock was acquired, or zero otherwise.
173 *
174 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
175 */
176static
177int drm_lock_take(struct drm_lock_data *lock_data,
178		  unsigned int context)
179{
180	unsigned int old, new, prev;
181	volatile unsigned int *lock = &lock_data->hw_lock->lock;
182
183	spin_lock_bh(&lock_data->spinlock);
184	do {
185		old = *lock;
186		if (old & _DRM_LOCK_HELD)
187			new = old | _DRM_LOCK_CONT;
188		else {
189			new = context | _DRM_LOCK_HELD |
190				((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
191				 _DRM_LOCK_CONT : 0);
192		}
193		prev = cmpxchg(lock, old, new);
194	} while (prev != old);
195	spin_unlock_bh(&lock_data->spinlock);
196
197	if (_DRM_LOCKING_CONTEXT(old) == context) {
198		if (old & _DRM_LOCK_HELD) {
199			if (context != DRM_KERNEL_CONTEXT) {
200				DRM_ERROR("%d holds heavyweight lock\n",
201					  context);
202			}
203			return 0;
204		}
205	}
206
207	if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
208		/* Have lock */
209		return 1;
210	}
211	return 0;
212}
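The cmpxchg() loop above operates on a single 32-bit lock word whose layout comes from include/uapi/drm/drm.h: the top bit (_DRM_LOCK_HELD) marks the lock as owned, the next bit (_DRM_LOCK_CONT) records contention, and the remaining bits hold the owning context number. A small self-contained illustration of that encoding, using the uapi macros (this is not part of the file itself):

#include <drm/drm.h>
#include <assert.h>

static void lock_word_example(void)
{
	unsigned int ctx = 5;
	unsigned int word = ctx | _DRM_LOCK_HELD;	/* taken, no contention */

	assert(_DRM_LOCK_IS_HELD(word));
	assert(_DRM_LOCKING_CONTEXT(word) == ctx);

	word |= _DRM_LOCK_CONT;				/* a waiter arrived */
	assert(_DRM_LOCKING_CONTEXT(word) == ctx);	/* context bits unchanged */
}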
213
214/**
215 * Forcibly takes the lock and hands it to the given context. Should ONLY be
216 * used inside *_unlock to give the lock to the kernel before calling *_dma_schedule.
217 *
218 * \param dev DRM device.
219 * \param lock lock pointer.
220 * \param context locking context.
221 * \return always one.
222 *
223 * Resets the lock file pointer.
224 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
225 */
226static int drm_lock_transfer(struct drm_lock_data *lock_data,
227			     unsigned int context)
228{
229	unsigned int old, new, prev;
230	volatile unsigned int *lock = &lock_data->hw_lock->lock;
231
232	lock_data->file_priv = NULL;
233	do {
234		old = *lock;
235		new = context | _DRM_LOCK_HELD;
236		prev = cmpxchg(lock, old, new);
237	} while (prev != old);
238	return 1;
239}
240
241/**
242 * Free lock.
243 *
244 * \param dev DRM device.
245 * \param lock lock.
246 * \param context context.
247 *
248 * Resets the lock file pointer.
249 * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
250 * waiting on the lock queue.
251 */
252int drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context)
253{
254	unsigned int old, new, prev;
255	volatile unsigned int *lock = &lock_data->hw_lock->lock;
256
257	spin_lock_bh(&lock_data->spinlock);
258	if (lock_data->kernel_waiters != 0) {
259		drm_lock_transfer(lock_data, 0);
260		lock_data->idle_has_lock = 1;
261		spin_unlock_bh(&lock_data->spinlock);
262		return 1;
263	}
264	spin_unlock_bh(&lock_data->spinlock);
265
266	do {
267		old = *lock;
268		new = _DRM_LOCKING_CONTEXT(old);
269		prev = cmpxchg(lock, old, new);
270	} while (prev != old);
271
272	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
273		DRM_ERROR("%d freed heavyweight lock held by %d\n",
274			  context, _DRM_LOCKING_CONTEXT(old));
275		return 1;
276	}
277	wake_up_interruptible(&lock_data->lock_queue);
278	return 0;
279}
280
281/**
282 * This function returns immediately. It takes the hw lock with the kernel
283 * context if the lock is free; otherwise the kernel context is given the
284 * highest priority to acquire it when and if it is eventually released.
285 *
286 * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
287 * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
288 * a deadlock, which is why the "idlelock" was invented).
289 *
290 * This should be sufficient to wait for GPU idle without
291 * having to worry about starvation.
292 */
293
294void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
295{
296	int ret;
297
298	spin_lock_bh(&lock_data->spinlock);
299	lock_data->kernel_waiters++;
300	if (!lock_data->idle_has_lock) {
301
302		spin_unlock_bh(&lock_data->spinlock);
303		ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
304		spin_lock_bh(&lock_data->spinlock);
305
306		if (ret == 1)
307			lock_data->idle_has_lock = 1;
308	}
309	spin_unlock_bh(&lock_data->spinlock);
310}
311EXPORT_SYMBOL(drm_legacy_idlelock_take);
312
313void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
314{
315	unsigned int old, prev;
316	volatile unsigned int *lock = &lock_data->hw_lock->lock;
317
318	spin_lock_bh(&lock_data->spinlock);
319	if (--lock_data->kernel_waiters == 0) {
320		if (lock_data->idle_has_lock) {
321			do {
322				old = *lock;
323				prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
324			} while (prev != old);
325			wake_up_interruptible(&lock_data->lock_queue);
326			lock_data->idle_has_lock = 0;
327		}
328	}
329	spin_unlock_bh(&lock_data->spinlock);
330}
331EXPORT_SYMBOL(drm_legacy_idlelock_release);
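The exported take/release pair is meant for legacy drivers that need to wait for the engine to go idle without dead-locking against a client that holds (or died while holding) the heavyweight lock. A hedged sketch of the expected calling pattern follows; where the drm_lock_data comes from is driver specific (typically the master's lock), and engine_is_idle() is a made-up helper standing in for whatever hardware poll the driver uses:

/* Illustrative pattern only, not lifted from a particular driver. */
static void example_wait_for_idle(struct drm_device *dev,
				  struct drm_lock_data *lock_data)
{
	drm_legacy_idlelock_take(lock_data);	/* kernel ctx will get the lock */
	while (!engine_is_idle(dev))		/* hypothetical hardware poll */
		cpu_relax();
	drm_legacy_idlelock_release(lock_data);
}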
332
333int drm_legacy_i_have_hw_lock(struct drm_device *dev,
334			      struct drm_file *file_priv)
335{
336	struct drm_master *master = file_priv->master;
337	return (file_priv->lock_count && master->lock.hw_lock &&
338		_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
339		master->lock.file_priv == file_priv);
340}
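drm_legacy_i_have_hw_lock() is the query the core uses at file-release time to decide whether a closing client still owns the hardware lock and the lock therefore has to be freed on its behalf. A hedged sketch of that cleanup pattern (the real call site in the v4.6 file-release path may differ in detail):

static void example_release_cleanup(struct drm_device *dev,
				    struct drm_file *file_priv)
{
	struct drm_master *master = file_priv->master;

	if (drm_legacy_i_have_hw_lock(dev, file_priv))
		drm_legacy_lock_free(&master->lock,
				     _DRM_LOCKING_CONTEXT(master->lock.hw_lock->lock));
}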
v4.10.11 (drivers/gpu/drm/drm_lock.c)
  1/**
  2 * \file drm_lock.c
  3 * IOCTLs for locking
  4 *
  5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
  6 * \author Gareth Hughes <gareth@valinux.com>
  7 */
  8
  9/*
 10 * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
 11 *
 12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 14 * All Rights Reserved.
 15 *
 16 * Permission is hereby granted, free of charge, to any person obtaining a
 17 * copy of this software and associated documentation files (the "Software"),
 18 * to deal in the Software without restriction, including without limitation
 19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 20 * and/or sell copies of the Software, and to permit persons to whom the
 21 * Software is furnished to do so, subject to the following conditions:
 22 *
 23 * The above copyright notice and this permission notice (including the next
 24 * paragraph) shall be included in all copies or substantial portions of the
 25 * Software.
 26 *
 27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 33 * OTHER DEALINGS IN THE SOFTWARE.
 34 */
 35
 36#include <linux/export.h>
 37#include <drm/drmP.h>
 38#include "drm_legacy.h"
 39#include "drm_internal.h"
 40
 41static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
 42
 43/**
 44 * Take the heavyweight lock.
 45 *
 46 * \param lock lock pointer.
 47 * \param context locking context.
 48 * \return one if the lock was acquired, or zero otherwise.
 49 *
 50 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
 51 */
 52static
 53int drm_lock_take(struct drm_lock_data *lock_data,
 54		  unsigned int context)
 55{
 56	unsigned int old, new, prev;
 57	volatile unsigned int *lock = &lock_data->hw_lock->lock;
 58
 59	spin_lock_bh(&lock_data->spinlock);
 60	do {
 61		old = *lock;
 62		if (old & _DRM_LOCK_HELD)
 63			new = old | _DRM_LOCK_CONT;
 64		else {
 65			new = context | _DRM_LOCK_HELD |
 66				((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
 67				 _DRM_LOCK_CONT : 0);
 68		}
 69		prev = cmpxchg(lock, old, new);
 70	} while (prev != old);
 71	spin_unlock_bh(&lock_data->spinlock);
 72
 73	if (_DRM_LOCKING_CONTEXT(old) == context) {
 74		if (old & _DRM_LOCK_HELD) {
 75			if (context != DRM_KERNEL_CONTEXT) {
 76				DRM_ERROR("%d holds heavyweight lock\n",
 77					  context);
 78			}
 79			return 0;
 80		}
 81	}
 82
 83	if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
 84		/* Have lock */
 85		return 1;
 86	}
 87	return 0;
 88}
 89
 90/**
 91 * Forcibly takes the lock and hands it to the given context. Should ONLY be
 92 * used inside *_unlock to give the lock to the kernel before calling *_dma_schedule.
 93 *
 94 * \param dev DRM device.
 95 * \param lock lock pointer.
 96 * \param context locking context.
 97 * \return always one.
 98 *
 99 * Resets the lock file pointer.
100 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
101 */
102static int drm_lock_transfer(struct drm_lock_data *lock_data,
103			     unsigned int context)
104{
105	unsigned int old, new, prev;
106	volatile unsigned int *lock = &lock_data->hw_lock->lock;
107
108	lock_data->file_priv = NULL;
109	do {
110		old = *lock;
111		new = context | _DRM_LOCK_HELD;
112		prev = cmpxchg(lock, old, new);
113	} while (prev != old);
114	return 1;
115}
116
117static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
118				unsigned int context)
119{
120	unsigned int old, new, prev;
121	volatile unsigned int *lock = &lock_data->hw_lock->lock;
122
123	spin_lock_bh(&lock_data->spinlock);
124	if (lock_data->kernel_waiters != 0) {
125		drm_lock_transfer(lock_data, 0);
126		lock_data->idle_has_lock = 1;
127		spin_unlock_bh(&lock_data->spinlock);
128		return 1;
129	}
130	spin_unlock_bh(&lock_data->spinlock);
131
132	do {
133		old = *lock;
134		new = _DRM_LOCKING_CONTEXT(old);
135		prev = cmpxchg(lock, old, new);
136	} while (prev != old);
137
138	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
139		DRM_ERROR("%d freed heavyweight lock held by %d\n",
140			  context, _DRM_LOCKING_CONTEXT(old));
141		return 1;
142	}
143	wake_up_interruptible(&lock_data->lock_queue);
144	return 0;
145}
146
147/**
148 * Lock ioctl.
149 *
150 * \param inode device inode.
151 * \param file_priv DRM file private.
152 * \param cmd command.
153 * \param arg user argument, pointing to a drm_lock structure.
154 * \return zero on success or negative number on failure.
155 *
156 * Add the current task to the lock wait queue, and attempt to take the lock.
157 */
158int drm_legacy_lock(struct drm_device *dev, void *data,
159		    struct drm_file *file_priv)
160{
161	DECLARE_WAITQUEUE(entry, current);
162	struct drm_lock *lock = data;
163	struct drm_master *master = file_priv->master;
164	int ret = 0;
165
166	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
167		return -EINVAL;
168
169	++file_priv->lock_count;
170
171	if (lock->context == DRM_KERNEL_CONTEXT) {
172		DRM_ERROR("Process %d using kernel context %d\n",
173			  task_pid_nr(current), lock->context);
174		return -EINVAL;
175	}
176
177	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
178		  lock->context, task_pid_nr(current),
179		  master->lock.hw_lock ? master->lock.hw_lock->lock : -1,
180		  lock->flags);
181
182	add_wait_queue(&master->lock.lock_queue, &entry);
183	spin_lock_bh(&master->lock.spinlock);
184	master->lock.user_waiters++;
185	spin_unlock_bh(&master->lock.spinlock);
186
187	for (;;) {
188		__set_current_state(TASK_INTERRUPTIBLE);
189		if (!master->lock.hw_lock) {
190			/* Device has been unregistered */
191			send_sig(SIGTERM, current, 0);
192			ret = -EINTR;
193			break;
194		}
195		if (drm_lock_take(&master->lock, lock->context)) {
196			master->lock.file_priv = file_priv;
197			master->lock.lock_time = jiffies;
198			break;	/* Got lock */
199		}
200
201		/* Contention */
202		mutex_unlock(&drm_global_mutex);
203		schedule();
204		mutex_lock(&drm_global_mutex);
205		if (signal_pending(current)) {
206			ret = -EINTR;
207			break;
208		}
209	}
210	spin_lock_bh(&master->lock.spinlock);
211	master->lock.user_waiters--;
212	spin_unlock_bh(&master->lock.spinlock);
213	__set_current_state(TASK_RUNNING);
214	remove_wait_queue(&master->lock.lock_queue, &entry);
215
216	DRM_DEBUG("%d %s\n", lock->context,
217		  ret ? "interrupted" : "has lock");
218	if (ret) return ret;
219
220	/* Don't set up the block-all-signals data on the master process for
221	 * now. This probably isn't the correct answer, but it lets us debug
222	 * the xkb X server for now. */
223	if (!drm_is_current_master(file_priv)) {
224		dev->sigdata.context = lock->context;
225		dev->sigdata.lock = master->lock.hw_lock;
226	}
227
228	if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
229	{
230		if (dev->driver->dma_quiescent(dev)) {
231			DRM_DEBUG("%d waiting for DMA quiescent\n",
232				  lock->context);
233			return -EBUSY;
234		}
235	}
236
237	return 0;
238}
239
240/**
241 * Unlock ioctl.
242 *
243 * \param inode device inode.
244 * \param file_priv DRM file private.
245 * \param cmd command.
246 * \param arg user argument, pointing to a drm_lock structure.
247 * \return zero on success or negative number on failure.
248 *
249 * Transfer and free the lock.
250 */
251int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
252{
253	struct drm_lock *lock = data;
254	struct drm_master *master = file_priv->master;
255
256	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
257		return -EINVAL;
258
259	if (lock->context == DRM_KERNEL_CONTEXT) {
260		DRM_ERROR("Process %d using kernel context %d\n",
261			  task_pid_nr(current), lock->context);
262		return -EINVAL;
263	}
264
265	if (drm_legacy_lock_free(&master->lock, lock->context)) {
266		/* FIXME: Should really bail out here. */
267	}
268
269	return 0;
270}
271
272/**
273 * This function returns immediately. It takes the hw lock with the kernel
274 * context if the lock is free; otherwise the kernel context is given the
275 * highest priority to acquire it when and if it is eventually released.
276 *
277 * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held
278 * by a blocked process. (In the latter case an explicit wait for the hardware lock would cause
279 * a deadlock, which is why the "idlelock" was invented).
280 *
281 * This should be sufficient to wait for GPU idle without
282 * having to worry about starvation.
283 */
284
285void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
286{
287	int ret;
288
289	spin_lock_bh(&lock_data->spinlock);
290	lock_data->kernel_waiters++;
291	if (!lock_data->idle_has_lock) {
292
293		spin_unlock_bh(&lock_data->spinlock);
294		ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT);
295		spin_lock_bh(&lock_data->spinlock);
296
297		if (ret == 1)
298			lock_data->idle_has_lock = 1;
299	}
300	spin_unlock_bh(&lock_data->spinlock);
301}
302EXPORT_SYMBOL(drm_legacy_idlelock_take);
303
304void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
305{
306	unsigned int old, prev;
307	volatile unsigned int *lock = &lock_data->hw_lock->lock;
308
309	spin_lock_bh(&lock_data->spinlock);
310	if (--lock_data->kernel_waiters == 0) {
311		if (lock_data->idle_has_lock) {
312			do {
313				old = *lock;
314				prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT);
315			} while (prev != old);
316			wake_up_interruptible(&lock_data->lock_queue);
317			lock_data->idle_has_lock = 0;
318		}
319	}
320	spin_unlock_bh(&lock_data->spinlock);
321}
322EXPORT_SYMBOL(drm_legacy_idlelock_release);
323
324static int drm_legacy_i_have_hw_lock(struct drm_device *dev,
325				     struct drm_file *file_priv)
326{
327	struct drm_master *master = file_priv->master;
328	return (file_priv->lock_count && master->lock.hw_lock &&
329		_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
330		master->lock.file_priv == file_priv);
331}
332
333void drm_legacy_lock_release(struct drm_device *dev, struct file *filp)
334{
335	struct drm_file *file_priv = filp->private_data;
336
337	/* if the master has gone away we can't do anything with the lock */
338	if (!dev->master)
339		return;
340
341	if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
342		DRM_DEBUG("File %p released, freeing lock for context %d\n",
343			  filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
344		drm_legacy_lock_free(&file_priv->master->lock,
345				     _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
346	}
347}
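Two things distinguish this version from the v4.6 listing above: the legacy ioctls are now gated on the driver explicitly advertising DRIVER_LEGACY (instead of being rejected only when DRIVER_MODESET is set), and the lock-on-file-release cleanup has moved into drm_legacy_lock_release(), which the core is expected to call while tearing down a client's struct file. A hedged sketch of how a legacy driver opts in (field selection simplified, all other callbacks omitted):

static struct drm_driver example_legacy_driver = {
	/* Without DRIVER_LEGACY, drm_legacy_lock()/unlock() return -EINVAL. */
	.driver_features = DRIVER_LEGACY | DRIVER_HAVE_DMA,
	/* ... fops, ioctl table and the remaining callbacks ... */
};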