v3.1
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 24)

irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	spin_lock(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	spin_unlock(&dev_priv->irq_lock);

	if (status & SVGA_IRQFLAG_ANY_FENCE)
		wake_up_all(&dev_priv->fence_queue);
	if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
		wake_up_all(&dev_priv->fifo_queue);

	if (likely(status)) {
		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
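
For readers unfamiliar with the old DRM interrupt glue: DRM_IRQ_ARGS is a drmP.h convenience macro, which is why the handler receives an opaque arg that it casts back to the drm_device. A minimal sketch of the expanded prototype, assuming the historical definition of the macro as "int irq, void *arg":

/* Hypothetical expansion; the actual macro lives in drmP.h. */
irqreturn_t vmw_irq_handler(int irq, void *arg);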

static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
{
	uint32_t busy;

	mutex_lock(&dev_priv->hw_mutex);
	busy = vmw_read(dev_priv, SVGA_REG_BUSY);
	mutex_unlock(&dev_priv->hw_mutex);

	return (busy == 0);
}

void vmw_update_sequence(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo_state)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	if (dev_priv->last_read_sequence != sequence) {
		dev_priv->last_read_sequence = sequence;
		vmw_fence_pull(&fifo_state->fence_queue, sequence);
	}
}

bool vmw_fence_signaled(struct vmw_private *dev_priv,
			uint32_t sequence)
{
	struct vmw_fifo_state *fifo_state;
	bool ret;

	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
		return true;

	fifo_state = &dev_priv->fifo;
	vmw_update_sequence(dev_priv, fifo_state);
	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
		return true;

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
	    vmw_fifo_idle(dev_priv, sequence))
		return true;

	/**
	 * Then check if the sequence is higher than what we've actually
	 * emitted. Then the fence is stale and signaled.
	 */

	ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
	       > VMW_FENCE_WRAP);

	return ret;
}
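
The unsigned subtraction against VMW_FENCE_WRAP is what makes this 32-bit sequence comparison robust across counter rollover. A standalone sketch of the same arithmetic (user-space C, illustrative names only):

#include <stdint.h>
#include <stdio.h>

#define WRAP (1u << 24)

/* Mirrors the driver's check: a seqno counts as passed when the
 * unsigned distance from the last-read value is small. */
static int seqno_passed(uint32_t last_read, uint32_t seqno)
{
	return (last_read - seqno) < WRAP;
}

int main(void)
{
	/* Passed across the 32-bit rollover: 0xfffffffe was emitted
	 * just before the counter wrapped around to 2. */
	printf("%d\n", seqno_passed(0x00000002u, 0xfffffffeu)); /* prints 1 */
	/* Not yet passed: the seqno lies ahead of last_read. */
	printf("%d\n", seqno_passed(0x00000002u, 0x00000010u)); /* prints 0 */
	return 0;
}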

int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t sequence,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_fence_signaled;

	/**
	 * Block command submission while waiting for idle.
	 */

	if (fifo_idle)
		down_read(&fifo_state->rwsem);
	signal_seq = atomic_read(&dev_priv->fence_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, sequence))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/**
			 * FIXME: Use schedule_hr_timeout here for
			 * newer kernels and lower CPU utilization.
			 */

			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle) {
		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
	wake_up_all(&dev_priv->fence_queue);
	if (fifo_idle)
		up_read(&fifo_state->rwsem);

	return ret;
}
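
The structure of the loop above — prepare to wait, test the condition, honor a deadline, optionally yield — is the classic open-coded wait-queue pattern. A rough user-space analog of the lazy path, with purely illustrative names:

#include <stdatomic.h>
#include <time.h>

static atomic_uint last_read_seqno;

/* Poll until the seqno passes or the deadline expires; sleep roughly
 * 1 ms per iteration, much like schedule_timeout(1) in the lazy path. */
static int wait_seqno_lazy(unsigned int seqno, unsigned int timeout_ms)
{
	struct timespec tick = { 0, 1000000 }; /* ~1 ms */

	for (unsigned int elapsed = 0; elapsed < timeout_ms; elapsed++) {
		if (atomic_load(&last_read_seqno) - seqno < (1u << 24))
			return 0;
		nanosleep(&tick, NULL);
	}
	return -1; /* analogous to the "SVGA device lockup" bail-out */
}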

int vmw_wait_fence(struct vmw_private *dev_priv,
		   bool lazy, uint32_t sequence,
		   bool interruptible, unsigned long timeout)
{
	long ret;
	unsigned long irq_flags;
	struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
		return 0;

	if (likely(vmw_fence_signaled(dev_priv, sequence)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
		return vmw_fallback_wait(dev_priv, lazy, true, sequence,
					 interruptible, timeout);

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fallback_wait(dev_priv, lazy, false, sequence,
					 interruptible, timeout);

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_ANY_FENCE,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) |
			  SVGA_IRQFLAG_ANY_FENCE);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fence_queue,
		     vmw_fence_signaled(dev_priv, sequence),
		     timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fence_queue,
		     vmw_fence_signaled(dev_priv, sequence),
		     timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) &
			  ~SVGA_IRQFLAG_ANY_FENCE);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	return ret;
}
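
One subtlety worth calling out: wait_event_timeout() and wait_event_interruptible_timeout() return 0 when the timeout elapsed, a positive count of remaining jiffies when the condition came true, and -ERESTARTSYS on a signal (interruptible variant only). The tail of vmw_wait_fence() maps those onto caller-friendly values:

/* ret == 0            -> timed out      -> -EBUSY
 * ret > 0             -> fence signaled -> 0
 * ret == -ERESTARTSYS -> interrupted    -> returned unchanged */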

void vmw_irq_preinstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	spin_lock_init(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

int vmw_irq_postinstall(struct drm_device *dev)
{
	return 0;
}

void vmw_irq_uninstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
	mutex_unlock(&dev_priv->hw_mutex);

	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

#define VMW_FENCE_WAIT_TIMEOUT (3 * HZ)

int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
	}

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
		return -EBUSY;

	timeout = (unsigned long)arg->kernel_cookie - timeout;
	return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout);
}
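
A quick note on the cookie handling above, since it is easy to misread: the first call stamps an absolute deadline into the argument, and any restarted call waits only for the remainder.

/* First call:   kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT (absolute deadline)
 * Retried call: timeout = kernel_cookie - jiffies (wait only the remainder)
 * Past cookie:  return -EBUSY immediately, so restarts cannot extend the deadline. */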
v3.5.6
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 24)

irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status, masked_status;

	spin_lock(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	masked_status = status & dev_priv->irq_mask;
	spin_unlock(&dev_priv->irq_lock);

	if (likely(status))
		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

	if (!masked_status)
		return IRQ_NONE;

	if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
			     SVGA_IRQFLAG_FENCE_GOAL)) {
		vmw_fences_update(dev_priv->fman);
		wake_up_all(&dev_priv->fence_queue);
	}

	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
		wake_up_all(&dev_priv->fifo_queue);

	return IRQ_HANDLED;
}
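
Note how this version restructures the handler relative to v3.1: every pending status bit is acked unconditionally, the interrupt is claimed only if a bit in the driver-maintained irq_mask actually fired, and fence interrupts additionally kick vmw_fences_update() on the fence manager before waking waiters.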

static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
	uint32_t busy;

	mutex_lock(&dev_priv->hw_mutex);
	busy = vmw_read(dev_priv, SVGA_REG_BUSY);
	mutex_unlock(&dev_priv->hw_mutex);

	return (busy == 0);
}

void vmw_update_seqno(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo_state)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	if (dev_priv->last_read_seqno != seqno) {
		dev_priv->last_read_seqno = seqno;
		vmw_marker_pull(&fifo_state->marker_queue, seqno);
		vmw_fences_update(dev_priv->fman);
	}
}

bool vmw_seqno_passed(struct vmw_private *dev_priv,
			 uint32_t seqno)
{
	struct vmw_fifo_state *fifo_state;
	bool ret;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	fifo_state = &dev_priv->fifo;
	vmw_update_seqno(dev_priv, fifo_state);
	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
	    vmw_fifo_idle(dev_priv, seqno))
		return true;

	/**
	 * Then check if the seqno is higher than what we've actually
	 * emitted. Then the fence is stale and signaled.
	 */

	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
	       > VMW_FENCE_WRAP);

	return ret;
}

int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t seqno,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_seqno_passed;

	/**
	 * Block command submission while waiting for idle.
	 */

	if (fifo_idle)
		down_read(&fifo_state->rwsem);
	signal_seq = atomic_read(&dev_priv->marker_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, seqno))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/**
			 * FIXME: Use schedule_hr_timeout here for
			 * newer kernels and lower CPU utilization.
			 */

			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle) {
		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
	wake_up_all(&dev_priv->fence_queue);
	if (fifo_idle)
		up_read(&fifo_state->rwsem);

	return ret;
}

void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->hw_mutex);
	if (dev_priv->fence_queue_waiters++ == 0) {
		unsigned long irq_flags;

		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_ANY_FENCE,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);
}

void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->hw_mutex);
	if (--dev_priv->fence_queue_waiters == 0) {
		unsigned long irq_flags;

		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);
}

void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->hw_mutex);
	if (dev_priv->goal_queue_waiters++ == 0) {
		unsigned long irq_flags;

		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_FENCE_GOAL,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);
}

void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->hw_mutex);
	if (--dev_priv->goal_queue_waiters == 0) {
		unsigned long irq_flags;

		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);
}
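
All four helpers above share one refcounting idiom: the first waiter unmasks the interrupt source and the last waiter masks it again, so the device only raises interrupts somebody is actually waiting for. A minimal user-space sketch of the idiom (illustrative names, not the driver's API):

#include <pthread.h>

static pthread_mutex_t hw_mutex = PTHREAD_MUTEX_INITIALIZER;
static int waiters;
static unsigned int irq_mask;

#define ANY_FENCE (1u << 0)

static void waiter_add(void)
{
	pthread_mutex_lock(&hw_mutex);
	if (waiters++ == 0)
		irq_mask |= ANY_FENCE;	/* the driver would also write SVGA_REG_IRQMASK */
	pthread_mutex_unlock(&hw_mutex);
}

static void waiter_remove(void)
{
	pthread_mutex_lock(&hw_mutex);
	if (--waiters == 0)
		irq_mask &= ~ANY_FENCE;
	pthread_mutex_unlock(&hw_mutex);
}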

int vmw_wait_seqno(struct vmw_private *dev_priv,
		      bool lazy, uint32_t seqno,
		      bool interruptible, unsigned long timeout)
{
	long ret;
	struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return 0;

	if (likely(vmw_seqno_passed(dev_priv, seqno)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
		return vmw_fallback_wait(dev_priv, lazy, true, seqno,
					 interruptible, timeout);

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fallback_wait(dev_priv, lazy, false, seqno,
					 interruptible, timeout);

	vmw_seqno_waiter_add(dev_priv);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fence_queue,
		     vmw_seqno_passed(dev_priv, seqno),
		     timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fence_queue,
		     vmw_seqno_passed(dev_priv, seqno),
		     timeout);

	vmw_seqno_waiter_remove(dev_priv);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	return ret;
}
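
Compared with v3.1's vmw_wait_fence(), the IRQ mask bookkeeping is no longer open-coded around the wait; it is delegated to vmw_seqno_waiter_add()/vmw_seqno_waiter_remove() above, while the mapping of the wait_event return value (0 on timeout becomes -EBUSY, positive becomes 0) is unchanged.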

void vmw_irq_preinstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	spin_lock_init(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

int vmw_irq_postinstall(struct drm_device *dev)
{
	return 0;
}

void vmw_irq_uninstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
	mutex_unlock(&dev_priv->hw_mutex);

	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}