/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 24)
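
/*
 * Illustrative note (not part of the original source): sequence numbers
 * are unsigned 32-bit values that may wrap, so "a has passed b" is tested
 * as (a - b) < VMW_FENCE_WRAP rather than a >= b. A hypothetical example:
 *
 *	last_read_sequence = 0x00000003, sequence = 0xfffffffe
 *	0x00000003 - 0xfffffffe = 0x00000005	(unsigned wrap-around)
 *	0x00000005 < VMW_FENCE_WRAP (0x01000000), so the fence counts as
 *	signaled even though a plain compare would say otherwise.
 */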

irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	spin_lock(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	spin_unlock(&dev_priv->irq_lock);

	if (status & SVGA_IRQFLAG_ANY_FENCE)
		wake_up_all(&dev_priv->fence_queue);
	if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
		wake_up_all(&dev_priv->fifo_queue);

	if (likely(status)) {
		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
{
	uint32_t busy;

	mutex_lock(&dev_priv->hw_mutex);
	busy = vmw_read(dev_priv, SVGA_REG_BUSY);
	mutex_unlock(&dev_priv->hw_mutex);

	return (busy == 0);
}

void vmw_update_sequence(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo_state)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	uint32_t sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	if (dev_priv->last_read_sequence != sequence) {
		dev_priv->last_read_sequence = sequence;
		vmw_fence_pull(&fifo_state->fence_queue, sequence);
	}
}

bool vmw_fence_signaled(struct vmw_private *dev_priv,
			uint32_t sequence)
{
	struct vmw_fifo_state *fifo_state;
	bool ret;

	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
		return true;

	fifo_state = &dev_priv->fifo;
	vmw_update_sequence(dev_priv, fifo_state);
	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
		return true;

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
	    vmw_fifo_idle(dev_priv, sequence))
		return true;

	/*
	 * Finally, check whether the sequence is ahead of what has
	 * actually been emitted; in that case the fence is stale and
	 * must be considered signaled.
	 */

	ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
	       > VMW_FENCE_WRAP);

	return ret;
}

int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t sequence,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_fence_signaled;

	/*
	 * Block command submission while waiting for idle.
	 */

	if (fifo_idle)
		down_read(&fifo_state->rwsem);
	signal_seq = atomic_read(&dev_priv->fence_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, sequence))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/*
			 * FIXME: Use schedule_hrtimeout() here on
			 * newer kernels to lower CPU utilization.
			 */

			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle) {
		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
	wake_up_all(&dev_priv->fence_queue);
	if (fifo_idle)
		up_read(&fifo_state->rwsem);

	return ret;
}
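
/*
 * Usage sketch (illustrative only; not a call site from the original
 * file): a caller that cannot rely on IRQ delivery might poll lazily for
 * a sequence number with a 3 second timeout:
 *
 *	ret = vmw_fallback_wait(dev_priv, true, false, sequence,
 *				true, 3 * HZ);
 *	if (ret == -ERESTARTSYS)
 *		; // interrupted by a signal
 */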

int vmw_wait_fence(struct vmw_private *dev_priv,
		   bool lazy, uint32_t sequence,
		   bool interruptible, unsigned long timeout)
{
	long ret;
	unsigned long irq_flags;
	struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
		return 0;

	if (likely(vmw_fence_signaled(dev_priv, sequence)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
		return vmw_fallback_wait(dev_priv, lazy, true, sequence,
					 interruptible, timeout);

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fallback_wait(dev_priv, lazy, false, sequence,
					 interruptible, timeout);

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_ANY_FENCE,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) |
			  SVGA_IRQFLAG_ANY_FENCE);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fence_queue,
		     vmw_fence_signaled(dev_priv, sequence),
		     timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fence_queue,
		     vmw_fence_signaled(dev_priv, sequence),
		     timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		vmw_write(dev_priv, SVGA_REG_IRQMASK,
			  vmw_read(dev_priv, SVGA_REG_IRQMASK) &
			  ~SVGA_IRQFLAG_ANY_FENCE);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	return ret;
}
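
/*
 * Usage sketch (illustrative; it mirrors vmw_fence_wait_ioctl() below):
 * wait lazily and interruptibly for a sequence number, with a timeout
 * expressed in jiffies:
 *
 *	ret = vmw_wait_fence(dev_priv, true, sequence, true, 3 * HZ);
 *	if (ret == -EBUSY)
 *		; // timed out before the fence signaled
 */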

void vmw_irq_preinstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	spin_lock_init(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

int vmw_irq_postinstall(struct drm_device *dev)
{
	return 0;
}

void vmw_irq_uninstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
	mutex_unlock(&dev_priv->hw_mutex);

	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

#define VMW_FENCE_WAIT_TIMEOUT (3 * HZ)

int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
	}

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
		return -EBUSY;

	timeout = (unsigned long)arg->kernel_cookie - timeout;
	return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true,
			      timeout);
}
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/pci.h>
#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 24)

static u32 vmw_irqflag_fence_goal(struct vmw_private *vmw)
{
	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
		return SVGA_IRQFLAG_REG_FENCE_GOAL;
	else
		return SVGA_IRQFLAG_FENCE_GOAL;
}

/**
 * vmw_thread_fn - Deferred (process context) irq handler
 *
 * @irq: irq number
 * @arg: Closure argument. Pointer to a struct drm_device cast to void *
 *
 * This function implements the deferred part of irq processing.
 * The function is guaranteed to run at least once after the
 * vmw_irq_handler has returned with IRQ_WAKE_THREAD.
 */
static irqreturn_t vmw_thread_fn(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	irqreturn_t ret = IRQ_NONE;

	if (test_and_clear_bit(VMW_IRQTHREAD_FENCE,
			       dev_priv->irqthread_pending)) {
		vmw_fences_update(dev_priv->fman);
		wake_up_all(&dev_priv->fence_queue);
		ret = IRQ_HANDLED;
	}

	if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF,
			       dev_priv->irqthread_pending)) {
		vmw_cmdbuf_irqthread(dev_priv->cman);
		ret = IRQ_HANDLED;
	}

	return ret;
}

/**
 * vmw_irq_handler - irq handler
 *
 * @irq: irq number
 * @arg: Closure argument. Pointer to a struct drm_device cast to void *
 *
 * This function implements the quick part of irq processing.
 * The function performs fast actions like clearing the device interrupt
 * flags and also reasonably quick actions like waking processes waiting for
 * FIFO space. Other IRQ actions are deferred to the IRQ thread.
 */
static irqreturn_t vmw_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status, masked_status;
	irqreturn_t ret = IRQ_HANDLED;

	status = vmw_irq_status_read(dev_priv);
	masked_status = status & READ_ONCE(dev_priv->irq_mask);

	if (likely(status))
		vmw_irq_status_write(dev_priv, status);

	if (!status)
		return IRQ_NONE;

	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
		wake_up_all(&dev_priv->fifo_queue);

	if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
			      vmw_irqflag_fence_goal(dev_priv))) &&
	    !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
		ret = IRQ_WAKE_THREAD;

	if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
			      SVGA_IRQFLAG_ERROR)) &&
	    !test_and_set_bit(VMW_IRQTHREAD_CMDBUF,
			      dev_priv->irqthread_pending))
		ret = IRQ_WAKE_THREAD;

	return ret;
}
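
/*
 * Illustrative handshake (inferred from the two handlers above; not
 * documentation from the original file):
 *
 *	hard irq:   !test_and_set_bit(VMW_IRQTHREAD_FENCE, pending)
 *			-> return IRQ_WAKE_THREAD;
 *	thread fn:  test_and_clear_bit(VMW_IRQTHREAD_FENCE, pending)
 *			-> vmw_fences_update(), wake_up_all();
 *
 * The test_and_set/test_and_clear pair queues at most one thread wakeup
 * per pending work item, even under a burst of interrupts.
 */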

static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
	return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}

void vmw_update_seqno(struct vmw_private *dev_priv)
{
	uint32_t seqno = vmw_fence_read(dev_priv);

	if (dev_priv->last_read_seqno != seqno) {
		dev_priv->last_read_seqno = seqno;
		vmw_fences_update(dev_priv->fman);
	}
}

bool vmw_seqno_passed(struct vmw_private *dev_priv,
		      uint32_t seqno)
{
	bool ret;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	vmw_update_seqno(dev_priv);
	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
		return true;

	/*
	 * Finally, check whether the seqno is ahead of what has
	 * actually been emitted; in that case the fence is stale and
	 * must be considered signaled.
	 */

	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
	       > VMW_FENCE_WRAP);

	return ret;
}

int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t seqno,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	bool fifo_down = false;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_seqno_passed;

	/*
	 * Block command submission while waiting for idle.
	 */

	if (fifo_idle) {
		if (dev_priv->cman) {
			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
					      10 * HZ);
			if (ret)
				goto out_err;
		} else if (fifo_state) {
			down_read(&fifo_state->rwsem);
			fifo_down = true;
		}
	}

	signal_seq = atomic_read(&dev_priv->marker_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, seqno))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/*
			 * FIXME: Use schedule_hrtimeout() here on
			 * newer kernels to lower CPU utilization.
			 */

			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle && fifo_state)
		vmw_fence_write(dev_priv, signal_seq);

	wake_up_all(&dev_priv->fence_queue);
out_err:
	if (fifo_down)
		up_read(&fifo_state->rwsem);

	return ret;
}

void vmw_generic_waiter_add(struct vmw_private *dev_priv,
			    u32 flag, int *waiter_count)
{
	spin_lock_bh(&dev_priv->waiter_lock);
	if ((*waiter_count)++ == 0) {
		vmw_irq_status_write(dev_priv, flag);
		dev_priv->irq_mask |= flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_bh(&dev_priv->waiter_lock);
}

void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
			       u32 flag, int *waiter_count)
{
	spin_lock_bh(&dev_priv->waiter_lock);
	if (--(*waiter_count) == 0) {
		dev_priv->irq_mask &= ~flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_bh(&dev_priv->waiter_lock);
}
264
265void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
266{
267 vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
268 &dev_priv->fence_queue_waiters);
269}
270
271void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
272{
273 vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
274 &dev_priv->fence_queue_waiters);
275}
276
277void vmw_goal_waiter_add(struct vmw_private *dev_priv)
278{
279 vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
280 &dev_priv->goal_queue_waiters);
281}
282
283void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
284{
285 vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
286 &dev_priv->goal_queue_waiters);
287}
288
289static void vmw_irq_preinstall(struct drm_device *dev)
290{
291 struct vmw_private *dev_priv = vmw_priv(dev);
292 uint32_t status;
293
294 status = vmw_irq_status_read(dev_priv);
295 vmw_irq_status_write(dev_priv, status);
296}
297
298void vmw_irq_uninstall(struct drm_device *dev)
299{
300 struct vmw_private *dev_priv = vmw_priv(dev);
301 struct pci_dev *pdev = to_pci_dev(dev->dev);
302 uint32_t status;
303 u32 i;
304
305 if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
306 return;
307
308 vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
309
310 status = vmw_irq_status_read(dev_priv);
311 vmw_irq_status_write(dev_priv, status);
312
313 for (i = 0; i < dev_priv->num_irq_vectors; ++i)
314 free_irq(dev_priv->irqs[i], dev);
315
316 pci_free_irq_vectors(pdev);
317 dev_priv->num_irq_vectors = 0;
318}
319
320/**
321 * vmw_irq_install - Install the irq handlers
322 *
323 * @dev_priv: Pointer to the vmw_private device.
324 * Return: Zero if successful. Negative number otherwise.
325 */
326int vmw_irq_install(struct vmw_private *dev_priv)
327{
328 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
329 struct drm_device *dev = &dev_priv->drm;
330 int ret;
331 int nvec;
332 int i = 0;
333
334 BUILD_BUG_ON((SVGA_IRQFLAG_MAX >> VMWGFX_MAX_NUM_IRQS) != 1);
335 BUG_ON(VMWGFX_MAX_NUM_IRQS != get_count_order(SVGA_IRQFLAG_MAX));
336
337 nvec = pci_alloc_irq_vectors(pdev, 1, VMWGFX_MAX_NUM_IRQS,
338 PCI_IRQ_ALL_TYPES);
339
	if (nvec <= 0) {
		drm_err(&dev_priv->drm,
			"IRQs are unavailable, nvec: %d\n", nvec);
		ret = nvec;
		goto done;
	}

	vmw_irq_preinstall(dev);

	for (i = 0; i < nvec; ++i) {
		ret = pci_irq_vector(pdev, i);
		if (ret < 0) {
			drm_err(&dev_priv->drm,
				"failed getting irq vector: %d\n", ret);
			goto done;
		}
		dev_priv->irqs[i] = ret;

		ret = request_threaded_irq(dev_priv->irqs[i], vmw_irq_handler,
					   vmw_thread_fn, IRQF_SHARED,
					   VMWGFX_DRIVER_NAME, dev);
		if (ret != 0) {
			drm_err(&dev_priv->drm,
				"Failed installing irq(%d): %d\n",
				dev_priv->irqs[i], ret);
			goto done;
		}
	}

done:
	dev_priv->num_irq_vectors = i;
	return ret;
}
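
/*
 * Usage sketch (illustrative, assuming the usual probe/teardown pairing;
 * not a call site from the original file):
 *
 *	ret = vmw_irq_install(dev_priv);	// during device init
 *	if (ret)
 *		goto out_no_irq;
 *	...
 *	vmw_irq_uninstall(&dev_priv->drm);	// during teardown
 */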