1// SPDX-License-Identifier: GPL-2.0-only
2/**************************************************************************
3 * Copyright (c) 2007, Intel Corporation.
4 * All Rights Reserved.
5 *
6 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
7 * develop this driver.
8 *
9 **************************************************************************/
10
11#include <drm/drm_drv.h>
12#include <drm/drm_vblank.h>
13
14#include "power.h"
15#include "psb_drv.h"
16#include "psb_intel_reg.h"
17#include "psb_irq.h"
18#include "psb_reg.h"
19
20/*
21 * inline functions
22 */
23
24static inline u32 gma_pipestat(int pipe)
25{
26 if (pipe == 0)
27 return PIPEASTAT;
28 if (pipe == 1)
29 return PIPEBSTAT;
30 if (pipe == 2)
31 return PIPECSTAT;
32 BUG();
33}
34
35static inline u32 gma_pipeconf(int pipe)
36{
37 if (pipe == 0)
38 return PIPEACONF;
39 if (pipe == 1)
40 return PIPEBCONF;
41 if (pipe == 2)
42 return PIPECCONF;
43 BUG();
44}
45
46void gma_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
47{
48 if ((dev_priv->pipestat[pipe] & mask) != mask) {
49 u32 reg = gma_pipestat(pipe);
50 dev_priv->pipestat[pipe] |= mask;
51 /* Enable the interrupt, clear any pending status */
52 if (gma_power_begin(&dev_priv->dev, false)) {
53 u32 writeVal = PSB_RVDC32(reg);
54 writeVal |= (mask | (mask >> 16));
55 PSB_WVDC32(writeVal, reg);
56 (void) PSB_RVDC32(reg);
57 gma_power_end(&dev_priv->dev);
58 }
59 }
60}
61
62void gma_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
63{
64 if ((dev_priv->pipestat[pipe] & mask) != 0) {
65 u32 reg = gma_pipestat(pipe);
66 dev_priv->pipestat[pipe] &= ~mask;
67 if (gma_power_begin(&dev_priv->dev, false)) {
68 u32 writeVal = PSB_RVDC32(reg);
69 writeVal &= ~mask;
70 PSB_WVDC32(writeVal, reg);
71 (void) PSB_RVDC32(reg);
72 gma_power_end(&dev_priv->dev);
73 }
74 }
75}
76
/*
 * Display controller interrupt handler for pipe event.
 *
 * Reads the second-level PIPExSTAT register, reduces it to the events we
 * have enabled, acknowledges them in hardware, and forwards any vblank
 * event to DRM (completing a pending page flip if one is queued).
 */
static void gma_pipe_event_handler(struct drm_device *dev, int pipe)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	uint32_t pipe_stat_val = 0;
	uint32_t pipe_stat_reg = gma_pipestat(pipe);
	uint32_t pipe_enable = dev_priv->pipestat[pipe];
	/* High half of the cached word tracks status bits (see the
	 * mask >> 16 write in gma_enable_pipestat()). */
	uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
	uint32_t pipe_clear;
	uint32_t i = 0;

	spin_lock(&dev_priv->irqmask_lock);

	/* Keep only events that are both enabled (low half) and
	 * asserted as status (high half). */
	pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
	pipe_stat_val &= pipe_enable | pipe_status;
	pipe_stat_val &= pipe_stat_val >> 16;

	spin_unlock(&dev_priv->irqmask_lock);

	/* Clear the 2nd level interrupt status bits
	 * Sometimes the bits are very sticky so we repeat until they unstick */
	for (i = 0; i < 0xffff; i++) {
		PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
		pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;

		if (pipe_clear == 0)
			break;
	}

	if (pipe_clear)
		dev_err(dev->dev,
			"%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
			__func__, pipe, PSB_RVDC32(pipe_stat_reg));

	if (pipe_stat_val & PIPE_VBLANK_STATUS) {
		struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
		struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
		unsigned long flags;

		drm_handle_vblank(dev, pipe);

		/* Complete a pending page flip on this vblank, if any. */
		spin_lock_irqsave(&dev->event_lock, flags);
		if (gma_crtc->page_flip_event) {
			drm_crtc_send_vblank_event(crtc,
						   gma_crtc->page_flip_event);
			gma_crtc->page_flip_event = NULL;
			/* Drop the reference taken when the flip was queued. */
			drm_crtc_vblank_put(crtc);
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}
131
132/*
133 * Display controller interrupt handler.
134 */
135static void gma_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
136{
137 if (vdc_stat & _PSB_IRQ_ASLE)
138 psb_intel_opregion_asle_intr(dev);
139
140 if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
141 gma_pipe_event_handler(dev, 0);
142
143 if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
144 gma_pipe_event_handler(dev, 1);
145}
146
/*
 * SGX interrupt handler
 *
 * Handles 2D-blit completion and BIF requester fault events, logging
 * MMU fault details, then acknowledges both SGX event status registers.
 */
static void gma_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	u32 val, addr;

	/* Read the 2D blit status; the value is otherwise unused.
	 * NOTE(review): presumably a read-to-acknowledge - confirm. */
	if (stat_1 & _PSB_CE_TWOD_COMPLETE)
		val = PSB_RSGX32(PSB_CR_2D_BLIT_STATUS);

	if (stat_2 & _PSB_CE2_BIF_REQUESTER_FAULT) {
		val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
		addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
		if (val) {
			/* Decode and log which requestor faulted. */
			if (val & _PSB_CBI_STAT_PF_N_RW)
				DRM_ERROR("SGX MMU page fault:");
			else
				DRM_ERROR("SGX MMU read / write protection fault:");

			if (val & _PSB_CBI_STAT_FAULT_CACHE)
				DRM_ERROR("\tCache requestor");
			if (val & _PSB_CBI_STAT_FAULT_TA)
				DRM_ERROR("\tTA requestor");
			if (val & _PSB_CBI_STAT_FAULT_VDM)
				DRM_ERROR("\tVDM requestor");
			if (val & _PSB_CBI_STAT_FAULT_2D)
				DRM_ERROR("\t2D requestor");
			if (val & _PSB_CBI_STAT_FAULT_PBE)
				DRM_ERROR("\tPBE requestor");
			if (val & _PSB_CBI_STAT_FAULT_TSP)
				DRM_ERROR("\tTSP requestor");
			if (val & _PSB_CBI_STAT_FAULT_ISP)
				DRM_ERROR("\tISP requestor");
			if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
				DRM_ERROR("\tUSSEPDS requestor");
			if (val & _PSB_CBI_STAT_FAULT_HOST)
				DRM_ERROR("\tHost requestor");

			DRM_ERROR("\tMMU failing address is 0x%08x.\n",
				  (unsigned int)addr);
		}
	}

	/* Clear bits */
	PSB_WSGX32(stat_1, PSB_CR_EVENT_HOST_CLEAR);
	PSB_WSGX32(stat_2, PSB_CR_EVENT_HOST_CLEAR2);
	PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR2);	/* posting read */
}
196
/*
 * Top-level shared interrupt handler.
 *
 * Demultiplexes display (pipe/ASLE), SGX and hotplug sources from the
 * VDC interrupt identity register, dispatches them, then acknowledges
 * the handled bits.
 */
static irqreturn_t gma_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
	u32 sgx_stat_1, sgx_stat_2;
	int handled = 0;

	spin_lock(&dev_priv->irqmask_lock);

	/* Snapshot the pending-interrupt identity bits. */
	vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);

	if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
		dsp_int = 1;

	if (vdc_stat & _PSB_IRQ_SGX_FLAG)
		sgx_int = 1;
	if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC)
		hotplug_int = 1;

	/* Only acknowledge/dispatch sources we have actually unmasked. */
	vdc_stat &= dev_priv->vdc_irq_mask;
	spin_unlock(&dev_priv->irqmask_lock);

	if (dsp_int) {
		gma_vdc_interrupt(dev, vdc_stat);
		handled = 1;
	}

	if (sgx_int) {
		sgx_stat_1 = PSB_RSGX32(PSB_CR_EVENT_STATUS);
		sgx_stat_2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
		gma_sgx_interrupt(dev, sgx_stat_1, sgx_stat_2);
		handled = 1;
	}

	/* Note: this bit has other meanings on some devices, so we will
	   need to address that later if it ever matters */
	if (hotplug_int && dev_priv->ops->hotplug) {
		handled = dev_priv->ops->hotplug(dev);
		/* Write-back clears the hotplug status bits. */
		REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
	}

	/* Acknowledge the handled identity bits; the read posts the write. */
	PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
	(void) PSB_RVDC32(PSB_INT_IDENTITY_R);
	rmb();

	if (!handled)
		return IRQ_NONE;

	return IRQ_HANDLED;
}
248
/*
 * Mask and disable all interrupt sources, then build the VDC interrupt
 * mask that gma_irq_postinstall() will enable in hardware.
 */
void gma_irq_preinstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	/* Quiesce everything at the hardware level first. */
	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
	PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
	PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
	PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
	PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);	/* posting read */

	/* Preserve vblank delivery for pipes DRM already has enabled. */
	if (dev->vblank[0].enabled)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
	if (dev->vblank[1].enabled)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;

	/* Revisit this area - want per device masks ? */
	if (dev_priv->ops->hotplug)
		dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
	dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE | _PSB_IRQ_SGX_FLAG;

	/* This register is safe even if display island is off */
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
276
/*
 * Enable the interrupt sources selected by gma_irq_preinstall(): SGX
 * events, the VDC mask, per-pipe vblank status bits and hotplug.
 */
void gma_irq_postinstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	/* Enable 2D and MMU fault interrupts */
	PSB_WSGX32(_PSB_CE2_BIF_REQUESTER_FAULT, PSB_CR_EVENT_HOST_ENABLE2);
	PSB_WSGX32(_PSB_CE_TWOD_COMPLETE, PSB_CR_EVENT_HOST_ENABLE);
	PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); /* Post */

	/* This register is safe even if display island is off */
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);

	/* Sync the per-pipe status enables with DRM's vblank state. */
	for (i = 0; i < dev->num_crtcs; ++i) {
		if (dev->vblank[i].enabled)
			gma_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
		else
			gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
	}

	if (dev_priv->ops->hotplug_enable)
		dev_priv->ops->hotplug_enable(dev, true);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
306
307int gma_irq_install(struct drm_device *dev)
308{
309 struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
310 struct pci_dev *pdev = to_pci_dev(dev->dev);
311 int ret;
312
313 if (dev_priv->use_msi && pci_enable_msi(pdev)) {
314 dev_warn(dev->dev, "Enabling MSI failed!\n");
315 dev_priv->use_msi = false;
316 }
317
318 if (pdev->irq == IRQ_NOTCONNECTED)
319 return -ENOTCONN;
320
321 gma_irq_preinstall(dev);
322
323 /* PCI devices require shared interrupts. */
324 ret = request_irq(pdev->irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
325 if (ret)
326 return ret;
327
328 gma_irq_postinstall(dev);
329
330 dev_priv->irq_enabled = true;
331
332 return 0;
333}
334
/*
 * Tear down the interrupt handler installed by gma_irq_install():
 * disable and acknowledge all display sources, free the IRQ line and
 * disable MSI if it was enabled. No-op if interrupts were never
 * installed.
 */
void gma_irq_uninstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	unsigned long irqflags;
	unsigned int i;

	if (!dev_priv->irq_enabled)
		return;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	if (dev_priv->ops->hotplug_enable)
		dev_priv->ops->hotplug_enable(dev, false);

	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);

	/* Turn off the per-pipe vblank status enables. */
	for (i = 0; i < dev->num_crtcs; ++i) {
		if (dev->vblank[i].enabled)
			gma_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
	}

	/* Keep only the non-display (SGX/video) sources in the mask. */
	dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
				  _PSB_IRQ_MSVDX_FLAG |
				  _LNC_IRQ_TOPAZ_FLAG;

	/* These two registers are safe even if display island is off */
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);

	wmb();

	/* This register is safe even if display island is off */
	PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);

	free_irq(pdev->irq, dev);
	if (dev_priv->use_msi)
		pci_disable_msi(pdev);
}
375
/*
 * Enable vblank interrupt delivery for a CRTC.
 *
 * Returns -EINVAL when the pipe is not enabled (or its configuration
 * could not be read because the display island is powered down).
 */
int gma_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	unsigned long irqflags;
	uint32_t reg_val = 0;
	uint32_t pipeconf_reg = gma_pipeconf(pipe);

	if (gma_power_begin(dev, false)) {
		reg_val = REG_READ(pipeconf_reg);
		gma_power_end(dev);
	}

	/* PIPEACONF_ENABLE is used for every pipe; the enable bit is
	 * presumably at the same position in each PIPExCONF - confirm. */
	if (!(reg_val & PIPEACONF_ENABLE))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	if (pipe == 0)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
	else if (pipe == 1)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;

	/* Unmask/enable the vsync source, then the pipe status bit. */
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	gma_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);

	return 0;
}
408
409void gma_crtc_disable_vblank(struct drm_crtc *crtc)
410{
411 struct drm_device *dev = crtc->dev;
412 unsigned int pipe = crtc->index;
413 struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
414 unsigned long irqflags;
415
416 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
417
418 if (pipe == 0)
419 dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
420 else if (pipe == 1)
421 dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
422
423 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
424 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
425 gma_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
426
427 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
428}
429
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 *
 * Returns the hardware frame counter for the CRTC's pipe, or 0 when the
 * pipe is disabled or the display island cannot be powered up.
 */
u32 gma_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	uint32_t high_frame = PIPEAFRAMEHIGH;
	uint32_t low_frame = PIPEAFRAMEPIXEL;
	uint32_t pipeconf_reg = PIPEACONF;
	uint32_t reg_val = 0;
	uint32_t high1 = 0, high2 = 0, low = 0, count = 0;

	/* Select the frame-count registers for the requested pipe. */
	switch (pipe) {
	case 0:
		break;
	case 1:
		high_frame = PIPEBFRAMEHIGH;
		low_frame = PIPEBFRAMEPIXEL;
		pipeconf_reg = PIPEBCONF;
		break;
	case 2:
		high_frame = PIPECFRAMEHIGH;
		low_frame = PIPECFRAMEPIXEL;
		pipeconf_reg = PIPECCONF;
		break;
	default:
		dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
		return 0;
	}

	if (!gma_power_begin(dev, false))
		return 0;

	reg_val = REG_READ(pipeconf_reg);

	if (!(reg_val & PIPEACONF_ENABLE)) {
		dev_err(dev->dev, "trying to get vblank count for disabled pipe %u\n",
			pipe);
		goto err_gma_power_end;
	}

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
		       PIPE_FRAME_LOW_SHIFT);
		high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);

	/* Combine the stable high half with the low frame-count field. */
	count = (high1 << 8) | low;

err_gma_power_end:
	gma_power_end(dev);

	return count;
}
493
1// SPDX-License-Identifier: GPL-2.0-only
2/**************************************************************************
3 * Copyright (c) 2007, Intel Corporation.
4 * All Rights Reserved.
5 *
6 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
7 * develop this driver.
8 *
9 **************************************************************************/
10
11#include <drm/drm_vblank.h>
12
13#include "power.h"
14#include "psb_drv.h"
15#include "psb_intel_reg.h"
16#include "psb_irq.h"
17#include "psb_reg.h"
18
19/*
20 * inline functions
21 */
22
23static inline u32
24psb_pipestat(int pipe)
25{
26 if (pipe == 0)
27 return PIPEASTAT;
28 if (pipe == 1)
29 return PIPEBSTAT;
30 if (pipe == 2)
31 return PIPECSTAT;
32 BUG();
33}
34
35static inline u32
36mid_pipe_event(int pipe)
37{
38 if (pipe == 0)
39 return _PSB_PIPEA_EVENT_FLAG;
40 if (pipe == 1)
41 return _MDFLD_PIPEB_EVENT_FLAG;
42 if (pipe == 2)
43 return _MDFLD_PIPEC_EVENT_FLAG;
44 BUG();
45}
46
47static inline u32
48mid_pipe_vsync(int pipe)
49{
50 if (pipe == 0)
51 return _PSB_VSYNC_PIPEA_FLAG;
52 if (pipe == 1)
53 return _PSB_VSYNC_PIPEB_FLAG;
54 if (pipe == 2)
55 return _MDFLD_PIPEC_VBLANK_FLAG;
56 BUG();
57}
58
59static inline u32
60mid_pipeconf(int pipe)
61{
62 if (pipe == 0)
63 return PIPEACONF;
64 if (pipe == 1)
65 return PIPEBCONF;
66 if (pipe == 2)
67 return PIPECCONF;
68 BUG();
69}
70
71void
72psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
73{
74 if ((dev_priv->pipestat[pipe] & mask) != mask) {
75 u32 reg = psb_pipestat(pipe);
76 dev_priv->pipestat[pipe] |= mask;
77 /* Enable the interrupt, clear any pending status */
78 if (gma_power_begin(dev_priv->dev, false)) {
79 u32 writeVal = PSB_RVDC32(reg);
80 writeVal |= (mask | (mask >> 16));
81 PSB_WVDC32(writeVal, reg);
82 (void) PSB_RVDC32(reg);
83 gma_power_end(dev_priv->dev);
84 }
85 }
86}
87
88void
89psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
90{
91 if ((dev_priv->pipestat[pipe] & mask) != 0) {
92 u32 reg = psb_pipestat(pipe);
93 dev_priv->pipestat[pipe] &= ~mask;
94 if (gma_power_begin(dev_priv->dev, false)) {
95 u32 writeVal = PSB_RVDC32(reg);
96 writeVal &= ~mask;
97 PSB_WVDC32(writeVal, reg);
98 (void) PSB_RVDC32(reg);
99 gma_power_end(dev_priv->dev);
100 }
101 }
102}
103
/*
 * Display controller interrupt handler for pipe event.
 *
 * Reads the second-level PIPExSTAT register, reduces it to the events we
 * have enabled, acknowledges them in hardware, and forwards any vblank
 * event to DRM (completing a pending page flip if one is queued).
 */
static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
{
	struct drm_psb_private *dev_priv =
		(struct drm_psb_private *) dev->dev_private;

	uint32_t pipe_stat_val = 0;
	uint32_t pipe_stat_reg = psb_pipestat(pipe);
	uint32_t pipe_enable = dev_priv->pipestat[pipe];
	/* High half of the cached word tracks status bits (see the
	 * mask >> 16 write in psb_enable_pipestat()). */
	uint32_t pipe_status = dev_priv->pipestat[pipe] >> 16;
	uint32_t pipe_clear;
	uint32_t i = 0;

	spin_lock(&dev_priv->irqmask_lock);

	/* Keep only events that are both enabled (low half) and
	 * asserted as status (high half). */
	pipe_stat_val = PSB_RVDC32(pipe_stat_reg);
	pipe_stat_val &= pipe_enable | pipe_status;
	pipe_stat_val &= pipe_stat_val >> 16;

	spin_unlock(&dev_priv->irqmask_lock);

	/* Clear the 2nd level interrupt status bits
	 * Sometimes the bits are very sticky so we repeat until they unstick */
	for (i = 0; i < 0xffff; i++) {
		PSB_WVDC32(PSB_RVDC32(pipe_stat_reg), pipe_stat_reg);
		pipe_clear = PSB_RVDC32(pipe_stat_reg) & pipe_status;

		if (pipe_clear == 0)
			break;
	}

	if (pipe_clear)
		dev_err(dev->dev,
			"%s, can't clear status bits for pipe %d, its value = 0x%x.\n",
			__func__, pipe, PSB_RVDC32(pipe_stat_reg));

	if (pipe_stat_val & PIPE_VBLANK_STATUS) {
		struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
		struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
		unsigned long flags;

		drm_handle_vblank(dev, pipe);

		/* Complete a pending page flip on this vblank, if any. */
		spin_lock_irqsave(&dev->event_lock, flags);
		if (gma_crtc->page_flip_event) {
			drm_crtc_send_vblank_event(crtc,
						   gma_crtc->page_flip_event);
			gma_crtc->page_flip_event = NULL;
			/* Drop the reference taken when the flip was queued. */
			drm_crtc_vblank_put(crtc);
		}
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}
159
160/*
161 * Display controller interrupt handler.
162 */
163static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
164{
165 if (vdc_stat & _PSB_IRQ_ASLE)
166 psb_intel_opregion_asle_intr(dev);
167
168 if (vdc_stat & _PSB_VSYNC_PIPEA_FLAG)
169 mid_pipe_event_handler(dev, 0);
170
171 if (vdc_stat & _PSB_VSYNC_PIPEB_FLAG)
172 mid_pipe_event_handler(dev, 1);
173}
174
/*
 * SGX interrupt handler
 *
 * Handles 2D-blit completion and BIF requester fault events, logging
 * MMU fault details, then acknowledges both SGX event status registers.
 */
static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 val, addr;

	/* Read the 2D blit status; the value is otherwise unused.
	 * NOTE(review): presumably a read-to-acknowledge - confirm. */
	if (stat_1 & _PSB_CE_TWOD_COMPLETE)
		val = PSB_RSGX32(PSB_CR_2D_BLIT_STATUS);

	if (stat_2 & _PSB_CE2_BIF_REQUESTER_FAULT) {
		val = PSB_RSGX32(PSB_CR_BIF_INT_STAT);
		addr = PSB_RSGX32(PSB_CR_BIF_FAULT);
		if (val) {
			/* Decode and log which requestor faulted. */
			if (val & _PSB_CBI_STAT_PF_N_RW)
				DRM_ERROR("SGX MMU page fault:");
			else
				DRM_ERROR("SGX MMU read / write protection fault:");

			if (val & _PSB_CBI_STAT_FAULT_CACHE)
				DRM_ERROR("\tCache requestor");
			if (val & _PSB_CBI_STAT_FAULT_TA)
				DRM_ERROR("\tTA requestor");
			if (val & _PSB_CBI_STAT_FAULT_VDM)
				DRM_ERROR("\tVDM requestor");
			if (val & _PSB_CBI_STAT_FAULT_2D)
				DRM_ERROR("\t2D requestor");
			if (val & _PSB_CBI_STAT_FAULT_PBE)
				DRM_ERROR("\tPBE requestor");
			if (val & _PSB_CBI_STAT_FAULT_TSP)
				DRM_ERROR("\tTSP requestor");
			if (val & _PSB_CBI_STAT_FAULT_ISP)
				DRM_ERROR("\tISP requestor");
			if (val & _PSB_CBI_STAT_FAULT_USSEPDS)
				DRM_ERROR("\tUSSEPDS requestor");
			if (val & _PSB_CBI_STAT_FAULT_HOST)
				DRM_ERROR("\tHost requestor");

			DRM_ERROR("\tMMU failing address is 0x%08x.\n",
				  (unsigned int)addr);
		}
	}

	/* Clear bits */
	PSB_WSGX32(stat_1, PSB_CR_EVENT_HOST_CLEAR);
	PSB_WSGX32(stat_2, PSB_CR_EVENT_HOST_CLEAR2);
	PSB_RSGX32(PSB_CR_EVENT_HOST_CLEAR2);	/* posting read */
}
224
/*
 * Top-level shared interrupt handler.
 *
 * Demultiplexes display (pipe/ASLE), SGX and hotplug sources from the
 * VDC interrupt identity register, dispatches them, then acknowledges
 * the handled bits. Display events are only dispatched while the
 * display island is powered on.
 */
irqreturn_t psb_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_psb_private *dev_priv = dev->dev_private;
	uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
	u32 sgx_stat_1, sgx_stat_2;
	int handled = 0;

	spin_lock(&dev_priv->irqmask_lock);

	/* Snapshot the pending-interrupt identity bits. */
	vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);

	if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
		dsp_int = 1;

	if (vdc_stat & _PSB_IRQ_SGX_FLAG)
		sgx_int = 1;
	if (vdc_stat & _PSB_IRQ_DISP_HOTSYNC)
		hotplug_int = 1;

	/* Only acknowledge/dispatch sources we have actually unmasked. */
	vdc_stat &= dev_priv->vdc_irq_mask;
	spin_unlock(&dev_priv->irqmask_lock);

	if (dsp_int && gma_power_is_on(dev)) {
		psb_vdc_interrupt(dev, vdc_stat);
		handled = 1;
	}

	if (sgx_int) {
		sgx_stat_1 = PSB_RSGX32(PSB_CR_EVENT_STATUS);
		sgx_stat_2 = PSB_RSGX32(PSB_CR_EVENT_STATUS2);
		psb_sgx_interrupt(dev, sgx_stat_1, sgx_stat_2);
		handled = 1;
	}

	/* Note: this bit has other meanings on some devices, so we will
	   need to address that later if it ever matters */
	if (hotplug_int && dev_priv->ops->hotplug) {
		handled = dev_priv->ops->hotplug(dev);
		/* Write-back clears the hotplug status bits. */
		REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
	}

	/* Acknowledge the handled identity bits; the read posts the write. */
	PSB_WVDC32(vdc_stat, PSB_INT_IDENTITY_R);
	(void) PSB_RVDC32(PSB_INT_IDENTITY_R);
	rmb();

	if (!handled)
		return IRQ_NONE;

	return IRQ_HANDLED;
}
276
/*
 * Mask and disable all interrupt sources (hardware writes only while
 * the display island is powered on), then build the VDC interrupt mask
 * that psb_irq_postinstall() will enable.
 */
void psb_irq_preinstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv =
		(struct drm_psb_private *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	if (gma_power_is_on(dev)) {
		/* Quiesce everything at the hardware level first. */
		PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
		PSB_WVDC32(0x00000000, PSB_INT_MASK_R);
		PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
		PSB_WSGX32(0x00000000, PSB_CR_EVENT_HOST_ENABLE);
		PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE);	/* posting read */
	}
	/* Preserve vblank delivery for pipes DRM already has enabled. */
	if (dev->vblank[0].enabled)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
	if (dev->vblank[1].enabled)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;

	/* Revisit this area - want per device masks ? */
	if (dev_priv->ops->hotplug)
		dev_priv->vdc_irq_mask |= _PSB_IRQ_DISP_HOTSYNC;
	dev_priv->vdc_irq_mask |= _PSB_IRQ_ASLE | _PSB_IRQ_SGX_FLAG;

	/* This register is safe even if display island is off */
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
306
/*
 * Enable the interrupt sources selected by psb_irq_preinstall(): SGX
 * events, the VDC mask, per-pipe vblank status bits and hotplug.
 *
 * Always returns 0.
 */
int psb_irq_postinstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	/* Enable 2D and MMU fault interrupts */
	PSB_WSGX32(_PSB_CE2_BIF_REQUESTER_FAULT, PSB_CR_EVENT_HOST_ENABLE2);
	PSB_WSGX32(_PSB_CE_TWOD_COMPLETE, PSB_CR_EVENT_HOST_ENABLE);
	PSB_RSGX32(PSB_CR_EVENT_HOST_ENABLE); /* Post */

	/* This register is safe even if display island is off */
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);

	/* Sync the per-pipe status enables with DRM's vblank state. */
	for (i = 0; i < dev->num_crtcs; ++i) {
		if (dev->vblank[i].enabled)
			psb_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
		else
			psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
	}

	if (dev_priv->ops->hotplug_enable)
		dev_priv->ops->hotplug_enable(dev, true);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
	return 0;
}
337
/*
 * Disable and acknowledge all display interrupt sources, keeping only
 * the non-display (SGX/video) bits in the cached VDC mask. Note this
 * variant does not free the IRQ line itself.
 */
void psb_irq_uninstall(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	unsigned int i;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	if (dev_priv->ops->hotplug_enable)
		dev_priv->ops->hotplug_enable(dev, false);

	PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);

	/* Turn off the per-pipe vblank status enables. */
	for (i = 0; i < dev->num_crtcs; ++i) {
		if (dev->vblank[i].enabled)
			psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE);
	}

	/* Keep only the non-display (SGX/video) sources in the mask. */
	dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG |
				  _PSB_IRQ_MSVDX_FLAG |
				  _LNC_IRQ_TOPAZ_FLAG;

	/* These two registers are safe even if display island is off */
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);

	wmb();

	/* This register is safe even if display island is off */
	PSB_WVDC32(PSB_RVDC32(PSB_INT_IDENTITY_R), PSB_INT_IDENTITY_R);
	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
370
/*
 * It is used to enable VBLANK interrupt
 *
 * Returns -EINVAL when the pipe is not enabled (or its configuration
 * could not be read because the display island is powered down).
 */
int psb_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t reg_val = 0;
	uint32_t pipeconf_reg = mid_pipeconf(pipe);

	if (gma_power_begin(dev, false)) {
		reg_val = REG_READ(pipeconf_reg);
		gma_power_end(dev);
	}

	/* PIPEACONF_ENABLE is used for every pipe; the enable bit is
	 * presumably at the same position in each PIPExCONF - confirm. */
	if (!(reg_val & PIPEACONF_ENABLE))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);

	if (pipe == 0)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEA_FLAG;
	else if (pipe == 1)
		dev_priv->vdc_irq_mask |= _PSB_VSYNC_PIPEB_FLAG;

	/* Unmask/enable the vsync source, then the pipe status bit. */
	PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
	PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
	psb_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);

	return 0;
}
406
407/*
408 * It is used to disable VBLANK interrupt
409 */
410void psb_disable_vblank(struct drm_crtc *crtc)
411{
412 struct drm_device *dev = crtc->dev;
413 unsigned int pipe = crtc->index;
414 struct drm_psb_private *dev_priv = dev->dev_private;
415 unsigned long irqflags;
416
417 spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
418
419 if (pipe == 0)
420 dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEA_FLAG;
421 else if (pipe == 1)
422 dev_priv->vdc_irq_mask &= ~_PSB_VSYNC_PIPEB_FLAG;
423
424 PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
425 PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
426 psb_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_ENABLE);
427
428 spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
429}
430
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 *
 * Returns the hardware frame counter for the CRTC's pipe, or 0 when the
 * pipe is disabled or the display island cannot be powered up.
 */
u32 psb_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;
	uint32_t high_frame = PIPEAFRAMEHIGH;
	uint32_t low_frame = PIPEAFRAMEPIXEL;
	uint32_t pipeconf_reg = PIPEACONF;
	uint32_t reg_val = 0;
	uint32_t high1 = 0, high2 = 0, low = 0, count = 0;

	/* Select the frame-count registers for the requested pipe. */
	switch (pipe) {
	case 0:
		break;
	case 1:
		high_frame = PIPEBFRAMEHIGH;
		low_frame = PIPEBFRAMEPIXEL;
		pipeconf_reg = PIPEBCONF;
		break;
	case 2:
		high_frame = PIPECFRAMEHIGH;
		low_frame = PIPECFRAMEPIXEL;
		pipeconf_reg = PIPECCONF;
		break;
	default:
		dev_err(dev->dev, "%s, invalid pipe.\n", __func__);
		return 0;
	}

	if (!gma_power_begin(dev, false))
		return 0;

	reg_val = REG_READ(pipeconf_reg);

	if (!(reg_val & PIPEACONF_ENABLE)) {
		dev_err(dev->dev, "trying to get vblank count for disabled pipe %u\n",
			pipe);
		goto psb_get_vblank_counter_exit;
	}

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low = ((REG_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
		       PIPE_FRAME_LOW_SHIFT);
		high2 = ((REG_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);

	/* Combine the stable high half with the low frame-count field. */
	count = (high1 << 8) | low;

psb_get_vblank_counter_exit:

	gma_power_end(dev);

	return count;
}
495