/*
 * drivers/gpu/drm/omapdrm/omap_irq.c
 *
 * Copyright (C) 2012 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "omap_drv.h"

static DEFINE_SPINLOCK(list_lock);

static void omap_irq_error_handler(struct omap_drm_irq *irq,
		uint32_t irqstatus)
{
	DRM_ERROR("errors: %08x\n", irqstatus);
}

/* call with list_lock and dispc runtime held */
static void omap_irq_update(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_irq *irq;
	uint32_t irqmask = priv->vblank_mask;

	assert_spin_locked(&list_lock);

	list_for_each_entry(irq, &priv->irq_list, node)
		irqmask |= irq->irqmask;

	DBG("irqmask=%08x", irqmask);

	dispc_write_irqenable(irqmask);
	dispc_read_irqenable();	/* flush posted write */
}

void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
{
	struct omap_drm_private *priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);

	if (!WARN_ON(irq->registered)) {
		irq->registered = true;
		list_add(&irq->node, &priv->irq_list);
		omap_irq_update(dev);
	}

	spin_unlock_irqrestore(&list_lock, flags);
}

void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq)
{
	dispc_runtime_get();

	__omap_irq_register(dev, irq);

	dispc_runtime_put();
}

void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);

	if (!WARN_ON(!irq->registered)) {
		irq->registered = false;
		list_del(&irq->node);
		omap_irq_update(dev);
	}

	spin_unlock_irqrestore(&list_lock, flags);
}

void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq)
{
	dispc_runtime_get();

	__omap_irq_unregister(dev, irq);

	dispc_runtime_put();
}
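
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * embeds a struct omap_drm_irq, fills in the status mask and callback,
 * and registers it; the callback then runs from the top-level interrupt
 * handler with the matching status bits.  The handler name and the
 * DISPC_IRQ_FRAMEDONE bit below are example choices, not requirements
 * of this API.
 *
 *	static void my_framedone_irq(struct omap_drm_irq *irq,
 *			uint32_t irqstatus)
 *	{
 *		... react to the FRAMEDONE status bits ...
 *	}
 *
 *	static struct omap_drm_irq my_irq = {
 *		.irqmask = DISPC_IRQ_FRAMEDONE,
 *		.irq = my_framedone_irq,
 *	};
 *
 *	omap_irq_register(dev, &my_irq);
 *	...
 *	omap_irq_unregister(dev, &my_irq);
 */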

struct omap_irq_wait {
	struct omap_drm_irq irq;
	int count;
};

static DECLARE_WAIT_QUEUE_HEAD(wait_event);

static void wait_irq(struct omap_drm_irq *irq, uint32_t irqstatus)
{
	struct omap_irq_wait *wait =
			container_of(irq, struct omap_irq_wait, irq);
	wait->count--;
	wake_up_all(&wait_event);
}

struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev,
		uint32_t irqmask, int count)
{
	struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
	wait->irq.irq = wait_irq;
	wait->irq.irqmask = irqmask;
	wait->count = count;
	omap_irq_register(dev, &wait->irq);
	return wait;
}

int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
		unsigned long timeout)
{
	int ret = wait_event_timeout(wait_event, (wait->count <= 0), timeout);
	omap_irq_unregister(dev, &wait->irq);
	kfree(wait);
	if (ret == 0)
		return -1;
	return 0;
}
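
/*
 * Usage sketch (illustrative only): block until one FRAMEDONE interrupt
 * has fired, or give up after 50ms.  The interrupt bit and the timeout
 * are example values; omap_irq_wait() returns non-zero on timeout and
 * frees the wait object in either case.
 *
 *	struct omap_irq_wait *wait;
 *
 *	wait = omap_irq_wait_init(dev, DISPC_IRQ_FRAMEDONE, 1);
 *	if (omap_irq_wait(dev, wait, msecs_to_jiffies(50)))
 *		dev_err(dev->dev, "timeout waiting for FRAMEDONE\n");
 */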

/**
 * omap_irq_enable_vblank - enable vblank interrupt events
 * @dev: DRM device
 * @pipe: CRTC index for which to enable vblank interrupts
 *
 * Enable vblank interrupts for the CRTC identified by @pipe.  If the device
 * doesn't have a hardware vblank counter, this routine should be a no-op,
 * since interrupts will have to stay on to keep the count accurate.
 *
 * RETURNS
 * Zero on success, appropriate errno if the given CRTC's vblank
 * interrupt cannot be enabled.
 */
int omap_irq_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct drm_crtc *crtc = priv->crtcs[pipe];
	unsigned long flags;

	DBG("dev=%p, crtc=%u", dev, pipe);

	spin_lock_irqsave(&list_lock, flags);
	priv->vblank_mask |= pipe2vbl(crtc);
	omap_irq_update(dev);
	spin_unlock_irqrestore(&list_lock, flags);

	return 0;
}

/**
 * omap_irq_disable_vblank - disable vblank interrupt events
 * @dev: DRM device
 * @pipe: CRTC index for which to disable vblank interrupts
 *
 * Disable vblank interrupts for the CRTC identified by @pipe.  If the device
 * doesn't have a hardware vblank counter, this routine should be a no-op,
 * since interrupts will have to stay on to keep the count accurate.
 */
void omap_irq_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct drm_crtc *crtc = priv->crtcs[pipe];
	unsigned long flags;

	DBG("dev=%p, crtc=%u", dev, pipe);

	spin_lock_irqsave(&list_lock, flags);
	priv->vblank_mask &= ~pipe2vbl(crtc);
	omap_irq_update(dev);
	spin_unlock_irqrestore(&list_lock, flags);
}
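
/*
 * Hook-up sketch (illustrative, not part of this file): with the
 * (dev, pipe) prototypes above, these helpers are meant to be plugged
 * into the driver's struct drm_driver (this lives in the driver core
 * file, not here); the initializer below is trimmed to the relevant
 * fields and the surrounding driver definition is assumed.
 *
 *	static struct drm_driver omap_drm_driver = {
 *		...
 *		.get_vblank_counter = drm_vblank_no_hw_counter,
 *		.enable_vblank = omap_irq_enable_vblank,
 *		.disable_vblank = omap_irq_disable_vblank,
 *		...
 *	};
 */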

static irqreturn_t omap_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_irq *handler, *n;
	unsigned long flags;
	unsigned int id;
	u32 irqstatus;

	irqstatus = dispc_read_irqstatus();
	dispc_clear_irqstatus(irqstatus);
	dispc_read_irqstatus();	/* flush posted write */

	VERB("irqs: %08x", irqstatus);

	for (id = 0; id < priv->num_crtcs; id++) {
		struct drm_crtc *crtc = priv->crtcs[id];

		if (irqstatus & pipe2vbl(crtc))
			drm_handle_vblank(dev, id);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_for_each_entry_safe(handler, n, &priv->irq_list, node) {
		if (handler->irqmask & irqstatus) {
			spin_unlock_irqrestore(&list_lock, flags);
			handler->irq(handler, handler->irqmask & irqstatus);
			spin_lock_irqsave(&list_lock, flags);
		}
	}
	spin_unlock_irqrestore(&list_lock, flags);

	return IRQ_HANDLED;
}

/*
 * We need a special version, instead of just using drm_irq_install(),
 * because we need to register the irq via omapdss.  Once omapdss and
 * omapdrm are merged together we can assign the dispc hwmod data to
 * ourselves and drop these functions in favour of plain
 * drm_irq_{install,uninstall}().
 */

int omap_drm_irq_install(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_irq *error_handler = &priv->error_handler;
	int ret;

	INIT_LIST_HEAD(&priv->irq_list);

	dispc_runtime_get();
	dispc_clear_irqstatus(0xffffffff);
	dispc_runtime_put();

	ret = dispc_request_irq(omap_irq_handler, dev);
	if (ret < 0)
		return ret;

	error_handler->irq = omap_irq_error_handler;
	error_handler->irqmask = DISPC_IRQ_OCP_ERR;

	/* For now ignore DISPC_IRQ_SYNC_LOST_DIGIT; really we just need
	 * to ignore it while enabling tv-out.
	 */
	error_handler->irqmask &= ~DISPC_IRQ_SYNC_LOST_DIGIT;

	omap_irq_register(dev, error_handler);

	dev->irq_enabled = true;

	return 0;
}

void omap_drm_irq_uninstall(struct drm_device *dev)
{
	unsigned long irqflags;
	int i;

	if (!dev->irq_enabled)
		return;

	dev->irq_enabled = false;

	/* Wake up any waiters so they don't hang. */
	if (dev->num_crtcs) {
		spin_lock_irqsave(&dev->vbl_lock, irqflags);
		for (i = 0; i < dev->num_crtcs; i++) {
			wake_up(&dev->vblank[i].queue);
			dev->vblank[i].enabled = false;
			dev->vblank[i].last =
				dev->driver->get_vblank_counter(dev, i);
		}
		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
	}

	dispc_free_irq(dev);
}

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 * Author: Rob Clark <rob.clark@linaro.org>
 */

#include <drm/drm_vblank.h>

#include "omap_drv.h"

struct omap_irq_wait {
	struct list_head node;
	wait_queue_head_t wq;
	u32 irqmask;
	int count;
};

/* call with wait_lock and dispc runtime held */
static void omap_irq_update(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_irq_wait *wait;
	u32 irqmask = priv->irq_mask;

	assert_spin_locked(&priv->wait_lock);

	list_for_each_entry(wait, &priv->wait_list, node)
		irqmask |= wait->irqmask;

	DBG("irqmask=%08x", irqmask);

	dispc_write_irqenable(priv->dispc, irqmask);
}

static void omap_irq_wait_handler(struct omap_irq_wait *wait)
{
	wait->count--;
	wake_up(&wait->wq);
}

struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev,
		u32 irqmask, int count)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
	unsigned long flags;

	init_waitqueue_head(&wait->wq);
	wait->irqmask = irqmask;
	wait->count = count;

	spin_lock_irqsave(&priv->wait_lock, flags);
	list_add(&wait->node, &priv->wait_list);
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return wait;
}

int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
		unsigned long timeout)
{
	struct omap_drm_private *priv = dev->dev_private;
	unsigned long flags;
	int ret;

	ret = wait_event_timeout(wait->wq, (wait->count <= 0), timeout);

	spin_lock_irqsave(&priv->wait_lock, flags);
	list_del(&wait->node);
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	kfree(wait);

	return ret == 0 ? -1 : 0;
}
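
/*
 * Usage sketch (illustrative only): wait for one FRAMEDONE interrupt on
 * a given channel, with a 50ms timeout.  The channel, count and timeout
 * are example values; omap_irq_wait() returns non-zero on timeout and
 * frees the wait object in either case.
 *
 *	struct omap_drm_private *priv = dev->dev_private;
 *	struct omap_irq_wait *wait;
 *	u32 irqmask = dispc_mgr_get_framedone_irq(priv->dispc, channel);
 *
 *	wait = omap_irq_wait_init(dev, irqmask, 1);
 *	if (omap_irq_wait(dev, wait, msecs_to_jiffies(50)))
 *		dev_err(dev->dev, "timeout waiting for framedone\n");
 */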

int omap_irq_enable_framedone(struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev = crtc->dev;
	struct omap_drm_private *priv = dev->dev_private;
	unsigned long flags;
	enum omap_channel channel = omap_crtc_channel(crtc);
	int framedone_irq =
		dispc_mgr_get_framedone_irq(priv->dispc, channel);

	DBG("dev=%p, crtc=%u, enable=%d", dev, channel, enable);

	spin_lock_irqsave(&priv->wait_lock, flags);
	if (enable)
		priv->irq_mask |= framedone_irq;
	else
		priv->irq_mask &= ~framedone_irq;
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return 0;
}

/**
 * omap_irq_enable_vblank - enable vblank interrupt events
 * @crtc: DRM CRTC
 *
 * Enable vblank interrupts for @crtc.  If the device doesn't have
 * a hardware vblank counter, this routine should be a no-op, since
 * interrupts will have to stay on to keep the count accurate.
 *
 * RETURNS
 * Zero on success, appropriate errno if the given @crtc's vblank
 * interrupt cannot be enabled.
 */
int omap_irq_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct omap_drm_private *priv = dev->dev_private;
	unsigned long flags;
	enum omap_channel channel = omap_crtc_channel(crtc);

	DBG("dev=%p, crtc=%u", dev, channel);

	spin_lock_irqsave(&priv->wait_lock, flags);
	priv->irq_mask |= dispc_mgr_get_vsync_irq(priv->dispc,
						  channel);
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return 0;
}

/**
 * omap_irq_disable_vblank - disable vblank interrupt events
 * @crtc: DRM CRTC
 *
 * Disable vblank interrupts for @crtc.  If the device doesn't have
 * a hardware vblank counter, this routine should be a no-op, since
 * interrupts will have to stay on to keep the count accurate.
 */
void omap_irq_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct omap_drm_private *priv = dev->dev_private;
	unsigned long flags;
	enum omap_channel channel = omap_crtc_channel(crtc);

	DBG("dev=%p, crtc=%u", dev, channel);

	spin_lock_irqsave(&priv->wait_lock, flags);
	priv->irq_mask &= ~dispc_mgr_get_vsync_irq(priv->dispc,
						   channel);
	omap_irq_update(dev);
	spin_unlock_irqrestore(&priv->wait_lock, flags);
}
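
/*
 * Hook-up sketch (illustrative, not part of this file): with the
 * per-CRTC prototypes above, these two helpers serve as the
 * .enable_vblank and .disable_vblank callbacks of a struct
 * drm_crtc_funcs; the initializer below is trimmed to those fields and
 * the rest of the CRTC funcs definition is assumed.
 *
 *	static const struct drm_crtc_funcs omap_crtc_funcs = {
 *		...
 *		.enable_vblank = omap_irq_enable_vblank,
 *		.disable_vblank = omap_irq_disable_vblank,
 *		...
 *	};
 */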

static void omap_irq_fifo_underflow(struct omap_drm_private *priv,
				    u32 irqstatus)
{
	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	static const struct {
		const char *name;
		u32 mask;
	} sources[] = {
		{ "gfx", DISPC_IRQ_GFX_FIFO_UNDERFLOW },
		{ "vid1", DISPC_IRQ_VID1_FIFO_UNDERFLOW },
		{ "vid2", DISPC_IRQ_VID2_FIFO_UNDERFLOW },
		{ "vid3", DISPC_IRQ_VID3_FIFO_UNDERFLOW },
	};

	const u32 mask = DISPC_IRQ_GFX_FIFO_UNDERFLOW
		       | DISPC_IRQ_VID1_FIFO_UNDERFLOW
		       | DISPC_IRQ_VID2_FIFO_UNDERFLOW
		       | DISPC_IRQ_VID3_FIFO_UNDERFLOW;
	unsigned int i;

	spin_lock(&priv->wait_lock);
	irqstatus &= priv->irq_mask & mask;
	spin_unlock(&priv->wait_lock);

	if (!irqstatus)
		return;

	if (!__ratelimit(&_rs))
		return;

	DRM_ERROR("FIFO underflow on ");

	for (i = 0; i < ARRAY_SIZE(sources); ++i) {
		if (sources[i].mask & irqstatus)
			pr_cont("%s ", sources[i].name);
	}

	pr_cont("(0x%08x)\n", irqstatus);
}

static void omap_irq_ocp_error_handler(struct drm_device *dev,
	u32 irqstatus)
{
	if (!(irqstatus & DISPC_IRQ_OCP_ERR))
		return;

	dev_err_ratelimited(dev->dev, "OCP error\n");
}

static irqreturn_t omap_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_irq_wait *wait, *n;
	unsigned long flags;
	unsigned int id;
	u32 irqstatus;

	irqstatus = dispc_read_irqstatus(priv->dispc);
	dispc_clear_irqstatus(priv->dispc, irqstatus);
	dispc_read_irqstatus(priv->dispc);	/* flush posted write */

	VERB("irqs: %08x", irqstatus);

	for (id = 0; id < priv->num_pipes; id++) {
		struct drm_crtc *crtc = priv->pipes[id].crtc;
		enum omap_channel channel = omap_crtc_channel(crtc);

		if (irqstatus & dispc_mgr_get_vsync_irq(priv->dispc, channel)) {
			drm_handle_vblank(dev, id);
			omap_crtc_vblank_irq(crtc);
		}

		if (irqstatus & dispc_mgr_get_sync_lost_irq(priv->dispc, channel))
			omap_crtc_error_irq(crtc, irqstatus);

		if (irqstatus & dispc_mgr_get_framedone_irq(priv->dispc, channel))
			omap_crtc_framedone_irq(crtc, irqstatus);
	}

	omap_irq_ocp_error_handler(dev, irqstatus);
	omap_irq_fifo_underflow(priv, irqstatus);

	spin_lock_irqsave(&priv->wait_lock, flags);
	list_for_each_entry_safe(wait, n, &priv->wait_list, node) {
		if (wait->irqmask & irqstatus)
			omap_irq_wait_handler(wait);
	}
	spin_unlock_irqrestore(&priv->wait_lock, flags);

	return IRQ_HANDLED;
}

static const u32 omap_underflow_irqs[] = {
	[OMAP_DSS_GFX] = DISPC_IRQ_GFX_FIFO_UNDERFLOW,
	[OMAP_DSS_VIDEO1] = DISPC_IRQ_VID1_FIFO_UNDERFLOW,
	[OMAP_DSS_VIDEO2] = DISPC_IRQ_VID2_FIFO_UNDERFLOW,
	[OMAP_DSS_VIDEO3] = DISPC_IRQ_VID3_FIFO_UNDERFLOW,
};

int omap_drm_irq_install(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	unsigned int num_mgrs = dispc_get_num_mgrs(priv->dispc);
	unsigned int max_planes;
	unsigned int i;
	int ret;

	spin_lock_init(&priv->wait_lock);
	INIT_LIST_HEAD(&priv->wait_list);

	priv->irq_mask = DISPC_IRQ_OCP_ERR;

	max_planes = min(ARRAY_SIZE(priv->planes),
			 ARRAY_SIZE(omap_underflow_irqs));
	for (i = 0; i < max_planes; ++i) {
		if (priv->planes[i])
			priv->irq_mask |= omap_underflow_irqs[i];
	}

	for (i = 0; i < num_mgrs; ++i)
		priv->irq_mask |= dispc_mgr_get_sync_lost_irq(priv->dispc, i);

	dispc_runtime_get(priv->dispc);
	dispc_clear_irqstatus(priv->dispc, 0xffffffff);
	dispc_runtime_put(priv->dispc);

	ret = dispc_request_irq(priv->dispc, omap_irq_handler, dev);
	if (ret < 0)
		return ret;

	priv->irq_enabled = true;

	return 0;
}

void omap_drm_irq_uninstall(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	if (!priv->irq_enabled)
		return;

	priv->irq_enabled = false;

	dispc_free_irq(priv->dispc, dev);
}