// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <trace/events/host1x.h>
#include "channel.h"
#include "dev.h"
#include "fence.h"
#include "intr.h"

/* Wait list management */

enum waitlist_state {
        WLS_PENDING,
        WLS_REMOVED,
        WLS_CANCELLED,
        WLS_HANDLED
};
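
/*
 * Waiter lifecycle, as implied by the transitions below: a waiter starts out
 * WLS_PENDING; the interrupt path moves it to WLS_REMOVED and then to
 * WLS_HANDLED once its action has run, while cancellation moves it from
 * WLS_PENDING to WLS_CANCELLED and finally to WLS_HANDLED. The
 * atomic_inc_return() in remove_completed_waiters() relies on these values
 * being declared in exactly this order.
 */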

static void waiter_release(struct kref *kref)
{
        kfree(container_of(kref, struct host1x_waitlist, refcount));
}

/*
 * add a waiter to a waiter queue, sorted by threshold
 * returns true if it was added at the head of the queue
 */
static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
                                struct list_head *queue)
{
        struct host1x_waitlist *pos;
        u32 thresh = waiter->thresh;

        list_for_each_entry_reverse(pos, queue, list)
                if ((s32)(pos->thresh - thresh) <= 0) {
                        list_add(&waiter->list, &pos->list);
                        return false;
                }

        list_add(&waiter->list, queue);
        return true;
}
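
/*
 * Note on the (s32) casts above and in remove_completed_waiters(): sync point
 * values are free-running 32-bit counters, so thresholds are compared by
 * signed difference rather than directly, which keeps the ordering correct
 * across wrap-around. For example, with pos->thresh == 0xfffffff0 and
 * thresh == 0x00000010, (s32)(0xfffffff0 - 0x00000010) is negative, so the
 * pre-wrap threshold still sorts before the post-wrap one.
 */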

/*
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into lists by actions
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
                        struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
        struct list_head *dest;
        struct host1x_waitlist *waiter, *next, *prev;

        list_for_each_entry_safe(waiter, next, head, list) {
                if ((s32)(waiter->thresh - sync) > 0)
                        break;

                dest = completed + waiter->action;

                /* consolidate submit cleanups */
                if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
                    !list_empty(dest)) {
                        prev = list_entry(dest->prev,
                                          struct host1x_waitlist, list);
                        if (prev->data == waiter->data) {
                                prev->count++;
                                dest = NULL;
                        }
                }

                /* PENDING->REMOVED or CANCELLED->HANDLED */
                if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
                        list_del(&waiter->list);
                        kref_put(&waiter->refcount, waiter_release);
                } else
                        list_move_tail(&waiter->list, dest);
        }
}
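
/*
 * The consolidation above means that when several submit-complete waiters for
 * the same channel finish in one pass, only the first is kept on the completed
 * list with its count bumped, so action_submit_complete() (and therefore
 * host1x_cdma_update()) runs once per channel rather than once per waiter.
 */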

static void reset_threshold_interrupt(struct host1x *host,
                                      struct list_head *head,
                                      unsigned int id)
{
        u32 thresh =
                list_first_entry(head, struct host1x_waitlist, list)->thresh;

        host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
        host1x_hw_intr_enable_syncpt_intr(host, id);
}

static void action_submit_complete(struct host1x_waitlist *waiter)
{
        struct host1x_channel *channel = waiter->data;

        host1x_cdma_update(&channel->cdma);

        /* Add nr_completed to trace */
        trace_host1x_channel_submit_complete(dev_name(channel->dev),
                                             waiter->count, waiter->thresh);
}

static void action_wakeup(struct host1x_waitlist *waiter)
{
        wait_queue_head_t *wq = waiter->data;

        wake_up(wq);
}

static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
{
        wait_queue_head_t *wq = waiter->data;

        wake_up_interruptible(wq);
}

static void action_signal_fence(struct host1x_waitlist *waiter)
{
        struct host1x_syncpt_fence *f = waiter->data;

        host1x_fence_signal(f);
}

typedef void (*action_handler)(struct host1x_waitlist *waiter);

static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
        action_submit_complete,
        action_wakeup,
        action_wakeup_interruptible,
        action_signal_fence,
};
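
/*
 * The table above is indexed by each waiter's ->action value, so the entries
 * must stay in the same order as the enum host1x_intr_action declaration.
 */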

static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
        struct list_head *head = completed;
        unsigned int i;

        for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
                action_handler handler = action_handlers[i];
                struct host1x_waitlist *waiter, *next;

                list_for_each_entry_safe(waiter, next, head, list) {
                        list_del(&waiter->list);
                        handler(waiter);
                        WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
                                WLS_REMOVED);
                        kref_put(&waiter->refcount, waiter_release);
                }
        }
}

/*
 * Remove & handle all waiters that have completed for the given syncpt
 */
static int process_wait_list(struct host1x *host,
                             struct host1x_syncpt *syncpt,
                             u32 threshold)
{
        struct list_head completed[HOST1X_INTR_ACTION_COUNT];
        unsigned int i;
        int empty;

        for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
                INIT_LIST_HEAD(completed + i);

        spin_lock(&syncpt->intr.lock);

        remove_completed_waiters(&syncpt->intr.wait_head, threshold,
                                 completed);

        empty = list_empty(&syncpt->intr.wait_head);
        if (empty)
                host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
        else
                reset_threshold_interrupt(host, &syncpt->intr.wait_head,
                                          syncpt->id);

        spin_unlock(&syncpt->intr.lock);

        run_handlers(completed);

        return empty;
}
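
/*
 * Note that the handlers run only after intr.lock has been dropped: the
 * completed waiters were already moved onto the private lists, so actions
 * such as host1x_cdma_update() and wake_up() never run under the sync point
 * spinlock. Both callers below currently discard the "queue emptied" return
 * value.
 */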

/*
 * Sync point threshold interrupt service thread function
 * Handles sync point threshold triggers, in thread context
 */

static void syncpt_thresh_work(struct work_struct *work)
{
        struct host1x_syncpt_intr *syncpt_intr =
                container_of(work, struct host1x_syncpt_intr, work);
        struct host1x_syncpt *syncpt =
                container_of(syncpt_intr, struct host1x_syncpt, intr);
        unsigned int id = syncpt->id;
        struct host1x *host = syncpt->host;

        (void)process_wait_list(host, syncpt,
                                host1x_syncpt_load(host->syncpt + id));
}
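
/*
 * The work item above is what host1x_intr_start() registers with the
 * hardware-specific interrupt code via host1x_hw_intr_init_host_sync(); that
 * code is expected to queue it whenever a sync point threshold interrupt
 * fires.
 */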

int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
                           u32 thresh, enum host1x_intr_action action,
                           void *data, struct host1x_waitlist *waiter,
                           void **ref)
{
        int queue_was_empty;

        if (waiter == NULL) {
                pr_warn("%s: NULL waiter\n", __func__);
                return -EINVAL;
        }

        /* initialize a new waiter */
        INIT_LIST_HEAD(&waiter->list);
        kref_init(&waiter->refcount);
        if (ref)
                kref_get(&waiter->refcount);
        waiter->thresh = thresh;
        waiter->action = action;
        atomic_set(&waiter->state, WLS_PENDING);
        waiter->data = data;
        waiter->count = 1;

        spin_lock(&syncpt->intr.lock);

        queue_was_empty = list_empty(&syncpt->intr.wait_head);

        if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
                /* added at head of list - new threshold value */
                host1x_hw_intr_set_syncpt_threshold(host, syncpt->id, thresh);

                /* added as first waiter - enable interrupt */
                if (queue_was_empty)
                        host1x_hw_intr_enable_syncpt_intr(host, syncpt->id);
        }

        if (ref)
                *ref = waiter;

        spin_unlock(&syncpt->intr.lock);

        return 0;
}
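
/*
 * A minimal usage sketch (hypothetical caller, not part of this file): the
 * waiter is allocated by the caller and ownership passes to the interrupt
 * code, with an optional reference kept via @ref for later cancellation.
 *
 *      struct host1x_waitlist *waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
 *      void *ref;
 *
 *      if (!waiter)
 *              return -ENOMEM;
 *
 *      err = host1x_intr_add_action(host, syncpt, threshold,
 *                                   HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
 *                                   &wq, waiter, &ref);
 *      ...
 *      host1x_intr_put_ref(host, syncpt->id, ref, true);
 */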

void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref,
                         bool flush)
{
        struct host1x_waitlist *waiter = ref;
        struct host1x_syncpt *syncpt;

        atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED);

        syncpt = host->syncpt + id;

        spin_lock(&syncpt->intr.lock);
        if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED) ==
            WLS_CANCELLED) {
                list_del(&waiter->list);
                kref_put(&waiter->refcount, waiter_release);
        }
        spin_unlock(&syncpt->intr.lock);

        if (flush) {
                /* Wait until any concurrently executing handler has finished. */
                while (atomic_read(&waiter->state) != WLS_HANDLED)
                        schedule();
        }

        kref_put(&waiter->refcount, waiter_release);
}
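
/*
 * If the waiter was already picked up by remove_completed_waiters(), the
 * cmpxchg pair above does nothing and nothing is removed from the queue here;
 * with flush set, the busy-wait lets a handler still running on another CPU
 * reach WLS_HANDLED before the caller's reference is dropped by the final
 * kref_put().
 */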

int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
{
        unsigned int id;
        u32 nb_pts = host1x_syncpt_nb_pts(host);

        mutex_init(&host->intr_mutex);
        host->intr_syncpt_irq = irq_sync;

        for (id = 0; id < nb_pts; ++id) {
                struct host1x_syncpt *syncpt = host->syncpt + id;

                spin_lock_init(&syncpt->intr.lock);
                INIT_LIST_HEAD(&syncpt->intr.wait_head);
                snprintf(syncpt->intr.thresh_irq_name,
                         sizeof(syncpt->intr.thresh_irq_name),
                         "host1x_sp_%02u", id);
        }

        return 0;
}

void host1x_intr_deinit(struct host1x *host)
{
}

void host1x_intr_start(struct host1x *host)
{
        u32 hz = clk_get_rate(host->clk);
        int err;

        mutex_lock(&host->intr_mutex);
        err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
                                            syncpt_thresh_work);
        if (err) {
                mutex_unlock(&host->intr_mutex);
                return;
        }
        mutex_unlock(&host->intr_mutex);
}

void host1x_intr_stop(struct host1x *host)
{
        unsigned int id;
        struct host1x_syncpt *syncpt = host->syncpt;
        u32 nb_pts = host1x_syncpt_nb_pts(host);

        mutex_lock(&host->intr_mutex);

        host1x_hw_intr_disable_all_syncpt_intrs(host);

        for (id = 0; id < nb_pts; ++id) {
                struct host1x_waitlist *waiter, *next;

                list_for_each_entry_safe(waiter, next,
                                         &syncpt[id].intr.wait_head, list) {
                        if (atomic_cmpxchg(&waiter->state,
                            WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
                                list_del(&waiter->list);
                                kref_put(&waiter->refcount, waiter_release);
                        }
                }

                if (!list_empty(&syncpt[id].intr.wait_head)) {
                        /* output diagnostics */
                        mutex_unlock(&host->intr_mutex);
                        pr_warn("%s cannot stop syncpt intr id=%u\n",
                                __func__, id);
                        return;
                }
        }

        host1x_hw_intr_free_syncpt_irq(host);

        mutex_unlock(&host->intr_mutex);
}