// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <trace/events/host1x.h>
#include "channel.h"
#include "dev.h"
#include "intr.h"

/* Wait list management */

enum waitlist_state {
	WLS_PENDING,
	WLS_REMOVED,
	WLS_CANCELLED,
	WLS_HANDLED
};

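/* kref release callback: free the waiter once the last reference is gone */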
static void waiter_release(struct kref *kref)
{
	kfree(container_of(kref, struct host1x_waitlist, refcount));
}

32/*
33 * add a waiter to a waiter queue, sorted by threshold
34 * returns true if it was added at the head of the queue
35 */
36static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
37 struct list_head *queue)
38{
39 struct host1x_waitlist *pos;
40 u32 thresh = waiter->thresh;
41
42 list_for_each_entry_reverse(pos, queue, list)
43 if ((s32)(pos->thresh - thresh) <= 0) {
44 list_add(&waiter->list, &pos->list);
45 return false;
46 }
47
48 list_add(&waiter->list, queue);
49 return true;
50}
51
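/*
 * Threshold comparisons use wraparound-safe arithmetic: (s32)(a - b) <= 0
 * orders two 32-bit values by their signed distance, so the ordering stays
 * correct even after the sync point value wraps.  For example, with a = 5
 * and b = 0xfffffffe the difference is (s32)7 > 0, so a sorts after b.
 */
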
52/*
53 * run through a waiter queue for a single sync point ID
54 * and gather all completed waiters into lists by actions
55 */
56static void remove_completed_waiters(struct list_head *head, u32 sync,
57 struct list_head completed[HOST1X_INTR_ACTION_COUNT])
58{
59 struct list_head *dest;
60 struct host1x_waitlist *waiter, *next, *prev;
61
62 list_for_each_entry_safe(waiter, next, head, list) {
63 if ((s32)(waiter->thresh - sync) > 0)
64 break;
65
66 dest = completed + waiter->action;
67
68 /* consolidate submit cleanups */
69 if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
70 !list_empty(dest)) {
71 prev = list_entry(dest->prev,
72 struct host1x_waitlist, list);
73 if (prev->data == waiter->data) {
74 prev->count++;
75 dest = NULL;
76 }
77 }
78
79 /* PENDING->REMOVED or CANCELLED->HANDLED */
80 if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
81 list_del(&waiter->list);
82 kref_put(&waiter->refcount, waiter_release);
83 } else
84 list_move_tail(&waiter->list, dest);
85 }
86}
87
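/*
 * Re-arm the hardware interrupt for the lowest remaining threshold.
 * The queue is sorted, so the first entry is the next waiter to expire.
 */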
static void reset_threshold_interrupt(struct host1x *host,
				      struct list_head *head,
				      unsigned int id)
{
	u32 thresh =
		list_first_entry(head, struct host1x_waitlist, list)->thresh;

	host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
	host1x_hw_intr_enable_syncpt_intr(host, id);
}

static void action_submit_complete(struct host1x_waitlist *waiter)
{
	struct host1x_channel *channel = waiter->data;

	host1x_cdma_update(&channel->cdma);

	/* trace the number of consolidated completions (waiter->count) */
	trace_host1x_channel_submit_complete(dev_name(channel->dev),
					     waiter->count, waiter->thresh);
}

static void action_wakeup(struct host1x_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;

	wake_up(wq);
}

static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;

	wake_up_interruptible(wq);
}

typedef void (*action_handler)(struct host1x_waitlist *waiter);

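/* indexed by enum host1x_intr_action; the order must match the enum */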
static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
	action_submit_complete,
	action_wakeup,
	action_wakeup_interruptible,
};

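/*
 * Dispatch every waiter gathered by remove_completed_waiters() to its
 * action handler, then drop the wait list's reference.  By this point
 * each waiter must be in WLS_REMOVED.
 */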
static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
	struct list_head *head = completed;
	unsigned int i;

	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
		action_handler handler = action_handlers[i];
		struct host1x_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next, head, list) {
			list_del(&waiter->list);
			handler(waiter);
			WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
				WLS_REMOVED);
			kref_put(&waiter->refcount, waiter_release);
		}
	}
}

152/*
153 * Remove & handle all waiters that have completed for the given syncpt
154 */
155static int process_wait_list(struct host1x *host,
156 struct host1x_syncpt *syncpt,
157 u32 threshold)
158{
159 struct list_head completed[HOST1X_INTR_ACTION_COUNT];
160 unsigned int i;
161 int empty;
162
163 for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
164 INIT_LIST_HEAD(completed + i);
165
166 spin_lock(&syncpt->intr.lock);
167
168 remove_completed_waiters(&syncpt->intr.wait_head, threshold,
169 completed);
170
171 empty = list_empty(&syncpt->intr.wait_head);
172 if (empty)
173 host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
174 else
175 reset_threshold_interrupt(host, &syncpt->intr.wait_head,
176 syncpt->id);
177
178 spin_unlock(&syncpt->intr.lock);
179
180 run_handlers(completed);
181
182 return empty;
183}
184
185/*
186 * Sync point threshold interrupt service thread function
187 * Handles sync point threshold triggers, in thread context
188 */
189
190static void syncpt_thresh_work(struct work_struct *work)
191{
192 struct host1x_syncpt_intr *syncpt_intr =
193 container_of(work, struct host1x_syncpt_intr, work);
194 struct host1x_syncpt *syncpt =
195 container_of(syncpt_intr, struct host1x_syncpt, intr);
196 unsigned int id = syncpt->id;
197 struct host1x *host = syncpt->host;
198
199 (void)process_wait_list(host, syncpt,
200 host1x_syncpt_load(host->syncpt + id));
201}
202
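/*
 * Schedule @action to run once @syncpt reaches @thresh.
 *
 * The caller allocates @waiter; ownership passes to the wait list.  If
 * @ref is non-NULL, an extra reference is taken so that the caller can
 * cancel the wait later via host1x_intr_put_ref().
 */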
int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
			   u32 thresh, enum host1x_intr_action action,
			   void *data, struct host1x_waitlist *waiter,
			   void **ref)
{
	int queue_was_empty;

	if (waiter == NULL) {
		pr_warn("%s: NULL waiter\n", __func__);
		return -EINVAL;
	}

	/* initialize a new waiter */
	INIT_LIST_HEAD(&waiter->list);
	kref_init(&waiter->refcount);
	if (ref)
		kref_get(&waiter->refcount);
	waiter->thresh = thresh;
	waiter->action = action;
	atomic_set(&waiter->state, WLS_PENDING);
	waiter->data = data;
	waiter->count = 1;

	spin_lock(&syncpt->intr.lock);

	queue_was_empty = list_empty(&syncpt->intr.wait_head);

	if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
		/* added at head of list - new threshold value */
		host1x_hw_intr_set_syncpt_threshold(host, syncpt->id, thresh);

		/* added as first waiter - enable interrupt */
		if (queue_was_empty)
			host1x_hw_intr_enable_syncpt_intr(host, syncpt->id);
	}

	spin_unlock(&syncpt->intr.lock);

	if (ref)
		*ref = waiter;
	return 0;
}

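/*
 * A minimal usage sketch (modelled on host1x_syncpt_wait(); allocation
 * and error handling elided):
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *	struct host1x_waitlist *waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
 *	void *ref;
 *
 *	err = host1x_intr_add_action(host, syncpt, thresh,
 *				     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
 *				     &wq, waiter, &ref);
 *	...
 *	wait_event_interruptible_timeout(wq,
 *		host1x_syncpt_is_expired(syncpt, thresh), timeout);
 *	host1x_intr_put_ref(host, syncpt->id, ref);
 */
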
void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref)
{
	struct host1x_waitlist *waiter = ref;
	struct host1x_syncpt *syncpt;

	/*
	 * If the waiter has already been removed from the queue but not
	 * yet handled, spin until the handler is done before cancelling.
	 */
	while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
	       WLS_REMOVED)
		schedule();

	syncpt = host->syncpt + id;
	(void)process_wait_list(host, syncpt, host1x_syncpt_load(syncpt));

	kref_put(&waiter->refcount, waiter_release);
}

int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
{
	unsigned int id;
	u32 nb_pts = host1x_syncpt_nb_pts(host);

	mutex_init(&host->intr_mutex);
	host->intr_syncpt_irq = irq_sync;

	for (id = 0; id < nb_pts; ++id) {
		struct host1x_syncpt *syncpt = host->syncpt + id;

		spin_lock_init(&syncpt->intr.lock);
		INIT_LIST_HEAD(&syncpt->intr.wait_head);
		snprintf(syncpt->intr.thresh_irq_name,
			 sizeof(syncpt->intr.thresh_irq_name),
			 "host1x_sp_%02u", id);
	}

	host1x_intr_start(host);

	return 0;
}

void host1x_intr_deinit(struct host1x *host)
{
	host1x_intr_stop(host);
}

void host1x_intr_start(struct host1x *host)
{
	u32 hz = clk_get_rate(host->clk);
	int err;

	mutex_lock(&host->intr_mutex);
	err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
					    syncpt_thresh_work);
	if (err) {
		mutex_unlock(&host->intr_mutex);
		return;
	}
	mutex_unlock(&host->intr_mutex);
}

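/*
 * Disable all sync point interrupts and reap waiters that were already
 * cancelled; bail out if any waiter is still pending or being handled.
 */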
void host1x_intr_stop(struct host1x *host)
{
	unsigned int id;
	struct host1x_syncpt *syncpt = host->syncpt;
	u32 nb_pts = host1x_syncpt_nb_pts(host);

	mutex_lock(&host->intr_mutex);

	host1x_hw_intr_disable_all_syncpt_intrs(host);

	for (id = 0; id < nb_pts; ++id) {
		struct host1x_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next,
			&syncpt[id].intr.wait_head, list) {
			if (atomic_cmpxchg(&waiter->state,
			    WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
				list_del(&waiter->list);
				kref_put(&waiter->refcount, waiter_release);
			}
		}

		if (!list_empty(&syncpt[id].intr.wait_head)) {
			/* output diagnostics */
			mutex_unlock(&host->intr_mutex);
			pr_warn("%s cannot stop syncpt intr id=%u\n",
				__func__, id);
			return;
		}
	}

	host1x_hw_intr_free_syncpt_irq(host);

	mutex_unlock(&host->intr_mutex);
}