// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (c) 2010-2021, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>

#include "dev.h"
#include "fence.h"
#include "intr.h"

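/*
 * Insert a fence into the per-syncpoint list, keeping the list sorted by
 * threshold. The subtraction is evaluated modulo 2^32 and interpreted as
 * signed, so the ordering stays correct even when the 32-bit syncpoint
 * counter wraps: e.g. for thresholds a = 0xfffffff0 and b = 0x00000010,
 * (s32)(a - b) == -32, so a still sorts before b across the wraparound.
 */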
static void host1x_intr_add_fence_to_list(struct host1x_fence_list *list,
					   struct host1x_syncpt_fence *fence)
{
	struct host1x_syncpt_fence *fence_in_list;

	list_for_each_entry_reverse(fence_in_list, &list->list, list) {
		if ((s32)(fence_in_list->threshold - fence->threshold) <= 0) {
			/* Fence in list is before us, we can insert here */
			list_add(&fence->list, &fence_in_list->list);
			return;
		}
	}

	/* Add as first in list */
	list_add(&fence->list, &list->list);
}

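/*
 * The hardware tracks a single threshold per syncpoint, so program it with
 * the lowest pending threshold (the first list entry) and enable the
 * interrupt; with no pending fences the interrupt can be disabled entirely.
 */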
static void host1x_intr_update_hw_state(struct host1x *host, struct host1x_syncpt *sp)
{
	struct host1x_syncpt_fence *fence;

	if (!list_empty(&sp->fences.list)) {
		fence = list_first_entry(&sp->fences.list, struct host1x_syncpt_fence, list);

		host1x_hw_intr_set_syncpt_threshold(host, sp->id, fence->threshold);
		host1x_hw_intr_enable_syncpt_intr(host, sp->id);
	} else {
		host1x_hw_intr_disable_syncpt_intr(host, sp->id);
	}
}

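/*
 * Queue a fence on its syncpoint and reprogram the hardware threshold. Per
 * the usual kernel "_locked" convention, the caller must already hold
 * fence->sp->fences.lock.
 */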
void host1x_intr_add_fence_locked(struct host1x *host, struct host1x_syncpt_fence *fence)
{
	struct host1x_fence_list *fence_list = &fence->sp->fences;

	INIT_LIST_HEAD(&fence->list);

	host1x_intr_add_fence_to_list(fence_list, fence);
	host1x_intr_update_hw_state(host, fence->sp);
}

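/*
 * Remove a fence that has not yet been signalled. Returns true if the fence
 * was still on the wait list and has been taken off, false if it had already
 * been removed (i.e. it was signalled or removed earlier).
 */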
bool host1x_intr_remove_fence(struct host1x *host, struct host1x_syncpt_fence *fence)
{
	struct host1x_fence_list *fence_list = &fence->sp->fences;
	unsigned long irqflags;

	spin_lock_irqsave(&fence_list->lock, irqflags);

	if (list_empty(&fence->list)) {
		spin_unlock_irqrestore(&fence_list->lock, irqflags);
		return false;
	}

	list_del_init(&fence->list);
	host1x_intr_update_hw_state(host, fence->sp);

	spin_unlock_irqrestore(&fence_list->lock, irqflags);

	return true;
}

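/*
 * Called from the syncpoint IRQ path with the current counter value. The
 * expiry test (value - threshold) & 0x80000000 checks the sign bit of the
 * 32-bit difference, i.e. it is equivalent to (s32)(value - threshold) < 0,
 * "threshold not yet reached"; like the sorted insert above, it remains
 * correct across counter wraparound.
 */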
void host1x_intr_handle_interrupt(struct host1x *host, unsigned int id)
{
	struct host1x_syncpt *sp = &host->syncpt[id];
	struct host1x_syncpt_fence *fence, *tmp;
	unsigned int value;

	value = host1x_syncpt_load(sp);

	spin_lock(&sp->fences.lock);

	list_for_each_entry_safe(fence, tmp, &sp->fences.list, list) {
		if (((value - fence->threshold) & 0x80000000U) != 0U) {
			/* Fence is not yet expired, we are done */
			break;
		}

		list_del_init(&fence->list);
		host1x_fence_signal(fence);
	}

	/* Re-enable interrupt if necessary */
	host1x_intr_update_hw_state(host, sp);

	spin_unlock(&sp->fences.lock);
}

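/*
 * Set up the per-syncpoint fence lists and request one shared IRQ per
 * syncpoint interrupt line. Each line gets a host1x_intr_irq_data cookie
 * recording the host1x instance and the line's offset, which the
 * hardware-specific ISR receives as its dev_id.
 */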
int host1x_intr_init(struct host1x *host)
{
	struct host1x_intr_irq_data *irq_data;
	unsigned int id;
	int i, err;

	mutex_init(&host->intr_mutex);

	for (id = 0; id < host1x_syncpt_nb_pts(host); ++id) {
		struct host1x_syncpt *syncpt = &host->syncpt[id];

		spin_lock_init(&syncpt->fences.lock);
		INIT_LIST_HEAD(&syncpt->fences.list);
	}

	irq_data = devm_kcalloc(host->dev, host->num_syncpt_irqs, sizeof(irq_data[0]), GFP_KERNEL);
	if (!irq_data)
		return -ENOMEM;

	host1x_hw_intr_disable_all_syncpt_intrs(host);

	for (i = 0; i < host->num_syncpt_irqs; i++) {
		irq_data[i].host = host;
		irq_data[i].offset = i;

		err = devm_request_irq(host->dev, host->syncpt_irqs[i],
				       host->intr_op->isr, IRQF_SHARED,
				       "host1x_syncpt", &irq_data[i]);
		if (err < 0)
			return err;
	}

	return 0;
}

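/*
 * Nothing to clean up here: the syncpoint IRQs are devm-managed, so they are
 * released automatically when the host1x device goes away.
 */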
void host1x_intr_deinit(struct host1x *host)
{
}

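/*
 * DIV_ROUND_UP(hz, 1000000) converts the host1x clock rate into clock
 * cycles per microsecond; presumably the sync hardware is programmed with
 * this as its microsecond tick (see host1x_hw_intr_init_host_sync()).
 */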
void host1x_intr_start(struct host1x *host)
{
	u32 hz = clk_get_rate(host->clk);
	int err;

	mutex_lock(&host->intr_mutex);
	err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000));
	if (err) {
		mutex_unlock(&host->intr_mutex);
		return;
	}
	mutex_unlock(&host->intr_mutex);
}

void host1x_intr_stop(struct host1x *host)
{
	host1x_hw_intr_disable_all_syncpt_intrs(host);
}

/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <trace/events/host1x.h>
#include "channel.h"
#include "dev.h"
#include "intr.h"

/* Wait list management */

enum waitlist_state {
	WLS_PENDING,
	WLS_REMOVED,
	WLS_CANCELLED,
	WLS_HANDLED
};
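
/*
 * Waiter state machine. The values are ordered so that a single
 * atomic_inc_return() advances a waiter one step:
 *
 *   PENDING -> REMOVED -> HANDLED     normal completion: the waiter is
 *                                     pulled off the queue by
 *                                     remove_completed_waiters() and then
 *                                     finished by run_handlers()
 *   PENDING -> CANCELLED -> HANDLED   cancellation: host1x_intr_put_ref()
 *                                     marks the waiter cancelled, and
 *                                     remove_completed_waiters() later drops
 *                                     it without running its action
 */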

static void waiter_release(struct kref *kref)
{
	kfree(container_of(kref, struct host1x_waitlist, refcount));
}

/*
 * add a waiter to a waiter queue, sorted by threshold
 * returns true if it was added at the head of the queue
 */
static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
				struct list_head *queue)
{
	struct host1x_waitlist *pos;
	u32 thresh = waiter->thresh;

	list_for_each_entry_reverse(pos, queue, list)
		if ((s32)(pos->thresh - thresh) <= 0) {
			list_add(&waiter->list, &pos->list);
			return false;
		}

	list_add(&waiter->list, queue);
	return true;
}

/*
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into lists by actions
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
			struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
	struct list_head *dest;
	struct host1x_waitlist *waiter, *next, *prev;

	list_for_each_entry_safe(waiter, next, head, list) {
		if ((s32)(waiter->thresh - sync) > 0)
			break;

		dest = completed + waiter->action;

		/* consolidate submit cleanups */
		if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
		    !list_empty(dest)) {
			prev = list_entry(dest->prev,
					  struct host1x_waitlist, list);
			if (prev->data == waiter->data) {
				prev->count++;
				dest = NULL;
			}
		}

		/* PENDING->REMOVED or CANCELLED->HANDLED */
		if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
			list_del(&waiter->list);
			kref_put(&waiter->refcount, waiter_release);
		} else
			list_move_tail(&waiter->list, dest);
	}
}

static void reset_threshold_interrupt(struct host1x *host,
				      struct list_head *head,
				      unsigned int id)
{
	u32 thresh =
		list_first_entry(head, struct host1x_waitlist, list)->thresh;

	host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
	host1x_hw_intr_enable_syncpt_intr(host, id);
}

static void action_submit_complete(struct host1x_waitlist *waiter)
{
	struct host1x_channel *channel = waiter->data;

	host1x_cdma_update(&channel->cdma);

	/* Add nr_completed to trace */
	trace_host1x_channel_submit_complete(dev_name(channel->dev),
					     waiter->count, waiter->thresh);
}

static void action_wakeup(struct host1x_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;

	wake_up(wq);
}

static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;

	wake_up_interruptible(wq);
}

typedef void (*action_handler)(struct host1x_waitlist *waiter);

static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
	action_submit_complete,
	action_wakeup,
	action_wakeup_interruptible,
};

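/*
 * action_handlers[] is indexed by enum host1x_intr_action, so the entries
 * above must stay in the same order as the enum. run_handlers() drains one
 * completed list per action type, running the matching handler and dropping
 * the queue's reference on each waiter.
 */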
static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
	struct list_head *head = completed;
	int i;

	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
		action_handler handler = action_handlers[i];
		struct host1x_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next, head, list) {
			list_del(&waiter->list);
			handler(waiter);
			WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
				WLS_REMOVED);
			kref_put(&waiter->refcount, waiter_release);
		}
	}
}

/*
 * Remove & handle all waiters that have completed for the given syncpt
 */
static int process_wait_list(struct host1x *host,
			     struct host1x_syncpt *syncpt,
			     u32 threshold)
{
	struct list_head completed[HOST1X_INTR_ACTION_COUNT];
	unsigned int i;
	int empty;

	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
		INIT_LIST_HEAD(completed + i);

	spin_lock(&syncpt->intr.lock);

	remove_completed_waiters(&syncpt->intr.wait_head, threshold,
				 completed);

	empty = list_empty(&syncpt->intr.wait_head);
	if (empty)
		host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
	else
		reset_threshold_interrupt(host, &syncpt->intr.wait_head,
					  syncpt->id);

	spin_unlock(&syncpt->intr.lock);

	run_handlers(completed);

	return empty;
}

/*
 * Sync point threshold interrupt service thread function
 * Handles sync point threshold triggers, in thread context
 */
static void syncpt_thresh_work(struct work_struct *work)
{
	struct host1x_syncpt_intr *syncpt_intr =
		container_of(work, struct host1x_syncpt_intr, work);
	struct host1x_syncpt *syncpt =
		container_of(syncpt_intr, struct host1x_syncpt, intr);
	unsigned int id = syncpt->id;
	struct host1x *host = syncpt->host;

	(void)process_wait_list(host, syncpt,
				host1x_syncpt_load(host->syncpt + id));
}

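/*
 * Schedule an action to run once syncpoint 'id' reaches 'thresh'. The caller
 * supplies the waiter storage; if 'ref' is non-NULL an extra reference is
 * taken so the waiter can later be cancelled with host1x_intr_put_ref().
 * Illustrative wait-and-wakeup usage, error handling trimmed (see
 * host1x_syncpt_wait() for the full version):
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *	struct host1x_waitlist *waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
 *	void *ref;
 *
 *	host1x_intr_add_action(host, id, thresh,
 *			       HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
 *			       &wq, waiter, &ref);
 *	wait_event_interruptible_timeout(wq,
 *			host1x_syncpt_is_expired(sp, thresh), timeout);
 *	host1x_intr_put_ref(host, id, ref);
 */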
int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
			   enum host1x_intr_action action, void *data,
			   struct host1x_waitlist *waiter, void **ref)
{
	struct host1x_syncpt *syncpt;
	int queue_was_empty;

	if (waiter == NULL) {
		pr_warn("%s: NULL waiter\n", __func__);
		return -EINVAL;
	}

	/* initialize a new waiter */
	INIT_LIST_HEAD(&waiter->list);
	kref_init(&waiter->refcount);
	if (ref)
		kref_get(&waiter->refcount);
	waiter->thresh = thresh;
	waiter->action = action;
	atomic_set(&waiter->state, WLS_PENDING);
	waiter->data = data;
	waiter->count = 1;

	syncpt = host->syncpt + id;

	spin_lock(&syncpt->intr.lock);

	queue_was_empty = list_empty(&syncpt->intr.wait_head);

	if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
		/* added at head of list - new threshold value */
		host1x_hw_intr_set_syncpt_threshold(host, id, thresh);

		/* added as first waiter - enable interrupt */
		if (queue_was_empty)
			host1x_hw_intr_enable_syncpt_intr(host, id);
	}

	spin_unlock(&syncpt->intr.lock);

	if (ref)
		*ref = waiter;
	return 0;
}

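/*
 * Drop the cancellation reference taken by host1x_intr_add_action(). The
 * cmpxchg loop spins while the waiter sits in WLS_REMOVED, i.e. while
 * run_handlers() still owns it; once the waiter is back to PENDING it is
 * marked CANCELLED, and a final wait-list pass cleans it up.
 */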
void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref)
{
	struct host1x_waitlist *waiter = ref;
	struct host1x_syncpt *syncpt;

	while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
	       WLS_REMOVED)
		schedule();

	syncpt = host->syncpt + id;
	(void)process_wait_list(host, syncpt,
				host1x_syncpt_load(host->syncpt + id));

	kref_put(&waiter->refcount, waiter_release);
}

int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
{
	unsigned int id;
	u32 nb_pts = host1x_syncpt_nb_pts(host);

	mutex_init(&host->intr_mutex);
	host->intr_syncpt_irq = irq_sync;

	for (id = 0; id < nb_pts; ++id) {
		struct host1x_syncpt *syncpt = host->syncpt + id;

		spin_lock_init(&syncpt->intr.lock);
		INIT_LIST_HEAD(&syncpt->intr.wait_head);
		snprintf(syncpt->intr.thresh_irq_name,
			 sizeof(syncpt->intr.thresh_irq_name),
			 "host1x_sp_%02u", id);
	}

	host1x_intr_start(host);

	return 0;
}

void host1x_intr_deinit(struct host1x *host)
{
	host1x_intr_stop(host);
}

void host1x_intr_start(struct host1x *host)
{
	u32 hz = clk_get_rate(host->clk);
	int err;

	mutex_lock(&host->intr_mutex);
	err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
					    syncpt_thresh_work);
	if (err) {
		mutex_unlock(&host->intr_mutex);
		return;
	}
	mutex_unlock(&host->intr_mutex);
}

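/*
 * Disable all syncpoint interrupts and free every waiter that was already
 * cancelled. If any waiter is still pending, stopping is aborted with a
 * warning, since freeing a live waiter here could race with its handler.
 */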
void host1x_intr_stop(struct host1x *host)
{
	unsigned int id;
	struct host1x_syncpt *syncpt = host->syncpt;
	u32 nb_pts = host1x_syncpt_nb_pts(host);

	mutex_lock(&host->intr_mutex);

	host1x_hw_intr_disable_all_syncpt_intrs(host);

	for (id = 0; id < nb_pts; ++id) {
		struct host1x_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next,
					 &syncpt[id].intr.wait_head, list) {
			if (atomic_cmpxchg(&waiter->state,
					   WLS_CANCELLED, WLS_HANDLED) ==
			    WLS_CANCELLED) {
				list_del(&waiter->list);
				kref_put(&waiter->refcount, waiter_release);
			}
		}

		if (!list_empty(&syncpt[id].intr.wait_head)) {
			/* output diagnostics */
			mutex_unlock(&host->intr_mutex);
			pr_warn("%s cannot stop syncpt intr id=%u\n",
				__func__, id);
			return;
		}
	}

	host1x_hw_intr_free_syncpt_irq(host);

	mutex_unlock(&host->intr_mutex);
}