// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (c) 2010-2021, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>

#include "dev.h"
#include "fence.h"
#include "intr.h"

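/*
 * Insert a fence into the list, which is kept sorted by threshold. The
 * (s32) cast makes the comparison wrap-safe: for example, with a list
 * entry at threshold 0xfffffff0 and a new fence at 0x10, the s32
 * difference is negative, so the new fence correctly sorts after the
 * entry even though its raw threshold is numerically smaller.
 */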
static void host1x_intr_add_fence_to_list(struct host1x_fence_list *list,
					  struct host1x_syncpt_fence *fence)
{
	struct host1x_syncpt_fence *fence_in_list;

	list_for_each_entry_reverse(fence_in_list, &list->list, list) {
		if ((s32)(fence_in_list->threshold - fence->threshold) <= 0) {
			/* Fence in list is before us, we can insert here */
			list_add(&fence->list, &fence_in_list->list);
			return;
		}
	}

	/* Add as first in list */
	list_add(&fence->list, &list->list);
}

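/*
 * Reprogram the hardware to match the fence list: aim the syncpoint
 * threshold at the earliest pending fence and enable the interrupt, or
 * disable it when no fences remain.
 */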
static void host1x_intr_update_hw_state(struct host1x *host, struct host1x_syncpt *sp)
{
	struct host1x_syncpt_fence *fence;

	if (!list_empty(&sp->fences.list)) {
		fence = list_first_entry(&sp->fences.list, struct host1x_syncpt_fence, list);

		host1x_hw_intr_set_syncpt_threshold(host, sp->id, fence->threshold);
		host1x_hw_intr_enable_syncpt_intr(host, sp->id);
	} else {
		host1x_hw_intr_disable_syncpt_intr(host, sp->id);
	}
}

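/* The caller must hold the syncpoint's fences.lock. */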
void host1x_intr_add_fence_locked(struct host1x *host, struct host1x_syncpt_fence *fence)
{
	struct host1x_fence_list *fence_list = &fence->sp->fences;

	INIT_LIST_HEAD(&fence->list);

	host1x_intr_add_fence_to_list(fence_list, fence);
	host1x_intr_update_hw_state(host, fence->sp);
}

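/*
 * Remove a fence from its syncpoint's fence list. Returns false if the
 * fence was no longer on the list, i.e. it had already been signalled
 * or removed.
 */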
bool host1x_intr_remove_fence(struct host1x *host, struct host1x_syncpt_fence *fence)
{
	struct host1x_fence_list *fence_list = &fence->sp->fences;
	unsigned long irqflags;

	spin_lock_irqsave(&fence_list->lock, irqflags);

	if (list_empty(&fence->list)) {
		spin_unlock_irqrestore(&fence_list->lock, irqflags);
		return false;
	}

	list_del_init(&fence->list);
	host1x_intr_update_hw_state(host, fence->sp);

	spin_unlock_irqrestore(&fence_list->lock, irqflags);

	return true;
}

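/*
 * Signal all fences whose thresholds the syncpoint value has reached.
 * The sign-bit test on (value - threshold) is the wrap-safe counterpart
 * of the insertion-time comparison: since the list is sorted, the walk
 * can stop at the first fence still in the future.
 */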
void host1x_intr_handle_interrupt(struct host1x *host, unsigned int id)
{
	struct host1x_syncpt *sp = &host->syncpt[id];
	struct host1x_syncpt_fence *fence, *tmp;
	unsigned int value;

	value = host1x_syncpt_load(sp);

	spin_lock(&sp->fences.lock);

	list_for_each_entry_safe(fence, tmp, &sp->fences.list, list) {
		if (((value - fence->threshold) & 0x80000000U) != 0U) {
			/* Fence is not yet expired, we are done */
			break;
		}

		list_del_init(&fence->list);
		host1x_fence_signal(fence);
	}

	/* Re-enable interrupt if necessary */
	host1x_intr_update_hw_state(host, sp);

	spin_unlock(&sp->fences.lock);
}

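/*
 * Initialize per-syncpoint fence lists and request one IRQ per syncpoint
 * interrupt line. Both the irq_data array and the IRQs themselves are
 * device-managed, so host1x_intr_deinit() has nothing to undo.
 */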
int host1x_intr_init(struct host1x *host)
{
	struct host1x_intr_irq_data *irq_data;
	unsigned int id;
	int i, err;

	mutex_init(&host->intr_mutex);

	for (id = 0; id < host1x_syncpt_nb_pts(host); ++id) {
		struct host1x_syncpt *syncpt = &host->syncpt[id];

		spin_lock_init(&syncpt->fences.lock);
		INIT_LIST_HEAD(&syncpt->fences.list);
	}

	irq_data = devm_kcalloc(host->dev, host->num_syncpt_irqs, sizeof(irq_data[0]), GFP_KERNEL);
	if (!irq_data)
		return -ENOMEM;

	host1x_hw_intr_disable_all_syncpt_intrs(host);

	for (i = 0; i < host->num_syncpt_irqs; i++) {
		irq_data[i].host = host;
		irq_data[i].offset = i;

		err = devm_request_irq(host->dev, host->syncpt_irqs[i],
				       host->intr_op->isr, IRQF_SHARED,
				       "host1x_syncpt", &irq_data[i]);
		if (err < 0)
			return err;
	}

	return 0;
}

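/* Nothing to do: the resources acquired in host1x_intr_init() are device-managed. */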
void host1x_intr_deinit(struct host1x *host)
{
}

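/*
 * Pass the host1x clock rate, rounded up to whole MHz (i.e. clock cycles
 * per microsecond), down to the hardware layer.
 */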
void host1x_intr_start(struct host1x *host)
{
	u32 hz = clk_get_rate(host->clk);
	int err;

	mutex_lock(&host->intr_mutex);
	err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000));
	if (err) {
		mutex_unlock(&host->intr_mutex);
		return;
	}
	mutex_unlock(&host->intr_mutex);
}

void host1x_intr_stop(struct host1x *host)
{
	host1x_hw_intr_disable_all_syncpt_intrs(host);
}
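
/*
 * What follows is an earlier, waitlist-based revision of this file: the
 * implementation that the fence-based code above replaced. It is kept
 * below for comparison.
 */
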
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <trace/events/host1x.h>
#include "channel.h"
#include "dev.h"
#include "fence.h"
#include "intr.h"

/* Wait list management */

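/*
 * Life cycle of a waiter, tracked in host1x_waitlist::state:
 *
 *   WLS_PENDING   -> WLS_REMOVED    threshold reached; waiter moved to a
 *                                   completed list, handler yet to run
 *   WLS_REMOVED   -> WLS_HANDLED    action handler has run
 *   WLS_PENDING   -> WLS_CANCELLED  host1x_intr_put_ref() cancelled the
 *                                   waiter before its threshold was reached
 *   WLS_CANCELLED -> WLS_HANDLED    cancelled waiter reaped
 *
 * The values are ordered so that a single atomic_inc_return() in
 * remove_completed_waiters() performs either PENDING -> REMOVED or
 * CANCELLED -> HANDLED, whichever applies.
 */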
enum waitlist_state {
	WLS_PENDING,
	WLS_REMOVED,
	WLS_CANCELLED,
	WLS_HANDLED
};

static void waiter_release(struct kref *kref)
{
	kfree(container_of(kref, struct host1x_waitlist, refcount));
}

/*
 * add a waiter to a waiter queue, sorted by threshold
 * returns true if it was added at the head of the queue
 */
static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
				struct list_head *queue)
{
	struct host1x_waitlist *pos;
	u32 thresh = waiter->thresh;

	list_for_each_entry_reverse(pos, queue, list)
		if ((s32)(pos->thresh - thresh) <= 0) {
			list_add(&waiter->list, &pos->list);
			return false;
		}

	list_add(&waiter->list, queue);
	return true;
}

/*
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into lists by actions
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
			struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
	struct list_head *dest;
	struct host1x_waitlist *waiter, *next, *prev;

	list_for_each_entry_safe(waiter, next, head, list) {
		if ((s32)(waiter->thresh - sync) > 0)
			break;

		dest = completed + waiter->action;

		/* consolidate submit cleanups */
		if (waiter->action == HOST1X_INTR_ACTION_SUBMIT_COMPLETE &&
		    !list_empty(dest)) {
			prev = list_entry(dest->prev,
					  struct host1x_waitlist, list);
			if (prev->data == waiter->data) {
				prev->count++;
				dest = NULL;
			}
		}

		/* PENDING->REMOVED or CANCELLED->HANDLED */
		if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
			list_del(&waiter->list);
			kref_put(&waiter->refcount, waiter_release);
		} else
			list_move_tail(&waiter->list, dest);
	}
}

static void reset_threshold_interrupt(struct host1x *host,
				      struct list_head *head,
				      unsigned int id)
{
	u32 thresh =
		list_first_entry(head, struct host1x_waitlist, list)->thresh;

	host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
	host1x_hw_intr_enable_syncpt_intr(host, id);
}

static void action_submit_complete(struct host1x_waitlist *waiter)
{
	struct host1x_channel *channel = waiter->data;

	host1x_cdma_update(&channel->cdma);

	/* Add nr_completed to trace */
	trace_host1x_channel_submit_complete(dev_name(channel->dev),
					     waiter->count, waiter->thresh);
}

static void action_wakeup(struct host1x_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;

	wake_up(wq);
}

static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;

	wake_up_interruptible(wq);
}

static void action_signal_fence(struct host1x_waitlist *waiter)
{
	struct host1x_syncpt_fence *f = waiter->data;

	host1x_fence_signal(f);
}

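/*
 * Dispatch table for completed waiters, indexed by enum host1x_intr_action;
 * its order must match the enum's.
 */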
typedef void (*action_handler)(struct host1x_waitlist *waiter);

static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
	action_submit_complete,
	action_wakeup,
	action_wakeup_interruptible,
	action_signal_fence,
};

static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
	struct list_head *head = completed;
	unsigned int i;

	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
		action_handler handler = action_handlers[i];
		struct host1x_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next, head, list) {
			list_del(&waiter->list);
			handler(waiter);
			WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
				WLS_REMOVED);
			kref_put(&waiter->refcount, waiter_release);
		}
	}
}

/*
 * Remove & handle all waiters that have completed for the given syncpt
 */
static int process_wait_list(struct host1x *host,
			     struct host1x_syncpt *syncpt,
			     u32 threshold)
{
	struct list_head completed[HOST1X_INTR_ACTION_COUNT];
	unsigned int i;
	int empty;

	for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
		INIT_LIST_HEAD(completed + i);

	spin_lock(&syncpt->intr.lock);

	remove_completed_waiters(&syncpt->intr.wait_head, threshold,
				 completed);

	empty = list_empty(&syncpt->intr.wait_head);
	if (empty)
		host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
	else
		reset_threshold_interrupt(host, &syncpt->intr.wait_head,
					  syncpt->id);

	spin_unlock(&syncpt->intr.lock);

	run_handlers(completed);

	return empty;
}

/*
 * Sync point threshold interrupt service thread function
 * Handles sync point threshold triggers, in thread context
 */

static void syncpt_thresh_work(struct work_struct *work)
{
	struct host1x_syncpt_intr *syncpt_intr =
		container_of(work, struct host1x_syncpt_intr, work);
	struct host1x_syncpt *syncpt =
		container_of(syncpt_intr, struct host1x_syncpt, intr);
	unsigned int id = syncpt->id;
	struct host1x *host = syncpt->host;

	(void)process_wait_list(host, syncpt,
				host1x_syncpt_load(host->syncpt + id));
}

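/*
 * Queue @waiter on @syncpt at @thresh. If @ref is non-NULL, an extra
 * reference to the waiter is taken and returned through it; the caller is
 * then responsible for dropping it via host1x_intr_put_ref(). A typical
 * user is a wait path that queues a waiter with
 * HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE and a wait queue head as @data,
 * then sleeps on that queue.
 */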
int host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt,
			   u32 thresh, enum host1x_intr_action action,
			   void *data, struct host1x_waitlist *waiter,
			   void **ref)
{
	int queue_was_empty;

	if (waiter == NULL) {
		pr_warn("%s: NULL waiter\n", __func__);
		return -EINVAL;
	}

	/* initialize a new waiter */
	INIT_LIST_HEAD(&waiter->list);
	kref_init(&waiter->refcount);
	if (ref)
		kref_get(&waiter->refcount);
	waiter->thresh = thresh;
	waiter->action = action;
	atomic_set(&waiter->state, WLS_PENDING);
	waiter->data = data;
	waiter->count = 1;

	spin_lock(&syncpt->intr.lock);

	queue_was_empty = list_empty(&syncpt->intr.wait_head);

	if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
		/* added at head of list - new threshold value */
		host1x_hw_intr_set_syncpt_threshold(host, syncpt->id, thresh);

		/* added as first waiter - enable interrupt */
		if (queue_was_empty)
			host1x_hw_intr_enable_syncpt_intr(host, syncpt->id);
	}

	if (ref)
		*ref = waiter;

	spin_unlock(&syncpt->intr.lock);

	return 0;
}

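/*
 * Cancel a waiter and drop the caller's reference. If the waiter is still
 * queued, it is unlinked and the wait list's reference is dropped as well.
 * With @flush, spin (yielding the CPU) until any handler that has already
 * picked the waiter up marks it WLS_HANDLED.
 */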
void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref,
			 bool flush)
{
	struct host1x_waitlist *waiter = ref;
	struct host1x_syncpt *syncpt;

	atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED);

	syncpt = host->syncpt + id;

	spin_lock(&syncpt->intr.lock);
	if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED) ==
	    WLS_CANCELLED) {
		list_del(&waiter->list);
		kref_put(&waiter->refcount, waiter_release);
	}
	spin_unlock(&syncpt->intr.lock);

	if (flush) {
		/* Wait until any concurrently executing handler has finished. */
		while (atomic_read(&waiter->state) != WLS_HANDLED)
			schedule();
	}

	kref_put(&waiter->refcount, waiter_release);
}

int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
{
	unsigned int id;
	u32 nb_pts = host1x_syncpt_nb_pts(host);

	mutex_init(&host->intr_mutex);
	host->intr_syncpt_irq = irq_sync;

	for (id = 0; id < nb_pts; ++id) {
		struct host1x_syncpt *syncpt = host->syncpt + id;

		spin_lock_init(&syncpt->intr.lock);
		INIT_LIST_HEAD(&syncpt->intr.wait_head);
		snprintf(syncpt->intr.thresh_irq_name,
			 sizeof(syncpt->intr.thresh_irq_name),
			 "host1x_sp_%02u", id);
	}

	return 0;
}

void host1x_intr_deinit(struct host1x *host)
{
}

void host1x_intr_start(struct host1x *host)
{
	u32 hz = clk_get_rate(host->clk);
	int err;

	mutex_lock(&host->intr_mutex);
	err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
					    syncpt_thresh_work);
	if (err) {
		mutex_unlock(&host->intr_mutex);
		return;
	}
	mutex_unlock(&host->intr_mutex);
}

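/*
 * Disable all syncpoint interrupts and reap waiters that were already
 * cancelled. If any waiter is still pending, bail out with a warning
 * instead of freeing the IRQ, since its handler could otherwise run
 * after teardown.
 */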
void host1x_intr_stop(struct host1x *host)
{
	unsigned int id;
	struct host1x_syncpt *syncpt = host->syncpt;
	u32 nb_pts = host1x_syncpt_nb_pts(host);

	mutex_lock(&host->intr_mutex);

	host1x_hw_intr_disable_all_syncpt_intrs(host);

	for (id = 0; id < nb_pts; ++id) {
		struct host1x_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next,
			&syncpt[id].intr.wait_head, list) {
			if (atomic_cmpxchg(&waiter->state,
			    WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
				list_del(&waiter->list);
				kref_put(&waiter->refcount, waiter_release);
			}
		}

		if (!list_empty(&syncpt[id].intr.wait_head)) {
			/* output diagnostics */
			mutex_unlock(&host->intr_mutex);
			pr_warn("%s cannot stop syncpt intr id=%u\n",
				__func__, id);
			return;
		}
	}

	host1x_hw_intr_free_syncpt_irq(host);

	mutex_unlock(&host->intr_mutex);
}