// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"

#define FSM_DRM_DISABLE_DELAY_MS	200
#define FSM_EVENT_POLL_INTERVAL_MS	20
#define FSM_MD_EX_REC_OK_TIMEOUT_MS	10000
#define FSM_MD_EX_PASS_TIMEOUT_MS	45000
#define FSM_CMD_TIMEOUT_MS		2000

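/*
 * Notifier registration: callers hang a struct t7xx_fsm_notifier on the FSM
 * controller's list to receive modem state change callbacks. The list is
 * protected by notifier_lock since it is walked from fsm_state_notify().
 */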
void t7xx_fsm_notifier_register(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_add_tail(&notifier->entry, &ctl->notifier_list);
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

void t7xx_fsm_notifier_unregister(struct t7xx_modem *md, struct t7xx_fsm_notifier *notifier)
{
	struct t7xx_fsm_notifier *notifier_cur, *notifier_next;
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry_safe(notifier_cur, notifier_next, &ctl->notifier_list, entry) {
		if (notifier_cur == notifier)
			list_del(&notifier->entry);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

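/*
 * Walk the notifier list and invoke each callback. The lock is dropped
 * around notifier_fn() so the callback may sleep; this assumes notifiers
 * are not unregistered concurrently while a notification is in flight.
 */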
static void fsm_state_notify(struct t7xx_modem *md, enum md_state state)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;
	struct t7xx_fsm_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&ctl->notifier_lock, flags);
	list_for_each_entry(notifier, &ctl->notifier_list, entry) {
		spin_unlock_irqrestore(&ctl->notifier_lock, flags);
		if (notifier->notifier_fn)
			notifier->notifier_fn(state, notifier->data);

		spin_lock_irqsave(&ctl->notifier_lock, flags);
	}
	spin_unlock_irqrestore(&ctl->notifier_lock, flags);
}

void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
{
	ctl->md_state = state;

	/* Update to port first, otherwise sending message on HS2 may fail */
	t7xx_port_proxy_md_status_notify(ctl->md->port_prox, state);
	fsm_state_notify(ctl->md, state);
}

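/*
 * Complete a command: report the result to a synchronous issuer waiting in
 * t7xx_fsm_append_cmd() (if FSM_CMD_FLAG_WAIT_FOR_COMPLETION is set) and
 * free the command structure.
 */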
static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result)
{
	if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		*cmd->ret = result;
		complete_all(cmd->done);
	}

	kfree(cmd);
}

static void fsm_del_kf_event(struct t7xx_fsm_event *event)
{
	list_del(&event->entry);
	kfree(event);
}

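/*
 * Drain both queues, failing pending commands with -EINVAL and dropping
 * queued events. Called on FSM reset, on teardown, and after an unknown
 * command is seen by the FSM thread.
 */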
static void fsm_flush_event_cmd_qs(struct t7xx_fsm_ctl *ctl)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_event *event, *evt_next;
	struct t7xx_fsm_command *cmd, *cmd_next;
	unsigned long flags;

	spin_lock_irqsave(&ctl->command_lock, flags);
	list_for_each_entry_safe(cmd, cmd_next, &ctl->command_queue, entry) {
		dev_warn(dev, "Unhandled command %d\n", cmd->cmd_id);
		list_del(&cmd->entry);
		fsm_finish_command(ctl, cmd, -EINVAL);
	}
	spin_unlock_irqrestore(&ctl->command_lock, flags);

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
		dev_warn(dev, "Unhandled event %d\n", event->event_id);
		fsm_del_kf_event(event);
	}
	spin_unlock_irqrestore(&ctl->event_lock, flags);
}

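/*
 * Poll the event queue until the expected event arrives or the retry budget
 * is exhausted. Only the head of the queue is inspected: an event_ignore
 * entry at the head is consumed and the wait continues; any other event
 * leaves the queue untouched and the thread sleeps for one poll interval.
 */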
static void fsm_wait_for_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_expected,
			       enum t7xx_fsm_event_state event_ignore, int retries)
{
	struct t7xx_fsm_event *event;
	bool event_received = false;
	unsigned long flags;
	int cnt = 0;

	while (cnt++ < retries && !event_received) {
		bool sleep_required = true;

		if (kthread_should_stop())
			return;

		spin_lock_irqsave(&ctl->event_lock, flags);
		event = list_first_entry_or_null(&ctl->event_queue, struct t7xx_fsm_event, entry);
		if (event) {
			event_received = event->event_id == event_expected;
			if (event_received || event->event_id == event_ignore) {
				fsm_del_kf_event(event);
				sleep_required = false;
			}
		}
		spin_unlock_irqrestore(&ctl->event_lock, flags);

		if (sleep_required)
			msleep(FSM_EVENT_POLL_INTERVAL_MS);
	}
}

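/*
 * Handle a modem exception. For EXCEPTION_EVENT, broadcast the exception
 * state, run the exception handshake, and then wait (with timeouts) for the
 * MD_EX_REC_OK and MD_EX_PASS events from the modem.
 */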
static void fsm_routine_exception(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd,
				  enum t7xx_ex_reason reason)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;

	if (ctl->curr_state != FSM_STATE_READY && ctl->curr_state != FSM_STATE_STARTING) {
		if (cmd)
			fsm_finish_command(ctl, cmd, -EINVAL);

		return;
	}

	ctl->curr_state = FSM_STATE_EXCEPTION;

	switch (reason) {
	case EXCEPTION_HS_TIMEOUT:
		dev_err(dev, "Boot Handshake failure\n");
		break;

	case EXCEPTION_EVENT:
		dev_err(dev, "Exception event\n");
		t7xx_fsm_broadcast_state(ctl, MD_STATE_EXCEPTION);
		t7xx_pci_pm_exp_detected(ctl->md->t7xx_dev);
		t7xx_md_exception_handshake(ctl->md);

		fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_REC_OK, FSM_EVENT_MD_EX,
				   FSM_MD_EX_REC_OK_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
		fsm_wait_for_event(ctl, FSM_EVENT_MD_EX_PASS, FSM_EVENT_INVALID,
				   FSM_MD_EX_PASS_TIMEOUT_MS / FSM_EVENT_POLL_INTERVAL_MS);
		break;

	default:
		dev_err(dev, "Exception %d\n", reason);
		break;
	}

	if (cmd)
		fsm_finish_command(ctl, cmd, 0);
}

static int fsm_stopped_handler(struct t7xx_fsm_ctl *ctl)
{
	ctl->curr_state = FSM_STATE_STOPPED;

	t7xx_fsm_broadcast_state(ctl, MD_STATE_STOPPED);
	return t7xx_md_reset(ctl->md->t7xx_dev);
}

static void fsm_routine_stopped(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	if (ctl->curr_state == FSM_STATE_STOPPED) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}

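/*
 * Orderly stop: halt CLDMA traffic and, unless the reset (RGU) interrupt
 * already fired, ask the device to disable DRM before attempting an ACPI
 * function-level reset, falling back to an MHCCIF device-reset doorbell.
 */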
static void fsm_routine_stopping(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	struct t7xx_pci_dev *t7xx_dev;
	struct cldma_ctrl *md_ctrl;
	int err;

	if (ctl->curr_state == FSM_STATE_STOPPED || ctl->curr_state == FSM_STATE_STOPPING) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	md_ctrl = ctl->md->md_ctrl[CLDMA_ID_MD];
	t7xx_dev = ctl->md->t7xx_dev;

	ctl->curr_state = FSM_STATE_STOPPING;
	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_TO_STOP);
	t7xx_cldma_stop(md_ctrl);

	if (!ctl->md->rgu_irq_asserted) {
		t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DRM_DISABLE_AP);
		/* Wait for the DRM disable to take effect */
		msleep(FSM_DRM_DISABLE_DELAY_MS);

		err = t7xx_acpi_fldr_func(t7xx_dev);
		if (err)
			t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DEVICE_RESET);
	}

	fsm_finish_command(ctl, cmd, fsm_stopped_handler(ctl));
}

static void t7xx_fsm_broadcast_ready_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl->md_state != MD_STATE_WAITING_FOR_HS2)
		return;

	ctl->md_state = MD_STATE_READY;

	fsm_state_notify(ctl->md, MD_STATE_READY);
	t7xx_port_proxy_md_status_notify(ctl->md->port_prox, MD_STATE_READY);
}

static void fsm_routine_ready(struct t7xx_fsm_ctl *ctl)
{
	struct t7xx_modem *md = ctl->md;

	ctl->curr_state = FSM_STATE_READY;
	t7xx_fsm_broadcast_ready_state(ctl);
	t7xx_md_event_notify(md, FSM_READY);
}

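/*
 * Drive the handshake phase: announce HS1, then sleep (up to 60 seconds)
 * until both the MD and AP cores report ready or an exception is flagged.
 * A timeout on either core is treated as a handshake exception.
 */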
static int fsm_routine_starting(struct t7xx_fsm_ctl *ctl)
{
	struct t7xx_modem *md = ctl->md;
	struct device *dev;

	ctl->curr_state = FSM_STATE_STARTING;

	t7xx_fsm_broadcast_state(ctl, MD_STATE_WAITING_FOR_HS1);
	t7xx_md_event_notify(md, FSM_START);

	wait_event_interruptible_timeout(ctl->async_hk_wq,
					 (md->core_md.ready && md->core_ap.ready) ||
					 ctl->exp_flg, HZ * 60);
	dev = &md->t7xx_dev->pdev->dev;

	if (ctl->exp_flg)
		dev_err(dev, "MD exception is captured during handshake\n");

	if (!md->core_md.ready) {
		dev_err(dev, "MD handshake timeout\n");
		if (md->core_md.handshake_ongoing)
			t7xx_fsm_append_event(ctl, FSM_EVENT_MD_HS2_EXIT, NULL, 0);

		fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
		return -ETIMEDOUT;
	} else if (!md->core_ap.ready) {
		dev_err(dev, "AP handshake timeout\n");
		if (md->core_ap.handshake_ongoing)
			t7xx_fsm_append_event(ctl, FSM_EVENT_AP_HS2_EXIT, NULL, 0);

		fsm_routine_exception(ctl, NULL, EXCEPTION_HS_TIMEOUT);
		return -ETIMEDOUT;
	}

	t7xx_pci_pm_init_late(md->t7xx_dev);
	fsm_routine_ready(ctl);
	return 0;
}

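/*
 * FSM_CMD_START handler: poll T7XX_PCIE_MISC_DEV_STATUS (every 20 ms, for up
 * to 2 s) until the device reports the Linux stage, then bring up both CLDMA
 * interfaces and enter the starting routine.
 */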
static void fsm_routine_start(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd)
{
	struct t7xx_modem *md = ctl->md;
	u32 dev_status;
	int ret;

	if (!md)
		return;

	if (ctl->curr_state != FSM_STATE_INIT && ctl->curr_state != FSM_STATE_PRE_START &&
	    ctl->curr_state != FSM_STATE_STOPPED) {
		fsm_finish_command(ctl, cmd, -EINVAL);
		return;
	}

	ctl->curr_state = FSM_STATE_PRE_START;
	t7xx_md_event_notify(md, FSM_PRE_START);

	ret = read_poll_timeout(ioread32, dev_status,
				(dev_status & MISC_STAGE_MASK) == LINUX_STAGE, 20000, 2000000,
				false, IREG_BASE(md->t7xx_dev) + T7XX_PCIE_MISC_DEV_STATUS);
	if (ret) {
		struct device *dev = &md->t7xx_dev->pdev->dev;

		fsm_finish_command(ctl, cmd, -ETIMEDOUT);
		dev_err(dev, "Invalid device status 0x%lx\n", dev_status & MISC_STAGE_MASK);
		return;
	}

	t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_AP]);
	t7xx_cldma_hif_hw_init(md->md_ctrl[CLDMA_ID_MD]);
	fsm_finish_command(ctl, cmd, fsm_routine_starting(ctl));
}

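/*
 * FSM worker thread: sleep until a command is queued, then dequeue and
 * dispatch it to the matching routine. Commands are therefore serialized;
 * an unknown command ID fails the command and flushes both queues.
 */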
static int fsm_main_thread(void *data)
{
	struct t7xx_fsm_ctl *ctl = data;
	struct t7xx_fsm_command *cmd;
	unsigned long flags;

	while (!kthread_should_stop()) {
		if (wait_event_interruptible(ctl->command_wq, !list_empty(&ctl->command_queue) ||
					     kthread_should_stop()))
			continue;

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&ctl->command_lock, flags);
		cmd = list_first_entry(&ctl->command_queue, struct t7xx_fsm_command, entry);
		list_del(&cmd->entry);
		spin_unlock_irqrestore(&ctl->command_lock, flags);

		switch (cmd->cmd_id) {
		case FSM_CMD_START:
			fsm_routine_start(ctl, cmd);
			break;

		case FSM_CMD_EXCEPTION:
			fsm_routine_exception(ctl, cmd, FIELD_GET(FSM_CMD_EX_REASON, cmd->flag));
			break;

		case FSM_CMD_PRE_STOP:
			fsm_routine_stopping(ctl, cmd);
			break;

		case FSM_CMD_STOP:
			fsm_routine_stopped(ctl, cmd);
			break;

		default:
			fsm_finish_command(ctl, cmd, -EINVAL);
			fsm_flush_event_cmd_qs(ctl);
			break;
		}
	}

	return 0;
}

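/*
 * Queue a command for the FSM thread. With FSM_CMD_FLAG_WAIT_FOR_COMPLETION
 * the caller blocks (up to FSM_CMD_TIMEOUT_MS) for the result. Note that on
 * timeout the command may still be queued: cmd->done and cmd->ret reference
 * this stack frame, so a late completion would write through stale pointers.
 */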
int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct t7xx_fsm_command *cmd;
	unsigned long flags;
	int ret;

	cmd = kzalloc(sizeof(*cmd), flag & FSM_CMD_FLAG_IN_INTERRUPT ? GFP_ATOMIC : GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&cmd->entry);
	cmd->cmd_id = cmd_id;
	cmd->flag = flag;
	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		cmd->done = &done;
		cmd->ret = &ret;
	}

	spin_lock_irqsave(&ctl->command_lock, flags);
	list_add_tail(&cmd->entry, &ctl->command_queue);
	spin_unlock_irqrestore(&ctl->command_lock, flags);

	wake_up(&ctl->command_wq);

	if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
		unsigned long wait_ret;

		wait_ret = wait_for_completion_timeout(&done,
						       msecs_to_jiffies(FSM_CMD_TIMEOUT_MS));
		if (!wait_ret)
			return -ETIMEDOUT;

		return ret;
	}

	return 0;
}

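/*
 * Queue an event for the FSM, copying the optional payload inline
 * (struct_size() sizes the flexible array). GFP_ATOMIC is used when called
 * from interrupt context.
 */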
int t7xx_fsm_append_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id,
			  unsigned char *data, unsigned int length)
{
	struct device *dev = &ctl->md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_event *event;
	unsigned long flags;

	if (event_id <= FSM_EVENT_INVALID || event_id >= FSM_EVENT_MAX) {
		dev_err(dev, "Invalid event %d\n", event_id);
		return -EINVAL;
	}

	event = kmalloc(struct_size(event, data, length),
			in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	INIT_LIST_HEAD(&event->entry);
	event->event_id = event_id;
	event->length = length;

	if (data && length)
		memcpy(event->data, data, length);

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_add_tail(&event->entry, &ctl->event_queue);
	spin_unlock_irqrestore(&ctl->event_lock, flags);

	wake_up_all(&ctl->event_wq);
	return 0;
}

void t7xx_fsm_clr_event(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_event_state event_id)
{
	struct t7xx_fsm_event *event, *evt_next;
	unsigned long flags;

	spin_lock_irqsave(&ctl->event_lock, flags);
	list_for_each_entry_safe(event, evt_next, &ctl->event_queue, entry) {
		if (event->event_id == event_id)
			fsm_del_kf_event(event);
	}
	spin_unlock_irqrestore(&ctl->event_lock, flags);
}

enum md_state t7xx_fsm_get_md_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl)
		return ctl->md_state;

	return MD_STATE_INVALID;
}

unsigned int t7xx_fsm_get_ctl_state(struct t7xx_fsm_ctl *ctl)
{
	if (ctl)
		return ctl->curr_state;

	return FSM_STATE_STOPPED;
}

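/*
 * Interrupt-context entry point: translate a modem IRQ into an FSM command.
 * Port enumeration starts the FSM; a CCIF exception sets exp_flg, wakes any
 * handshake waiter, and queues an exception command with the encoded reason.
 */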
int t7xx_fsm_recv_md_intr(struct t7xx_fsm_ctl *ctl, enum t7xx_md_irq_type type)
{
	unsigned int cmd_flags = FSM_CMD_FLAG_IN_INTERRUPT;

	if (type == MD_IRQ_PORT_ENUM) {
		return t7xx_fsm_append_cmd(ctl, FSM_CMD_START, cmd_flags);
	} else if (type == MD_IRQ_CCIF_EX) {
		ctl->exp_flg = true;
		wake_up(&ctl->async_hk_wq);
		cmd_flags |= FIELD_PREP(FSM_CMD_EX_REASON, EXCEPTION_EVENT);
		return t7xx_fsm_append_cmd(ctl, FSM_CMD_EXCEPTION, cmd_flags);
	}

	return -EINVAL;
}

void t7xx_fsm_reset(struct t7xx_modem *md)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	fsm_flush_event_cmd_qs(ctl);
	ctl->curr_state = FSM_STATE_STOPPED;
	ctl->exp_flg = false;
}

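/*
 * Allocate the FSM controller (device-managed), initialize its queues,
 * wait queues and locks, and start the worker thread.
 */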
int t7xx_fsm_init(struct t7xx_modem *md)
{
	struct device *dev = &md->t7xx_dev->pdev->dev;
	struct t7xx_fsm_ctl *ctl;

	ctl = devm_kzalloc(dev, sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return -ENOMEM;

	md->fsm_ctl = ctl;
	ctl->md = md;
	ctl->curr_state = FSM_STATE_INIT;
	INIT_LIST_HEAD(&ctl->command_queue);
	INIT_LIST_HEAD(&ctl->event_queue);
	init_waitqueue_head(&ctl->async_hk_wq);
	init_waitqueue_head(&ctl->event_wq);
	INIT_LIST_HEAD(&ctl->notifier_list);
	init_waitqueue_head(&ctl->command_wq);
	spin_lock_init(&ctl->event_lock);
	spin_lock_init(&ctl->command_lock);
	ctl->exp_flg = false;
	spin_lock_init(&ctl->notifier_lock);

	ctl->fsm_thread = kthread_run(fsm_main_thread, ctl, "t7xx_fsm");
	return PTR_ERR_OR_ZERO(ctl->fsm_thread);
}

void t7xx_fsm_uninit(struct t7xx_modem *md)
{
	struct t7xx_fsm_ctl *ctl = md->fsm_ctl;

	if (!ctl)
		return;

	if (ctl->fsm_thread)
		kthread_stop(ctl->fsm_thread);

	fsm_flush_event_cmd_qs(ctl);
}