1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright 2015-2017 Google, Inc
4 *
5 * USB Power Delivery protocol stack.
6 */
7
8#include <linux/completion.h>
9#include <linux/debugfs.h>
10#include <linux/device.h>
11#include <linux/jiffies.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/mutex.h>
15#include <linux/proc_fs.h>
16#include <linux/sched/clock.h>
17#include <linux/seq_file.h>
18#include <linux/slab.h>
19#include <linux/spinlock.h>
20#include <linux/usb/pd.h>
21#include <linux/usb/pd_bdo.h>
22#include <linux/usb/pd_vdo.h>
23#include <linux/usb/role.h>
24#include <linux/usb/tcpm.h>
25#include <linux/usb/typec.h>
26#include <linux/workqueue.h>
27
28#define FOREACH_STATE(S) \
29 S(INVALID_STATE), \
30 S(DRP_TOGGLING), \
31 S(SRC_UNATTACHED), \
32 S(SRC_ATTACH_WAIT), \
33 S(SRC_ATTACHED), \
34 S(SRC_STARTUP), \
35 S(SRC_SEND_CAPABILITIES), \
36 S(SRC_NEGOTIATE_CAPABILITIES), \
37 S(SRC_TRANSITION_SUPPLY), \
38 S(SRC_READY), \
39 S(SRC_WAIT_NEW_CAPABILITIES), \
40 \
41 S(SNK_UNATTACHED), \
42 S(SNK_ATTACH_WAIT), \
43 S(SNK_DEBOUNCED), \
44 S(SNK_ATTACHED), \
45 S(SNK_STARTUP), \
46 S(SNK_DISCOVERY), \
47 S(SNK_DISCOVERY_DEBOUNCE), \
48 S(SNK_DISCOVERY_DEBOUNCE_DONE), \
49 S(SNK_WAIT_CAPABILITIES), \
50 S(SNK_NEGOTIATE_CAPABILITIES), \
51 S(SNK_TRANSITION_SINK), \
52 S(SNK_TRANSITION_SINK_VBUS), \
53 S(SNK_READY), \
54 \
55 S(ACC_UNATTACHED), \
56 S(DEBUG_ACC_ATTACHED), \
57 S(AUDIO_ACC_ATTACHED), \
58 S(AUDIO_ACC_DEBOUNCE), \
59 \
60 S(HARD_RESET_SEND), \
61 S(HARD_RESET_START), \
62 S(SRC_HARD_RESET_VBUS_OFF), \
63 S(SRC_HARD_RESET_VBUS_ON), \
64 S(SNK_HARD_RESET_SINK_OFF), \
65 S(SNK_HARD_RESET_WAIT_VBUS), \
66 S(SNK_HARD_RESET_SINK_ON), \
67 \
68 S(SOFT_RESET), \
69 S(SOFT_RESET_SEND), \
70 \
71 S(DR_SWAP_ACCEPT), \
72 S(DR_SWAP_SEND), \
73 S(DR_SWAP_SEND_TIMEOUT), \
74 S(DR_SWAP_CANCEL), \
75 S(DR_SWAP_CHANGE_DR), \
76 \
77 S(PR_SWAP_ACCEPT), \
78 S(PR_SWAP_SEND), \
79 S(PR_SWAP_SEND_TIMEOUT), \
80 S(PR_SWAP_CANCEL), \
81 S(PR_SWAP_START), \
82 S(PR_SWAP_SRC_SNK_TRANSITION_OFF), \
83 S(PR_SWAP_SRC_SNK_SOURCE_OFF), \
84 S(PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED), \
85 S(PR_SWAP_SRC_SNK_SINK_ON), \
86 S(PR_SWAP_SNK_SRC_SINK_OFF), \
87 S(PR_SWAP_SNK_SRC_SOURCE_ON), \
88 S(PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP), \
89 \
90 S(VCONN_SWAP_ACCEPT), \
91 S(VCONN_SWAP_SEND), \
92 S(VCONN_SWAP_SEND_TIMEOUT), \
93 S(VCONN_SWAP_CANCEL), \
94 S(VCONN_SWAP_START), \
95 S(VCONN_SWAP_WAIT_FOR_VCONN), \
96 S(VCONN_SWAP_TURN_ON_VCONN), \
97 S(VCONN_SWAP_TURN_OFF_VCONN), \
98 \
99 S(SNK_TRY), \
100 S(SNK_TRY_WAIT), \
101 S(SNK_TRY_WAIT_DEBOUNCE), \
102 S(SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS), \
103 S(SRC_TRYWAIT), \
104 S(SRC_TRYWAIT_DEBOUNCE), \
105 S(SRC_TRYWAIT_UNATTACHED), \
106 \
107 S(SRC_TRY), \
108 S(SRC_TRY_WAIT), \
109 S(SRC_TRY_DEBOUNCE), \
110 S(SNK_TRYWAIT), \
111 S(SNK_TRYWAIT_DEBOUNCE), \
112 S(SNK_TRYWAIT_VBUS), \
113 S(BIST_RX), \
114 \
115 S(ERROR_RECOVERY), \
116 S(PORT_RESET), \
117 S(PORT_RESET_WAIT_OFF)
118
119#define GENERATE_ENUM(e) e
120#define GENERATE_STRING(s) #s
121
122enum tcpm_state {
123 FOREACH_STATE(GENERATE_ENUM)
124};
125
126static const char * const tcpm_states[] = {
127 FOREACH_STATE(GENERATE_STRING)
128};
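/*
 * FOREACH_STATE() is an X-macro: the same state list is expanded once
 * with GENERATE_ENUM and once with GENERATE_STRING, so the tcpm_state
 * enum and the tcpm_states[] name table stay in sync by construction.
 * For example, FOREACH_STATE(GENERATE_ENUM) expands to
 *	INVALID_STATE, DRP_TOGGLING, SRC_UNATTACHED, ...
 * and FOREACH_STATE(GENERATE_STRING) to
 *	"INVALID_STATE", "DRP_TOGGLING", "SRC_UNATTACHED", ...
 * which is what lets tcpm_states[port->state] name the current state.
 */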
129
130enum vdm_states {
131 VDM_STATE_ERR_BUSY = -3,
132 VDM_STATE_ERR_SEND = -2,
133 VDM_STATE_ERR_TMOUT = -1,
134 VDM_STATE_DONE = 0,
135 /* Anything >0 represents an active state */
136 VDM_STATE_READY = 1,
137 VDM_STATE_BUSY = 2,
138 VDM_STATE_WAIT_RSP_BUSY = 3,
139};
140
141enum pd_msg_request {
142 PD_MSG_NONE = 0,
143 PD_MSG_CTRL_REJECT,
144 PD_MSG_CTRL_WAIT,
145 PD_MSG_DATA_SINK_CAP,
146 PD_MSG_DATA_SOURCE_CAP,
147};
148
149/* Events from low level driver */
150
151#define TCPM_CC_EVENT BIT(0)
152#define TCPM_VBUS_EVENT BIT(1)
153#define TCPM_RESET_EVENT BIT(2)
154
155#define LOG_BUFFER_ENTRIES 1024
156#define LOG_BUFFER_ENTRY_SIZE 128
157
158/* Alternate mode support */
159
160#define SVID_DISCOVERY_MAX 16
161
162struct pd_mode_data {
163 int svid_index; /* current SVID index */
164 int nsvids;
165 u16 svids[SVID_DISCOVERY_MAX];
166 int altmodes; /* number of alternate modes */
167 struct typec_altmode_desc altmode_desc[SVID_DISCOVERY_MAX];
168};
169
170struct tcpm_port {
171 struct device *dev;
172
173 struct mutex lock; /* tcpm state machine lock */
174 struct workqueue_struct *wq;
175
176 struct typec_capability typec_caps;
177 struct typec_port *typec_port;
178
179 struct tcpc_dev *tcpc;
180 struct usb_role_switch *role_sw;
181
182 enum typec_role vconn_role;
183 enum typec_role pwr_role;
184 enum typec_data_role data_role;
185 enum typec_pwr_opmode pwr_opmode;
186
187 struct usb_pd_identity partner_ident;
188 struct typec_partner_desc partner_desc;
189 struct typec_partner *partner;
190
191 enum typec_cc_status cc_req;
192
193 enum typec_cc_status cc1;
194 enum typec_cc_status cc2;
195 enum typec_cc_polarity polarity;
196
197 bool attached;
198 bool connected;
199 enum typec_port_type port_type;
200 bool vbus_present;
201 bool vbus_never_low;
202 bool vbus_source;
203 bool vbus_charge;
204
205 bool send_discover;
206 bool op_vsafe5v;
207
208 int try_role;
209 int try_snk_count;
210 int try_src_count;
211
212 enum pd_msg_request queued_message;
213
214 enum tcpm_state enter_state;
215 enum tcpm_state prev_state;
216 enum tcpm_state state;
217 enum tcpm_state delayed_state;
218 unsigned long delayed_runtime;
219 unsigned long delay_ms;
220
221 spinlock_t pd_event_lock;
222 u32 pd_events;
223
224 struct work_struct event_work;
225 struct delayed_work state_machine;
226 struct delayed_work vdm_state_machine;
227 bool state_machine_running;
228
229 struct completion tx_complete;
230 enum tcpm_transmit_status tx_status;
231
232 struct mutex swap_lock; /* swap command lock */
233 bool swap_pending;
234 bool non_pd_role_swap;
235 struct completion swap_complete;
236 int swap_status;
237
238 unsigned int message_id;
239 unsigned int caps_count;
240 unsigned int hard_reset_count;
241 bool pd_capable;
242 bool explicit_contract;
243 unsigned int rx_msgid;
244
245 /* Partner capabilities/requests */
246 u32 sink_request;
247 u32 source_caps[PDO_MAX_OBJECTS];
248 unsigned int nr_source_caps;
249 u32 sink_caps[PDO_MAX_OBJECTS];
250 unsigned int nr_sink_caps;
251
252 /* Local capabilities */
253 u32 src_pdo[PDO_MAX_OBJECTS];
254 unsigned int nr_src_pdo;
255 u32 snk_pdo[PDO_MAX_OBJECTS];
256 unsigned int nr_snk_pdo;
257 u32 snk_vdo[VDO_MAX_OBJECTS];
258 unsigned int nr_snk_vdo;
259
260 unsigned int max_snk_mv;
261 unsigned int max_snk_ma;
262 unsigned int max_snk_mw;
263 unsigned int operating_snk_mw;
264
265 /* Requested current / voltage */
266 u32 current_limit;
267 u32 supply_voltage;
268
269 u32 bist_request;
270
271 /* PD state for Vendor Defined Messages */
272 enum vdm_states vdm_state;
273 u32 vdm_retries;
274 /* next Vendor Defined Message to send */
275 u32 vdo_data[VDO_MAX_SIZE];
276 u8 vdo_count;
277 /* VDO to retry if UFP responder replied busy */
278 u32 vdo_retry;
279
280 /* Alternate mode data */
281
282 struct pd_mode_data mode_data;
283 struct typec_altmode *partner_altmode[SVID_DISCOVERY_MAX];
284 struct typec_altmode *port_altmode[SVID_DISCOVERY_MAX];
285
286 /* Deadline in jiffies to exit src_try_wait state */
287 unsigned long max_wait;
288
289#ifdef CONFIG_DEBUG_FS
290 struct dentry *dentry;
291 struct mutex logbuffer_lock; /* log buffer access lock */
292 int logbuffer_head;
293 int logbuffer_tail;
294 u8 *logbuffer[LOG_BUFFER_ENTRIES];
295#endif
296};
297
298struct pd_rx_event {
299 struct work_struct work;
300 struct tcpm_port *port;
301 struct pd_message msg;
302};
303
304#define tcpm_cc_is_sink(cc) \
305 ((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
306 (cc) == TYPEC_CC_RP_3_0)
307
308#define tcpm_port_is_sink(port) \
309 ((tcpm_cc_is_sink((port)->cc1) && !tcpm_cc_is_sink((port)->cc2)) || \
310 (tcpm_cc_is_sink((port)->cc2) && !tcpm_cc_is_sink((port)->cc1)))
311
312#define tcpm_cc_is_source(cc) ((cc) == TYPEC_CC_RD)
313#define tcpm_cc_is_audio(cc) ((cc) == TYPEC_CC_RA)
314#define tcpm_cc_is_open(cc) ((cc) == TYPEC_CC_OPEN)
315
316#define tcpm_port_is_source(port) \
317 ((tcpm_cc_is_source((port)->cc1) && \
318 !tcpm_cc_is_source((port)->cc2)) || \
319 (tcpm_cc_is_source((port)->cc2) && \
320 !tcpm_cc_is_source((port)->cc1)))
321
322#define tcpm_port_is_debug(port) \
323 (tcpm_cc_is_source((port)->cc1) && tcpm_cc_is_source((port)->cc2))
324
325#define tcpm_port_is_audio(port) \
326 (tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_audio((port)->cc2))
327
328#define tcpm_port_is_audio_detached(port) \
329 ((tcpm_cc_is_audio((port)->cc1) && tcpm_cc_is_open((port)->cc2)) || \
330 (tcpm_cc_is_audio((port)->cc2) && tcpm_cc_is_open((port)->cc1)))
331
332#define tcpm_try_snk(port) \
333 ((port)->try_snk_count == 0 && (port)->try_role == TYPEC_SINK && \
334 (port)->port_type == TYPEC_PORT_DRP)
335
336#define tcpm_try_src(port) \
337 ((port)->try_src_count == 0 && (port)->try_role == TYPEC_SOURCE && \
338 (port)->port_type == TYPEC_PORT_DRP)
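/*
 * The helpers above decode the raw CC pin status: the port is a sink
 * candidate when exactly one CC pin sees a source pull-up (Rp at
 * default, 1.5 A or 3.0 A level), a source candidate when exactly one
 * CC pin sees Rd, a debug accessory when both pins see Rd, and an
 * audio accessory when both pins see Ra.  tcpm_try_snk()/tcpm_try_src()
 * gate the Type-C Try.SNK/Try.SRC role-preference sequences: they only
 * apply to DRP ports with a matching preferred role, and only while
 * the per-attach try counter is still zero.
 */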
339
340static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
341{
342 if (port->port_type == TYPEC_PORT_DRP) {
343 if (port->try_role == TYPEC_SINK)
344 return SNK_UNATTACHED;
345 else if (port->try_role == TYPEC_SOURCE)
346 return SRC_UNATTACHED;
347 else if (port->tcpc->config->default_role == TYPEC_SINK)
348 return SNK_UNATTACHED;
349 /* Fall through to return SRC_UNATTACHED */
350 } else if (port->port_type == TYPEC_PORT_SNK) {
351 return SNK_UNATTACHED;
352 }
353 return SRC_UNATTACHED;
354}
355
356static inline
357struct tcpm_port *typec_cap_to_tcpm(const struct typec_capability *cap)
358{
359 return container_of(cap, struct tcpm_port, typec_caps);
360}
361
362static bool tcpm_port_is_disconnected(struct tcpm_port *port)
363{
364 return (!port->attached && port->cc1 == TYPEC_CC_OPEN &&
365 port->cc2 == TYPEC_CC_OPEN) ||
366 (port->attached && ((port->polarity == TYPEC_POLARITY_CC1 &&
367 port->cc1 == TYPEC_CC_OPEN) ||
368 (port->polarity == TYPEC_POLARITY_CC2 &&
369 port->cc2 == TYPEC_CC_OPEN)));
370}
371
372/*
373 * Logging
374 */
375
376#ifdef CONFIG_DEBUG_FS
377
378static bool tcpm_log_full(struct tcpm_port *port)
379{
380 return port->logbuffer_tail ==
381 (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
382}
383
384__printf(2, 0)
385static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
386{
387 char tmpbuffer[LOG_BUFFER_ENTRY_SIZE];
388 u64 ts_nsec = local_clock();
389 unsigned long rem_nsec;
390
391 if (!port->logbuffer[port->logbuffer_head]) {
392 port->logbuffer[port->logbuffer_head] =
393 kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
394 if (!port->logbuffer[port->logbuffer_head])
395 return;
396 }
397
398 vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
399
400 mutex_lock(&port->logbuffer_lock);
401
402 if (tcpm_log_full(port)) {
403 port->logbuffer_head = max(port->logbuffer_head - 1, 0);
404 strcpy(tmpbuffer, "overflow");
405 }
406
407 if (port->logbuffer_head < 0 ||
408 port->logbuffer_head >= LOG_BUFFER_ENTRIES) {
409 dev_warn(port->dev,
410 "Bad log buffer index %d\n", port->logbuffer_head);
411 goto abort;
412 }
413
414 if (!port->logbuffer[port->logbuffer_head]) {
415 dev_warn(port->dev,
416 "Log buffer index %d is NULL\n", port->logbuffer_head);
417 goto abort;
418 }
419
420 rem_nsec = do_div(ts_nsec, 1000000000);
421 scnprintf(port->logbuffer[port->logbuffer_head],
422 LOG_BUFFER_ENTRY_SIZE, "[%5lu.%06lu] %s",
423 (unsigned long)ts_nsec, rem_nsec / 1000,
424 tmpbuffer);
425 port->logbuffer_head = (port->logbuffer_head + 1) % LOG_BUFFER_ENTRIES;
426
427abort:
428 mutex_unlock(&port->logbuffer_lock);
429}
430
431__printf(2, 3)
432static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
433{
434 va_list args;
435
436 /* Do not log while disconnected and unattached */
437 if (tcpm_port_is_disconnected(port) &&
438 (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
439 port->state == DRP_TOGGLING))
440 return;
441
442 va_start(args, fmt);
443 _tcpm_log(port, fmt, args);
444 va_end(args);
445}
446
447__printf(2, 3)
448static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...)
449{
450 va_list args;
451
452 va_start(args, fmt);
453 _tcpm_log(port, fmt, args);
454 va_end(args);
455}
456
457static void tcpm_log_source_caps(struct tcpm_port *port)
458{
459 int i;
460
461 for (i = 0; i < port->nr_source_caps; i++) {
462 u32 pdo = port->source_caps[i];
463 enum pd_pdo_type type = pdo_type(pdo);
464 char msg[64];
465
466 switch (type) {
467 case PDO_TYPE_FIXED:
468 scnprintf(msg, sizeof(msg),
469 "%u mV, %u mA [%s%s%s%s%s%s]",
470 pdo_fixed_voltage(pdo),
471 pdo_max_current(pdo),
472 (pdo & PDO_FIXED_DUAL_ROLE) ?
473 "R" : "",
474 (pdo & PDO_FIXED_SUSPEND) ?
475 "S" : "",
476 (pdo & PDO_FIXED_HIGHER_CAP) ?
477 "H" : "",
478 (pdo & PDO_FIXED_USB_COMM) ?
479 "U" : "",
480 (pdo & PDO_FIXED_DATA_SWAP) ?
481 "D" : "",
482 (pdo & PDO_FIXED_EXTPOWER) ?
483 "E" : "");
484 break;
485 case PDO_TYPE_VAR:
486 scnprintf(msg, sizeof(msg),
487 "%u-%u mV, %u mA",
488 pdo_min_voltage(pdo),
489 pdo_max_voltage(pdo),
490 pdo_max_current(pdo));
491 break;
492 case PDO_TYPE_BATT:
493 scnprintf(msg, sizeof(msg),
494 "%u-%u mV, %u mW",
495 pdo_min_voltage(pdo),
496 pdo_max_voltage(pdo),
497 pdo_max_power(pdo));
498 break;
499 default:
500 strcpy(msg, "undefined");
501 break;
502 }
503 tcpm_log(port, " PDO %d: type %d, %s",
504 i, type, msg);
505 }
506}
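/*
 * The single-letter flags logged above for fixed PDOs stand for
 * dual-Role power, Suspend supported, Higher capability, USB
 * communication capable, Data role swap and Externally powered,
 * matching the PDO_FIXED_* bits tested in the format string.
 */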
507
508static int tcpm_debug_show(struct seq_file *s, void *v)
509{
510 struct tcpm_port *port = (struct tcpm_port *)s->private;
511 int tail;
512
513 mutex_lock(&port->logbuffer_lock);
514 tail = port->logbuffer_tail;
515 while (tail != port->logbuffer_head) {
516 seq_printf(s, "%s\n", port->logbuffer[tail]);
517 tail = (tail + 1) % LOG_BUFFER_ENTRIES;
518 }
519 if (!seq_has_overflowed(s))
520 port->logbuffer_tail = tail;
521 mutex_unlock(&port->logbuffer_lock);
522
523 return 0;
524}
525DEFINE_SHOW_ATTRIBUTE(tcpm_debug);
526
527static struct dentry *rootdir;
528
529static int tcpm_debugfs_init(struct tcpm_port *port)
530{
531 mutex_init(&port->logbuffer_lock);
532 /* /sys/kernel/debug/tcpm/usbcX */
533 if (!rootdir) {
534 rootdir = debugfs_create_dir("tcpm", NULL);
535 if (!rootdir)
536 return -ENOMEM;
537 }
538
539 port->dentry = debugfs_create_file(dev_name(port->dev),
540 S_IFREG | 0444, rootdir,
541 port, &tcpm_debug_fops);
542
543 return 0;
544}
545
546static void tcpm_debugfs_exit(struct tcpm_port *port)
547{
548 debugfs_remove(port->dentry);
549}
550
551#else
552
553__printf(2, 3)
554static void tcpm_log(const struct tcpm_port *port, const char *fmt, ...) { }
555__printf(2, 3)
556static void tcpm_log_force(struct tcpm_port *port, const char *fmt, ...) { }
557static void tcpm_log_source_caps(struct tcpm_port *port) { }
558static int tcpm_debugfs_init(const struct tcpm_port *port) { return 0; }
559static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
560
561#endif
562
563static int tcpm_pd_transmit(struct tcpm_port *port,
564 enum tcpm_transmit_type type,
565 const struct pd_message *msg)
566{
567 unsigned long timeout;
568 int ret;
569
570 if (msg)
571 tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
572 else
573 tcpm_log(port, "PD TX, type: %#x", type);
574
575 reinit_completion(&port->tx_complete);
576 ret = port->tcpc->pd_transmit(port->tcpc, type, msg);
577 if (ret < 0)
578 return ret;
579
580 mutex_unlock(&port->lock);
581 timeout = wait_for_completion_timeout(&port->tx_complete,
582 msecs_to_jiffies(PD_T_TCPC_TX_TIMEOUT));
583 mutex_lock(&port->lock);
584 if (!timeout)
585 return -ETIMEDOUT;
586
587 switch (port->tx_status) {
588 case TCPC_TX_SUCCESS:
589 port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
590 return 0;
591 case TCPC_TX_DISCARDED:
592 return -EAGAIN;
593 case TCPC_TX_FAILED:
594 default:
595 return -EIO;
596 }
597}
598
599void tcpm_pd_transmit_complete(struct tcpm_port *port,
600 enum tcpm_transmit_status status)
601{
602 tcpm_log(port, "PD TX complete, status: %u", status);
603 port->tx_status = status;
604 complete(&port->tx_complete);
605}
606EXPORT_SYMBOL_GPL(tcpm_pd_transmit_complete);
607
608static int tcpm_mux_set(struct tcpm_port *port, enum tcpc_mux_mode mode,
609 enum usb_role usb_role,
610 enum typec_orientation orientation)
611{
612 int ret;
613
614 tcpm_log(port, "Requesting mux mode %d, usb-role %d, orientation %d",
615 mode, usb_role, orientation);
616
617 ret = typec_set_orientation(port->typec_port, orientation);
618 if (ret)
619 return ret;
620
621 if (port->role_sw) {
622 ret = usb_role_switch_set_role(port->role_sw, usb_role);
623 if (ret)
624 return ret;
625 }
626
627 return typec_set_mode(port->typec_port, mode);
628}
629
630static int tcpm_set_polarity(struct tcpm_port *port,
631 enum typec_cc_polarity polarity)
632{
633 int ret;
634
635 tcpm_log(port, "polarity %d", polarity);
636
637 ret = port->tcpc->set_polarity(port->tcpc, polarity);
638 if (ret < 0)
639 return ret;
640
641 port->polarity = polarity;
642
643 return 0;
644}
645
646static int tcpm_set_vconn(struct tcpm_port *port, bool enable)
647{
648 int ret;
649
650 tcpm_log(port, "vconn:=%d", enable);
651
652 ret = port->tcpc->set_vconn(port->tcpc, enable);
653 if (!ret) {
654 port->vconn_role = enable ? TYPEC_SOURCE : TYPEC_SINK;
655 typec_set_vconn_role(port->typec_port, port->vconn_role);
656 }
657
658 return ret;
659}
660
661static u32 tcpm_get_current_limit(struct tcpm_port *port)
662{
663 enum typec_cc_status cc;
664 u32 limit;
665
666 cc = port->polarity ? port->cc2 : port->cc1;
667 switch (cc) {
668 case TYPEC_CC_RP_1_5:
669 limit = 1500;
670 break;
671 case TYPEC_CC_RP_3_0:
672 limit = 3000;
673 break;
674 case TYPEC_CC_RP_DEF:
675 default:
676 if (port->tcpc->get_current_limit)
677 limit = port->tcpc->get_current_limit(port->tcpc);
678 else
679 limit = 0;
680 break;
681 }
682
683 return limit;
684}
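/*
 * The advertised current is taken from the CC pin selected by the
 * active polarity: Rp 1.5 A and Rp 3.0 A map directly to 1500 mA and
 * 3000 mA, while for default Rp the limit is left to the low level
 * driver (typically the USB default current) or reported as 0 if it
 * cannot tell.
 */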
685
686static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
687{
688 int ret = -EOPNOTSUPP;
689
690 tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
691
692 if (port->tcpc->set_current_limit)
693 ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
694
695 return ret;
696}
697
698/*
699 * Determine RP value to set based on maximum current supported
700 * by a port if configured as source.
701 * Returns CC value to report to link partner.
702 */
703static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
704{
705 const u32 *src_pdo = port->src_pdo;
706 int nr_pdo = port->nr_src_pdo;
707 int i;
708
709 /*
710 * Search for first entry with matching voltage.
711 * It should report the maximum supported current.
712 */
713 for (i = 0; i < nr_pdo; i++) {
714 const u32 pdo = src_pdo[i];
715
716 if (pdo_type(pdo) == PDO_TYPE_FIXED &&
717 pdo_fixed_voltage(pdo) == 5000) {
718 unsigned int curr = pdo_max_current(pdo);
719
720 if (curr >= 3000)
721 return TYPEC_CC_RP_3_0;
722 else if (curr >= 1500)
723 return TYPEC_CC_RP_1_5;
724 return TYPEC_CC_RP_DEF;
725 }
726 }
727
728 return TYPEC_CC_RP_DEF;
729}
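/*
 * For example, a port whose vSafe5V source capability is
 * PDO_FIXED(5000, 3000, 0) advertises Rp 3.0 A, one offering 1500 mA
 * advertises Rp 1.5 A, and anything lower (or a capability list with
 * no 5 V fixed PDO) falls back to default Rp.
 */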
730
731static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
732{
733 return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
734 port->data_role);
735}
736
737static int tcpm_set_roles(struct tcpm_port *port, bool attached,
738 enum typec_role role, enum typec_data_role data)
739{
740 enum typec_orientation orientation;
741 enum usb_role usb_role;
742 int ret;
743
744 if (port->polarity == TYPEC_POLARITY_CC1)
745 orientation = TYPEC_ORIENTATION_NORMAL;
746 else
747 orientation = TYPEC_ORIENTATION_REVERSE;
748
749 if (data == TYPEC_HOST)
750 usb_role = USB_ROLE_HOST;
751 else
752 usb_role = USB_ROLE_DEVICE;
753
754 ret = tcpm_mux_set(port, TYPEC_MUX_USB, usb_role, orientation);
755 if (ret < 0)
756 return ret;
757
758 ret = port->tcpc->set_roles(port->tcpc, attached, role, data);
759 if (ret < 0)
760 return ret;
761
762 port->pwr_role = role;
763 port->data_role = data;
764 typec_set_data_role(port->typec_port, data);
765 typec_set_pwr_role(port->typec_port, role);
766
767 return 0;
768}
769
770static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
771{
772 int ret;
773
774 ret = port->tcpc->set_roles(port->tcpc, true, role,
775 port->data_role);
776 if (ret < 0)
777 return ret;
778
779 port->pwr_role = role;
780 typec_set_pwr_role(port->typec_port, role);
781
782 return 0;
783}
784
785static int tcpm_pd_send_source_caps(struct tcpm_port *port)
786{
787 struct pd_message msg;
788 int i;
789
790 memset(&msg, 0, sizeof(msg));
791 if (!port->nr_src_pdo) {
792 /* No source capabilities defined, sink only */
793 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
794 port->pwr_role,
795 port->data_role,
796 port->message_id, 0);
797 } else {
798 msg.header = PD_HEADER_LE(PD_DATA_SOURCE_CAP,
799 port->pwr_role,
800 port->data_role,
801 port->message_id,
802 port->nr_src_pdo);
803 }
804 for (i = 0; i < port->nr_src_pdo; i++)
805 msg.payload[i] = cpu_to_le32(port->src_pdo[i]);
806
807 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
808}
809
810static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
811{
812 struct pd_message msg;
813 int i;
814
815 memset(&msg, 0, sizeof(msg));
816 if (!port->nr_snk_pdo) {
817 /* No sink capabilities defined, source only */
818 msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
819 port->pwr_role,
820 port->data_role,
821 port->message_id, 0);
822 } else {
823 msg.header = PD_HEADER_LE(PD_DATA_SINK_CAP,
824 port->pwr_role,
825 port->data_role,
826 port->message_id,
827 port->nr_snk_pdo);
828 }
829 for (i = 0; i < port->nr_snk_pdo; i++)
830 msg.payload[i] = cpu_to_le32(port->snk_pdo[i]);
831
832 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
833}
834
835static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
836 unsigned int delay_ms)
837{
838 if (delay_ms) {
839 tcpm_log(port, "pending state change %s -> %s @ %u ms",
840 tcpm_states[port->state], tcpm_states[state],
841 delay_ms);
842 port->delayed_state = state;
843 mod_delayed_work(port->wq, &port->state_machine,
844 msecs_to_jiffies(delay_ms));
845 port->delayed_runtime = jiffies + msecs_to_jiffies(delay_ms);
846 port->delay_ms = delay_ms;
847 } else {
848 tcpm_log(port, "state change %s -> %s",
849 tcpm_states[port->state], tcpm_states[state]);
850 port->delayed_state = INVALID_STATE;
851 port->prev_state = port->state;
852 port->state = state;
853 /*
854 * Don't re-queue the state machine work item if we're currently
855 * in the state machine and we're immediately changing states.
856 * tcpm_state_machine_work() will continue running the state
857 * machine.
858 */
859 if (!port->state_machine_running)
860 mod_delayed_work(port->wq, &port->state_machine, 0);
861 }
862}
863
864static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
865 unsigned int delay_ms)
866{
867 if (port->enter_state == port->state)
868 tcpm_set_state(port, state, delay_ms);
869 else
870 tcpm_log(port,
871 "skipped %sstate change %s -> %s [%u ms], context state %s",
872 delay_ms ? "delayed " : "",
873 tcpm_states[port->state], tcpm_states[state],
874 delay_ms, tcpm_states[port->enter_state]);
875}
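/*
 * tcpm_set_state_cond() only applies the change if the port is still
 * in the state it was in when the current state handler was entered
 * (port->enter_state).  This lets a handler schedule a fallback
 * transition that is silently dropped when an event has already moved
 * the state machine somewhere else in the meantime.
 */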
876
877static void tcpm_queue_message(struct tcpm_port *port,
878 enum pd_msg_request message)
879{
880 port->queued_message = message;
881 mod_delayed_work(port->wq, &port->state_machine, 0);
882}
883
884/*
885 * VDM/VDO handling functions
886 */
887static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
888 const u32 *data, int cnt)
889{
890 port->vdo_count = cnt + 1;
891 port->vdo_data[0] = header;
892 memcpy(&port->vdo_data[1], data, sizeof(u32) * cnt);
893 /* Set ready, vdm state machine will actually send */
894 port->vdm_retries = 0;
895 port->vdm_state = VDM_STATE_READY;
896}
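/*
 * vdo_data[0] always holds the 32-bit VDM header (built with VDO());
 * up to VDO_MAX_SIZE - 1 data objects follow.  Queueing only marks the
 * message ready - callers kick the VDM state machine work item, which
 * transmits once the port is attached and in SRC_READY/SNK_READY.
 */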
897
898static void svdm_consume_identity(struct tcpm_port *port, const __le32 *payload,
899 int cnt)
900{
901 u32 vdo = le32_to_cpu(payload[VDO_INDEX_IDH]);
902 u32 product = le32_to_cpu(payload[VDO_INDEX_PRODUCT]);
903
904 memset(&port->mode_data, 0, sizeof(port->mode_data));
905
906 port->partner_ident.id_header = vdo;
907 port->partner_ident.cert_stat = le32_to_cpu(payload[VDO_INDEX_CSTAT]);
908 port->partner_ident.product = product;
909
910 typec_partner_set_identity(port->partner);
911
912 tcpm_log(port, "Identity: %04x:%04x.%04x",
913 PD_IDH_VID(vdo),
914 PD_PRODUCT_PID(product), product & 0xffff);
915}
916
917static bool svdm_consume_svids(struct tcpm_port *port, const __le32 *payload,
918 int cnt)
919{
920 struct pd_mode_data *pmdata = &port->mode_data;
921 int i;
922
923 for (i = 1; i < cnt; i++) {
924 u32 p = le32_to_cpu(payload[i]);
925 u16 svid;
926
927 svid = (p >> 16) & 0xffff;
928 if (!svid)
929 return false;
930
931 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
932 goto abort;
933
934 pmdata->svids[pmdata->nsvids++] = svid;
935 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
936
937 svid = p & 0xffff;
938 if (!svid)
939 return false;
940
941 if (pmdata->nsvids >= SVID_DISCOVERY_MAX)
942 goto abort;
943
944 pmdata->svids[pmdata->nsvids++] = svid;
945 tcpm_log(port, "SVID %d: 0x%x", pmdata->nsvids, svid);
946 }
947 return true;
948abort:
949 tcpm_log(port, "SVID_DISCOVERY_MAX(%d) too low!", SVID_DISCOVERY_MAX);
950 return false;
951}
952
953static void svdm_consume_modes(struct tcpm_port *port, const __le32 *payload,
954 int cnt)
955{
956 struct pd_mode_data *pmdata = &port->mode_data;
957 struct typec_altmode_desc *paltmode;
958 struct typec_mode_desc *pmode;
959 int i;
960
961 if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
962 /* Already logged in svdm_consume_svids() */
963 return;
964 }
965
966 paltmode = &pmdata->altmode_desc[pmdata->altmodes];
967 memset(paltmode, 0, sizeof(*paltmode));
968
969 paltmode->svid = pmdata->svids[pmdata->svid_index];
970
971 tcpm_log(port, " Alternate mode %d: SVID 0x%04x",
972 pmdata->altmodes, paltmode->svid);
973
974 for (i = 1; i < cnt && paltmode->n_modes < ALTMODE_MAX_MODES; i++) {
975 pmode = &paltmode->modes[paltmode->n_modes];
976 memset(pmode, 0, sizeof(*pmode));
977 pmode->vdo = le32_to_cpu(payload[i]);
978 pmode->index = i - 1;
979 paltmode->n_modes++;
980 tcpm_log(port, " VDO %d: 0x%08x",
981 pmode->index, pmode->vdo);
982 }
983 port->partner_altmode[pmdata->altmodes] =
984 typec_partner_register_altmode(port->partner, paltmode);
985 if (!port->partner_altmode[pmdata->altmodes]) {
986 tcpm_log(port,
987 "Failed to register alternate modes for SVID 0x%04x",
988 paltmode->svid);
989 return;
990 }
991 pmdata->altmodes++;
992}
993
994#define supports_modal(port) PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
995
996static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
997 u32 *response)
998{
999 u32 p0 = le32_to_cpu(payload[0]);
1000 int cmd_type = PD_VDO_CMDT(p0);
1001 int cmd = PD_VDO_CMD(p0);
1002 struct pd_mode_data *modep;
1003 int rlen = 0;
1004 u16 svid;
1005 int i;
1006
1007 tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
1008 p0, cmd_type, cmd, cnt);
1009
1010 modep = &port->mode_data;
1011
1012 switch (cmd_type) {
1013 case CMDT_INIT:
1014 switch (cmd) {
1015 case CMD_DISCOVER_IDENT:
1016 /* 6.4.4.3.1: Only respond as UFP (device) */
1017 if (port->data_role == TYPEC_DEVICE &&
1018 port->nr_snk_vdo) {
1019 for (i = 0; i < port->nr_snk_vdo; i++)
1020 response[i + 1] = port->snk_vdo[i];
1021 rlen = port->nr_snk_vdo + 1;
1022 }
1023 break;
1024 case CMD_DISCOVER_SVID:
1025 break;
1026 case CMD_DISCOVER_MODES:
1027 break;
1028 case CMD_ENTER_MODE:
1029 break;
1030 case CMD_EXIT_MODE:
1031 break;
1032 case CMD_ATTENTION:
1033 break;
1034 default:
1035 break;
1036 }
1037 if (rlen >= 1) {
1038 response[0] = p0 | VDO_CMDT(CMDT_RSP_ACK);
1039 } else if (rlen == 0) {
1040 response[0] = p0 | VDO_CMDT(CMDT_RSP_NAK);
1041 rlen = 1;
1042 } else {
1043 response[0] = p0 | VDO_CMDT(CMDT_RSP_BUSY);
1044 rlen = 1;
1045 }
1046 break;
1047 case CMDT_RSP_ACK:
1048 /* silently drop message if we are not connected */
1049 if (IS_ERR_OR_NULL(port->partner))
1050 break;
1051
1052 switch (cmd) {
1053 case CMD_DISCOVER_IDENT:
1054 /* 6.4.4.3.1 */
1055 svdm_consume_identity(port, payload, cnt);
1056 response[0] = VDO(USB_SID_PD, 1, CMD_DISCOVER_SVID);
1057 rlen = 1;
1058 break;
1059 case CMD_DISCOVER_SVID:
1060 /* 6.4.4.3.2 */
1061 if (svdm_consume_svids(port, payload, cnt)) {
1062 response[0] = VDO(USB_SID_PD, 1,
1063 CMD_DISCOVER_SVID);
1064 rlen = 1;
1065 } else if (modep->nsvids && supports_modal(port)) {
1066 response[0] = VDO(modep->svids[0], 1,
1067 CMD_DISCOVER_MODES);
1068 rlen = 1;
1069 }
1070 break;
1071 case CMD_DISCOVER_MODES:
1072 /* 6.4.4.3.3 */
1073 svdm_consume_modes(port, payload, cnt);
1074 modep->svid_index++;
1075 if (modep->svid_index < modep->nsvids) {
1076 svid = modep->svids[modep->svid_index];
1077 response[0] = VDO(svid, 1, CMD_DISCOVER_MODES);
1078 rlen = 1;
1079 } else {
1080 /* enter alternate mode if/when implemented */
1081 }
1082 break;
1083 case CMD_ENTER_MODE:
1084 break;
1085 default:
1086 break;
1087 }
1088 break;
1089 default:
1090 break;
1091 }
1092
1093 return rlen;
1094}
1095
1096static void tcpm_handle_vdm_request(struct tcpm_port *port,
1097 const __le32 *payload, int cnt)
1098{
1099 int rlen = 0;
1100 u32 response[8] = { };
1101 u32 p0 = le32_to_cpu(payload[0]);
1102
1103 if (port->vdm_state == VDM_STATE_BUSY) {
1104 /* If UFP responded busy retry after timeout */
1105 if (PD_VDO_CMDT(p0) == CMDT_RSP_BUSY) {
1106 port->vdm_state = VDM_STATE_WAIT_RSP_BUSY;
1107 port->vdo_retry = (p0 & ~VDO_CMDT_MASK) |
1108 CMDT_INIT;
1109 mod_delayed_work(port->wq, &port->vdm_state_machine,
1110 msecs_to_jiffies(PD_T_VDM_BUSY));
1111 return;
1112 }
1113 port->vdm_state = VDM_STATE_DONE;
1114 }
1115
1116 if (PD_VDO_SVDM(p0))
1117 rlen = tcpm_pd_svdm(port, payload, cnt, response);
1118
1119 if (rlen > 0) {
1120 tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
1121 mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
1122 }
1123}
1124
1125static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
1126 const u32 *data, int count)
1127{
1128 u32 header;
1129
1130 if (WARN_ON(count > VDO_MAX_SIZE - 1))
1131 count = VDO_MAX_SIZE - 1;
1132
1133 /* set VDM header with VID & CMD */
1134 header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
1135 1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION), cmd);
1136 tcpm_queue_vdm(port, header, data, count);
1137
1138 mod_delayed_work(port->wq, &port->vdm_state_machine, 0);
1139}
1140
1141static unsigned int vdm_ready_timeout(u32 vdm_hdr)
1142{
1143 unsigned int timeout;
1144 int cmd = PD_VDO_CMD(vdm_hdr);
1145
1146 /* it's not a structured VDM command */
1147 if (!PD_VDO_SVDM(vdm_hdr))
1148 return PD_T_VDM_UNSTRUCTURED;
1149
1150 switch (PD_VDO_CMDT(vdm_hdr)) {
1151 case CMDT_INIT:
1152 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1153 timeout = PD_T_VDM_WAIT_MODE_E;
1154 else
1155 timeout = PD_T_VDM_SNDR_RSP;
1156 break;
1157 default:
1158 if (cmd == CMD_ENTER_MODE || cmd == CMD_EXIT_MODE)
1159 timeout = PD_T_VDM_E_MODE;
1160 else
1161 timeout = PD_T_VDM_RCVR_RSP;
1162 break;
1163 }
1164 return timeout;
1165}
1166
1167static void vdm_run_state_machine(struct tcpm_port *port)
1168{
1169 struct pd_message msg;
1170 int i, res;
1171
1172 switch (port->vdm_state) {
1173 case VDM_STATE_READY:
1174 /* Only transmit VDM if attached */
1175 if (!port->attached) {
1176 port->vdm_state = VDM_STATE_ERR_BUSY;
1177 break;
1178 }
1179
1180 /*
1181 * If there's traffic or we're not in a PD ready state, don't send
1182 * a VDM.
1183 */
1184 if (port->state != SRC_READY && port->state != SNK_READY)
1185 break;
1186
1187 /* Prepare and send VDM */
1188 memset(&msg, 0, sizeof(msg));
1189 msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
1190 port->pwr_role,
1191 port->data_role,
1192 port->message_id, port->vdo_count);
1193 for (i = 0; i < port->vdo_count; i++)
1194 msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
1195 res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1196 if (res < 0) {
1197 port->vdm_state = VDM_STATE_ERR_SEND;
1198 } else {
1199 unsigned long timeout;
1200
1201 port->vdm_retries = 0;
1202 port->vdm_state = VDM_STATE_BUSY;
1203 timeout = vdm_ready_timeout(port->vdo_data[0]);
1204 mod_delayed_work(port->wq, &port->vdm_state_machine,
1205 timeout);
1206 }
1207 break;
1208 case VDM_STATE_WAIT_RSP_BUSY:
1209 port->vdo_data[0] = port->vdo_retry;
1210 port->vdo_count = 1;
1211 port->vdm_state = VDM_STATE_READY;
1212 break;
1213 case VDM_STATE_BUSY:
1214 port->vdm_state = VDM_STATE_ERR_TMOUT;
1215 break;
1216 case VDM_STATE_ERR_SEND:
1217 /*
1218 * A partner which does not support USB PD will not reply,
1219 * so this is not a fatal error. At the same time, some
1220 * devices may not return GoodCRC under some circumstances,
1221 * so we need to retry.
1222 */
1223 if (port->vdm_retries < 3) {
1224 tcpm_log(port, "VDM Tx error, retry");
1225 port->vdm_retries++;
1226 port->vdm_state = VDM_STATE_READY;
1227 }
1228 break;
1229 default:
1230 break;
1231 }
1232}
1233
1234static void vdm_state_machine_work(struct work_struct *work)
1235{
1236 struct tcpm_port *port = container_of(work, struct tcpm_port,
1237 vdm_state_machine.work);
1238 enum vdm_states prev_state;
1239
1240 mutex_lock(&port->lock);
1241
1242 /*
1243 * Continue running as long as the port is not busy and there was
1244 * a state change.
1245 */
1246 do {
1247 prev_state = port->vdm_state;
1248 vdm_run_state_machine(port);
1249 } while (port->vdm_state != prev_state &&
1250 port->vdm_state != VDM_STATE_BUSY);
1251
1252 mutex_unlock(&port->lock);
1253}
1254
1255enum pdo_err {
1256 PDO_NO_ERR,
1257 PDO_ERR_NO_VSAFE5V,
1258 PDO_ERR_VSAFE5V_NOT_FIRST,
1259 PDO_ERR_PDO_TYPE_NOT_IN_ORDER,
1260 PDO_ERR_FIXED_NOT_SORTED,
1261 PDO_ERR_VARIABLE_BATT_NOT_SORTED,
1262 PDO_ERR_DUPE_PDO,
1263};
1264
1265static const char * const pdo_err_msg[] = {
1266 [PDO_ERR_NO_VSAFE5V] =
1267 " err: source/sink caps should at least have vSafe5V",
1268 [PDO_ERR_VSAFE5V_NOT_FIRST] =
1269 " err: vSafe5V Fixed Supply Object Shall always be the first object",
1270 [PDO_ERR_PDO_TYPE_NOT_IN_ORDER] =
1271 " err: PDOs should be in the following order: Fixed; Battery; Variable",
1272 [PDO_ERR_FIXED_NOT_SORTED] =
1273 " err: Fixed supply pdos should be in increasing order of their fixed voltage",
1274 [PDO_ERR_VARIABLE_BATT_NOT_SORTED] =
1275 " err: Variable/Battery supply pdos should be in increasing order of their minimum voltage",
1276 [PDO_ERR_DUPE_PDO] =
1277 " err: Variable/Batt supply pdos cannot have same min/max voltage",
1278};
1279
1280static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
1281 unsigned int nr_pdo)
1282{
1283 unsigned int i;
1284
1285 /* Should at least contain vSafe5v */
1286 if (nr_pdo < 1)
1287 return PDO_ERR_NO_VSAFE5V;
1288
1289 /* The vSafe5V Fixed Supply Object Shall always be the first object */
1290 if (pdo_type(pdo[0]) != PDO_TYPE_FIXED ||
1291 pdo_fixed_voltage(pdo[0]) != VSAFE5V)
1292 return PDO_ERR_VSAFE5V_NOT_FIRST;
1293
1294 for (i = 1; i < nr_pdo; i++) {
1295 if (pdo_type(pdo[i]) < pdo_type(pdo[i - 1])) {
1296 return PDO_ERR_PDO_TYPE_NOT_IN_ORDER;
1297 } else if (pdo_type(pdo[i]) == pdo_type(pdo[i - 1])) {
1298 enum pd_pdo_type type = pdo_type(pdo[i]);
1299
1300 switch (type) {
1301 /*
1302 * The remaining Fixed Supply Objects, if
1303 * present, shall be sent in voltage order;
1304 * lowest to highest.
1305 */
1306 case PDO_TYPE_FIXED:
1307 if (pdo_fixed_voltage(pdo[i]) <=
1308 pdo_fixed_voltage(pdo[i - 1]))
1309 return PDO_ERR_FIXED_NOT_SORTED;
1310 break;
1311 /*
1312 * The Battery Supply Objects and Variable
1313 * supply, if present shall be sent in Minimum
1314 * Voltage order; lowest to highest.
1315 */
1316 case PDO_TYPE_VAR:
1317 case PDO_TYPE_BATT:
1318 if (pdo_min_voltage(pdo[i]) <
1319 pdo_min_voltage(pdo[i - 1]))
1320 return PDO_ERR_VARIABLE_BATT_NOT_SORTED;
1321 else if ((pdo_min_voltage(pdo[i]) ==
1322 pdo_min_voltage(pdo[i - 1])) &&
1323 (pdo_max_voltage(pdo[i]) ==
1324 pdo_max_voltage(pdo[i - 1])))
1325 return PDO_ERR_DUPE_PDO;
1326 break;
1327 default:
1328 tcpm_log_force(port, " Unknown pdo type");
1329 }
1330 }
1331 }
1332
1333 return PDO_NO_ERR;
1334}
1335
1336static int tcpm_validate_caps(struct tcpm_port *port, const u32 *pdo,
1337 unsigned int nr_pdo)
1338{
1339 enum pdo_err err_index = tcpm_caps_err(port, pdo, nr_pdo);
1340
1341 if (err_index != PDO_NO_ERR) {
1342 tcpm_log_force(port, " %s", pdo_err_msg[err_index]);
1343 return -EINVAL;
1344 }
1345
1346 return 0;
1347}
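/*
 * A capability list that passes these checks therefore looks like, for
 * example:
 *	PDO_FIXED(5000, 3000, ...)	vSafe5V first (mandatory)
 *	PDO_FIXED(9000, 2000, ...)	further fixed PDOs, rising voltage
 *	PDO_BATT(...)/PDO_VAR(...)	then battery/variable supplies in
 *					rising minimum-voltage order, with
 *					no duplicate entries
 */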
1348
1349/*
1350 * PD (data, control) command handling functions
1351 */
1352static void tcpm_pd_data_request(struct tcpm_port *port,
1353 const struct pd_message *msg)
1354{
1355 enum pd_data_msg_type type = pd_header_type_le(msg->header);
1356 unsigned int cnt = pd_header_cnt_le(msg->header);
1357 unsigned int i;
1358
1359 switch (type) {
1360 case PD_DATA_SOURCE_CAP:
1361 if (port->pwr_role != TYPEC_SINK)
1362 break;
1363
1364 for (i = 0; i < cnt; i++)
1365 port->source_caps[i] = le32_to_cpu(msg->payload[i]);
1366
1367 port->nr_source_caps = cnt;
1368
1369 tcpm_log_source_caps(port);
1370
1371 tcpm_validate_caps(port, port->source_caps,
1372 port->nr_source_caps);
1373
1374 /*
1375 * This message may be received even if VBUS is not
1376 * present. This is quite unexpected; see USB PD
1377 * specification, sections 8.3.3.6.3.1 and 8.3.3.6.3.2.
1378 * However, at the same time, we must be ready to
1379 * receive this message and respond to it 15ms after
1380 * receiving PS_RDY during power swap operations, no matter
1381 * if VBUS is available or not (USB PD specification,
1382 * section 6.5.9.2).
1383 * So we need to accept the message either way,
1384 * but be prepared to keep waiting for VBUS after it was
1385 * handled.
1386 */
1387 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
1388 break;
1389 case PD_DATA_REQUEST:
1390 if (port->pwr_role != TYPEC_SOURCE ||
1391 cnt != 1) {
1392 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1393 break;
1394 }
1395 port->sink_request = le32_to_cpu(msg->payload[0]);
1396 tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
1397 break;
1398 case PD_DATA_SINK_CAP:
1399 /* We don't do anything with this at the moment... */
1400 for (i = 0; i < cnt; i++)
1401 port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
1402 port->nr_sink_caps = cnt;
1403 break;
1404 case PD_DATA_VENDOR_DEF:
1405 tcpm_handle_vdm_request(port, msg->payload, cnt);
1406 break;
1407 case PD_DATA_BIST:
1408 if (port->state == SRC_READY || port->state == SNK_READY) {
1409 port->bist_request = le32_to_cpu(msg->payload[0]);
1410 tcpm_set_state(port, BIST_RX, 0);
1411 }
1412 break;
1413 default:
1414 tcpm_log(port, "Unhandled data message type %#x", type);
1415 break;
1416 }
1417}
1418
1419static void tcpm_pd_ctrl_request(struct tcpm_port *port,
1420 const struct pd_message *msg)
1421{
1422 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
1423 enum tcpm_state next_state;
1424
1425 switch (type) {
1426 case PD_CTRL_GOOD_CRC:
1427 case PD_CTRL_PING:
1428 break;
1429 case PD_CTRL_GET_SOURCE_CAP:
1430 switch (port->state) {
1431 case SRC_READY:
1432 case SNK_READY:
1433 tcpm_queue_message(port, PD_MSG_DATA_SOURCE_CAP);
1434 break;
1435 default:
1436 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1437 break;
1438 }
1439 break;
1440 case PD_CTRL_GET_SINK_CAP:
1441 switch (port->state) {
1442 case SRC_READY:
1443 case SNK_READY:
1444 tcpm_queue_message(port, PD_MSG_DATA_SINK_CAP);
1445 break;
1446 default:
1447 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1448 break;
1449 }
1450 break;
1451 case PD_CTRL_GOTO_MIN:
1452 break;
1453 case PD_CTRL_PS_RDY:
1454 switch (port->state) {
1455 case SNK_TRANSITION_SINK:
1456 if (port->vbus_present) {
1457 tcpm_set_current_limit(port,
1458 port->current_limit,
1459 port->supply_voltage);
1460 port->explicit_contract = true;
1461 tcpm_set_state(port, SNK_READY, 0);
1462 } else {
1463 /*
1464 * Seen after power swap. Keep waiting for VBUS
1465 * in a transitional state.
1466 */
1467 tcpm_set_state(port,
1468 SNK_TRANSITION_SINK_VBUS, 0);
1469 }
1470 break;
1471 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
1472 tcpm_set_state(port, PR_SWAP_SRC_SNK_SINK_ON, 0);
1473 break;
1474 case PR_SWAP_SNK_SRC_SINK_OFF:
1475 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON, 0);
1476 break;
1477 case VCONN_SWAP_WAIT_FOR_VCONN:
1478 tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0);
1479 break;
1480 default:
1481 break;
1482 }
1483 break;
1484 case PD_CTRL_REJECT:
1485 case PD_CTRL_WAIT:
1486 switch (port->state) {
1487 case SNK_NEGOTIATE_CAPABILITIES:
1488 /* USB PD specification, Figure 8-43 */
1489 if (port->explicit_contract)
1490 next_state = SNK_READY;
1491 else
1492 next_state = SNK_WAIT_CAPABILITIES;
1493 tcpm_set_state(port, next_state, 0);
1494 break;
1495 case DR_SWAP_SEND:
1496 port->swap_status = (type == PD_CTRL_WAIT ?
1497 -EAGAIN : -EOPNOTSUPP);
1498 tcpm_set_state(port, DR_SWAP_CANCEL, 0);
1499 break;
1500 case PR_SWAP_SEND:
1501 port->swap_status = (type == PD_CTRL_WAIT ?
1502 -EAGAIN : -EOPNOTSUPP);
1503 tcpm_set_state(port, PR_SWAP_CANCEL, 0);
1504 break;
1505 case VCONN_SWAP_SEND:
1506 port->swap_status = (type == PD_CTRL_WAIT ?
1507 -EAGAIN : -EOPNOTSUPP);
1508 tcpm_set_state(port, VCONN_SWAP_CANCEL, 0);
1509 break;
1510 default:
1511 break;
1512 }
1513 break;
1514 case PD_CTRL_ACCEPT:
1515 switch (port->state) {
1516 case SNK_NEGOTIATE_CAPABILITIES:
1517 tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
1518 break;
1519 case SOFT_RESET_SEND:
1520 port->message_id = 0;
1521 port->rx_msgid = -1;
1522 if (port->pwr_role == TYPEC_SOURCE)
1523 next_state = SRC_SEND_CAPABILITIES;
1524 else
1525 next_state = SNK_WAIT_CAPABILITIES;
1526 tcpm_set_state(port, next_state, 0);
1527 break;
1528 case DR_SWAP_SEND:
1529 tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
1530 break;
1531 case PR_SWAP_SEND:
1532 tcpm_set_state(port, PR_SWAP_START, 0);
1533 break;
1534 case VCONN_SWAP_SEND:
1535 tcpm_set_state(port, VCONN_SWAP_START, 0);
1536 break;
1537 default:
1538 break;
1539 }
1540 break;
1541 case PD_CTRL_SOFT_RESET:
1542 tcpm_set_state(port, SOFT_RESET, 0);
1543 break;
1544 case PD_CTRL_DR_SWAP:
1545 if (port->port_type != TYPEC_PORT_DRP) {
1546 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1547 break;
1548 }
1549 /*
1550 * XXX
1551 * 6.3.9: If an alternate mode is active, a request to swap
1552 * alternate modes shall trigger a port reset.
1553 */
1554 switch (port->state) {
1555 case SRC_READY:
1556 case SNK_READY:
1557 tcpm_set_state(port, DR_SWAP_ACCEPT, 0);
1558 break;
1559 default:
1560 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1561 break;
1562 }
1563 break;
1564 case PD_CTRL_PR_SWAP:
1565 if (port->port_type != TYPEC_PORT_DRP) {
1566 tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
1567 break;
1568 }
1569 switch (port->state) {
1570 case SRC_READY:
1571 case SNK_READY:
1572 tcpm_set_state(port, PR_SWAP_ACCEPT, 0);
1573 break;
1574 default:
1575 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1576 break;
1577 }
1578 break;
1579 case PD_CTRL_VCONN_SWAP:
1580 switch (port->state) {
1581 case SRC_READY:
1582 case SNK_READY:
1583 tcpm_set_state(port, VCONN_SWAP_ACCEPT, 0);
1584 break;
1585 default:
1586 tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
1587 break;
1588 }
1589 break;
1590 default:
1591 tcpm_log(port, "Unhandled ctrl message type %#x", type);
1592 break;
1593 }
1594}
1595
1596static void tcpm_pd_rx_handler(struct work_struct *work)
1597{
1598 struct pd_rx_event *event = container_of(work,
1599 struct pd_rx_event, work);
1600 const struct pd_message *msg = &event->msg;
1601 unsigned int cnt = pd_header_cnt_le(msg->header);
1602 struct tcpm_port *port = event->port;
1603
1604 mutex_lock(&port->lock);
1605
1606 tcpm_log(port, "PD RX, header: %#x [%d]", le16_to_cpu(msg->header),
1607 port->attached);
1608
1609 if (port->attached) {
1610 enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
1611 unsigned int msgid = pd_header_msgid_le(msg->header);
1612
1613 /*
1614 * USB PD standard, 6.6.1.2:
1615 * "... if MessageID value in a received Message is the
1616 * same as the stored value, the receiver shall return a
1617 * GoodCRC Message with that MessageID value and drop
1618 * the Message (this is a retry of an already received
1619 * Message). Note: this shall not apply to the Soft_Reset
1620 * Message which always has a MessageID value of zero."
1621 */
1622 if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
1623 goto done;
1624 port->rx_msgid = msgid;
1625
1626 /*
1627 * If both ends believe they are DFP/host, we have a data role
1628 * mismatch.
1629 */
1630 if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
1631 (port->data_role == TYPEC_HOST)) {
1632 tcpm_log(port,
1633 "Data role mismatch, initiating error recovery");
1634 tcpm_set_state(port, ERROR_RECOVERY, 0);
1635 } else {
1636 if (cnt)
1637 tcpm_pd_data_request(port, msg);
1638 else
1639 tcpm_pd_ctrl_request(port, msg);
1640 }
1641 }
1642
1643done:
1644 mutex_unlock(&port->lock);
1645 kfree(event);
1646}
1647
1648void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
1649{
1650 struct pd_rx_event *event;
1651
1652 event = kzalloc(sizeof(*event), GFP_ATOMIC);
1653 if (!event)
1654 return;
1655
1656 INIT_WORK(&event->work, tcpm_pd_rx_handler);
1657 event->port = port;
1658 memcpy(&event->msg, msg, sizeof(*msg));
1659 queue_work(port->wq, &event->work);
1660}
1661EXPORT_SYMBOL_GPL(tcpm_pd_receive);
1662
1663static int tcpm_pd_send_control(struct tcpm_port *port,
1664 enum pd_ctrl_msg_type type)
1665{
1666 struct pd_message msg;
1667
1668 memset(&msg, 0, sizeof(msg));
1669 msg.header = PD_HEADER_LE(type, port->pwr_role,
1670 port->data_role,
1671 port->message_id, 0);
1672
1673 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1674}
1675
1676/*
1677 * Send queued message without affecting state.
1678 * Return true if state machine should go back to sleep,
1679 * false otherwise.
1680 */
1681static bool tcpm_send_queued_message(struct tcpm_port *port)
1682{
1683 enum pd_msg_request queued_message;
1684
1685 do {
1686 queued_message = port->queued_message;
1687 port->queued_message = PD_MSG_NONE;
1688
1689 switch (queued_message) {
1690 case PD_MSG_CTRL_WAIT:
1691 tcpm_pd_send_control(port, PD_CTRL_WAIT);
1692 break;
1693 case PD_MSG_CTRL_REJECT:
1694 tcpm_pd_send_control(port, PD_CTRL_REJECT);
1695 break;
1696 case PD_MSG_DATA_SINK_CAP:
1697 tcpm_pd_send_sink_caps(port);
1698 break;
1699 case PD_MSG_DATA_SOURCE_CAP:
1700 tcpm_pd_send_source_caps(port);
1701 break;
1702 default:
1703 break;
1704 }
1705 } while (port->queued_message != PD_MSG_NONE);
1706
1707 if (port->delayed_state != INVALID_STATE) {
1708 if (time_is_after_jiffies(port->delayed_runtime)) {
1709 mod_delayed_work(port->wq, &port->state_machine,
1710 port->delayed_runtime - jiffies);
1711 return true;
1712 }
1713 port->delayed_state = INVALID_STATE;
1714 }
1715 return false;
1716}
1717
1718static int tcpm_pd_check_request(struct tcpm_port *port)
1719{
1720 u32 pdo, rdo = port->sink_request;
1721 unsigned int max, op, pdo_max, index;
1722 enum pd_pdo_type type;
1723
1724 index = rdo_index(rdo);
1725 if (!index || index > port->nr_src_pdo)
1726 return -EINVAL;
1727
1728 pdo = port->src_pdo[index - 1];
1729 type = pdo_type(pdo);
1730 switch (type) {
1731 case PDO_TYPE_FIXED:
1732 case PDO_TYPE_VAR:
1733 max = rdo_max_current(rdo);
1734 op = rdo_op_current(rdo);
1735 pdo_max = pdo_max_current(pdo);
1736
1737 if (op > pdo_max)
1738 return -EINVAL;
1739 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
1740 return -EINVAL;
1741
1742 if (type == PDO_TYPE_FIXED)
1743 tcpm_log(port,
1744 "Requested %u mV, %u mA for %u / %u mA",
1745 pdo_fixed_voltage(pdo), pdo_max, op, max);
1746 else
1747 tcpm_log(port,
1748 "Requested %u -> %u mV, %u mA for %u / %u mA",
1749 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
1750 pdo_max, op, max);
1751 break;
1752 case PDO_TYPE_BATT:
1753 max = rdo_max_power(rdo);
1754 op = rdo_op_power(rdo);
1755 pdo_max = pdo_max_power(pdo);
1756
1757 if (op > pdo_max)
1758 return -EINVAL;
1759 if (max > pdo_max && !(rdo & RDO_CAP_MISMATCH))
1760 return -EINVAL;
1761 tcpm_log(port,
1762 "Requested %u -> %u mV, %u mW for %u / %u mW",
1763 pdo_min_voltage(pdo), pdo_max_voltage(pdo),
1764 pdo_max, op, max);
1765 break;
1766 default:
1767 return -EINVAL;
1768 }
1769
1770 port->op_vsafe5v = index == 1;
1771
1772 return 0;
1773}
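/*
 * For example, a request for object position 2 referencing a 9 V / 2 A
 * fixed PDO is rejected if its operating current exceeds 2 A, or if
 * its maximum current exceeds 2 A without the capability-mismatch flag
 * set; op_vsafe5v records whether the accepted contract sits on the
 * vSafe5V object (position 1).
 */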
1774
1775static int tcpm_pd_select_pdo(struct tcpm_port *port)
1776{
1777 unsigned int i, max_mw = 0, max_mv = 0;
1778 int ret = -EINVAL;
1779
1780 /*
1781 * Select the source PDO providing the most power while staying within
1782 * the board's voltage limits, preferring higher voltages at equal power.
1783 */
1784 for (i = 0; i < port->nr_source_caps; i++) {
1785 u32 pdo = port->source_caps[i];
1786 enum pd_pdo_type type = pdo_type(pdo);
1787 unsigned int mv, ma, mw;
1788
1789 if (type == PDO_TYPE_FIXED)
1790 mv = pdo_fixed_voltage(pdo);
1791 else
1792 mv = pdo_min_voltage(pdo);
1793
1794 if (type == PDO_TYPE_BATT) {
1795 mw = pdo_max_power(pdo);
1796 } else {
1797 ma = min(pdo_max_current(pdo),
1798 port->max_snk_ma);
1799 mw = ma * mv / 1000;
1800 }
1801
1802 /* Prefer higher voltages if available */
1803 if ((mw > max_mw || (mw == max_mw && mv > max_mv)) &&
1804 mv <= port->max_snk_mv) {
1805 ret = i;
1806 max_mw = mw;
1807 max_mv = mv;
1808 }
1809 }
1810
1811 return ret;
1812}
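/*
 * Worked example: with max_snk_mv = 9000 and max_snk_ma = 3000, source
 * capabilities of 5 V/3 A, 9 V/2 A and 15 V/3 A evaluate to 15 W, 18 W
 * and "over the voltage limit" respectively, so the 9 V/2 A PDO wins;
 * at equal power the higher voltage is preferred.
 */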
1813
1814static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
1815{
1816 unsigned int mv, ma, mw, flags;
1817 unsigned int max_ma, max_mw;
1818 enum pd_pdo_type type;
1819 int index;
1820 u32 pdo;
1821
1822 index = tcpm_pd_select_pdo(port);
1823 if (index < 0)
1824 return -EINVAL;
1825 pdo = port->source_caps[index];
1826 type = pdo_type(pdo);
1827
1828 if (type == PDO_TYPE_FIXED)
1829 mv = pdo_fixed_voltage(pdo);
1830 else
1831 mv = pdo_min_voltage(pdo);
1832
1833 /* Select maximum available current within the board's power limit */
1834 if (type == PDO_TYPE_BATT) {
1835 mw = pdo_max_power(pdo);
1836 ma = 1000 * min(mw, port->max_snk_mw) / mv;
1837 } else {
1838 ma = min(pdo_max_current(pdo),
1839 1000 * port->max_snk_mw / mv);
1840 }
1841 ma = min(ma, port->max_snk_ma);
1842
1843 flags = RDO_USB_COMM | RDO_NO_SUSPEND;
1844
1845 /* Set mismatch bit if offered power is less than operating power */
1846 mw = ma * mv / 1000;
1847 max_ma = ma;
1848 max_mw = mw;
1849 if (mw < port->operating_snk_mw) {
1850 flags |= RDO_CAP_MISMATCH;
1851 max_mw = port->operating_snk_mw;
1852 max_ma = max_mw * 1000 / mv;
1853 }
1854
1855 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
1856 port->cc_req, port->cc1, port->cc2, port->vbus_source,
1857 port->vconn_role == TYPEC_SOURCE ? "source" : "sink",
1858 port->polarity);
1859
1860 if (type == PDO_TYPE_BATT) {
1861 *rdo = RDO_BATT(index + 1, mw, max_mw, flags);
1862
1863 tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
1864 index, mv, mw,
1865 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
1866 } else {
1867 *rdo = RDO_FIXED(index + 1, ma, max_ma, flags);
1868
1869 tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
1870 index, mv, ma,
1871 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
1872 }
1873
1874 port->current_limit = ma;
1875 port->supply_voltage = mv;
1876
1877 return 0;
1878}
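/*
 * Worked example: a 9 V / 2 A fixed source PDO with max_snk_ma = 3000,
 * max_snk_mw = 18000 and operating_snk_mw = 10000 gives ma = 2000 and
 * mw = 18000, so the request is RDO_FIXED(index + 1, 2000, 2000,
 * RDO_USB_COMM | RDO_NO_SUSPEND) with no mismatch.  If operating_snk_mw
 * were 20000 instead, RDO_CAP_MISMATCH would be set and the maximum
 * current raised to 20000 * 1000 / 9000 = 2222 mA.
 */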
1879
1880static int tcpm_pd_send_request(struct tcpm_port *port)
1881{
1882 struct pd_message msg;
1883 int ret;
1884 u32 rdo;
1885
1886 ret = tcpm_pd_build_request(port, &rdo);
1887 if (ret < 0)
1888 return ret;
1889
1890 memset(&msg, 0, sizeof(msg));
1891 msg.header = PD_HEADER_LE(PD_DATA_REQUEST,
1892 port->pwr_role,
1893 port->data_role,
1894 port->message_id, 1);
1895 msg.payload[0] = cpu_to_le32(rdo);
1896
1897 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
1898}
1899
1900static int tcpm_set_vbus(struct tcpm_port *port, bool enable)
1901{
1902 int ret;
1903
1904 if (enable && port->vbus_charge)
1905 return -EINVAL;
1906
1907 tcpm_log(port, "vbus:=%d charge=%d", enable, port->vbus_charge);
1908
1909 ret = port->tcpc->set_vbus(port->tcpc, enable, port->vbus_charge);
1910 if (ret < 0)
1911 return ret;
1912
1913 port->vbus_source = enable;
1914 return 0;
1915}
1916
1917static int tcpm_set_charge(struct tcpm_port *port, bool charge)
1918{
1919 int ret;
1920
1921 if (charge && port->vbus_source)
1922 return -EINVAL;
1923
1924 if (charge != port->vbus_charge) {
1925 tcpm_log(port, "vbus=%d charge:=%d", port->vbus_source, charge);
1926 ret = port->tcpc->set_vbus(port->tcpc, port->vbus_source,
1927 charge);
1928 if (ret < 0)
1929 return ret;
1930 }
1931 port->vbus_charge = charge;
1932 return 0;
1933}
1934
1935static bool tcpm_start_drp_toggling(struct tcpm_port *port)
1936{
1937 int ret;
1938
1939 if (port->tcpc->start_drp_toggling &&
1940 port->port_type == TYPEC_PORT_DRP) {
1941 tcpm_log_force(port, "Start DRP toggling");
1942 ret = port->tcpc->start_drp_toggling(port->tcpc,
1943 tcpm_rp_cc(port));
1944 if (!ret)
1945 return true;
1946 }
1947
1948 return false;
1949}
1950
1951static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
1952{
1953 tcpm_log(port, "cc:=%d", cc);
1954 port->cc_req = cc;
1955 port->tcpc->set_cc(port->tcpc, cc);
1956}
1957
1958static int tcpm_init_vbus(struct tcpm_port *port)
1959{
1960 int ret;
1961
1962 ret = port->tcpc->set_vbus(port->tcpc, false, false);
1963 port->vbus_source = false;
1964 port->vbus_charge = false;
1965 return ret;
1966}
1967
1968static int tcpm_init_vconn(struct tcpm_port *port)
1969{
1970 int ret;
1971
1972 ret = port->tcpc->set_vconn(port->tcpc, false);
1973 port->vconn_role = TYPEC_SINK;
1974 return ret;
1975}
1976
1977static void tcpm_typec_connect(struct tcpm_port *port)
1978{
1979 if (!port->connected) {
1980 /* Make sure we don't report stale identity information */
1981 memset(&port->partner_ident, 0, sizeof(port->partner_ident));
1982 port->partner_desc.usb_pd = port->pd_capable;
1983 if (tcpm_port_is_debug(port))
1984 port->partner_desc.accessory = TYPEC_ACCESSORY_DEBUG;
1985 else if (tcpm_port_is_audio(port))
1986 port->partner_desc.accessory = TYPEC_ACCESSORY_AUDIO;
1987 else
1988 port->partner_desc.accessory = TYPEC_ACCESSORY_NONE;
1989 port->partner = typec_register_partner(port->typec_port,
1990 &port->partner_desc);
1991 port->connected = true;
1992 }
1993}
1994
1995static int tcpm_src_attach(struct tcpm_port *port)
1996{
1997 enum typec_cc_polarity polarity =
1998 port->cc2 == TYPEC_CC_RD ? TYPEC_POLARITY_CC2
1999 : TYPEC_POLARITY_CC1;
2000 int ret;
2001
2002 if (port->attached)
2003 return 0;
2004
2005 ret = tcpm_set_polarity(port, polarity);
2006 if (ret < 0)
2007 return ret;
2008
2009 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
2010 if (ret < 0)
2011 return ret;
2012
2013 ret = port->tcpc->set_pd_rx(port->tcpc, true);
2014 if (ret < 0)
2015 goto out_disable_mux;
2016
2017 /*
2018 * USB Type-C specification, version 1.2,
2019 * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
2020 * Enable VCONN only if the non-RD port is set to RA.
2021 */
2022 if ((polarity == TYPEC_POLARITY_CC1 && port->cc2 == TYPEC_CC_RA) ||
2023 (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
2024 ret = tcpm_set_vconn(port, true);
2025 if (ret < 0)
2026 goto out_disable_pd;
2027 }
2028
2029 ret = tcpm_set_vbus(port, true);
2030 if (ret < 0)
2031 goto out_disable_vconn;
2032
2033 port->pd_capable = false;
2034
2035 port->partner = NULL;
2036
2037 port->attached = true;
2038 port->send_discover = true;
2039
2040 return 0;
2041
2042out_disable_vconn:
2043 tcpm_set_vconn(port, false);
2044out_disable_pd:
2045 port->tcpc->set_pd_rx(port->tcpc, false);
2046out_disable_mux:
2047 tcpm_mux_set(port, TYPEC_MUX_NONE, USB_ROLE_NONE,
2048 TYPEC_ORIENTATION_NONE);
2049 return ret;
2050}
2051
2052static void tcpm_typec_disconnect(struct tcpm_port *port)
2053{
2054 if (port->connected) {
2055 typec_unregister_partner(port->partner);
2056 port->partner = NULL;
2057 port->connected = false;
2058 }
2059}
2060
2061static void tcpm_unregister_altmodes(struct tcpm_port *port)
2062{
2063 struct pd_mode_data *modep = &port->mode_data;
2064 int i;
2065
2066 for (i = 0; i < modep->altmodes; i++) {
2067 typec_unregister_altmode(port->partner_altmode[i]);
2068 port->partner_altmode[i] = NULL;
2069 }
2070
2071 memset(modep, 0, sizeof(*modep));
2072}
2073
2074static void tcpm_reset_port(struct tcpm_port *port)
2075{
2076 tcpm_unregister_altmodes(port);
2077 tcpm_typec_disconnect(port);
2078 port->attached = false;
2079 port->pd_capable = false;
2080
2081 /*
2082 * First Rx ID should be 0; set this to a sentinel of -1 so that
2083 * tcpm_pd_rx_handler() can check whether we have seen it before.
2084 */
2085 port->rx_msgid = -1;
2086
2087 port->tcpc->set_pd_rx(port->tcpc, false);
2088 tcpm_init_vbus(port); /* also disables charging */
2089 tcpm_init_vconn(port);
2090 tcpm_set_current_limit(port, 0, 0);
2091 tcpm_set_polarity(port, TYPEC_POLARITY_CC1);
2092 tcpm_mux_set(port, TYPEC_MUX_NONE, USB_ROLE_NONE,
2093 TYPEC_ORIENTATION_NONE);
2094 tcpm_set_attached_state(port, false);
2095 port->try_src_count = 0;
2096 port->try_snk_count = 0;
2097}
2098
2099static void tcpm_detach(struct tcpm_port *port)
2100{
2101 if (!port->attached)
2102 return;
2103
2104 if (tcpm_port_is_disconnected(port))
2105 port->hard_reset_count = 0;
2106
2107 tcpm_reset_port(port);
2108}
2109
2110static void tcpm_src_detach(struct tcpm_port *port)
2111{
2112 tcpm_detach(port);
2113}
2114
2115static int tcpm_snk_attach(struct tcpm_port *port)
2116{
2117 int ret;
2118
2119 if (port->attached)
2120 return 0;
2121
2122 ret = tcpm_set_polarity(port, port->cc2 != TYPEC_CC_OPEN ?
2123 TYPEC_POLARITY_CC2 : TYPEC_POLARITY_CC1);
2124 if (ret < 0)
2125 return ret;
2126
2127 ret = tcpm_set_roles(port, true, TYPEC_SINK, TYPEC_DEVICE);
2128 if (ret < 0)
2129 return ret;
2130
2131 port->pd_capable = false;
2132
2133 port->partner = NULL;
2134
2135 port->attached = true;
2136 port->send_discover = true;
2137
2138 return 0;
2139}
2140
2141static void tcpm_snk_detach(struct tcpm_port *port)
2142{
2143 tcpm_detach(port);
2144}
2145
2146static int tcpm_acc_attach(struct tcpm_port *port)
2147{
2148 int ret;
2149
2150 if (port->attached)
2151 return 0;
2152
2153 ret = tcpm_set_roles(port, true, TYPEC_SOURCE, TYPEC_HOST);
2154 if (ret < 0)
2155 return ret;
2156
2157 port->partner = NULL;
2158
2159 tcpm_typec_connect(port);
2160
2161 port->attached = true;
2162
2163 return 0;
2164}
2165
2166static void tcpm_acc_detach(struct tcpm_port *port)
2167{
2168 tcpm_detach(port);
2169}
2170
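/*
 * Select the state to fall back to when a PD exchange fails or times out:
 * send another Hard Reset while attempts remain, escalate to error
 * recovery if a PD-capable partner keeps failing, and otherwise drop back
 * to the appropriate unattached state (or to SNK_READY if we were merely
 * waiting for source capabilities).
 */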
2171static inline enum tcpm_state hard_reset_state(struct tcpm_port *port)
2172{
2173 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
2174 return HARD_RESET_SEND;
2175 if (port->pd_capable)
2176 return ERROR_RECOVERY;
2177 if (port->pwr_role == TYPEC_SOURCE)
2178 return SRC_UNATTACHED;
2179 if (port->state == SNK_WAIT_CAPABILITIES)
2180 return SNK_READY;
2181 return SNK_UNATTACHED;
2182}
2183
2184static inline enum tcpm_state ready_state(struct tcpm_port *port)
2185{
2186 if (port->pwr_role == TYPEC_SOURCE)
2187 return SRC_READY;
2188 else
2189 return SNK_READY;
2190}
2191
2192static inline enum tcpm_state unattached_state(struct tcpm_port *port)
2193{
2194 if (port->port_type == TYPEC_PORT_DRP) {
2195 if (port->pwr_role == TYPEC_SOURCE)
2196 return SRC_UNATTACHED;
2197 else
2198 return SNK_UNATTACHED;
2199 } else if (port->port_type == TYPEC_PORT_SRC) {
2200 return SRC_UNATTACHED;
2201 }
2202
2203 return SNK_UNATTACHED;
2204}
2205
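/*
 * Send a Discover Identity VDM once per connection, but only while acting
 * as the data host on a PD-capable link. port->send_discover is cleared
 * here and re-armed on attach, on hard reset and after a data role swap.
 */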
2206static void tcpm_check_send_discover(struct tcpm_port *port)
2207{
2208 if (port->data_role == TYPEC_HOST && port->send_discover &&
2209 port->pd_capable) {
2210 tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
2211 port->send_discover = false;
2212 }
2213}
2214
2215static void tcpm_swap_complete(struct tcpm_port *port, int result)
2216{
2217 if (port->swap_pending) {
2218 port->swap_status = result;
2219 port->swap_pending = false;
2220 port->non_pd_role_swap = false;
2221 complete(&port->swap_complete);
2222 }
2223}
2224
2225static enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
2226{
2227 switch (cc) {
2228 case TYPEC_CC_RP_1_5:
2229 return TYPEC_PWR_MODE_1_5A;
2230 case TYPEC_CC_RP_3_0:
2231 return TYPEC_PWR_MODE_3_0A;
2232 case TYPEC_CC_RP_DEF:
2233 default:
2234 return TYPEC_PWR_MODE_USB;
2235 }
2236}
2237
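/*
 * Execute the entry actions for the state the port has just entered.
 * Further transitions are requested with tcpm_set_state(); a zero delay
 * takes effect as soon as control returns to the state machine worker,
 * while a non-zero delay (in milliseconds) schedules the change and can
 * be superseded by events arriving in the meantime.
 */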
2238static void run_state_machine(struct tcpm_port *port)
2239{
2240 int ret;
2241 enum typec_pwr_opmode opmode;
2242 unsigned int msecs;
2243
2244 port->enter_state = port->state;
2245 switch (port->state) {
2246 case DRP_TOGGLING:
2247 break;
2248 /* SRC states */
2249 case SRC_UNATTACHED:
2250 if (!port->non_pd_role_swap)
2251 tcpm_swap_complete(port, -ENOTCONN);
2252 tcpm_src_detach(port);
2253 if (tcpm_start_drp_toggling(port)) {
2254 tcpm_set_state(port, DRP_TOGGLING, 0);
2255 break;
2256 }
2257 tcpm_set_cc(port, tcpm_rp_cc(port));
2258 if (port->port_type == TYPEC_PORT_DRP)
2259 tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
2260 break;
2261 case SRC_ATTACH_WAIT:
2262 if (tcpm_port_is_debug(port))
2263 tcpm_set_state(port, DEBUG_ACC_ATTACHED,
2264 PD_T_CC_DEBOUNCE);
2265 else if (tcpm_port_is_audio(port))
2266 tcpm_set_state(port, AUDIO_ACC_ATTACHED,
2267 PD_T_CC_DEBOUNCE);
2268 else if (tcpm_port_is_source(port))
2269 tcpm_set_state(port,
2270 tcpm_try_snk(port) ? SNK_TRY
2271 : SRC_ATTACHED,
2272 PD_T_CC_DEBOUNCE);
2273 break;
2274
2275 case SNK_TRY:
2276 port->try_snk_count++;
2277 /*
2278 * Requirements:
2279 * - Do not drive vconn or vbus
2280 * - Terminate CC pins (both) to Rd
2281 * Action:
2282 * - Wait for tDRPTry (PD_T_DRP_TRY).
2283 * Until then, ignore any state changes.
2284 */
2285 tcpm_set_cc(port, TYPEC_CC_RD);
2286 tcpm_set_state(port, SNK_TRY_WAIT, PD_T_DRP_TRY);
2287 break;
2288 case SNK_TRY_WAIT:
2289 if (tcpm_port_is_sink(port)) {
2290 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE, 0);
2291 } else {
2292 tcpm_set_state(port, SRC_TRYWAIT, 0);
2293 port->max_wait = 0;
2294 }
2295 break;
2296 case SNK_TRY_WAIT_DEBOUNCE:
2297 tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS,
2298 PD_T_PD_DEBOUNCE);
2299 break;
2300 case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
2301 if (port->vbus_present && tcpm_port_is_sink(port)) {
2302 tcpm_set_state(port, SNK_ATTACHED, 0);
2303 } else {
2304 tcpm_set_state(port, SRC_TRYWAIT, 0);
2305 port->max_wait = 0;
2306 }
2307 break;
2308 case SRC_TRYWAIT:
2309 tcpm_set_cc(port, tcpm_rp_cc(port));
2310 if (port->max_wait == 0) {
2311 port->max_wait = jiffies +
2312 msecs_to_jiffies(PD_T_DRP_TRY);
2313 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
2314 PD_T_DRP_TRY);
2315 } else {
2316 if (time_is_after_jiffies(port->max_wait))
2317 tcpm_set_state(port, SRC_TRYWAIT_UNATTACHED,
2318 jiffies_to_msecs(port->max_wait -
2319 jiffies));
2320 else
2321 tcpm_set_state(port, SNK_UNATTACHED, 0);
2322 }
2323 break;
2324 case SRC_TRYWAIT_DEBOUNCE:
2325 tcpm_set_state(port, SRC_ATTACHED, PD_T_CC_DEBOUNCE);
2326 break;
2327 case SRC_TRYWAIT_UNATTACHED:
2328 tcpm_set_state(port, SNK_UNATTACHED, 0);
2329 break;
2330
2331 case SRC_ATTACHED:
2332 ret = tcpm_src_attach(port);
2333 tcpm_set_state(port, SRC_UNATTACHED,
2334 ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
2335 break;
2336 case SRC_STARTUP:
2337 opmode = tcpm_get_pwr_opmode(tcpm_rp_cc(port));
2338 typec_set_pwr_opmode(port->typec_port, opmode);
2339 port->pwr_opmode = TYPEC_PWR_MODE_USB;
2340 port->caps_count = 0;
2341 port->message_id = 0;
2342 port->rx_msgid = -1;
2343 port->explicit_contract = false;
2344 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2345 break;
2346 case SRC_SEND_CAPABILITIES:
2347 port->caps_count++;
2348 if (port->caps_count > PD_N_CAPS_COUNT) {
2349 tcpm_set_state(port, SRC_READY, 0);
2350 break;
2351 }
2352 ret = tcpm_pd_send_source_caps(port);
2353 if (ret < 0) {
2354 tcpm_set_state(port, SRC_SEND_CAPABILITIES,
2355 PD_T_SEND_SOURCE_CAP);
2356 } else {
2357 /*
2358 * Per standard, we should clear the reset counter here.
2359 * However, that can result in state machine hang-ups.
2360 * Reset it only in READY state to improve stability.
2361 */
2362 /* port->hard_reset_count = 0; */
2363 port->caps_count = 0;
2364 port->pd_capable = true;
2365 tcpm_set_state_cond(port, hard_reset_state(port),
2366 PD_T_SEND_SOURCE_CAP);
2367 }
2368 break;
2369 case SRC_NEGOTIATE_CAPABILITIES:
2370 ret = tcpm_pd_check_request(port);
2371 if (ret < 0) {
2372 tcpm_pd_send_control(port, PD_CTRL_REJECT);
2373 if (!port->explicit_contract) {
2374 tcpm_set_state(port,
2375 SRC_WAIT_NEW_CAPABILITIES, 0);
2376 } else {
2377 tcpm_set_state(port, SRC_READY, 0);
2378 }
2379 } else {
2380 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2381 tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
2382 PD_T_SRC_TRANSITION);
2383 }
2384 break;
2385 case SRC_TRANSITION_SUPPLY:
2386 /* XXX: regulator_set_voltage(vbus, ...) */
2387 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2388 port->explicit_contract = true;
2389 typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
2390 port->pwr_opmode = TYPEC_PWR_MODE_PD;
2391 tcpm_set_state_cond(port, SRC_READY, 0);
2392 break;
2393 case SRC_READY:
		port->hard_reset_count = 0;
2397 port->try_src_count = 0;
2398
2399 tcpm_swap_complete(port, 0);
2400 tcpm_typec_connect(port);
2401 tcpm_check_send_discover(port);
2402 /*
2403 * 6.3.5
2404 * Sending ping messages is not necessary if
2405 * - the source operates at vSafe5V
2406 * or
2407 * - The system is not operating in PD mode
2408 * or
2409 * - Both partners are connected using a Type-C connector
2410 *
		 * The local port is a Type-C connector, so there is no actual
		 * need to send Ping messages; the spec also does not clearly
		 * say whether PD is possible at all when a Type-C port is
		 * connected to a Type-A/B plug.
2414 */
2415 break;
2416 case SRC_WAIT_NEW_CAPABILITIES:
2417 /* Nothing to do... */
2418 break;
2419
2420 /* SNK states */
2421 case SNK_UNATTACHED:
2422 if (!port->non_pd_role_swap)
2423 tcpm_swap_complete(port, -ENOTCONN);
2424 tcpm_snk_detach(port);
2425 if (tcpm_start_drp_toggling(port)) {
2426 tcpm_set_state(port, DRP_TOGGLING, 0);
2427 break;
2428 }
2429 tcpm_set_cc(port, TYPEC_CC_RD);
2430 if (port->port_type == TYPEC_PORT_DRP)
2431 tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
2432 break;
2433 case SNK_ATTACH_WAIT:
2434 if ((port->cc1 == TYPEC_CC_OPEN &&
2435 port->cc2 != TYPEC_CC_OPEN) ||
2436 (port->cc1 != TYPEC_CC_OPEN &&
2437 port->cc2 == TYPEC_CC_OPEN))
2438 tcpm_set_state(port, SNK_DEBOUNCED,
2439 PD_T_CC_DEBOUNCE);
2440 else if (tcpm_port_is_disconnected(port))
2441 tcpm_set_state(port, SNK_UNATTACHED,
2442 PD_T_PD_DEBOUNCE);
2443 break;
2444 case SNK_DEBOUNCED:
2445 if (tcpm_port_is_disconnected(port))
2446 tcpm_set_state(port, SNK_UNATTACHED,
2447 PD_T_PD_DEBOUNCE);
2448 else if (port->vbus_present)
2449 tcpm_set_state(port,
2450 tcpm_try_src(port) ? SRC_TRY
2451 : SNK_ATTACHED,
2452 0);
2453 else
2454 /* Wait for VBUS, but not forever */
2455 tcpm_set_state(port, PORT_RESET, PD_T_PS_SOURCE_ON);
2456 break;
2457
2458 case SRC_TRY:
2459 port->try_src_count++;
2460 tcpm_set_cc(port, tcpm_rp_cc(port));
2461 port->max_wait = 0;
2462 tcpm_set_state(port, SRC_TRY_WAIT, 0);
2463 break;
2464 case SRC_TRY_WAIT:
2465 if (port->max_wait == 0) {
2466 port->max_wait = jiffies +
2467 msecs_to_jiffies(PD_T_DRP_TRY);
2468 msecs = PD_T_DRP_TRY;
2469 } else {
2470 if (time_is_after_jiffies(port->max_wait))
2471 msecs = jiffies_to_msecs(port->max_wait -
2472 jiffies);
2473 else
2474 msecs = 0;
2475 }
2476 tcpm_set_state(port, SNK_TRYWAIT, msecs);
2477 break;
2478 case SRC_TRY_DEBOUNCE:
2479 tcpm_set_state(port, SRC_ATTACHED, PD_T_PD_DEBOUNCE);
2480 break;
2481 case SNK_TRYWAIT:
2482 tcpm_set_cc(port, TYPEC_CC_RD);
2483 tcpm_set_state(port, SNK_TRYWAIT_VBUS, PD_T_CC_DEBOUNCE);
2484 break;
2485 case SNK_TRYWAIT_VBUS:
2486 /*
		 * TCPM stays in this state indefinitely waiting for VBUS,
		 * as long as Rp does not disappear for longer than
		 * tPDDebounce.
2490 */
2491 if (port->vbus_present && tcpm_port_is_sink(port)) {
2492 tcpm_set_state(port, SNK_ATTACHED, 0);
2493 break;
2494 }
2495 if (!tcpm_port_is_sink(port))
2496 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
2497 break;
2498 case SNK_TRYWAIT_DEBOUNCE:
2499 tcpm_set_state(port, SNK_UNATTACHED, PD_T_PD_DEBOUNCE);
2500 break;
2501 case SNK_ATTACHED:
2502 ret = tcpm_snk_attach(port);
2503 if (ret < 0)
2504 tcpm_set_state(port, SNK_UNATTACHED, 0);
2505 else
2506 tcpm_set_state(port, SNK_STARTUP, 0);
2507 break;
2508 case SNK_STARTUP:
2509 opmode = tcpm_get_pwr_opmode(port->polarity ?
2510 port->cc2 : port->cc1);
2511 typec_set_pwr_opmode(port->typec_port, opmode);
2512 port->pwr_opmode = TYPEC_PWR_MODE_USB;
2513 port->message_id = 0;
2514 port->rx_msgid = -1;
2515 port->explicit_contract = false;
2516 tcpm_set_state(port, SNK_DISCOVERY, 0);
2517 break;
2518 case SNK_DISCOVERY:
2519 if (port->vbus_present) {
2520 tcpm_set_current_limit(port,
2521 tcpm_get_current_limit(port),
2522 5000);
2523 tcpm_set_charge(port, true);
2524 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2525 break;
2526 }
2527 /*
2528 * For DRP, timeouts differ. Also, handling is supposed to be
2529 * different and much more complex (dead battery detection;
2530 * see USB power delivery specification, section 8.3.3.6.1.5.1).
2531 */
2532 tcpm_set_state(port, hard_reset_state(port),
2533 port->port_type == TYPEC_PORT_DRP ?
2534 PD_T_DB_DETECT : PD_T_NO_RESPONSE);
2535 break;
2536 case SNK_DISCOVERY_DEBOUNCE:
2537 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE_DONE,
2538 PD_T_CC_DEBOUNCE);
2539 break;
2540 case SNK_DISCOVERY_DEBOUNCE_DONE:
2541 if (!tcpm_port_is_disconnected(port) &&
2542 tcpm_port_is_sink(port) &&
2543 time_is_after_jiffies(port->delayed_runtime)) {
2544 tcpm_set_state(port, SNK_DISCOVERY,
2545 port->delayed_runtime - jiffies);
2546 break;
2547 }
2548 tcpm_set_state(port, unattached_state(port), 0);
2549 break;
2550 case SNK_WAIT_CAPABILITIES:
2551 ret = port->tcpc->set_pd_rx(port->tcpc, true);
2552 if (ret < 0) {
2553 tcpm_set_state(port, SNK_READY, 0);
2554 break;
2555 }
2556 /*
2557 * If VBUS has never been low, and we time out waiting
2558 * for source cap, try a soft reset first, in case we
2559 * were already in a stable contract before this boot.
2560 * Do this only once.
2561 */
2562 if (port->vbus_never_low) {
2563 port->vbus_never_low = false;
2564 tcpm_set_state(port, SOFT_RESET_SEND,
2565 PD_T_SINK_WAIT_CAP);
2566 } else {
2567 tcpm_set_state(port, hard_reset_state(port),
2568 PD_T_SINK_WAIT_CAP);
2569 }
2570 break;
2571 case SNK_NEGOTIATE_CAPABILITIES:
2572 port->pd_capable = true;
2573 port->hard_reset_count = 0;
2574 ret = tcpm_pd_send_request(port);
2575 if (ret < 0) {
2576 /* Let the Source send capabilities again. */
2577 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2578 } else {
2579 tcpm_set_state_cond(port, hard_reset_state(port),
2580 PD_T_SENDER_RESPONSE);
2581 }
2582 break;
2583 case SNK_TRANSITION_SINK:
2584 case SNK_TRANSITION_SINK_VBUS:
2585 tcpm_set_state(port, hard_reset_state(port),
2586 PD_T_PS_TRANSITION);
2587 break;
2588 case SNK_READY:
2589 port->try_snk_count = 0;
2590 if (port->explicit_contract) {
2591 typec_set_pwr_opmode(port->typec_port,
2592 TYPEC_PWR_MODE_PD);
2593 port->pwr_opmode = TYPEC_PWR_MODE_PD;
2594 }
2595
2596 tcpm_swap_complete(port, 0);
2597 tcpm_typec_connect(port);
2598 tcpm_check_send_discover(port);
2599 break;
2600
2601 /* Accessory states */
2602 case ACC_UNATTACHED:
2603 tcpm_acc_detach(port);
2604 tcpm_set_state(port, SRC_UNATTACHED, 0);
2605 break;
2606 case DEBUG_ACC_ATTACHED:
2607 case AUDIO_ACC_ATTACHED:
2608 ret = tcpm_acc_attach(port);
2609 if (ret < 0)
2610 tcpm_set_state(port, ACC_UNATTACHED, 0);
2611 break;
2612 case AUDIO_ACC_DEBOUNCE:
2613 tcpm_set_state(port, ACC_UNATTACHED, PD_T_CC_DEBOUNCE);
2614 break;
2615
2616 /* Hard_Reset states */
2617 case HARD_RESET_SEND:
2618 tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
2619 tcpm_set_state(port, HARD_RESET_START, 0);
2620 break;
2621 case HARD_RESET_START:
2622 port->hard_reset_count++;
2623 port->tcpc->set_pd_rx(port->tcpc, false);
2624 tcpm_unregister_altmodes(port);
2625 port->send_discover = true;
2626 if (port->pwr_role == TYPEC_SOURCE)
2627 tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
2628 PD_T_PS_HARD_RESET);
2629 else
2630 tcpm_set_state(port, SNK_HARD_RESET_SINK_OFF, 0);
2631 break;
2632 case SRC_HARD_RESET_VBUS_OFF:
2633 tcpm_set_vconn(port, true);
2634 tcpm_set_vbus(port, false);
2635 tcpm_set_roles(port, false, TYPEC_SOURCE, TYPEC_HOST);
2636 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, PD_T_SRC_RECOVER);
2637 break;
2638 case SRC_HARD_RESET_VBUS_ON:
2639 tcpm_set_vbus(port, true);
2640 port->tcpc->set_pd_rx(port->tcpc, true);
2641 tcpm_set_attached_state(port, true);
2642 tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
2643 break;
2644 case SNK_HARD_RESET_SINK_OFF:
2645 tcpm_set_vconn(port, false);
2646 tcpm_set_charge(port, false);
2647 tcpm_set_roles(port, false, TYPEC_SINK, TYPEC_DEVICE);
2648 /*
2649 * VBUS may or may not toggle, depending on the adapter.
2650 * If it doesn't toggle, transition to SNK_HARD_RESET_SINK_ON
2651 * directly after timeout.
2652 */
2653 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
2654 break;
2655 case SNK_HARD_RESET_WAIT_VBUS:
2656 /* Assume we're disconnected if VBUS doesn't come back. */
2657 tcpm_set_state(port, SNK_UNATTACHED,
2658 PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
2659 break;
2660 case SNK_HARD_RESET_SINK_ON:
2661 /* Note: There is no guarantee that VBUS is on in this state */
2662 /*
2663 * XXX:
2664 * The specification suggests that dual mode ports in sink
2665 * mode should transition to state PE_SRC_Transition_to_default.
2666 * See USB power delivery specification chapter 8.3.3.6.1.3.
		 * This would mean that we have to:
2668 * - turn off VCONN, reset power supply
2669 * - request hardware reset
2670 * - turn on VCONN
2671 * - Transition to state PE_Src_Startup
2672 * SNK only ports shall transition to state Snk_Startup
2673 * (see chapter 8.3.3.3.8).
		 * Similarly, dual-mode ports in source mode should transition
2675 * to PE_SNK_Transition_to_default.
2676 */
2677 tcpm_set_attached_state(port, true);
2678 tcpm_set_state(port, SNK_STARTUP, 0);
2679 break;
2680
2681 /* Soft_Reset states */
2682 case SOFT_RESET:
2683 port->message_id = 0;
2684 port->rx_msgid = -1;
2685 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2686 if (port->pwr_role == TYPEC_SOURCE)
2687 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2688 else
2689 tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
2690 break;
2691 case SOFT_RESET_SEND:
2692 port->message_id = 0;
2693 port->rx_msgid = -1;
2694 if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
2695 tcpm_set_state_cond(port, hard_reset_state(port), 0);
2696 else
2697 tcpm_set_state_cond(port, hard_reset_state(port),
2698 PD_T_SENDER_RESPONSE);
2699 break;
2700
2701 /* DR_Swap states */
2702 case DR_SWAP_SEND:
2703 tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
2704 tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
2705 PD_T_SENDER_RESPONSE);
2706 break;
2707 case DR_SWAP_ACCEPT:
2708 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2709 tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
2710 break;
2711 case DR_SWAP_SEND_TIMEOUT:
2712 tcpm_swap_complete(port, -ETIMEDOUT);
2713 tcpm_set_state(port, ready_state(port), 0);
2714 break;
2715 case DR_SWAP_CHANGE_DR:
2716 if (port->data_role == TYPEC_HOST) {
2717 tcpm_unregister_altmodes(port);
2718 tcpm_set_roles(port, true, port->pwr_role,
2719 TYPEC_DEVICE);
2720 } else {
2721 tcpm_set_roles(port, true, port->pwr_role,
2722 TYPEC_HOST);
2723 port->send_discover = true;
2724 }
2725 tcpm_set_state(port, ready_state(port), 0);
2726 break;
2727
2728 /* PR_Swap states */
2729 case PR_SWAP_ACCEPT:
2730 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2731 tcpm_set_state(port, PR_SWAP_START, 0);
2732 break;
2733 case PR_SWAP_SEND:
2734 tcpm_pd_send_control(port, PD_CTRL_PR_SWAP);
2735 tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
2736 PD_T_SENDER_RESPONSE);
2737 break;
2738 case PR_SWAP_SEND_TIMEOUT:
2739 tcpm_swap_complete(port, -ETIMEDOUT);
2740 tcpm_set_state(port, ready_state(port), 0);
2741 break;
2742 case PR_SWAP_START:
2743 if (port->pwr_role == TYPEC_SOURCE)
2744 tcpm_set_state(port, PR_SWAP_SRC_SNK_TRANSITION_OFF,
2745 PD_T_SRC_TRANSITION);
2746 else
2747 tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
2748 break;
2749 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
2750 tcpm_set_vbus(port, false);
2751 port->explicit_contract = false;
		/* allow time for VBUS discharge, must be < tSrcSwapStdby */
2753 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF,
2754 PD_T_SRCSWAPSTDBY);
2755 break;
2756 case PR_SWAP_SRC_SNK_SOURCE_OFF:
2757 tcpm_set_cc(port, TYPEC_CC_RD);
2758 /* allow CC debounce */
2759 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED,
2760 PD_T_CC_DEBOUNCE);
2761 break;
2762 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
2763 /*
2764 * USB-PD standard, 6.2.1.4, Port Power Role:
2765 * "During the Power Role Swap Sequence, for the initial Source
2766 * Port, the Port Power Role field shall be set to Sink in the
2767 * PS_RDY Message indicating that the initial Source’s power
2768 * supply is turned off"
2769 */
2770 tcpm_set_pwr_role(port, TYPEC_SINK);
2771 if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
2772 tcpm_set_state(port, ERROR_RECOVERY, 0);
2773 break;
2774 }
2775 tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON);
2776 break;
2777 case PR_SWAP_SRC_SNK_SINK_ON:
2778 tcpm_set_state(port, SNK_STARTUP, 0);
2779 break;
2780 case PR_SWAP_SNK_SRC_SINK_OFF:
2781 tcpm_set_charge(port, false);
2782 tcpm_set_state(port, hard_reset_state(port),
2783 PD_T_PS_SOURCE_OFF);
2784 break;
2785 case PR_SWAP_SNK_SRC_SOURCE_ON:
2786 tcpm_set_cc(port, tcpm_rp_cc(port));
2787 tcpm_set_vbus(port, true);
2788 /*
		 * Allow time for VBUS to ramp up; must be < tNewSrc.
		 * This window also overlaps with the CC debounce period,
		 * so wait for the larger of the two, which is PD_T_NEWSRC.
2792 */
2793 tcpm_set_state(port, PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP,
2794 PD_T_NEWSRC);
2795 break;
2796 case PR_SWAP_SNK_SRC_SOURCE_ON_VBUS_RAMPED_UP:
2797 /*
2798 * USB PD standard, 6.2.1.4:
2799 * "Subsequent Messages initiated by the Policy Engine,
2800 * such as the PS_RDY Message sent to indicate that Vbus
2801 * is ready, will have the Port Power Role field set to
2802 * Source."
2803 */
2804 tcpm_set_pwr_role(port, TYPEC_SOURCE);
2805 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2806 tcpm_set_state(port, SRC_STARTUP, 0);
2807 break;
2808
2809 case VCONN_SWAP_ACCEPT:
2810 tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
2811 tcpm_set_state(port, VCONN_SWAP_START, 0);
2812 break;
2813 case VCONN_SWAP_SEND:
2814 tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP);
2815 tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
2816 PD_T_SENDER_RESPONSE);
2817 break;
2818 case VCONN_SWAP_SEND_TIMEOUT:
2819 tcpm_swap_complete(port, -ETIMEDOUT);
2820 tcpm_set_state(port, ready_state(port), 0);
2821 break;
2822 case VCONN_SWAP_START:
2823 if (port->vconn_role == TYPEC_SOURCE)
2824 tcpm_set_state(port, VCONN_SWAP_WAIT_FOR_VCONN, 0);
2825 else
2826 tcpm_set_state(port, VCONN_SWAP_TURN_ON_VCONN, 0);
2827 break;
2828 case VCONN_SWAP_WAIT_FOR_VCONN:
2829 tcpm_set_state(port, hard_reset_state(port),
2830 PD_T_VCONN_SOURCE_ON);
2831 break;
2832 case VCONN_SWAP_TURN_ON_VCONN:
2833 tcpm_set_vconn(port, true);
2834 tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
2835 tcpm_set_state(port, ready_state(port), 0);
2836 break;
2837 case VCONN_SWAP_TURN_OFF_VCONN:
2838 tcpm_set_vconn(port, false);
2839 tcpm_set_state(port, ready_state(port), 0);
2840 break;
2841
2842 case DR_SWAP_CANCEL:
2843 case PR_SWAP_CANCEL:
2844 case VCONN_SWAP_CANCEL:
2845 tcpm_swap_complete(port, port->swap_status);
2846 if (port->pwr_role == TYPEC_SOURCE)
2847 tcpm_set_state(port, SRC_READY, 0);
2848 else
2849 tcpm_set_state(port, SNK_READY, 0);
2850 break;
2851
2852 case BIST_RX:
2853 switch (BDO_MODE_MASK(port->bist_request)) {
2854 case BDO_MODE_CARRIER2:
2855 tcpm_pd_transmit(port, TCPC_TX_BIST_MODE_2, NULL);
2856 break;
2857 default:
2858 break;
2859 }
2860 /* Always switch to unattached state */
2861 tcpm_set_state(port, unattached_state(port), 0);
2862 break;
2863 case ERROR_RECOVERY:
2864 tcpm_swap_complete(port, -EPROTO);
2865 tcpm_set_state(port, PORT_RESET, 0);
2866 break;
2867 case PORT_RESET:
2868 tcpm_reset_port(port);
2869 tcpm_set_cc(port, TYPEC_CC_OPEN);
2870 tcpm_set_state(port, PORT_RESET_WAIT_OFF,
2871 PD_T_ERROR_RECOVERY);
2872 break;
2873 case PORT_RESET_WAIT_OFF:
2874 tcpm_set_state(port,
2875 tcpm_default_state(port),
2876 port->vbus_present ? PD_T_PS_SOURCE_OFF : 0);
2877 break;
2878 default:
2879 WARN(1, "Unexpected port state %d\n", port->state);
2880 break;
2881 }
2882}
2883
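/*
 * State machine worker: flush any queued message, apply an expired
 * delayed state change, then keep calling run_state_machine() until the
 * port settles in a state with no further immediate transition. All of
 * this runs under port->lock.
 */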
2884static void tcpm_state_machine_work(struct work_struct *work)
2885{
2886 struct tcpm_port *port = container_of(work, struct tcpm_port,
2887 state_machine.work);
2888 enum tcpm_state prev_state;
2889
2890 mutex_lock(&port->lock);
2891 port->state_machine_running = true;
2892
2893 if (port->queued_message && tcpm_send_queued_message(port))
2894 goto done;
2895
2896 /* If we were queued due to a delayed state change, update it now */
2897 if (port->delayed_state) {
2898 tcpm_log(port, "state change %s -> %s [delayed %ld ms]",
2899 tcpm_states[port->state],
2900 tcpm_states[port->delayed_state], port->delay_ms);
2901 port->prev_state = port->state;
2902 port->state = port->delayed_state;
2903 port->delayed_state = INVALID_STATE;
2904 }
2905
2906 /*
2907 * Continue running as long as we have (non-delayed) state changes
2908 * to make.
2909 */
2910 do {
2911 prev_state = port->state;
2912 run_state_machine(port);
2913 if (port->queued_message)
2914 tcpm_send_queued_message(port);
2915 } while (port->state != prev_state && !port->delayed_state);
2916
2917done:
2918 port->state_machine_running = false;
2919 mutex_unlock(&port->lock);
2920}
2921
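/*
 * Handle a CC status change reported by the low-level driver. The
 * reaction is entirely state dependent: the unattached and debounce
 * states (re)start their waits, the attached states tear the connection
 * down on disconnect, and the PR_SWAP states ignore the change since it
 * is expected there.
 */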
2922static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
2923 enum typec_cc_status cc2)
2924{
2925 enum typec_cc_status old_cc1, old_cc2;
2926 enum tcpm_state new_state;
2927
2928 old_cc1 = port->cc1;
2929 old_cc2 = port->cc2;
2930 port->cc1 = cc1;
2931 port->cc2 = cc2;
2932
2933 tcpm_log_force(port,
2934 "CC1: %u -> %u, CC2: %u -> %u [state %s, polarity %d, %s]",
2935 old_cc1, cc1, old_cc2, cc2, tcpm_states[port->state],
2936 port->polarity,
2937 tcpm_port_is_disconnected(port) ? "disconnected"
2938 : "connected");
2939
2940 switch (port->state) {
2941 case DRP_TOGGLING:
2942 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
2943 tcpm_port_is_source(port))
2944 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2945 else if (tcpm_port_is_sink(port))
2946 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
2947 break;
2948 case SRC_UNATTACHED:
2949 case ACC_UNATTACHED:
2950 if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
2951 tcpm_port_is_source(port))
2952 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2953 break;
2954 case SRC_ATTACH_WAIT:
2955 if (tcpm_port_is_disconnected(port) ||
2956 tcpm_port_is_audio_detached(port))
2957 tcpm_set_state(port, SRC_UNATTACHED, 0);
2958 else if (cc1 != old_cc1 || cc2 != old_cc2)
2959 tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
2960 break;
2961 case SRC_ATTACHED:
2962 case SRC_SEND_CAPABILITIES:
2963 case SRC_READY:
2964 if (tcpm_port_is_disconnected(port) ||
2965 !tcpm_port_is_source(port))
2966 tcpm_set_state(port, SRC_UNATTACHED, 0);
2967 break;
2968 case SNK_UNATTACHED:
2969 if (tcpm_port_is_sink(port))
2970 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
2971 break;
2972 case SNK_ATTACH_WAIT:
2973 if ((port->cc1 == TYPEC_CC_OPEN &&
2974 port->cc2 != TYPEC_CC_OPEN) ||
2975 (port->cc1 != TYPEC_CC_OPEN &&
2976 port->cc2 == TYPEC_CC_OPEN))
2977 new_state = SNK_DEBOUNCED;
2978 else if (tcpm_port_is_disconnected(port))
2979 new_state = SNK_UNATTACHED;
2980 else
2981 break;
2982 if (new_state != port->delayed_state)
2983 tcpm_set_state(port, SNK_ATTACH_WAIT, 0);
2984 break;
2985 case SNK_DEBOUNCED:
2986 if (tcpm_port_is_disconnected(port))
2987 new_state = SNK_UNATTACHED;
2988 else if (port->vbus_present)
2989 new_state = tcpm_try_src(port) ? SRC_TRY : SNK_ATTACHED;
2990 else
2991 new_state = SNK_UNATTACHED;
2992 if (new_state != port->delayed_state)
2993 tcpm_set_state(port, SNK_DEBOUNCED, 0);
2994 break;
2995 case SNK_READY:
2996 if (tcpm_port_is_disconnected(port))
2997 tcpm_set_state(port, unattached_state(port), 0);
2998 else if (!port->pd_capable &&
2999 (cc1 != old_cc1 || cc2 != old_cc2))
3000 tcpm_set_current_limit(port,
3001 tcpm_get_current_limit(port),
3002 5000);
3003 break;
3004
3005 case AUDIO_ACC_ATTACHED:
3006 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
3007 tcpm_set_state(port, AUDIO_ACC_DEBOUNCE, 0);
3008 break;
3009 case AUDIO_ACC_DEBOUNCE:
3010 if (tcpm_port_is_audio(port))
3011 tcpm_set_state(port, AUDIO_ACC_ATTACHED, 0);
3012 break;
3013
3014 case DEBUG_ACC_ATTACHED:
3015 if (cc1 == TYPEC_CC_OPEN || cc2 == TYPEC_CC_OPEN)
3016 tcpm_set_state(port, ACC_UNATTACHED, 0);
3017 break;
3018
3019 case SNK_TRY:
3020 /* Do nothing, waiting for timeout */
3021 break;
3022
3023 case SNK_DISCOVERY:
3024 /* CC line is unstable, wait for debounce */
3025 if (tcpm_port_is_disconnected(port))
3026 tcpm_set_state(port, SNK_DISCOVERY_DEBOUNCE, 0);
3027 break;
3028 case SNK_DISCOVERY_DEBOUNCE:
3029 break;
3030
3031 case SRC_TRYWAIT:
3032 /* Hand over to state machine if needed */
3033 if (!port->vbus_present && tcpm_port_is_source(port))
3034 tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
3035 break;
3036 case SRC_TRYWAIT_DEBOUNCE:
3037 if (port->vbus_present || !tcpm_port_is_source(port))
3038 tcpm_set_state(port, SRC_TRYWAIT, 0);
3039 break;
3040 case SNK_TRY_WAIT_DEBOUNCE:
3041 if (!tcpm_port_is_sink(port)) {
3042 port->max_wait = 0;
3043 tcpm_set_state(port, SRC_TRYWAIT, 0);
3044 }
3045 break;
3046 case SRC_TRY_WAIT:
3047 if (tcpm_port_is_source(port))
3048 tcpm_set_state(port, SRC_TRY_DEBOUNCE, 0);
3049 break;
3050 case SRC_TRY_DEBOUNCE:
3051 tcpm_set_state(port, SRC_TRY_WAIT, 0);
3052 break;
3053 case SNK_TRYWAIT_DEBOUNCE:
3054 if (tcpm_port_is_sink(port))
3055 tcpm_set_state(port, SNK_TRYWAIT_VBUS, 0);
3056 break;
3057 case SNK_TRYWAIT_VBUS:
3058 if (!tcpm_port_is_sink(port))
3059 tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
3060 break;
3061 case SNK_TRYWAIT:
3062 /* Do nothing, waiting for tCCDebounce */
3063 break;
3064 case PR_SWAP_SNK_SRC_SINK_OFF:
3065 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
3066 case PR_SWAP_SRC_SNK_SOURCE_OFF:
3067 case PR_SWAP_SRC_SNK_SOURCE_OFF_CC_DEBOUNCED:
3068 case PR_SWAP_SNK_SRC_SOURCE_ON:
3069 /*
3070 * CC state change is expected in PR_SWAP
3071 * Ignore it.
3072 */
3073 break;
3074
3075 default:
3076 if (tcpm_port_is_disconnected(port))
3077 tcpm_set_state(port, unattached_state(port), 0);
3078 break;
3079 }
3080}
3081
3082static void _tcpm_pd_vbus_on(struct tcpm_port *port)
3083{
3084 tcpm_log_force(port, "VBUS on");
3085 port->vbus_present = true;
3086 switch (port->state) {
3087 case SNK_TRANSITION_SINK_VBUS:
3088 port->explicit_contract = true;
3089 tcpm_set_state(port, SNK_READY, 0);
3090 break;
3091 case SNK_DISCOVERY:
3092 tcpm_set_state(port, SNK_DISCOVERY, 0);
3093 break;
3094
3095 case SNK_DEBOUNCED:
3096 tcpm_set_state(port, tcpm_try_src(port) ? SRC_TRY
3097 : SNK_ATTACHED,
3098 0);
3099 break;
3100 case SNK_HARD_RESET_WAIT_VBUS:
3101 tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, 0);
3102 break;
3103 case SRC_ATTACHED:
3104 tcpm_set_state(port, SRC_STARTUP, 0);
3105 break;
3106 case SRC_HARD_RESET_VBUS_ON:
3107 tcpm_set_state(port, SRC_STARTUP, 0);
3108 break;
3109
3110 case SNK_TRY:
3111 /* Do nothing, waiting for timeout */
3112 break;
3113 case SRC_TRYWAIT:
		/* Do nothing, waiting for Rd to be detected */
3115 break;
3116 case SRC_TRYWAIT_DEBOUNCE:
3117 tcpm_set_state(port, SRC_TRYWAIT, 0);
3118 break;
3119 case SNK_TRY_WAIT_DEBOUNCE:
		/* Do nothing, waiting for PD_DEBOUNCE to be done */
3121 break;
3122 case SNK_TRYWAIT:
3123 /* Do nothing, waiting for tCCDebounce */
3124 break;
3125 case SNK_TRYWAIT_VBUS:
3126 if (tcpm_port_is_sink(port))
3127 tcpm_set_state(port, SNK_ATTACHED, 0);
3128 break;
3129 case SNK_TRYWAIT_DEBOUNCE:
3130 /* Do nothing, waiting for Rp */
3131 break;
3132 case SRC_TRY_WAIT:
3133 case SRC_TRY_DEBOUNCE:
3134 /* Do nothing, waiting for sink detection */
3135 break;
3136 default:
3137 break;
3138 }
3139}
3140
3141static void _tcpm_pd_vbus_off(struct tcpm_port *port)
3142{
3143 tcpm_log_force(port, "VBUS off");
3144 port->vbus_present = false;
3145 port->vbus_never_low = false;
3146 switch (port->state) {
3147 case SNK_HARD_RESET_SINK_OFF:
3148 tcpm_set_state(port, SNK_HARD_RESET_WAIT_VBUS, 0);
3149 break;
3150 case SRC_HARD_RESET_VBUS_OFF:
3151 tcpm_set_state(port, SRC_HARD_RESET_VBUS_ON, 0);
3152 break;
3153 case HARD_RESET_SEND:
3154 break;
3155
3156 case SNK_TRY:
3157 /* Do nothing, waiting for timeout */
3158 break;
3159 case SRC_TRYWAIT:
3160 /* Hand over to state machine if needed */
3161 if (tcpm_port_is_source(port))
3162 tcpm_set_state(port, SRC_TRYWAIT_DEBOUNCE, 0);
3163 break;
3164 case SNK_TRY_WAIT_DEBOUNCE:
		/* Do nothing, waiting for PD_DEBOUNCE to be done */
3166 break;
3167 case SNK_TRYWAIT:
3168 case SNK_TRYWAIT_VBUS:
3169 case SNK_TRYWAIT_DEBOUNCE:
3170 break;
3171 case SNK_ATTACH_WAIT:
3172 tcpm_set_state(port, SNK_UNATTACHED, 0);
3173 break;
3174
3175 case SNK_NEGOTIATE_CAPABILITIES:
3176 break;
3177
3178 case PR_SWAP_SRC_SNK_TRANSITION_OFF:
3179 tcpm_set_state(port, PR_SWAP_SRC_SNK_SOURCE_OFF, 0);
3180 break;
3181
3182 case PR_SWAP_SNK_SRC_SINK_OFF:
3183 /* Do nothing, expected */
3184 break;
3185
3186 case PORT_RESET_WAIT_OFF:
3187 tcpm_set_state(port, tcpm_default_state(port), 0);
3188 break;
3189 case SRC_TRY_WAIT:
3190 case SRC_TRY_DEBOUNCE:
3191 /* Do nothing, waiting for sink detection */
3192 break;
3193 default:
3194 if (port->pwr_role == TYPEC_SINK &&
3195 port->attached)
3196 tcpm_set_state(port, SNK_UNATTACHED, 0);
3197 break;
3198 }
3199}
3200
3201static void _tcpm_pd_hard_reset(struct tcpm_port *port)
3202{
3203 tcpm_log_force(port, "Received hard reset");
3204 /*
3205 * If we keep receiving hard reset requests, executing the hard reset
3206 * must have failed. Revert to error recovery if that happens.
3207 */
3208 tcpm_set_state(port,
3209 port->hard_reset_count < PD_N_HARD_RESET_COUNT ?
3210 HARD_RESET_START : ERROR_RECOVERY,
3211 0);
3212}
3213
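/*
 * Deferred handler for the events signalled by tcpm_cc_change(),
 * tcpm_vbus_change() and tcpm_pd_hard_reset() below. Event bits are
 * latched under pd_event_lock and consumed here under port->lock; the
 * current CC and VBUS status is re-read from the TCPC rather than
 * passed along with the event.
 */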
3214static void tcpm_pd_event_handler(struct work_struct *work)
3215{
3216 struct tcpm_port *port = container_of(work, struct tcpm_port,
3217 event_work);
3218 u32 events;
3219
3220 mutex_lock(&port->lock);
3221
3222 spin_lock(&port->pd_event_lock);
3223 while (port->pd_events) {
3224 events = port->pd_events;
3225 port->pd_events = 0;
3226 spin_unlock(&port->pd_event_lock);
3227 if (events & TCPM_RESET_EVENT)
3228 _tcpm_pd_hard_reset(port);
3229 if (events & TCPM_VBUS_EVENT) {
3230 bool vbus;
3231
3232 vbus = port->tcpc->get_vbus(port->tcpc);
3233 if (vbus)
3234 _tcpm_pd_vbus_on(port);
3235 else
3236 _tcpm_pd_vbus_off(port);
3237 }
3238 if (events & TCPM_CC_EVENT) {
3239 enum typec_cc_status cc1, cc2;
3240
3241 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
3242 _tcpm_cc_change(port, cc1, cc2);
3243 }
3244 spin_lock(&port->pd_event_lock);
3245 }
3246 spin_unlock(&port->pd_event_lock);
3247 mutex_unlock(&port->lock);
3248}
3249
3250void tcpm_cc_change(struct tcpm_port *port)
3251{
3252 spin_lock(&port->pd_event_lock);
3253 port->pd_events |= TCPM_CC_EVENT;
3254 spin_unlock(&port->pd_event_lock);
3255 queue_work(port->wq, &port->event_work);
3256}
3257EXPORT_SYMBOL_GPL(tcpm_cc_change);
3258
3259void tcpm_vbus_change(struct tcpm_port *port)
3260{
3261 spin_lock(&port->pd_event_lock);
3262 port->pd_events |= TCPM_VBUS_EVENT;
3263 spin_unlock(&port->pd_event_lock);
3264 queue_work(port->wq, &port->event_work);
3265}
3266EXPORT_SYMBOL_GPL(tcpm_vbus_change);
3267
3268void tcpm_pd_hard_reset(struct tcpm_port *port)
3269{
3270 spin_lock(&port->pd_event_lock);
3271 port->pd_events = TCPM_RESET_EVENT;
3272 spin_unlock(&port->pd_event_lock);
3273 queue_work(port->wq, &port->event_work);
3274}
3275EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset);
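
/*
 * Illustrative sketch (not part of this driver) of how a low-level TCPC
 * driver might forward its interrupt status to the helpers above. The
 * foo_* names, status bits and register read are hypothetical; only the
 * tcpm_*() calls are real.
 *
 *	static irqreturn_t foo_tcpc_irq(int irq, void *dev_id)
 *	{
 *		struct foo_tcpc *chip = dev_id;
 *		u32 status = foo_read_status(chip);
 *
 *		if (status & FOO_CC_CHANGE)
 *			tcpm_cc_change(chip->tcpm_port);
 *		if (status & FOO_VBUS_CHANGE)
 *			tcpm_vbus_change(chip->tcpm_port);
 *		if (status & FOO_RX_HARD_RESET)
 *			tcpm_pd_hard_reset(chip->tcpm_port);
 *
 *		return IRQ_HANDLED;
 *	}
 */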
3276
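/*
 * typec class dr_set hook: request a data role swap. With a PD-capable
 * partner this sends DR_Swap and waits up to PD_ROLE_SWAP_TIMEOUT for
 * the state machine to complete port->swap_complete; with a non-PD
 * partner the port is reset instead, which can only succeed if a
 * suitable preferred role is configured.
 */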
3277static int tcpm_dr_set(const struct typec_capability *cap,
3278 enum typec_data_role data)
3279{
3280 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3281 int ret;
3282
3283 mutex_lock(&port->swap_lock);
3284 mutex_lock(&port->lock);
3285
3286 if (port->port_type != TYPEC_PORT_DRP) {
3287 ret = -EINVAL;
3288 goto port_unlock;
3289 }
3290 if (port->state != SRC_READY && port->state != SNK_READY) {
3291 ret = -EAGAIN;
3292 goto port_unlock;
3293 }
3294
3295 if (port->data_role == data) {
3296 ret = 0;
3297 goto port_unlock;
3298 }
3299
3300 /*
3301 * XXX
3302 * 6.3.9: If an alternate mode is active, a request to swap
3303 * alternate modes shall trigger a port reset.
3304 * Reject data role swap request in this case.
3305 */
3306
3307 if (!port->pd_capable) {
3308 /*
3309 * If the partner is not PD capable, reset the port to
3310 * trigger a role change. This can only work if a preferred
3311 * role is configured, and if it matches the requested role.
3312 */
3313 if (port->try_role == TYPEC_NO_PREFERRED_ROLE ||
3314 port->try_role == port->pwr_role) {
3315 ret = -EINVAL;
3316 goto port_unlock;
3317 }
3318 port->non_pd_role_swap = true;
3319 tcpm_set_state(port, PORT_RESET, 0);
3320 } else {
3321 tcpm_set_state(port, DR_SWAP_SEND, 0);
3322 }
3323
3324 port->swap_status = 0;
3325 port->swap_pending = true;
3326 reinit_completion(&port->swap_complete);
3327 mutex_unlock(&port->lock);
3328
3329 if (!wait_for_completion_timeout(&port->swap_complete,
3330 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
3331 ret = -ETIMEDOUT;
3332 else
3333 ret = port->swap_status;
3334
3335 port->non_pd_role_swap = false;
3336 goto swap_unlock;
3337
3338port_unlock:
3339 mutex_unlock(&port->lock);
3340swap_unlock:
3341 mutex_unlock(&port->swap_lock);
3342 return ret;
3343}
3344
3345static int tcpm_pr_set(const struct typec_capability *cap,
3346 enum typec_role role)
3347{
3348 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3349 int ret;
3350
3351 mutex_lock(&port->swap_lock);
3352 mutex_lock(&port->lock);
3353
3354 if (port->port_type != TYPEC_PORT_DRP) {
3355 ret = -EINVAL;
3356 goto port_unlock;
3357 }
3358 if (port->state != SRC_READY && port->state != SNK_READY) {
3359 ret = -EAGAIN;
3360 goto port_unlock;
3361 }
3362
3363 if (role == port->pwr_role) {
3364 ret = 0;
3365 goto port_unlock;
3366 }
3367
3368 port->swap_status = 0;
3369 port->swap_pending = true;
3370 reinit_completion(&port->swap_complete);
3371 tcpm_set_state(port, PR_SWAP_SEND, 0);
3372 mutex_unlock(&port->lock);
3373
3374 if (!wait_for_completion_timeout(&port->swap_complete,
3375 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
3376 ret = -ETIMEDOUT;
3377 else
3378 ret = port->swap_status;
3379
3380 goto swap_unlock;
3381
3382port_unlock:
3383 mutex_unlock(&port->lock);
3384swap_unlock:
3385 mutex_unlock(&port->swap_lock);
3386 return ret;
3387}
3388
3389static int tcpm_vconn_set(const struct typec_capability *cap,
3390 enum typec_role role)
3391{
3392 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3393 int ret;
3394
3395 mutex_lock(&port->swap_lock);
3396 mutex_lock(&port->lock);
3397
3398 if (port->state != SRC_READY && port->state != SNK_READY) {
3399 ret = -EAGAIN;
3400 goto port_unlock;
3401 }
3402
3403 if (role == port->vconn_role) {
3404 ret = 0;
3405 goto port_unlock;
3406 }
3407
3408 port->swap_status = 0;
3409 port->swap_pending = true;
3410 reinit_completion(&port->swap_complete);
3411 tcpm_set_state(port, VCONN_SWAP_SEND, 0);
3412 mutex_unlock(&port->lock);
3413
3414 if (!wait_for_completion_timeout(&port->swap_complete,
3415 msecs_to_jiffies(PD_ROLE_SWAP_TIMEOUT)))
3416 ret = -ETIMEDOUT;
3417 else
3418 ret = port->swap_status;
3419
3420 goto swap_unlock;
3421
3422port_unlock:
3423 mutex_unlock(&port->lock);
3424swap_unlock:
3425 mutex_unlock(&port->swap_lock);
3426 return ret;
3427}
3428
3429static int tcpm_try_role(const struct typec_capability *cap, int role)
3430{
3431 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3432 struct tcpc_dev *tcpc = port->tcpc;
3433 int ret = 0;
3434
3435 mutex_lock(&port->lock);
3436 if (tcpc->try_role)
3437 ret = tcpc->try_role(tcpc, role);
3438 if (!ret && !tcpc->config->try_role_hw)
3439 port->try_role = role;
3440 port->try_src_count = 0;
3441 port->try_snk_count = 0;
3442 mutex_unlock(&port->lock);
3443
3444 return ret;
3445}
3446
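/*
 * (Re)initialize the port: reset the TCPC and the local state, sample the
 * initial VBUS and CC status, and then force a PORT_RESET so that an
 * attached device sees a clean disconnect before detection starts over.
 */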
3447static void tcpm_init(struct tcpm_port *port)
3448{
3449 enum typec_cc_status cc1, cc2;
3450
3451 port->tcpc->init(port->tcpc);
3452
3453 tcpm_reset_port(port);
3454
3455 /*
3456 * XXX
3457 * Should possibly wait for VBUS to settle if it was enabled locally
3458 * since tcpm_reset_port() will disable VBUS.
3459 */
3460 port->vbus_present = port->tcpc->get_vbus(port->tcpc);
3461 if (port->vbus_present)
3462 port->vbus_never_low = true;
3463
3464 tcpm_set_state(port, tcpm_default_state(port), 0);
3465
3466 if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0)
3467 _tcpm_cc_change(port, cc1, cc2);
3468
3469 /*
3470 * Some adapters need a clean slate at startup, and won't recover
3471 * otherwise. So do not try to be fancy and force a clean disconnect.
3472 */
3473 tcpm_set_state(port, PORT_RESET, 0);
3474}
3475
3476static int tcpm_port_type_set(const struct typec_capability *cap,
3477 enum typec_port_type type)
3478{
3479 struct tcpm_port *port = typec_cap_to_tcpm(cap);
3480
3481 mutex_lock(&port->lock);
3482 if (type == port->port_type)
3483 goto port_unlock;
3484
3485 port->port_type = type;
3486
3487 if (!port->connected) {
3488 tcpm_set_state(port, PORT_RESET, 0);
3489 } else if (type == TYPEC_PORT_SNK) {
3490 if (!(port->pwr_role == TYPEC_SINK &&
3491 port->data_role == TYPEC_DEVICE))
3492 tcpm_set_state(port, PORT_RESET, 0);
3493 } else if (type == TYPEC_PORT_SRC) {
3494 if (!(port->pwr_role == TYPEC_SOURCE &&
3495 port->data_role == TYPEC_HOST))
3496 tcpm_set_state(port, PORT_RESET, 0);
3497 }
3498
3499port_unlock:
3500 mutex_unlock(&port->lock);
3501 return 0;
3502}
3503
3504void tcpm_tcpc_reset(struct tcpm_port *port)
3505{
3506 mutex_lock(&port->lock);
3507 /* XXX: Maintain PD connection if possible? */
3508 tcpm_init(port);
3509 mutex_unlock(&port->lock);
3510}
3511EXPORT_SYMBOL_GPL(tcpm_tcpc_reset);
3512
3513static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo,
3514 unsigned int nr_pdo)
3515{
3516 unsigned int i;
3517
3518 if (nr_pdo > PDO_MAX_OBJECTS)
3519 nr_pdo = PDO_MAX_OBJECTS;
3520
3521 for (i = 0; i < nr_pdo; i++)
3522 dest_pdo[i] = src_pdo[i];
3523
3524 return nr_pdo;
3525}
3526
3527static int tcpm_copy_vdos(u32 *dest_vdo, const u32 *src_vdo,
3528 unsigned int nr_vdo)
3529{
3530 unsigned int i;
3531
3532 if (nr_vdo > VDO_MAX_OBJECTS)
3533 nr_vdo = VDO_MAX_OBJECTS;
3534
3535 for (i = 0; i < nr_vdo; i++)
3536 dest_vdo[i] = src_vdo[i];
3537
3538 return nr_vdo;
3539}
3540
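/*
 * Replace the advertised source PDOs at runtime. While a source-side
 * contract is established or being negotiated the new capabilities are
 * sent out right away; in the unattached and wait states only the Rp
 * advertisement on CC is refreshed.
 *
 * Illustrative use from a platform driver (the PDO array is a
 * hypothetical example; PDO_FIXED() is provided by <linux/usb/pd.h>):
 *
 *	static const u32 limited_src_pdo[] = {
 *		PDO_FIXED(5000, 900, PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP),
 *	};
 *
 *	tcpm_update_source_capabilities(port, limited_src_pdo,
 *					ARRAY_SIZE(limited_src_pdo));
 */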
3541int tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo,
3542 unsigned int nr_pdo)
3543{
3544 if (tcpm_validate_caps(port, pdo, nr_pdo))
3545 return -EINVAL;
3546
3547 mutex_lock(&port->lock);
3548 port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, pdo, nr_pdo);
3549 switch (port->state) {
3550 case SRC_UNATTACHED:
3551 case SRC_ATTACH_WAIT:
3552 case SRC_TRYWAIT:
3553 tcpm_set_cc(port, tcpm_rp_cc(port));
3554 break;
3555 case SRC_SEND_CAPABILITIES:
3556 case SRC_NEGOTIATE_CAPABILITIES:
3557 case SRC_READY:
3558 case SRC_WAIT_NEW_CAPABILITIES:
3559 tcpm_set_cc(port, tcpm_rp_cc(port));
3560 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
3561 break;
3562 default:
3563 break;
3564 }
3565 mutex_unlock(&port->lock);
3566 return 0;
3567}
3568EXPORT_SYMBOL_GPL(tcpm_update_source_capabilities);
3569
3570int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
3571 unsigned int nr_pdo,
3572 unsigned int max_snk_mv,
3573 unsigned int max_snk_ma,
3574 unsigned int max_snk_mw,
3575 unsigned int operating_snk_mw)
3576{
3577 if (tcpm_validate_caps(port, pdo, nr_pdo))
3578 return -EINVAL;
3579
3580 mutex_lock(&port->lock);
3581 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, pdo, nr_pdo);
3582 port->max_snk_mv = max_snk_mv;
3583 port->max_snk_ma = max_snk_ma;
3584 port->max_snk_mw = max_snk_mw;
3585 port->operating_snk_mw = operating_snk_mw;
3586
3587 switch (port->state) {
3588 case SNK_NEGOTIATE_CAPABILITIES:
3589 case SNK_READY:
3590 case SNK_TRANSITION_SINK:
3591 case SNK_TRANSITION_SINK_VBUS:
3592 tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
3593 break;
3594 default:
3595 break;
3596 }
3597 mutex_unlock(&port->lock);
3598 return 0;
3599}
3600EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities);
3601
3602struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
3603{
3604 struct tcpm_port *port;
3605 int i, err;
3606
3607 if (!dev || !tcpc || !tcpc->config ||
3608 !tcpc->get_vbus || !tcpc->set_cc || !tcpc->get_cc ||
3609 !tcpc->set_polarity || !tcpc->set_vconn || !tcpc->set_vbus ||
3610 !tcpc->set_pd_rx || !tcpc->set_roles || !tcpc->pd_transmit)
3611 return ERR_PTR(-EINVAL);
3612
3613 port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
3614 if (!port)
3615 return ERR_PTR(-ENOMEM);
3616
3617 port->dev = dev;
3618 port->tcpc = tcpc;
3619
3620 mutex_init(&port->lock);
3621 mutex_init(&port->swap_lock);
3622
3623 port->wq = create_singlethread_workqueue(dev_name(dev));
3624 if (!port->wq)
3625 return ERR_PTR(-ENOMEM);
3626 INIT_DELAYED_WORK(&port->state_machine, tcpm_state_machine_work);
3627 INIT_DELAYED_WORK(&port->vdm_state_machine, vdm_state_machine_work);
3628 INIT_WORK(&port->event_work, tcpm_pd_event_handler);
3629
3630 spin_lock_init(&port->pd_event_lock);
3631
3632 init_completion(&port->tx_complete);
3633 init_completion(&port->swap_complete);
3634 tcpm_debugfs_init(port);
3635
3636 if (tcpm_validate_caps(port, tcpc->config->src_pdo,
3637 tcpc->config->nr_src_pdo) ||
3638 tcpm_validate_caps(port, tcpc->config->snk_pdo,
3639 tcpc->config->nr_snk_pdo)) {
3640 err = -EINVAL;
3641 goto out_destroy_wq;
3642 }
3643 port->nr_src_pdo = tcpm_copy_pdos(port->src_pdo, tcpc->config->src_pdo,
3644 tcpc->config->nr_src_pdo);
3645 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo,
3646 tcpc->config->nr_snk_pdo);
3647 port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo,
3648 tcpc->config->nr_snk_vdo);
3649
3650 port->max_snk_mv = tcpc->config->max_snk_mv;
3651 port->max_snk_ma = tcpc->config->max_snk_ma;
3652 port->max_snk_mw = tcpc->config->max_snk_mw;
3653 port->operating_snk_mw = tcpc->config->operating_snk_mw;
3654 if (!tcpc->config->try_role_hw)
3655 port->try_role = tcpc->config->default_role;
3656 else
3657 port->try_role = TYPEC_NO_PREFERRED_ROLE;
3658
3659 port->typec_caps.prefer_role = tcpc->config->default_role;
3660 port->typec_caps.type = tcpc->config->type;
3661 port->typec_caps.data = tcpc->config->data;
3662 port->typec_caps.revision = 0x0120; /* Type-C spec release 1.2 */
3663 port->typec_caps.pd_revision = 0x0200; /* USB-PD spec release 2.0 */
3664 port->typec_caps.dr_set = tcpm_dr_set;
3665 port->typec_caps.pr_set = tcpm_pr_set;
3666 port->typec_caps.vconn_set = tcpm_vconn_set;
3667 port->typec_caps.try_role = tcpm_try_role;
3668 port->typec_caps.port_type_set = tcpm_port_type_set;
3669
3670 port->partner_desc.identity = &port->partner_ident;
3671 port->port_type = tcpc->config->type;
3672
3673 port->role_sw = usb_role_switch_get(port->dev);
3674 if (IS_ERR(port->role_sw)) {
3675 err = PTR_ERR(port->role_sw);
3676 goto out_destroy_wq;
3677 }
3678
3679 port->typec_port = typec_register_port(port->dev, &port->typec_caps);
3680 if (IS_ERR(port->typec_port)) {
3681 err = PTR_ERR(port->typec_port);
3682 goto out_destroy_wq;
3683 }
3684
3685 if (tcpc->config->alt_modes) {
3686 const struct typec_altmode_desc *paltmode = tcpc->config->alt_modes;
3687
3688 i = 0;
3689 while (paltmode->svid && i < ARRAY_SIZE(port->port_altmode)) {
3690 struct typec_altmode *alt;
3691
3692 alt = typec_port_register_altmode(port->typec_port,
3693 paltmode);
3694 if (IS_ERR(alt)) {
3695 tcpm_log(port,
3696 "%s: failed to register port alternate mode 0x%x",
3697 dev_name(dev), paltmode->svid);
3698 break;
3699 }
3700 port->port_altmode[i] = alt;
3701 i++;
3702 paltmode++;
3703 }
3704 }
3705
3706 mutex_lock(&port->lock);
3707 tcpm_init(port);
3708 mutex_unlock(&port->lock);
3709
3710 tcpm_log(port, "%s: registered", dev_name(dev));
3711 return port;
3712
3713out_destroy_wq:
3714 usb_role_switch_put(port->role_sw);
3715 destroy_workqueue(port->wq);
3716 return ERR_PTR(err);
3717}
3718EXPORT_SYMBOL_GPL(tcpm_register_port);
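
/*
 * Illustrative sketch (not part of this driver) of how a low-level TCPC
 * driver registers a port. All foo_* names are hypothetical; struct
 * tcpc_dev is real and must provide at least the callbacks checked at
 * the top of tcpm_register_port().
 *
 *	static int foo_tcpc_probe(struct platform_device *pdev)
 *	{
 *		struct foo_tcpc *chip;
 *
 *		chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
 *		if (!chip)
 *			return -ENOMEM;
 *
 *		chip->tcpc.config = &foo_tcpc_config;
 *		chip->tcpc.init = foo_init;
 *		chip->tcpc.get_vbus = foo_get_vbus;
 *		chip->tcpc.get_cc = foo_get_cc;
 *		chip->tcpc.set_cc = foo_set_cc;
 *		chip->tcpc.set_polarity = foo_set_polarity;
 *		chip->tcpc.set_vconn = foo_set_vconn;
 *		chip->tcpc.set_vbus = foo_set_vbus;
 *		chip->tcpc.set_pd_rx = foo_set_pd_rx;
 *		chip->tcpc.set_roles = foo_set_roles;
 *		chip->tcpc.pd_transmit = foo_pd_transmit;
 *
 *		chip->tcpm_port = tcpm_register_port(&pdev->dev, &chip->tcpc);
 *		return PTR_ERR_OR_ZERO(chip->tcpm_port);
 *	}
 */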
3719
3720void tcpm_unregister_port(struct tcpm_port *port)
3721{
3722 int i;
3723
3724 tcpm_reset_port(port);
3725 for (i = 0; i < ARRAY_SIZE(port->port_altmode); i++)
3726 typec_unregister_altmode(port->port_altmode[i]);
3727 typec_unregister_port(port->typec_port);
3728 usb_role_switch_put(port->role_sw);
3729 tcpm_debugfs_exit(port);
3730 destroy_workqueue(port->wq);
3731}
3732EXPORT_SYMBOL_GPL(tcpm_unregister_port);
3733
3734MODULE_AUTHOR("Guenter Roeck <groeck@chromium.org>");
3735MODULE_DESCRIPTION("USB Type-C Port Manager");
3736MODULE_LICENSE("GPL");