// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt Time Management Unit (TMU) support
 *
 * Copyright (C) 2019, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *	    Rajmohan Mani <rajmohan.mani@intel.com>
 */

#include <linux/delay.h>

#include "tb.h"

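/*
 * Programs the frequency measurement window and averaging constants of
 * @sw for the requested rate. Index 0 of the arrays below corresponds
 * to normal mode and index 1 to HiFi mode.
 */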
static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
					 enum tb_switch_tmu_rate rate)
{
	u32 freq_meas_wind[2] = { 30, 800 };
	u32 avg_const[2] = { 4, 8 };
	u32 freq, avg, val;
	int ret;

	if (rate == TB_SWITCH_TMU_RATE_NORMAL) {
		freq = freq_meas_wind[0];
		avg = avg_const[0];
	} else if (rate == TB_SWITCH_TMU_RATE_HIFI) {
		freq = freq_meas_wind[1];
		avg = avg_const[1];
	} else {
		return 0;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK;
	val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK &
		~TMU_RTR_CS_15_DELAY_AVG_MASK &
		~TMU_RTR_CS_15_OFFSET_AVG_MASK &
		~TMU_RTR_CS_15_ERROR_AVG_MASK;
	val |=  FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->tmu.cap + TMU_RTR_CS_15, 1);
}

static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw)
{
	bool root_switch = !tb_route(sw);

	switch (sw->tmu.rate) {
	case TB_SWITCH_TMU_RATE_OFF:
		return "off";

	case TB_SWITCH_TMU_RATE_HIFI:
		/* Root switch does not have upstream directionality */
		if (root_switch)
			return "HiFi";
		if (sw->tmu.unidirectional)
			return "uni-directional, HiFi";
		return "bi-directional, HiFi";

	case TB_SWITCH_TMU_RATE_NORMAL:
		if (root_switch)
			return "normal";
		return "uni-directional, normal";

	default:
		return "unknown";
	}
}

static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw)
{
	int ret;
	u32 val;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return false;

	return !!(val & TMU_RTR_CS_0_UCAP);
}

static int tb_switch_tmu_rate_read(struct tb_switch *sw)
{
	int ret;
	u32 val;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_3, 1);
	if (ret)
		return ret;

	val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
	return val;
}

static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
{
	int ret;
	u32 val;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_3, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
	val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->tmu.cap + TMU_RTR_CS_3, 1);
}

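/*
 * Read-modify-write helper for TMU adapter configuration space: clears
 * the bits in @mask and sets the bits in @value at the given @offset.
 */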
static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
			     u32 value)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
	if (ret)
		return ret;

	data &= ~mask;
	data |= value;

	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_tmu + offset, 1);
}

static int tb_port_tmu_set_unidirectional(struct tb_port *port,
					  bool unidirectional)
{
	u32 val;

	if (!port->sw->tmu.has_ucap)
		return 0;

	val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
	return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
}

static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, false);
}

static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, true);
}

static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_3, 1);
	if (ret)
		return false;

	return val & TMU_ADP_CS_3_UDM;
}

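/*
 * TMU_ADP_CS_6_DTS acts as a "disable time sync" control bit: setting
 * it stops time synchronization on the adapter. This is why
 * tb_port_tmu_time_sync_disable() passes true and
 * tb_port_tmu_time_sync_enable() passes false below.
 */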
static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
{
	u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;

	return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
}

static int tb_port_tmu_time_sync_disable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, true);
}

static int tb_port_tmu_time_sync_enable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, false);
}

static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
{
	u32 val, offset, bit;
	int ret;

	if (tb_switch_is_usb4(sw)) {
		offset = sw->tmu.cap + TMU_RTR_CS_0;
		bit = TMU_RTR_CS_0_TD;
	} else {
		offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
		bit = TB_TIME_VSEC_3_CS_26_TD;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	if (set)
		val |= bit;
	else
		val &= ~bit;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
}

/**
 * tb_switch_tmu_init() - Initialize switch TMU structures
 * @sw: Switch to initialize
 *
 * This function must be called before other TMU related functions to
 * make sure the internal structures are filled in correctly. Does not
 * change any hardware configuration.
 */
int tb_switch_tmu_init(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (tb_switch_is_icm(sw))
		return 0;

	ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
	if (ret > 0)
		sw->tmu.cap = ret;

	tb_switch_for_each_port(sw, port) {
		int cap;

		cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
		if (cap > 0)
			port->cap_tmu = cap;
	}

	ret = tb_switch_tmu_rate_read(sw);
	if (ret < 0)
		return ret;

	sw->tmu.rate = ret;

	sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw);
	if (sw->tmu.has_ucap) {
		tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");

		if (tb_route(sw)) {
			struct tb_port *up = tb_upstream_port(sw);

			sw->tmu.unidirectional =
				tb_port_tmu_is_unidirectional(up);
		}
	} else {
		sw->tmu.unidirectional = false;
	}

	tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw));
	return 0;
}

/**
 * tb_switch_tmu_post_time() - Update switch local time
 * @sw: Switch whose time to update
 *
 * Updates the local time of @sw using the time posting procedure.
 */
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
	unsigned int post_time_high_offset, post_time_high = 0;
	unsigned int post_local_time_offset, post_time_offset;
	struct tb_switch *root_switch = sw->tb->root_switch;
	u64 hi, mid, lo, local_time, post_time;
	int i, ret, retries = 100;
	u32 gm_local_time[3];

	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_usb4(sw))
		return 0;

	/* Need to be able to read the grand master time */
	if (!root_switch->tmu.cap)
		return 0;

	ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
			 root_switch->tmu.cap + TMU_RTR_CS_1,
			 ARRAY_SIZE(gm_local_time));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
		tb_sw_dbg(root_switch, "local_time[%d]=0x%08x\n", i,
			  gm_local_time[i]);

	/*
	 * Convert to nanoseconds, dropping the fractional part: lo
	 * holds bits 0..15, mid bits 16..47 and hi bits 48..63 of the
	 * grand master local time.
	 */
	hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
	mid = gm_local_time[1];
	lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
		TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
	local_time = hi << 48 | mid << 16 | lo;

	/* Tell the switch that time sync is disrupted for a while */
	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
	post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
	post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;

	/*
	 * Write the Grandmaster time to the Post Local Time registers
	 * of the new switch.
	 */
	ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
			  post_local_time_offset, 2);
	if (ret)
		goto out;

	/*
	 * Have the new switch update its local time by:
	 * 1) writing 0x1 to the Post Time Low register and 0xffffffff
	 *    to the Post Time High register.
	 * 2) writing 0 to the Post Time High register and then waiting
	 *    for the Post Time register to read back as 0, which means
	 *    the time has converged properly.
	 */
	post_time = 0xffffffff00000001ULL;

	ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
	if (ret)
		goto out;

	ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
			  post_time_high_offset, 1);
	if (ret)
		goto out;

	do {
		usleep_range(5, 10);
		ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
				 post_time_offset, 2);
		if (ret)
			goto out;
	} while (--retries && post_time);

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out;
	}

	tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);

out:
	tb_switch_tmu_set_time_disruption(sw, false);
	return ret;
}

/**
 * tb_switch_tmu_disable() - Disable TMU of a switch
 * @sw: Switch whose TMU to disable
 *
 * Turns off the TMU of @sw if it is enabled. If it is not enabled,
 * does nothing.
 */
int tb_switch_tmu_disable(struct tb_switch *sw)
{
	/*
	 * No need to disable TMU on devices that don't support CLx
	 * since on these devices, e.g. Alpine Ridge and earlier, the
	 * HiFi bi-directional TMU mode is enabled by default and we
	 * don't change it.
	 */
	if (!tb_switch_is_clx_supported(sw))
		return 0;

	/* Already disabled? */
	if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
		return 0;

	if (tb_route(sw)) {
		bool unidirectional = sw->tmu.unidirectional;
		struct tb_switch *parent = tb_switch_parent(sw);
		struct tb_port *down, *up;
		int ret;

		down = tb_port_at(tb_route(sw), parent);
		up = tb_upstream_port(sw);
		/*
		 * In case of uni-directional time sync, the TMU handshake is
		 * initiated by the upstream router. In case of bi-directional
		 * time sync, it is initiated by the downstream router. We
		 * change the downstream router's rate to off in both cases,
		 * although it is needed only for the bi-directional mode. We
		 * avoid changing the upstream router's mode since it might
		 * have another downstream router plugged in that is set to
		 * uni-directional mode, and we don't want to change its TMU
		 * mode.
		 */
		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);

		tb_port_tmu_time_sync_disable(up);
		ret = tb_port_tmu_time_sync_disable(down);
		if (ret)
			return ret;

		if (unidirectional) {
			/* The switch may be unplugged so ignore any errors */
			tb_port_tmu_unidirectional_disable(up);
			ret = tb_port_tmu_unidirectional_disable(down);
			if (ret)
				return ret;
		}
	} else {
		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
	}

	sw->tmu.unidirectional = false;
	sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;

	tb_sw_dbg(sw, "TMU: disabled\n");
	return 0;
}

static void __tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *down, *up;

	down = tb_port_at(tb_route(sw), parent);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when setting
	 * bi-directional or uni-directional TMU mode, revert to the TMU
	 * configuration of the off mode. Ignore additional failures in
	 * the functions below since the caller already reports a
	 * failure.
	 */
	tb_port_tmu_time_sync_disable(down);
	tb_port_tmu_time_sync_disable(up);
	if (unidirectional)
		tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
	else
		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);

	tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
	tb_port_tmu_unidirectional_disable(down);
	tb_port_tmu_unidirectional_disable(up);
}

/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_RATE_OFF.
 */
static int __tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);

	ret = tb_port_tmu_unidirectional_disable(up);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_disable(down);
	if (ret)
		goto out;

	ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	__tb_switch_tmu_off(sw, false);
	return ret;
}

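/*
 * Clears the router's TMU objection mask so that uni-directional mode
 * can be enabled; used only on Titan Ridge (see tb_switch_tmu_enable()
 * below).
 */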
static int tb_switch_tmu_objection_mask(struct tb_switch *sw)
{
	u32 val;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
	if (ret)
		return ret;

	val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
}

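/* Set the disable-TMU-objections bits on the upstream adapter. */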
static int tb_switch_tmu_unidirectional_enable(struct tb_switch *sw)
{
	struct tb_port *up = tb_upstream_port(sw);

	return tb_port_tmu_write(up, TMU_ADP_CS_6,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK);
}

/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_RATE_OFF.
 */
static int __tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);
	ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request);
	if (ret)
		return ret;

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_unidirectional_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	__tb_switch_tmu_off(sw, true);
	return ret;
}

static void __tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *down, *up;

	down = tb_port_at(tb_route(sw), parent);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when changing the
	 * mode, revert to the TMU configuration of the previous mode.
	 * Ignore additional failures in the functions below since the
	 * caller already reports a failure.
	 */
	tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional);
	if (sw->tmu.unidirectional_request)
		tb_switch_tmu_rate_write(parent, sw->tmu.rate);
	else
		tb_switch_tmu_rate_write(sw, sw->tmu.rate);

	tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
	tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional);
}

static int __tb_switch_tmu_change_mode(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);
	ret = tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional_request);
	if (ret)
		goto out;

	if (sw->tmu.unidirectional_request)
		ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request);
	else
		ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
	if (ret)
		return ret;

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
	if (ret)
		return ret;

	ret = tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	return 0;

out:
	__tb_switch_tmu_change_mode_prev(sw);
	return ret;
}

/**
 * tb_switch_tmu_enable() - Enable TMU on a router
 * @sw: Router whose TMU to enable
 *
 * Enables the TMU of a router in uni-directional Normal/HiFi or
 * bi-directional HiFi mode. tb_switch_tmu_configure() must be called
 * before this function to select the rate (Normal/HiFi) and the
 * directionality (uni-directional/bi-directional). In HiFi mode all
 * tunneling should work. In Normal mode, DP tunneling can't work.
 * Uni-directional mode is required for CLx (Link Low-Power) to work.
 */
int tb_switch_tmu_enable(struct tb_switch *sw)
{
	bool unidirectional = sw->tmu.unidirectional_request;
	int ret;

	if (unidirectional && !sw->tmu.has_ucap)
		return -EOPNOTSUPP;

	/*
	 * No need to enable TMU on devices that don't support CLx since on
	 * these devices e.g. Alpine Ridge and earlier, the TMU mode HiFi
	 * bi-directional is enabled by default.
	 */
	if (!tb_switch_is_clx_supported(sw))
		return 0;

	if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
		return 0;

	if (tb_switch_is_titan_ridge(sw) && unidirectional) {
		/*
		 * Titan Ridge supports CL0s and CL1 only. CL0s and CL1 are
		 * enabled and supported together.
		 */
		if (!tb_switch_is_clx_enabled(sw, TB_CL1))
			return -EOPNOTSUPP;

		ret = tb_switch_tmu_objection_mask(sw);
		if (ret)
			return ret;

		ret = tb_switch_tmu_unidirectional_enable(sw);
		if (ret)
			return ret;
	}

	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	if (tb_route(sw)) {
		/*
		 * The supported mode changes are from OFF to
		 * HiFi-Uni/HiFi-BiDir/Normal-Uni, and from Normal-Uni
		 * to HiFi-Uni.
		 */
		if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) {
			if (unidirectional)
				ret = __tb_switch_tmu_enable_unidirectional(sw);
			else
				ret = __tb_switch_tmu_enable_bidirectional(sw);
			if (ret)
				return ret;
		} else if (sw->tmu.rate == TB_SWITCH_TMU_RATE_NORMAL) {
			ret = __tb_switch_tmu_change_mode(sw);
			if (ret)
				return ret;
		}
		sw->tmu.unidirectional = unidirectional;
	} else {
		/*
		 * Host router port configurations are written as part
		 * of the configuration of the downstream port of the
		 * parent of the child node - see above. Here only the
		 * host router's rate configuration is written.
		 */
		ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
		if (ret)
			return ret;
	}

	sw->tmu.rate = sw->tmu.rate_request;

	tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
	return tb_switch_tmu_set_time_disruption(sw, false);
}
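
/*
 * Typical usage (an illustrative sketch, not a caller in this file):
 * select the rate and directionality first, then enable the TMU. The
 * error handling shown here is hypothetical.
 *
 *	tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
 *	ret = tb_switch_tmu_enable(sw);
 *	if (ret)
 *		tb_sw_warn(sw, "failed to enable TMU\n");
 */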

/**
 * tb_switch_tmu_configure() - Configure the TMU rate and directionality
 * @sw: Router whose mode to change
 * @rate: Rate to configure (Off/Normal/HiFi)
 * @unidirectional: If uni-directional (bi-directional otherwise)
 *
 * Selects the rate of the TMU and the directionality (uni-directional
 * or bi-directional). Must be called before tb_switch_tmu_enable().
 */
void tb_switch_tmu_configure(struct tb_switch *sw,
			     enum tb_switch_tmu_rate rate, bool unidirectional)
{
	sw->tmu.unidirectional_request = unidirectional;
	sw->tmu.rate_request = rate;
}

static int tb_switch_tmu_config_enable(struct device *dev, void *rate)
{
	if (tb_is_switch(dev)) {
		struct tb_switch *sw = tb_to_switch(dev);

		tb_switch_tmu_configure(sw, *(enum tb_switch_tmu_rate *)rate,
					tb_switch_is_clx_enabled(sw, TB_CL1));
		if (tb_switch_tmu_enable(sw))
			tb_sw_dbg(sw, "failed to switch TMU mode for 1st depth router\n");
	}

	return 0;
}

/**
 * tb_switch_enable_tmu_1st_child() - Configure and enable TMU for 1st children
 * @sw: The router whose first-depth children's TMU to configure and enable
 * @rate: Rate to configure the router's children to
 *
 * Configures and enables the TMU mode of the first-depth children of
 * the specified router to the specified rate.
 */
void tb_switch_enable_tmu_1st_child(struct tb_switch *sw,
				    enum tb_switch_tmu_rate rate)
{
	device_for_each_child(&sw->dev, &rate,
			      tb_switch_tmu_config_enable);
}
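
/*
 * Typical usage (an illustrative sketch; the calling context is an
 * assumption): after the host router's TMU has been configured, switch
 * all first-depth routers to HiFi rate:
 *
 *	tb_switch_enable_tmu_1st_child(tb->root_switch,
 *				       TB_SWITCH_TMU_RATE_HIFI);
 */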