Linux kernel source: drivers/thunderbolt/tmu.c as shipped in v6.8
(source-browser navigation chrome removed).
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Thunderbolt Time Management Unit (TMU) support
   4 *
   5 * Copyright (C) 2019, Intel Corporation
   6 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
   7 *	    Rajmohan Mani <rajmohan.mani@intel.com>
   8 */
   9
  10#include <linux/delay.h>
  11
  12#include "tb.h"
  13
/*
 * TS packet interval programmed for each TMU mode (written to the
 * TMU_RTR_CS_3 TS packet interval field by tb_switch_tmu_rate_write()).
 * Zero means the TMU is off.
 */
static const unsigned int tmu_rates[] = {
	[TB_SWITCH_TMU_MODE_OFF] = 0,
	[TB_SWITCH_TMU_MODE_LOWRES] = 1000,
	[TB_SWITCH_TMU_MODE_HIFI_UNI] = 16,
	[TB_SWITCH_TMU_MODE_HIFI_BI] = 16,
	[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = 16,
};
  21
/*
 * Per-mode TMU tuning parameters. The first three are programmed into
 * the router TMU registers (TMU_RTR_CS_0/15/18) by
 * tb_switch_set_tmu_mode_params(); the remaining four into the lane
 * adapter registers (TMU_ADP_CS_8/9) by tb_port_set_tmu_mode_params().
 * Fields left zero are not used by the corresponding mode.
 */
static const struct {
	unsigned int freq_meas_window;	/* TMU_RTR_CS_0 frequency measurement window */
	unsigned int avg_const;		/* TMU_RTR_CS_15 averaging constants */
	unsigned int delta_avg_const;	/* TMU_RTR_CS_18 delta averaging constant */
	unsigned int repl_timeout;	/* TMU_ADP_CS_8 REPL timeout */
	unsigned int repl_threshold;	/* TMU_ADP_CS_8 REPL threshold */
	unsigned int repl_n;		/* TMU_ADP_CS_9 REPL N */
	unsigned int dirswitch_n;	/* TMU_ADP_CS_9 direction switch N */
} tmu_params[] = {
	[TB_SWITCH_TMU_MODE_OFF] = { },
	[TB_SWITCH_TMU_MODE_LOWRES] = { 30, 4, },
	[TB_SWITCH_TMU_MODE_HIFI_UNI] = { 800, 8, },
	[TB_SWITCH_TMU_MODE_HIFI_BI] = { 800, 8, },
	[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = {
		800, 4, 0, 3125, 25, 128, 255,
	},
};
  39
  40static const char *tmu_mode_name(enum tb_switch_tmu_mode mode)
  41{
  42	switch (mode) {
  43	case TB_SWITCH_TMU_MODE_OFF:
  44		return "off";
  45	case TB_SWITCH_TMU_MODE_LOWRES:
  46		return "uni-directional, LowRes";
  47	case TB_SWITCH_TMU_MODE_HIFI_UNI:
  48		return "uni-directional, HiFi";
  49	case TB_SWITCH_TMU_MODE_HIFI_BI:
  50		return "bi-directional, HiFi";
  51	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
  52		return "enhanced uni-directional, MedRes";
  53	default:
  54		return "unknown";
  55	}
  56}
  57
/*
 * Enhanced uni-directional TMU mode is only available on routers
 * implementing USB4 spec version 2 or later.
 */
static bool tb_switch_tmu_enhanced_is_supported(const struct tb_switch *sw)
{
	return usb4_switch_version(sw) > 1;
}
  62
/*
 * Program the router-level TMU parameters for @mode: the frequency
 * measurement window (TMU_RTR_CS_0) and the four averaging constants
 * (TMU_RTR_CS_15). On USB4 v2 routers the delta averaging constant
 * (TMU_RTR_CS_18) is programmed as well. Returns %0 on success,
 * negative errno from the config space accessors otherwise.
 */
static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
					 enum tb_switch_tmu_mode mode)
{
	u32 freq, avg, val;
	int ret;

	freq = tmu_params[mode].freq_meas_window;
	avg = tmu_params[mode].avg_const;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK;
	val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	/* All four averaging constants get the same per-mode value */
	val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK &
		~TMU_RTR_CS_15_DELAY_AVG_MASK &
		~TMU_RTR_CS_15_OFFSET_AVG_MASK &
		~TMU_RTR_CS_15_ERROR_AVG_MASK;
	val |=  FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	if (tb_switch_tmu_enhanced_is_supported(sw)) {
		u32 delta_avg = tmu_params[mode].delta_avg_const;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
				 sw->tmu.cap + TMU_RTR_CS_18, 1);
		if (ret)
			return ret;

		val &= ~TMU_RTR_CS_18_DELTA_AVG_CONST_MASK;
		val |= FIELD_PREP(TMU_RTR_CS_18_DELTA_AVG_CONST_MASK, delta_avg);

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
				  sw->tmu.cap + TMU_RTR_CS_18, 1);
	}

	return ret;
}
 121
 122static bool tb_switch_tmu_ucap_is_supported(struct tb_switch *sw)
 123{
 124	int ret;
 125	u32 val;
 126
 127	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
 128			 sw->tmu.cap + TMU_RTR_CS_0, 1);
 129	if (ret)
 130		return false;
 131
 132	return !!(val & TMU_RTR_CS_0_UCAP);
 133}
 134
 135static int tb_switch_tmu_rate_read(struct tb_switch *sw)
 136{
 137	int ret;
 138	u32 val;
 139
 140	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
 141			 sw->tmu.cap + TMU_RTR_CS_3, 1);
 142	if (ret)
 143		return ret;
 144
 145	val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
 146	return val;
 147}
 148
/*
 * Program @rate into the TS packet interval field of TMU_RTR_CS_3,
 * preserving the other bits of the register. Rate 0 turns the TMU off
 * (see tmu_rates[]).
 */
static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
{
	int ret;
	u32 val;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_3, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
	val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->tmu.cap + TMU_RTR_CS_3, 1);
}
 165
 166static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
 167			     u32 value)
 168{
 169	u32 data;
 170	int ret;
 171
 172	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
 173	if (ret)
 174		return ret;
 175
 176	data &= ~mask;
 177	data |= value;
 178
 179	return tb_port_write(port, &data, TB_CFG_PORT,
 180			     port->cap_tmu + offset, 1);
 181}
 182
 183static int tb_port_tmu_set_unidirectional(struct tb_port *port,
 184					  bool unidirectional)
 185{
 186	u32 val;
 187
 188	if (!port->sw->tmu.has_ucap)
 189		return 0;
 190
 191	val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
 192	return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
 193}
 194
/* Clear the uni-directional (UDM) bit of @port's lane adapter */
static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, false);
}
 199
/* Set the uni-directional (UDM) bit of @port's lane adapter */
static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, true);
}
 204
 205static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
 206{
 207	int ret;
 208	u32 val;
 209
 210	ret = tb_port_read(port, &val, TB_CFG_PORT,
 211			   port->cap_tmu + TMU_ADP_CS_3, 1);
 212	if (ret)
 213		return false;
 214
 215	return val & TMU_ADP_CS_3_UDM;
 216}
 217
 218static bool tb_port_tmu_is_enhanced(struct tb_port *port)
 219{
 220	int ret;
 221	u32 val;
 222
 223	ret = tb_port_read(port, &val, TB_CFG_PORT,
 224			   port->cap_tmu + TMU_ADP_CS_8, 1);
 225	if (ret)
 226		return false;
 227
 228	return val & TMU_ADP_CS_8_EUDM;
 229}
 230
 231/* Can be called to non-v2 lane adapters too */
 232static int tb_port_tmu_enhanced_enable(struct tb_port *port, bool enable)
 233{
 234	int ret;
 235	u32 val;
 236
 237	if (!tb_switch_tmu_enhanced_is_supported(port->sw))
 238		return 0;
 239
 240	ret = tb_port_read(port, &val, TB_CFG_PORT,
 241			   port->cap_tmu + TMU_ADP_CS_8, 1);
 242	if (ret)
 243		return ret;
 244
 245	if (enable)
 246		val |= TMU_ADP_CS_8_EUDM;
 247	else
 248		val &= ~TMU_ADP_CS_8_EUDM;
 249
 250	return tb_port_write(port, &val, TB_CFG_PORT,
 251			     port->cap_tmu + TMU_ADP_CS_8, 1);
 252}
 253
/*
 * Program the lane adapter TMU parameters for @mode: REPL timeout and
 * threshold into TMU_ADP_CS_8, REPL N and direction switch N into
 * TMU_ADP_CS_9 (see tmu_params[]). Only the enhanced mode has non-zero
 * values for these fields.
 */
static int tb_port_set_tmu_mode_params(struct tb_port *port,
				       enum tb_switch_tmu_mode mode)
{
	u32 repl_timeout, repl_threshold, repl_n, dirswitch_n, val;
	int ret;

	repl_timeout = tmu_params[mode].repl_timeout;
	repl_threshold = tmu_params[mode].repl_threshold;
	repl_n = tmu_params[mode].repl_n;
	dirswitch_n = tmu_params[mode].dirswitch_n;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return ret;

	val &= ~TMU_ADP_CS_8_REPL_TIMEOUT_MASK;
	val &= ~TMU_ADP_CS_8_REPL_THRESHOLD_MASK;
	val |= FIELD_PREP(TMU_ADP_CS_8_REPL_TIMEOUT_MASK, repl_timeout);
	val |= FIELD_PREP(TMU_ADP_CS_8_REPL_THRESHOLD_MASK, repl_threshold);

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_9, 1);
	if (ret)
		return ret;

	val &= ~TMU_ADP_CS_9_REPL_N_MASK;
	val &= ~TMU_ADP_CS_9_DIRSWITCH_N_MASK;
	val |= FIELD_PREP(TMU_ADP_CS_9_REPL_N_MASK, repl_n);
	val |= FIELD_PREP(TMU_ADP_CS_9_DIRSWITCH_N_MASK, dirswitch_n);

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_tmu + TMU_ADP_CS_9, 1);
}
 293
/*
 * Program @rate into the adapter TS interval field of TMU_ADP_CS_9.
 * Can be called to non-v2 lane adapters too: pre-v2 routers have no
 * such field, so the function then returns %0 without touching the
 * hardware.
 */
static int tb_port_tmu_rate_write(struct tb_port *port, int rate)
{
	int ret;
	u32 val;

	if (!tb_switch_tmu_enhanced_is_supported(port->sw))
		return 0;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_9, 1);
	if (ret)
		return ret;

	val &= ~TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK;
	val |= FIELD_PREP(TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK, rate);

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_tmu + TMU_ADP_CS_9, 1);
}
 314
/*
 * Set or clear the Disable Time Sync (DTS) bit of @port's lane
 * adapter. Note the inverted polarity: @time_sync == true *sets* DTS,
 * i.e. it disables time synchronization on the adapter.
 */
static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
{
	u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;

	return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
}
 321
/* Sets the DTS bit, which stops time synchronization on the adapter */
static int tb_port_tmu_time_sync_disable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, true);
}
 326
/* Clears the DTS bit, which (re-)enables time synchronization */
static int tb_port_tmu_time_sync_enable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, false);
}
 331
/*
 * Set or clear the Time Disruption (TD) bit of the router, announcing
 * that its local time is going to be disrupted. USB4 routers carry the
 * bit in TMU_RTR_CS_0; older (Thunderbolt) routers in the vendor
 * specific capability instead.
 */
static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
{
	u32 val, offset, bit;
	int ret;

	if (tb_switch_is_usb4(sw)) {
		offset = sw->tmu.cap + TMU_RTR_CS_0;
		bit = TMU_RTR_CS_0_TD;
	} else {
		offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
		bit = TB_TIME_VSEC_3_CS_26_TD;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	if (set)
		val |= bit;
	else
		val &= ~bit;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
}
 356
/*
 * Determine the TMU mode the router is currently running in and record
 * it in sw->tmu. For a non-root router the mode is deduced from the
 * upstream port's enhanced/uni-directional bits combined with the
 * programmed rate; a router without a route (host router) is either
 * off or bi-directional HiFi depending on whether the rate is non-zero.
 */
static int tmu_mode_init(struct tb_switch *sw)
{
	bool enhanced, ucap;
	int ret, rate;

	ucap = tb_switch_tmu_ucap_is_supported(sw);
	if (ucap)
		tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
	enhanced = tb_switch_tmu_enhanced_is_supported(sw);
	if (enhanced)
		tb_sw_dbg(sw, "TMU: supports enhanced uni-directional mode\n");

	ret = tb_switch_tmu_rate_read(sw);
	if (ret < 0)
		return ret;
	rate = ret;

	/* Off by default */
	sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;

	if (tb_route(sw)) {
		struct tb_port *up = tb_upstream_port(sw);

		if (enhanced && tb_port_tmu_is_enhanced(up)) {
			sw->tmu.mode = TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI;
		} else if (ucap && tb_port_tmu_is_unidirectional(up)) {
			/* Uni-directional: rate picks LowRes vs HiFi */
			if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
				sw->tmu.mode = TB_SWITCH_TMU_MODE_LOWRES;
			else if (tmu_rates[TB_SWITCH_TMU_MODE_HIFI_UNI] == rate)
				sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
		} else if (rate) {
			sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
		}
	} else if (rate) {
		sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
	}

	/* Update the initial request to match the current mode */
	sw->tmu.mode_request = sw->tmu.mode;
	sw->tmu.has_ucap = ucap;

	return 0;
}
 400
/**
 * tb_switch_tmu_init() - Initialize switch TMU structures
 * @sw: Switch to initialize
 *
 * This function must be called before other TMU related functions to
 * make sure the internal structures are filled in correctly. Does not
 * change any hardware configuration.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int tb_switch_tmu_init(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	/* ICM (firmware connection manager) routers are left alone */
	if (tb_switch_is_icm(sw))
		return 0;

	ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
	if (ret > 0)
		sw->tmu.cap = ret;

	/* Cache each adapter's TMU capability offset for later access */
	tb_switch_for_each_port(sw, port) {
		int cap;

		cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
		if (cap > 0)
			port->cap_tmu = cap;
	}

	ret = tmu_mode_init(sw);
	if (ret)
		return ret;

	tb_sw_dbg(sw, "TMU: current mode: %s\n", tmu_mode_name(sw->tmu.mode));
	return 0;
}
 436
/**
 * tb_switch_tmu_post_time() - Update switch local time
 * @sw: Switch whose time to update
 *
 * Updates switch local time using time posting procedure: reads the
 * grandmaster (root switch) time and posts it to @sw, then polls until
 * the hardware reports the time has converged.
 *
 * Returns %0 on success, %-ETIMEDOUT if the time did not converge, or
 * other negative errno from the config space accessors.
 */
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
	unsigned int post_time_high_offset, post_time_high = 0;
	unsigned int post_local_time_offset, post_time_offset;
	struct tb_switch *root_switch = sw->tb->root_switch;
	u64 hi, mid, lo, local_time, post_time;
	int i, ret, retries = 100;
	u32 gm_local_time[3];

	/* Only applicable to non-root USB4 routers */
	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_usb4(sw))
		return 0;

	/* Need to be able to read the grand master time */
	if (!root_switch->tmu.cap)
		return 0;

	ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
			 root_switch->tmu.cap + TMU_RTR_CS_1,
			 ARRAY_SIZE(gm_local_time));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
		tb_sw_dbg(root_switch, "TMU: local_time[%d]=0x%08x\n", i,
			  gm_local_time[i]);

	/* Convert to nanoseconds (drop fractional part) */
	hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
	mid = gm_local_time[1];
	lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
		TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
	local_time = hi << 48 | mid << 16 | lo;

	/* Tell the switch that time sync is disrupted for a while */
	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
	post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
	post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;

	/*
	 * Write the Grandmaster time to the Post Local Time registers
	 * of the new switch.
	 */
	ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
			  post_local_time_offset, 2);
	if (ret)
		goto out;

	/*
	 * Have the new switch update its local time by:
	 * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
	 * Post Time High register.
	 * 2) write 0 to Post Time High register and then wait for
	 * the completion of the post_time register becomes 0.
	 * This means the time has been converged properly.
	 */
	post_time = 0xffffffff00000001ULL;

	ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
	if (ret)
		goto out;

	ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
			  post_time_high_offset, 1);
	if (ret)
		goto out;

	/* Poll until the hardware clears post_time (up to ~100 rounds) */
	do {
		usleep_range(5, 10);
		ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
				 post_time_offset, 2);
		if (ret)
			goto out;
	} while (--retries && post_time);

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out;
	}

	tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);

out:
	/* Always lift the time disruption announcement */
	tb_switch_tmu_set_time_disruption(sw, false);
	return ret;
}
 535
/*
 * Turn enhanced uni-directional mode off on both sides of the link:
 * zero the adapter TS interval and clear the EUDM bit.
 */
static int disable_enhanced(struct tb_port *up, struct tb_port *down)
{
	int ret;

	/*
	 * The router may already have been disconnected so ignore
	 * errors on the upstream port.
	 */
	tb_port_tmu_rate_write(up, 0);
	tb_port_tmu_enhanced_enable(up, false);

	ret = tb_port_tmu_rate_write(down, 0);
	if (ret)
		return ret;
	return tb_port_tmu_enhanced_enable(down, false);
}
 552
/**
 * tb_switch_tmu_disable() - Disable TMU of a switch
 * @sw: Switch whose TMU to disable
 *
 * Turns off TMU of @sw if it is enabled. If not enabled does nothing.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int tb_switch_tmu_disable(struct tb_switch *sw)
{
	/* Already disabled? */
	if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF)
		return 0;

	if (tb_route(sw)) {
		struct tb_port *down, *up;
		int ret;

		down = tb_switch_downstream_port(sw);
		up = tb_upstream_port(sw);
		/*
		 * In case of uni-directional time sync, TMU handshake is
		 * initiated by upstream router. In case of bi-directional
		 * time sync, TMU handshake is initiated by downstream router.
		 * We change downstream router's rate to off for both uni/bidir
		 * cases although it is needed only for the bi-directional mode.
		 * We avoid changing upstream router's mode since it might
		 * have another downstream router plugged, that is set to
		 * uni-directional mode and we don't want to change it's TMU
		 * mode.
		 */
		ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
		if (ret)
			return ret;

		/* Upstream side may be gone already; ignore its errors */
		tb_port_tmu_time_sync_disable(up);
		ret = tb_port_tmu_time_sync_disable(down);
		if (ret)
			return ret;

		/* Mode specific adapter cleanup */
		switch (sw->tmu.mode) {
		case TB_SWITCH_TMU_MODE_LOWRES:
		case TB_SWITCH_TMU_MODE_HIFI_UNI:
			/* The switch may be unplugged so ignore any errors */
			tb_port_tmu_unidirectional_disable(up);
			ret = tb_port_tmu_unidirectional_disable(down);
			if (ret)
				return ret;
			break;

		case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
			ret = disable_enhanced(up, down);
			if (ret)
				return ret;
			break;

		default:
			break;
		}
	} else {
		/* Host router: just stop sending TS packets */
		tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
	}

	sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;

	tb_sw_dbg(sw, "TMU: disabled\n");
	return 0;
}
 619
/* Called only when there is failure enabling requested mode */
static void tb_switch_tmu_off(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[TB_SWITCH_TMU_MODE_OFF];
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when setting
	 * bi-directional or uni-directional TMU mode, get back to the TMU
	 * configurations in off mode. In case of additional failures in
	 * the functions below, ignore them since the caller shall already
	 * report a failure.
	 */
	tb_port_tmu_time_sync_disable(down);
	tb_port_tmu_time_sync_disable(up);

	/* Undo the rate/adapter programming of the requested mode */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		/* Uni-directional rate was programmed into the parent */
		tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		break;
	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
		disable_enhanced(up, down);
		break;
	default:
		break;
	}

	/* Always set the rate to 0 */
	tb_switch_tmu_rate_write(sw, rate);

	tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);
	tb_port_tmu_unidirectional_disable(down);
	tb_port_tmu_unidirectional_disable(up);
}
 657
/*
 * Enable bi-directional HiFi mode. This function is called when the
 * previous TMU mode was TB_SWITCH_TMU_MODE_OFF. On any failure the
 * partially applied configuration is rolled back via
 * tb_switch_tmu_off().
 */
static int tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Bi-directional mode requires UDM cleared on both adapters */
	ret = tb_port_tmu_unidirectional_disable(up);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_disable(down);
	if (ret)
		goto out;

	ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_HIFI_BI]);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	tb_switch_tmu_off(sw);
	return ret;
}
 696
/*
 * Clear the TMU objection bits in the vendor specific capability and
 * the upstream lane adapter (CL1/CL2). Only needed for Titan Ridge
 * before uni-directional TMU mode can be enabled.
 */
static int tb_switch_tmu_disable_objections(struct tb_switch *sw)
{
	struct tb_port *up = tb_upstream_port(sw);
	u32 val;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
	if (ret)
		return ret;

	val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
	if (ret)
		return ret;

	return tb_port_tmu_write(up, TMU_ADP_CS_6,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 |
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2);
}
 721
/*
 * Enable uni-directional (LowRes or HiFi) mode. This function is
 * called when the previous TMU mode was TB_SWITCH_TMU_MODE_OFF. On any
 * failure the partially applied configuration is rolled back via
 * tb_switch_tmu_off().
 */
static int tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);
	/* In uni-directional mode the parent router sends the TS packets */
	ret = tb_switch_tmu_rate_write(tb_switch_parent(sw),
				       tmu_rates[sw->tmu.mode_request]);
	if (ret)
		return ret;

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_unidirectional_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	tb_switch_tmu_off(sw);
	return ret;
}
 764
/*
 * Enable enhanced uni-directional (MedRes) mode. This function is
 * called when the previous TMU mode was TB_SWITCH_TMU_MODE_OFF. On any
 * failure the partially applied configuration is rolled back via
 * tb_switch_tmu_off().
 */
static int tb_switch_tmu_enable_enhanced(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode_request];
	struct tb_port *up, *down;
	int ret;

	/* Router specific parameters first */
	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		return ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Then both lane adapters: params, rate, and finally EUDM */
	ret = tb_port_set_tmu_mode_params(up, sw->tmu.mode_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_rate_write(up, rate);
	if (ret)
		goto out;

	ret = tb_port_tmu_enhanced_enable(up, true);
	if (ret)
		goto out;

	ret = tb_port_set_tmu_mode_params(down, sw->tmu.mode_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_rate_write(down, rate);
	if (ret)
		goto out;

	ret = tb_port_tmu_enhanced_enable(down, true);
	if (ret)
		goto out;

	return 0;

out:
	tb_switch_tmu_off(sw);
	return ret;
}
 813
/* Restore the TMU configuration of the previous (current) mode */
static void tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode];
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when change mode,
	 * get back to the TMU configurations in previous mode.
	 * In case of additional failures in the functions below,
	 * ignore them since the caller shall already report a failure.
	 */
	switch (sw->tmu.mode) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		/* Uni-directional: parent router sends the TS packets */
		tb_port_tmu_set_unidirectional(down, true);
		tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		tb_port_tmu_set_unidirectional(down, false);
		tb_switch_tmu_rate_write(sw, rate);
		break;

	default:
		break;
	}

	tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);

	/* Upstream adapter UDM bit follows the same mode split */
	switch (sw->tmu.mode) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		tb_port_tmu_set_unidirectional(up, true);
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		tb_port_tmu_set_unidirectional(up, false);
		break;

	default:
		break;
	}
}
 859
/*
 * Switch between the already-enabled non-enhanced modes
 * (LowRes/HiFi uni-directional and HiFi bi-directional). On failure
 * the previous mode's configuration is restored via
 * tb_switch_tmu_change_mode_prev(). Returns %-EINVAL for any other
 * requested mode.
 */
static int tb_switch_tmu_change_mode(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode_request];
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Program the upstream router downstream facing lane adapter */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		ret = tb_port_tmu_set_unidirectional(down, true);
		if (ret)
			goto out;
		ret = tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		if (ret)
			goto out;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		ret = tb_port_tmu_set_unidirectional(down, false);
		if (ret)
			goto out;
		ret = tb_switch_tmu_rate_write(sw, rate);
		if (ret)
			goto out;
		break;

	default:
		/* Not allowed to change modes from other than above */
		return -EINVAL;
	}

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		goto out;

	/* Program the new mode and the downstream router lane adapter */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		ret = tb_port_tmu_set_unidirectional(up, true);
		if (ret)
			goto out;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		ret = tb_port_tmu_set_unidirectional(up, false);
		if (ret)
			goto out;
		break;

	default:
		/* Not allowed to change modes from other than above */
		return -EINVAL;
	}

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	return 0;

out:
	tb_switch_tmu_change_mode_prev(sw);
	return ret;
}
 933
/**
 * tb_switch_tmu_enable() - Enable TMU on a router
 * @sw: Router whose TMU to enable
 *
 * Enables TMU of a router to be in uni-directional Normal/HiFi or
 * bi-directional HiFi mode. Calling tb_switch_tmu_configure() is
 * required before calling this function.
 *
 * Returns %0 on success and negative errno otherwise.
 */
int tb_switch_tmu_enable(struct tb_switch *sw)
{
	int ret;

	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	/* Titan Ridge needs its TMU objections cleared first */
	if (tb_switch_is_titan_ridge(sw) &&
	    (sw->tmu.mode_request == TB_SWITCH_TMU_MODE_LOWRES ||
	     sw->tmu.mode_request == TB_SWITCH_TMU_MODE_HIFI_UNI)) {
		ret = tb_switch_tmu_disable_objections(sw);
		if (ret)
			return ret;
	}

	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	if (tb_route(sw)) {
		/*
		 * The used mode changes are from OFF to
		 * HiFi-Uni/HiFi-BiDir/Normal-Uni or from Normal-Uni to
		 * HiFi-Uni.
		 */
		if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF) {
			switch (sw->tmu.mode_request) {
			case TB_SWITCH_TMU_MODE_LOWRES:
			case TB_SWITCH_TMU_MODE_HIFI_UNI:
				ret = tb_switch_tmu_enable_unidirectional(sw);
				break;

			case TB_SWITCH_TMU_MODE_HIFI_BI:
				ret = tb_switch_tmu_enable_bidirectional(sw);
				break;
			case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
				ret = tb_switch_tmu_enable_enhanced(sw);
				break;
			default:
				ret = -EINVAL;
				break;
			}
		} else if (sw->tmu.mode == TB_SWITCH_TMU_MODE_LOWRES ||
			   sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_UNI ||
			   sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_BI) {
			ret = tb_switch_tmu_change_mode(sw);
		} else {
			ret = -EINVAL;
		}
	} else {
		/*
		 * Host router port configurations are written as
		 * part of configurations for downstream port of the parent
		 * of the child node - see above.
		 * Here only the host router' rate configuration is written.
		 */
		ret = tb_switch_tmu_rate_write(sw, tmu_rates[sw->tmu.mode_request]);
	}

	if (ret) {
		tb_sw_warn(sw, "TMU: failed to enable mode %s: %d\n",
			   tmu_mode_name(sw->tmu.mode_request), ret);
	} else {
		sw->tmu.mode = sw->tmu.mode_request;
		tb_sw_dbg(sw, "TMU: mode set to: %s\n", tmu_mode_name(sw->tmu.mode));
	}

	return tb_switch_tmu_set_time_disruption(sw, false);
}
1011
/**
 * tb_switch_tmu_configure() - Configure the TMU mode
 * @sw: Router whose mode to change
 * @mode: Mode to configure
 *
 * Selects the TMU mode that is enabled when tb_switch_tmu_enable() is
 * next called.
 *
 * Returns %0 in success and negative errno otherwise. Specifically
 * returns %-EOPNOTSUPP if the requested mode is not possible (not
 * supported by the router and/or topology).
 */
int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode)
{
	/* Validate that @sw (and its parent) can support @mode */
	switch (mode) {
	case TB_SWITCH_TMU_MODE_OFF:
		break;

	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		if (!sw->tmu.has_ucap)
			return -EOPNOTSUPP;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		break;

	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: {
		const struct tb_switch *parent_sw = tb_switch_parent(sw);

		/* Both ends of the link must be USB4 v2 capable */
		if (!parent_sw || !tb_switch_tmu_enhanced_is_supported(parent_sw))
			return -EOPNOTSUPP;
		if (!tb_switch_tmu_enhanced_is_supported(sw))
			return -EOPNOTSUPP;

		break;
	}

	default:
		tb_sw_warn(sw, "TMU: unsupported mode %u\n", mode);
		return -EINVAL;
	}

	if (sw->tmu.mode_request != mode) {
		tb_sw_dbg(sw, "TMU: mode change %s -> %s requested\n",
			  tmu_mode_name(sw->tmu.mode), tmu_mode_name(mode));
		sw->tmu.mode_request = mode;
	}

	return 0;
}
Below: the same file (drivers/thunderbolt/tmu.c) as shipped in Linux
v6.13.7 — a partial duplicate from the source browser, truncated
mid-file.
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Thunderbolt Time Management Unit (TMU) support
   4 *
   5 * Copyright (C) 2019, Intel Corporation
   6 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
   7 *	    Rajmohan Mani <rajmohan.mani@intel.com>
   8 */
   9
  10#include <linux/delay.h>
  11
  12#include "tb.h"
  13
/* TS packet interval per TMU mode; zero means the TMU is off */
static const unsigned int tmu_rates[] = {
	[TB_SWITCH_TMU_MODE_OFF] = 0,
	[TB_SWITCH_TMU_MODE_LOWRES] = 1000,
	[TB_SWITCH_TMU_MODE_HIFI_UNI] = 16,
	[TB_SWITCH_TMU_MODE_HIFI_BI] = 16,
	[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = 16,
};
  21
/*
 * Per-mode TMU tuning parameters programmed into the router
 * (TMU_RTR_CS_0/15/18) and lane adapter (TMU_ADP_CS_8/9) registers.
 * Fields left zero are not used by the corresponding mode.
 */
static const struct {
	unsigned int freq_meas_window;
	unsigned int avg_const;
	unsigned int delta_avg_const;
	unsigned int repl_timeout;
	unsigned int repl_threshold;
	unsigned int repl_n;
	unsigned int dirswitch_n;
} tmu_params[] = {
	[TB_SWITCH_TMU_MODE_OFF] = { },
	[TB_SWITCH_TMU_MODE_LOWRES] = { 30, 4, },
	[TB_SWITCH_TMU_MODE_HIFI_UNI] = { 800, 8, },
	[TB_SWITCH_TMU_MODE_HIFI_BI] = { 800, 8, },
	[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = {
		800, 4, 0, 3125, 25, 128, 255,
	},
};
  39
/* Returns a human readable name for @mode, "unknown" for anything else */
static const char *tmu_mode_name(enum tb_switch_tmu_mode mode)
{
	switch (mode) {
	case TB_SWITCH_TMU_MODE_OFF:
		return "off";
	case TB_SWITCH_TMU_MODE_LOWRES:
		return "uni-directional, LowRes";
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		return "uni-directional, HiFi";
	case TB_SWITCH_TMU_MODE_HIFI_BI:
		return "bi-directional, HiFi";
	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
		return "enhanced uni-directional, MedRes";
	default:
		return "unknown";
	}
}
  57
/* Enhanced uni-directional TMU mode requires a USB4 v2 router */
static bool tb_switch_tmu_enhanced_is_supported(const struct tb_switch *sw)
{
	return usb4_switch_version(sw) > 1;
}
  62
/*
 * Program the router-level TMU parameters for @mode: frequency
 * measurement window (TMU_RTR_CS_0) and the averaging constants
 * (TMU_RTR_CS_15); on USB4 v2 routers also the delta averaging
 * constant (TMU_RTR_CS_18).
 */
static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
					 enum tb_switch_tmu_mode mode)
{
	u32 freq, avg, val;
	int ret;

	freq = tmu_params[mode].freq_meas_window;
	avg = tmu_params[mode].avg_const;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK;
	val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	/* All four averaging constants get the same per-mode value */
	val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK &
		~TMU_RTR_CS_15_DELAY_AVG_MASK &
		~TMU_RTR_CS_15_OFFSET_AVG_MASK &
		~TMU_RTR_CS_15_ERROR_AVG_MASK;
	val |=  FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	if (tb_switch_tmu_enhanced_is_supported(sw)) {
		u32 delta_avg = tmu_params[mode].delta_avg_const;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
				 sw->tmu.cap + TMU_RTR_CS_18, 1);
		if (ret)
			return ret;

		val &= ~TMU_RTR_CS_18_DELTA_AVG_CONST_MASK;
		val |= FIELD_PREP(TMU_RTR_CS_18_DELTA_AVG_CONST_MASK, delta_avg);

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
				  sw->tmu.cap + TMU_RTR_CS_18, 1);
	}

	return ret;
}
 121
 122static bool tb_switch_tmu_ucap_is_supported(struct tb_switch *sw)
 123{
 124	int ret;
 125	u32 val;
 126
 127	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
 128			 sw->tmu.cap + TMU_RTR_CS_0, 1);
 129	if (ret)
 130		return false;
 131
 132	return !!(val & TMU_RTR_CS_0_UCAP);
 133}
 134
 135static int tb_switch_tmu_rate_read(struct tb_switch *sw)
 136{
 137	int ret;
 138	u32 val;
 139
 140	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
 141			 sw->tmu.cap + TMU_RTR_CS_3, 1);
 142	if (ret)
 143		return ret;
 144
 145	val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
 146	return val;
 147}
 148
 149static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
 150{
 151	int ret;
 152	u32 val;
 153
 154	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
 155			 sw->tmu.cap + TMU_RTR_CS_3, 1);
 156	if (ret)
 157		return ret;
 158
 159	val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
 160	val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
 161
 162	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
 163			   sw->tmu.cap + TMU_RTR_CS_3, 1);
 164}
 165
 166static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
 167			     u32 value)
 168{
 169	u32 data;
 170	int ret;
 171
 172	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
 173	if (ret)
 174		return ret;
 175
 176	data &= ~mask;
 177	data |= value;
 178
 179	return tb_port_write(port, &data, TB_CFG_PORT,
 180			     port->cap_tmu + offset, 1);
 181}
 182
 183static int tb_port_tmu_set_unidirectional(struct tb_port *port,
 184					  bool unidirectional)
 185{
 186	u32 val;
 187
 188	if (!port->sw->tmu.has_ucap)
 189		return 0;
 190
 191	val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
 192	return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
 193}
 194
/* Clears the UDM bit of @port (no-op if the router lacks ucap) */
static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, false);
}
 199
/* Sets the UDM bit of @port (no-op if the router lacks ucap) */
static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, true);
}
 204
 205static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
 206{
 207	int ret;
 208	u32 val;
 209
 210	ret = tb_port_read(port, &val, TB_CFG_PORT,
 211			   port->cap_tmu + TMU_ADP_CS_3, 1);
 212	if (ret)
 213		return false;
 214
 215	return val & TMU_ADP_CS_3_UDM;
 216}
 217
 218static bool tb_port_tmu_is_enhanced(struct tb_port *port)
 219{
 220	int ret;
 221	u32 val;
 222
 223	ret = tb_port_read(port, &val, TB_CFG_PORT,
 224			   port->cap_tmu + TMU_ADP_CS_8, 1);
 225	if (ret)
 226		return false;
 227
 228	return val & TMU_ADP_CS_8_EUDM;
 229}
 230
 231/* Can be called to non-v2 lane adapters too */
 232static int tb_port_tmu_enhanced_enable(struct tb_port *port, bool enable)
 233{
 234	int ret;
 235	u32 val;
 236
 237	if (!tb_switch_tmu_enhanced_is_supported(port->sw))
 238		return 0;
 239
 240	ret = tb_port_read(port, &val, TB_CFG_PORT,
 241			   port->cap_tmu + TMU_ADP_CS_8, 1);
 242	if (ret)
 243		return ret;
 244
 245	if (enable)
 246		val |= TMU_ADP_CS_8_EUDM;
 247	else
 248		val &= ~TMU_ADP_CS_8_EUDM;
 249
 250	return tb_port_write(port, &val, TB_CFG_PORT,
 251			     port->cap_tmu + TMU_ADP_CS_8, 1);
 252}
 253
/*
 * Program the per-adapter TMU parameters for @mode from the tmu_params[]
 * table: replication timeout/threshold (TMU_ADP_CS_8) and replication/
 * direction switch counts (TMU_ADP_CS_9). Only the enhanced mode entry
 * of tmu_params[] has non-zero values for these fields.
 */
static int tb_port_set_tmu_mode_params(struct tb_port *port,
				       enum tb_switch_tmu_mode mode)
{
	u32 repl_timeout, repl_threshold, repl_n, dirswitch_n, val;
	int ret;

	repl_timeout = tmu_params[mode].repl_timeout;
	repl_threshold = tmu_params[mode].repl_threshold;
	repl_n = tmu_params[mode].repl_n;
	dirswitch_n = tmu_params[mode].dirswitch_n;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return ret;

	val &= ~TMU_ADP_CS_8_REPL_TIMEOUT_MASK;
	val &= ~TMU_ADP_CS_8_REPL_THRESHOLD_MASK;
	val |= FIELD_PREP(TMU_ADP_CS_8_REPL_TIMEOUT_MASK, repl_timeout);
	val |= FIELD_PREP(TMU_ADP_CS_8_REPL_THRESHOLD_MASK, repl_threshold);

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_9, 1);
	if (ret)
		return ret;

	val &= ~TMU_ADP_CS_9_REPL_N_MASK;
	val &= ~TMU_ADP_CS_9_DIRSWITCH_N_MASK;
	val |= FIELD_PREP(TMU_ADP_CS_9_REPL_N_MASK, repl_n);
	val |= FIELD_PREP(TMU_ADP_CS_9_DIRSWITCH_N_MASK, dirswitch_n);

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_tmu + TMU_ADP_CS_9, 1);
}
 293
 294/* Can be called to non-v2 lane adapters too */
 295static int tb_port_tmu_rate_write(struct tb_port *port, int rate)
 296{
 297	int ret;
 298	u32 val;
 299
 300	if (!tb_switch_tmu_enhanced_is_supported(port->sw))
 301		return 0;
 302
 303	ret = tb_port_read(port, &val, TB_CFG_PORT,
 304			   port->cap_tmu + TMU_ADP_CS_9, 1);
 305	if (ret)
 306		return ret;
 307
 308	val &= ~TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK;
 309	val |= FIELD_PREP(TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK, rate);
 310
 311	return tb_port_write(port, &val, TB_CFG_PORT,
 312			     port->cap_tmu + TMU_ADP_CS_9, 1);
 313}
 314
/*
 * Writes the TMU_ADP_CS_6_DTS bit of @port. Setting the bit stops time
 * sync on the adapter (see the wrappers: _disable() passes %true,
 * _enable() passes %false) — the apparent inversion is intentional.
 */
static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
{
	u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;

	return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
}
 321
/* Disables time sync on @port by setting the DTS bit */
static int tb_port_tmu_time_sync_disable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, true);
}
 326
/* Enables time sync on @port by clearing the DTS bit */
static int tb_port_tmu_time_sync_enable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, false);
}
 331
 332static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
 333{
 334	u32 val, offset, bit;
 335	int ret;
 336
 337	if (tb_switch_is_usb4(sw)) {
 338		offset = sw->tmu.cap + TMU_RTR_CS_0;
 339		bit = TMU_RTR_CS_0_TD;
 340	} else {
 341		offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
 342		bit = TB_TIME_VSEC_3_CS_26_TD;
 343	}
 344
 345	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
 346	if (ret)
 347		return ret;
 348
 349	if (set)
 350		val |= bit;
 351	else
 352		val &= ~bit;
 353
 354	return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
 355}
 356
/*
 * Deduce the currently active TMU mode from the hardware state and
 * record it (together with the ucap capability) in @sw->tmu.
 */
static int tmu_mode_init(struct tb_switch *sw)
{
	bool enhanced, ucap;
	int ret, rate;

	ucap = tb_switch_tmu_ucap_is_supported(sw);
	if (ucap)
		tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
	enhanced = tb_switch_tmu_enhanced_is_supported(sw);
	if (enhanced)
		tb_sw_dbg(sw, "TMU: supports enhanced uni-directional mode\n");

	ret = tb_switch_tmu_rate_read(sw);
	if (ret < 0)
		return ret;
	rate = ret;

	/* Off by default */
	sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;

	if (tb_route(sw)) {
		struct tb_port *up = tb_upstream_port(sw);

		/*
		 * The mode is determined first by the upstream adapter
		 * configuration (enhanced/uni-directional bits), then
		 * by the programmed rate.
		 */
		if (enhanced && tb_port_tmu_is_enhanced(up)) {
			sw->tmu.mode = TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI;
		} else if (ucap && tb_port_tmu_is_unidirectional(up)) {
			if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
				sw->tmu.mode = TB_SWITCH_TMU_MODE_LOWRES;
			else if (tmu_rates[TB_SWITCH_TMU_MODE_HIFI_UNI] == rate)
				sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
		} else if (rate) {
			/* Non-zero rate without uni/enhanced bits set */
			sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
		}
	} else if (rate) {
		/* No route means host router; only rate applies here */
		sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
	}

	/* Update the initial request to match the current mode */
	sw->tmu.mode_request = sw->tmu.mode;
	sw->tmu.has_ucap = ucap;

	return 0;
}
 400
 401/**
 402 * tb_switch_tmu_init() - Initialize switch TMU structures
 403 * @sw: Switch to initialized
 404 *
 405 * This function must be called before other TMU related functions to
 406 * makes the internal structures are filled in correctly. Does not
 407 * change any hardware configuration.
 408 */
 409int tb_switch_tmu_init(struct tb_switch *sw)
 410{
 411	struct tb_port *port;
 412	int ret;
 413
 414	if (tb_switch_is_icm(sw))
 415		return 0;
 416
 417	ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
 418	if (ret > 0)
 419		sw->tmu.cap = ret;
 420
 421	tb_switch_for_each_port(sw, port) {
 422		int cap;
 423
 424		cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
 425		if (cap > 0)
 426			port->cap_tmu = cap;
 427	}
 428
 429	ret = tmu_mode_init(sw);
 430	if (ret)
 431		return ret;
 432
 433	tb_sw_dbg(sw, "TMU: current mode: %s\n", tmu_mode_name(sw->tmu.mode));
 434	return 0;
 435}
 436
 437/**
 438 * tb_switch_tmu_post_time() - Update switch local time
 439 * @sw: Switch whose time to update
 440 *
 441 * Updates switch local time using time posting procedure.
 442 */
 443int tb_switch_tmu_post_time(struct tb_switch *sw)
 444{
 445	unsigned int post_time_high_offset, post_time_high = 0;
 446	unsigned int post_local_time_offset, post_time_offset;
 447	struct tb_switch *root_switch = sw->tb->root_switch;
 448	u64 hi, mid, lo, local_time, post_time;
 449	int i, ret, retries = 100;
 450	u32 gm_local_time[3];
 451
 452	if (!tb_route(sw))
 453		return 0;
 454
 455	if (!tb_switch_is_usb4(sw))
 456		return 0;
 457
 458	/* Need to be able to read the grand master time */
 459	if (!root_switch->tmu.cap)
 460		return 0;
 461
 462	ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
 463			 root_switch->tmu.cap + TMU_RTR_CS_1,
 464			 ARRAY_SIZE(gm_local_time));
 465	if (ret)
 466		return ret;
 467
 468	for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
 469		tb_sw_dbg(root_switch, "TMU: local_time[%d]=0x%08x\n", i,
 470			  gm_local_time[i]);
 471
 472	/* Convert to nanoseconds (drop fractional part) */
 473	hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
 474	mid = gm_local_time[1];
 475	lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
 476		TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
 477	local_time = hi << 48 | mid << 16 | lo;
 478
 479	/* Tell the switch that time sync is disrupted for a while */
 480	ret = tb_switch_tmu_set_time_disruption(sw, true);
 481	if (ret)
 482		return ret;
 483
 484	post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
 485	post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
 486	post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;
 487
 488	/*
 489	 * Write the Grandmaster time to the Post Local Time registers
 490	 * of the new switch.
 491	 */
 492	ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
 493			  post_local_time_offset, 2);
 494	if (ret)
 495		goto out;
 496
 497	/*
 498	 * Have the new switch update its local time by:
 499	 * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
 500	 * Post Time High register.
 501	 * 2) write 0 to Post Time High register and then wait for
 502	 * the completion of the post_time register becomes 0.
 503	 * This means the time has been converged properly.
 504	 */
 505	post_time = 0xffffffff00000001ULL;
 506
 507	ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
 508	if (ret)
 509		goto out;
 510
 511	ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
 512			  post_time_high_offset, 1);
 513	if (ret)
 514		goto out;
 515
 516	do {
 517		usleep_range(5, 10);
 518		ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
 519				 post_time_offset, 2);
 520		if (ret)
 521			goto out;
 522	} while (--retries && post_time);
 523
 524	if (!retries) {
 525		ret = -ETIMEDOUT;
 526		goto out;
 527	}
 528
 529	tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);
 530
 531out:
 532	tb_switch_tmu_set_time_disruption(sw, false);
 533	return ret;
 534}
 535
 536static int disable_enhanced(struct tb_port *up, struct tb_port *down)
 537{
 538	int ret;
 539
 540	/*
 541	 * Router may already been disconnected so ignore errors on the
 542	 * upstream port.
 543	 */
 544	tb_port_tmu_rate_write(up, 0);
 545	tb_port_tmu_enhanced_enable(up, false);
 546
 547	ret = tb_port_tmu_rate_write(down, 0);
 548	if (ret)
 549		return ret;
 550	return tb_port_tmu_enhanced_enable(down, false);
 551}
 552
 553/**
 554 * tb_switch_tmu_disable() - Disable TMU of a switch
 555 * @sw: Switch whose TMU to disable
 556 *
 557 * Turns off TMU of @sw if it is enabled. If not enabled does nothing.
 558 */
 559int tb_switch_tmu_disable(struct tb_switch *sw)
 560{
 561	/* Already disabled? */
 562	if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF)
 563		return 0;
 564
 565	if (tb_route(sw)) {
 566		struct tb_port *down, *up;
 567		int ret;
 568
 569		down = tb_switch_downstream_port(sw);
 570		up = tb_upstream_port(sw);
 571		/*
 572		 * In case of uni-directional time sync, TMU handshake is
 573		 * initiated by upstream router. In case of bi-directional
 574		 * time sync, TMU handshake is initiated by downstream router.
 575		 * We change downstream router's rate to off for both uni/bidir
 576		 * cases although it is needed only for the bi-directional mode.
 577		 * We avoid changing upstream router's mode since it might
 578		 * have another downstream router plugged, that is set to
 579		 * uni-directional mode and we don't want to change it's TMU
 580		 * mode.
 581		 */
 582		ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
 583		if (ret)
 584			return ret;
 585
 586		tb_port_tmu_time_sync_disable(up);
 587		ret = tb_port_tmu_time_sync_disable(down);
 588		if (ret)
 589			return ret;
 590
 591		switch (sw->tmu.mode) {
 592		case TB_SWITCH_TMU_MODE_LOWRES:
 593		case TB_SWITCH_TMU_MODE_HIFI_UNI:
 594			/* The switch may be unplugged so ignore any errors */
 595			tb_port_tmu_unidirectional_disable(up);
 596			ret = tb_port_tmu_unidirectional_disable(down);
 597			if (ret)
 598				return ret;
 599			break;
 600
 601		case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
 602			ret = disable_enhanced(up, down);
 603			if (ret)
 604				return ret;
 605			break;
 606
 607		default:
 608			break;
 609		}
 610	} else {
 611		tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
 612	}
 613
 614	sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;
 615
 616	tb_sw_dbg(sw, "TMU: disabled\n");
 617	return 0;
 618}
 619
/* Called only when there is failure enabling requested mode */
static void tb_switch_tmu_off(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[TB_SWITCH_TMU_MODE_OFF];
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when setting
	 * bi-directional or uni-directional TMU mode, get back to the TMU
	 * configurations in off mode. In case of additional failures in
	 * the functions below, ignore them since the caller shall already
	 * report a failure.
	 */
	tb_port_tmu_time_sync_disable(down);
	tb_port_tmu_time_sync_disable(up);

	/* Undo the mode-specific configuration that was attempted */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		/* In uni-directional modes the parent's rate was changed */
		tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		break;
	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
		disable_enhanced(up, down);
		break;
	default:
		break;
	}

	/* Always set the rate to 0 */
	tb_switch_tmu_rate_write(sw, rate);

	tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);
	tb_port_tmu_unidirectional_disable(down);
	tb_port_tmu_unidirectional_disable(up);
}
 657
 658/*
 659 * This function is called when the previous TMU mode was
 660 * TB_SWITCH_TMU_MODE_OFF.
 661 */
 662static int tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
 663{
 664	struct tb_port *up, *down;
 665	int ret;
 666
 667	up = tb_upstream_port(sw);
 668	down = tb_switch_downstream_port(sw);
 669
 670	ret = tb_port_tmu_unidirectional_disable(up);
 671	if (ret)
 672		return ret;
 673
 674	ret = tb_port_tmu_unidirectional_disable(down);
 675	if (ret)
 676		goto out;
 677
 678	ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_HIFI_BI]);
 679	if (ret)
 680		goto out;
 681
 682	ret = tb_port_tmu_time_sync_enable(up);
 683	if (ret)
 684		goto out;
 685
 686	ret = tb_port_tmu_time_sync_enable(down);
 687	if (ret)
 688		goto out;
 689
 690	return 0;
 691
 692out:
 693	tb_switch_tmu_off(sw);
 694	return ret;
 695}
 696
 697/* Only needed for Titan Ridge */
 698static int tb_switch_tmu_disable_objections(struct tb_switch *sw)
 699{
 700	struct tb_port *up = tb_upstream_port(sw);
 701	u32 val;
 702	int ret;
 703
 704	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
 705			 sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
 706	if (ret)
 707		return ret;
 708
 709	val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;
 710
 711	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
 712			  sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
 713	if (ret)
 714		return ret;
 715
 716	return tb_port_tmu_write(up, TMU_ADP_CS_6,
 717				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
 718				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 |
 719				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2);
 720}
 721
 722/*
 723 * This function is called when the previous TMU mode was
 724 * TB_SWITCH_TMU_MODE_OFF.
 725 */
 726static int tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
 727{
 728	struct tb_port *up, *down;
 729	int ret;
 730
 731	up = tb_upstream_port(sw);
 732	down = tb_switch_downstream_port(sw);
 733	ret = tb_switch_tmu_rate_write(tb_switch_parent(sw),
 734				       tmu_rates[sw->tmu.mode_request]);
 735	if (ret)
 736		return ret;
 737
 738	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
 739	if (ret)
 740		return ret;
 741
 742	ret = tb_port_tmu_unidirectional_enable(up);
 743	if (ret)
 744		goto out;
 745
 746	ret = tb_port_tmu_time_sync_enable(up);
 747	if (ret)
 748		goto out;
 749
 750	ret = tb_port_tmu_unidirectional_enable(down);
 751	if (ret)
 752		goto out;
 753
 754	ret = tb_port_tmu_time_sync_enable(down);
 755	if (ret)
 756		goto out;
 757
 758	return 0;
 759
 760out:
 761	tb_switch_tmu_off(sw);
 762	return ret;
 763}
 764
 765/*
 766 * This function is called when the previous TMU mode was
 767 * TB_SWITCH_TMU_RATE_OFF.
 768 */
 769static int tb_switch_tmu_enable_enhanced(struct tb_switch *sw)
 770{
 771	unsigned int rate = tmu_rates[sw->tmu.mode_request];
 772	struct tb_port *up, *down;
 773	int ret;
 774
 775	/* Router specific parameters first */
 776	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
 777	if (ret)
 778		return ret;
 779
 780	up = tb_upstream_port(sw);
 781	down = tb_switch_downstream_port(sw);
 782
 783	ret = tb_port_set_tmu_mode_params(up, sw->tmu.mode_request);
 784	if (ret)
 785		goto out;
 786
 787	ret = tb_port_tmu_rate_write(up, rate);
 788	if (ret)
 789		goto out;
 790
 791	ret = tb_port_tmu_enhanced_enable(up, true);
 792	if (ret)
 793		goto out;
 794
 795	ret = tb_port_set_tmu_mode_params(down, sw->tmu.mode_request);
 796	if (ret)
 797		goto out;
 798
 799	ret = tb_port_tmu_rate_write(down, rate);
 800	if (ret)
 801		goto out;
 802
 803	ret = tb_port_tmu_enhanced_enable(down, true);
 804	if (ret)
 805		goto out;
 806
 807	return 0;
 808
 809out:
 810	tb_switch_tmu_off(sw);
 811	return ret;
 812}
 813
/*
 * Called only when tb_switch_tmu_change_mode() fails: restores the TMU
 * configuration of the previous (still recorded) mode in sw->tmu.mode.
 */
static void tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode];
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when change mode,
	 * get back to the TMU configurations in previous mode.
	 * In case of additional failures in the functions below,
	 * ignore them since the caller shall already report a failure.
	 */
	switch (sw->tmu.mode) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		/* Uni-directional: parent drives the rate */
		tb_port_tmu_set_unidirectional(down, true);
		tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		tb_port_tmu_set_unidirectional(down, false);
		tb_switch_tmu_rate_write(sw, rate);
		break;

	default:
		break;
	}

	tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);

	/* Restore the upstream adapter UDM bit last */
	switch (sw->tmu.mode) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		tb_port_tmu_set_unidirectional(up, true);
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		tb_port_tmu_set_unidirectional(up, false);
		break;

	default:
		break;
	}
}
 859
/*
 * Changes between already-enabled modes (uni-directional <-> HiFi
 * bi-directional). The downstream facing adapter of the parent is
 * programmed first, then the router parameters, then this router's
 * upstream adapter, and finally time sync is re-enabled on both ends.
 * On failure the previous mode is restored.
 */
static int tb_switch_tmu_change_mode(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode_request];
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Program the upstream router downstream facing lane adapter */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		ret = tb_port_tmu_set_unidirectional(down, true);
		if (ret)
			goto out;
		ret = tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		if (ret)
			goto out;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		ret = tb_port_tmu_set_unidirectional(down, false);
		if (ret)
			goto out;
		ret = tb_switch_tmu_rate_write(sw, rate);
		if (ret)
			goto out;
		break;

	default:
		/* Not allowed to change modes from other than above */
		return -EINVAL;
	}

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		goto out;

	/* Program the new mode and the downstream router lane adapter */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		ret = tb_port_tmu_set_unidirectional(up, true);
		if (ret)
			goto out;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		ret = tb_port_tmu_set_unidirectional(up, false);
		if (ret)
			goto out;
		break;

	default:
		/* Not allowed to change modes from other than above */
		return -EINVAL;
	}

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	return 0;

out:
	tb_switch_tmu_change_mode_prev(sw);
	return ret;
}
 933
 934/**
 935 * tb_switch_tmu_enable() - Enable TMU on a router
 936 * @sw: Router whose TMU to enable
 937 *
 938 * Enables TMU of a router to be in uni-directional Normal/HiFi or
 939 * bi-directional HiFi mode. Calling tb_switch_tmu_configure() is
 940 * required before calling this function.
 941 */
 942int tb_switch_tmu_enable(struct tb_switch *sw)
 943{
 944	int ret;
 945
 946	if (tb_switch_tmu_is_enabled(sw))
 947		return 0;
 948
 949	if (tb_switch_is_titan_ridge(sw) &&
 950	    (sw->tmu.mode_request == TB_SWITCH_TMU_MODE_LOWRES ||
 951	     sw->tmu.mode_request == TB_SWITCH_TMU_MODE_HIFI_UNI)) {
 952		ret = tb_switch_tmu_disable_objections(sw);
 953		if (ret)
 954			return ret;
 955	}
 956
 957	ret = tb_switch_tmu_set_time_disruption(sw, true);
 958	if (ret)
 959		return ret;
 960
 961	if (tb_route(sw)) {
 962		/*
 963		 * The used mode changes are from OFF to
 964		 * HiFi-Uni/HiFi-BiDir/Normal-Uni or from Normal-Uni to
 965		 * HiFi-Uni.
 966		 */
 967		if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF) {
 968			switch (sw->tmu.mode_request) {
 969			case TB_SWITCH_TMU_MODE_LOWRES:
 970			case TB_SWITCH_TMU_MODE_HIFI_UNI:
 971				ret = tb_switch_tmu_enable_unidirectional(sw);
 972				break;
 973
 974			case TB_SWITCH_TMU_MODE_HIFI_BI:
 975				ret = tb_switch_tmu_enable_bidirectional(sw);
 976				break;
 977			case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
 978				ret = tb_switch_tmu_enable_enhanced(sw);
 979				break;
 980			default:
 981				ret = -EINVAL;
 982				break;
 983			}
 984		} else if (sw->tmu.mode == TB_SWITCH_TMU_MODE_LOWRES ||
 985			   sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_UNI ||
 986			   sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_BI) {
 987			ret = tb_switch_tmu_change_mode(sw);
 988		} else {
 989			ret = -EINVAL;
 990		}
 991	} else {
 992		/*
 993		 * Host router port configurations are written as
 994		 * part of configurations for downstream port of the parent
 995		 * of the child node - see above.
 996		 * Here only the host router' rate configuration is written.
 997		 */
 998		ret = tb_switch_tmu_rate_write(sw, tmu_rates[sw->tmu.mode_request]);
 999	}
1000
1001	if (ret) {
1002		tb_sw_warn(sw, "TMU: failed to enable mode %s: %d\n",
1003			   tmu_mode_name(sw->tmu.mode_request), ret);
1004	} else {
1005		sw->tmu.mode = sw->tmu.mode_request;
1006		tb_sw_dbg(sw, "TMU: mode set to: %s\n", tmu_mode_name(sw->tmu.mode));
1007	}
1008
1009	return tb_switch_tmu_set_time_disruption(sw, false);
1010}
1011
1012/**
1013 * tb_switch_tmu_configure() - Configure the TMU mode
1014 * @sw: Router whose mode to change
1015 * @mode: Mode to configure
1016 *
1017 * Selects the TMU mode that is enabled when tb_switch_tmu_enable() is
1018 * next called.
1019 *
1020 * Returns %0 in success and negative errno otherwise. Specifically
1021 * returns %-EOPNOTSUPP if the requested mode is not possible (not
1022 * supported by the router and/or topology).
1023 */
1024int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode)
1025{
1026	switch (mode) {
1027	case TB_SWITCH_TMU_MODE_OFF:
1028		break;
1029
1030	case TB_SWITCH_TMU_MODE_LOWRES:
1031	case TB_SWITCH_TMU_MODE_HIFI_UNI:
1032		if (!sw->tmu.has_ucap)
1033			return -EOPNOTSUPP;
1034		break;
1035
1036	case TB_SWITCH_TMU_MODE_HIFI_BI:
1037		break;
1038
1039	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: {
1040		const struct tb_switch *parent_sw = tb_switch_parent(sw);
1041
1042		if (!parent_sw || !tb_switch_tmu_enhanced_is_supported(parent_sw))
1043			return -EOPNOTSUPP;
1044		if (!tb_switch_tmu_enhanced_is_supported(sw))
1045			return -EOPNOTSUPP;
1046
1047		break;
1048	}
1049
1050	default:
1051		tb_sw_warn(sw, "TMU: unsupported mode %u\n", mode);
1052		return -EINVAL;
1053	}
1054
1055	if (sw->tmu.mode_request != mode) {
1056		tb_sw_dbg(sw, "TMU: mode change %s -> %s requested\n",
1057			  tmu_mode_name(sw->tmu.mode), tmu_mode_name(mode));
1058		sw->tmu.mode_request = mode;
1059	}
1060
1061	return 0;
1062}