// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt link controller support
 *
 * Copyright (C) 2019, Intel Corporation
 * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include "tb.h"

/**
 * tb_lc_read_uuid() - Read switch UUID from link controller common register
 * @sw: Switch whose UUID is read
 * @uuid: UUID is placed here
 */
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid)
{
	if (!sw->cap_lc)
		return -EINVAL;
	return tb_sw_read(sw, uuid, TB_CFG_SWITCH, sw->cap_lc + TB_LC_FUSE, 4);
}

static int read_lc_desc(struct tb_switch *sw, u32 *desc)
{
	if (!sw->cap_lc)
		return -EINVAL;
	return tb_sw_read(sw, desc, TB_CFG_SWITCH, sw->cap_lc + TB_LC_DESC, 1);
}

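/*
 * The LC descriptor register (TB_LC_DESC) packs the number of link
 * controllers, the offset where the per-port LC registers start and the
 * size of each per-port register block. find_port_lc_cap() uses those
 * fields to compute the LC register offset for a given lane adapter.
 */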
static int find_port_lc_cap(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int start, phys, ret, size;
	u32 desc;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Start of port LC registers */
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
	phys = tb_phy_port_from_link(port->port);

	return sw->cap_lc + start + phys * size;
}

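/*
 * The per-port SX_CTRL register carries a "configured" bit for each lane
 * (L1C for odd-numbered lane adapters, L2C for even-numbered ones) plus
 * an upstream bit. tb_lc_set_port_configured() updates them with a
 * read-modify-write.
 */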
static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
	bool upstream = tb_is_upstream_port(port);
	struct tb_switch *sw = port->sw;
	u32 ctrl, lane;
	int cap, ret;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Resolve correct lane */
	if (port->port % 2)
		lane = TB_LC_SX_CTRL_L1C;
	else
		lane = TB_LC_SX_CTRL_L2C;

	if (configured) {
		ctrl |= lane;
		if (upstream)
			ctrl |= TB_LC_SX_CTRL_UPSTREAM;
	} else {
		ctrl &= ~lane;
		if (upstream)
			ctrl &= ~TB_LC_SX_CTRL_UPSTREAM;
	}

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_configure_port() - Let LC know about configured port
 * @port: Port that is set as configured
 *
 * Sets the port configured for power management purposes.
 */
int tb_lc_configure_port(struct tb_port *port)
{
	return tb_lc_set_port_configured(port, true);
}

/**
 * tb_lc_unconfigure_port() - Let LC know about unconfigured port
 * @port: Port that is set as unconfigured
 *
 * Sets the port unconfigured for power management purposes.
 */
void tb_lc_unconfigure_port(struct tb_port *port)
{
	tb_lc_set_port_configured(port, false);
}

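/*
 * XDomain links are flagged the same way, but through the per-lane
 * L1D/L2D bits of SX_CTRL instead of the "configured" bits.
 */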
static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure)
{
	struct tb_switch *sw = port->sw;
	u32 ctrl, lane;
	int cap, ret;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	/* Resolve correct lane */
	if (port->port % 2)
		lane = TB_LC_SX_CTRL_L1D;
	else
		lane = TB_LC_SX_CTRL_L2D;

	if (configure)
		ctrl |= lane;
	else
		ctrl &= ~lane;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_configure_xdomain() - Inform LC that the link is XDomain
 * @port: Switch downstream port connected to another host
 *
 * Sets the lane configured for XDomain accordingly so that the LC knows
 * about this. Returns %0 on success and negative errno in case of failure.
 */
int tb_lc_configure_xdomain(struct tb_port *port)
{
	return tb_lc_set_xdomain_configured(port, true);
}

/**
 * tb_lc_unconfigure_xdomain() - Unconfigure XDomain from port
 * @port: Switch downstream port that was connected to another host
 *
 * Unsets the lane XDomain configuration.
 */
void tb_lc_unconfigure_xdomain(struct tb_port *port)
{
	tb_lc_set_xdomain_configured(port, false);
}

/**
 * tb_lc_start_lane_initialization() - Start lane initialization
 * @port: Device router lane 0 adapter
 *
 * Starts lane initialization for @port after the router resumed from
 * sleep. Should be called for those downstream lane adapters that were
 * not connected (tb_lc_configure_port() was not called) before sleep.
 *
 * Returns %0 on success and negative errno in case of failure.
 */
int tb_lc_start_lane_initialization(struct tb_port *port)
{
	struct tb_switch *sw = port->sw;
	int ret, cap;
	u32 ctrl;

	if (!tb_route(sw))
		return 0;

	if (sw->generation < 2)
		return 0;

	cap = find_port_lc_cap(port);
	if (cap < 0)
		return cap;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	ctrl |= TB_LC_SX_CTRL_SLI;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

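/*
 * tb_lc_set_wake_one() programs the wake bits in SX_CTRL of a single
 * link controller: TB_WAKE_ON_CONNECT maps to WOC/WOD, TB_WAKE_ON_DP to
 * WODPC/WODPD, TB_WAKE_ON_PCIE to WOP and TB_WAKE_ON_USB4 to WOU4.
 */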
static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
			      unsigned int flags)
{
	u32 ctrl;
	int ret;

	/*
	 * Read the current value, clear all wake bits and then enable
	 * the ones requested in @flags.
	 */
	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
			 offset + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WODPC |
		  TB_LC_SX_CTRL_WODPD | TB_LC_SX_CTRL_WOP | TB_LC_SX_CTRL_WOU4);

	if (flags & TB_WAKE_ON_CONNECT)
		ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD;
	if (flags & TB_WAKE_ON_USB4)
		ctrl |= TB_LC_SX_CTRL_WOU4;
	if (flags & TB_WAKE_ON_PCIE)
		ctrl |= TB_LC_SX_CTRL_WOP;
	if (flags & TB_WAKE_ON_DP)
		ctrl |= TB_LC_SX_CTRL_WODPC | TB_LC_SX_CTRL_WODPD;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1);
}

/**
 * tb_lc_set_wake() - Enable/disable wake
 * @sw: Switch whose wakes to configure
 * @flags: Wakeup flags (%0 to disable)
 *
 * For each LC sets wake bits accordingly.
 */
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
{
	int start, size, nlc, ret, i;
	u32 desc;

	if (sw->generation < 2)
		return 0;

	if (!tb_route(sw))
		return 0;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Figure out number of link controllers */
	nlc = desc & TB_LC_DESC_NLC_MASK;
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

	/* For each link controller set the requested wake bits */
	for (i = 0; i < nlc; i++) {
		unsigned int offset = sw->cap_lc + start + i * size;

		ret = tb_lc_set_wake_one(sw, offset, flags);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * tb_lc_set_sleep() - Inform LC that the switch is going to sleep
 * @sw: Switch to set sleep
 *
 * Let the switch link controllers know that the switch is going to
 * sleep.
 */
int tb_lc_set_sleep(struct tb_switch *sw)
{
	int start, size, nlc, ret, i;
	u32 desc;

	if (sw->generation < 2)
		return 0;

	ret = read_lc_desc(sw, &desc);
	if (ret)
		return ret;

	/* Figure out number of link controllers */
	nlc = desc & TB_LC_DESC_NLC_MASK;
	start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
	size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;

	/* For each link controller set sleep bit */
	for (i = 0; i < nlc; i++) {
		unsigned int offset = sw->cap_lc + start + i * size;
		u32 ctrl;

		ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
				 offset + TB_LC_SX_CTRL, 1);
		if (ret)
			return ret;

		ctrl |= TB_LC_SX_CTRL_SLP;
		ret = tb_sw_write(sw, &ctrl, TB_CFG_SWITCH,
				  offset + TB_LC_SX_CTRL, 1);
		if (ret)
			return ret;
	}

	return 0;
}

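/*
 * tb_lc_lane_bonding_possible() checks the TB_LC_PORT_ATTR_BE bit of the
 * upstream lane adapter's LC PORT_ATTR register to decide whether the
 * link towards @sw can be bonded.
 */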
/**
 * tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch
 * @sw: Switch to check
 *
 * Checks whether conditions for lane bonding from parent to @sw are
 * possible.
 */
bool tb_lc_lane_bonding_possible(struct tb_switch *sw)
{
	struct tb_port *up;
	int cap, ret;
	u32 val;

	if (sw->generation < 2)
		return false;

	up = tb_upstream_port(sw);
	cap = find_port_lc_cap(up);
	if (cap < 0)
		return false;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1);
	if (ret)
		return false;

	return !!(val & TB_LC_PORT_ATTR_BE);
}

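/*
 * tb_lc_dp_sink_from_port() maps a DP IN adapter to a sink index: 0 if
 * @in is the first DP IN adapter of @sw, 1 otherwise, or -EINVAL if the
 * router has no DP IN adapters at all.
 */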
static int tb_lc_dp_sink_from_port(const struct tb_switch *sw,
				   struct tb_port *in)
{
	struct tb_port *port;

	/* The first DP IN port is sink 0 and second is sink 1 */
	tb_switch_for_each_port(sw, port) {
		if (tb_port_is_dpin(port))
			return in != port;
	}

	return -EINVAL;
}

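/*
 * The SNK_ALLOCATION register holds one allocation field per sink. A
 * sink is considered free for the connection manager if its field is
 * zero or already carries the CM allocation value.
 */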
static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
{
	u32 val, alloc;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	/*
	 * Sink is available for CM/SW to use if the allocation value is
	 * either 0 or 1.
	 */
	if (!sink) {
		alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK;
		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM)
			return 0;
	} else {
		alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >>
			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
		if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM)
			return 0;
	}

	return -EBUSY;
}

/**
 * tb_lc_dp_sink_query() - Is DP sink available for DP IN port
 * @sw: Switch whose DP sink is queried
 * @in: DP IN port to check
 *
 * Queries through LC SNK_ALLOCATION registers whether DP sink is available
 * for the given DP IN port or not.
 */
bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
{
	int sink;

	/*
	 * For older generations sink is always available as there is no
	 * allocation mechanism.
	 */
	if (sw->generation < 3)
		return true;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return false;

	return !tb_lc_dp_sink_available(sw, sink);
}

/**
 * tb_lc_dp_sink_alloc() - Allocate DP sink
 * @sw: Switch whose DP sink is allocated
 * @in: DP IN port the DP sink is allocated for
 *
 * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the
 * resource is available and allocation is successful, returns %0. In all
 * other cases returns negative errno. In particular %-EBUSY is returned if
 * the resource was not available.
 */
int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
{
	int ret, sink;
	u32 val;

	if (sw->generation < 3)
		return 0;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return sink;

	ret = tb_lc_dp_sink_available(sw, sink);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	if (!sink) {
		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
		val |= TB_LC_SNK_ALLOCATION_SNK0_CM;
	} else {
		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
		val |= TB_LC_SNK_ALLOCATION_SNK1_CM <<
			TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
	}

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	tb_port_dbg(in, "sink %d allocated\n", sink);
	return 0;
}

/**
 * tb_lc_dp_sink_dealloc() - De-allocate DP sink
 * @sw: Switch whose DP sink is de-allocated
 * @in: DP IN port whose DP sink is de-allocated
 *
 * De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
 */
int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
{
	int ret, sink;
	u32 val;

	if (sw->generation < 3)
		return 0;

	sink = tb_lc_dp_sink_from_port(sw, in);
	if (sink < 0)
		return sink;

	/* Needs to be owned by CM/SW */
	ret = tb_lc_dp_sink_available(sw, sink);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	if (!sink)
		val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
	else
		val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
	if (ret)
		return ret;

	tb_port_dbg(in, "sink %d de-allocated\n", sink);
	return 0;
}

/**
 * tb_lc_force_power() - Forces LC to be powered on
 * @sw: Thunderbolt switch
 *
 * This is useful to let the authentication cycle pass even without
 * a Thunderbolt link present.
 */
int tb_lc_force_power(struct tb_switch *sw)
{
	u32 in = 0xffff;

	return tb_sw_write(sw, &in, TB_CFG_SWITCH, TB_LC_POWER, 1);
}
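
/*
 * Illustrative usage sketch (not part of this file): a software
 * connection manager could combine these helpers roughly as follows
 * when setting up a DP tunnel and preparing a router for sleep.
 *
 *	if (tb_lc_dp_sink_query(sw, in) && !tb_lc_dp_sink_alloc(sw, in))
 *		... establish the DP tunnel, tb_lc_dp_sink_dealloc() later ...
 *
 *	tb_lc_set_wake(sw, TB_WAKE_ON_CONNECT | TB_WAKE_ON_USB4);
 *	tb_lc_set_sleep(sw);
 */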