xref: /openbmc/linux/drivers/thunderbolt/tmu.c (revision a28ec0e1)
// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt Time Management Unit (TMU) support
 *
 * Copyright (C) 2019, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *	    Rajmohan Mani <rajmohan.mani@intel.com>
 */

#include <linux/delay.h>

#include "tb.h"

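/*
 * Returns a human readable name for the current TMU mode of @sw. Used
 * only for the debug messages in this file.
 */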
static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw)
{
	bool root_switch = !tb_route(sw);

	switch (sw->tmu.rate) {
	case TB_SWITCH_TMU_RATE_OFF:
		return "off";

	case TB_SWITCH_TMU_RATE_HIFI:
		/* Root switch does not have upstream directionality */
		if (root_switch)
			return "HiFi";
		if (sw->tmu.unidirectional)
			return "uni-directional, HiFi";
		return "bi-directional, HiFi";

	case TB_SWITCH_TMU_RATE_NORMAL:
		if (root_switch)
			return "normal";
		return "uni-directional, normal";

	default:
		return "unknown";
	}
}

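/*
 * Checks whether the router supports uni-directional TMU mode by
 * reading the UCAP bit in TMU_RTR_CS_0.
 */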
static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw)
{
	int ret;
	u32 val;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return false;

	return !!(val & TMU_RTR_CS_0_UCAP);
}

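/*
 * Reads the current TMU rate from the TS Packet Interval field of
 * TMU_RTR_CS_3. Returns the rate or negative errno in case of failure.
 */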
static int tb_switch_tmu_rate_read(struct tb_switch *sw)
{
	int ret;
	u32 val;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_3, 1);
	if (ret)
		return ret;

	val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
	return val;
}

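/*
 * Programs a new rate into the TS Packet Interval field of TMU_RTR_CS_3
 * using read-modify-write.
 */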
static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
{
	int ret;
	u32 val;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_3, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
	val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->tmu.cap + TMU_RTR_CS_3, 1);
}

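/*
 * Read-modify-write helper for the TMU adapter (port) configuration
 * space: clears @mask and sets @value in the register at @offset.
 */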
static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
			     u32 value)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
	if (ret)
		return ret;

	data &= ~mask;
	data |= value;

	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_tmu + offset, 1);
}

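/*
 * Sets or clears the UDM (uni-directional mode) bit of the TMU adapter.
 * Does nothing if the router does not support uni-directional mode.
 */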
static int tb_port_tmu_set_unidirectional(struct tb_port *port,
					  bool unidirectional)
{
	u32 val;

	if (!port->sw->tmu.has_ucap)
		return 0;

	val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
	return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
}

static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, false);
}

static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, true);
}

static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_3, 1);
	if (ret)
		return false;

	return val & TMU_ADP_CS_3_UDM;
}

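/*
 * Note that TMU_ADP_CS_6_DTS disables time sync when set (DTS = Disable
 * Time Sync), which is why the disable wrapper below passes true and the
 * enable wrapper passes false.
 */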
static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
{
	u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;

	return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
}

static int tb_port_tmu_time_sync_disable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, true);
}

static int tb_port_tmu_time_sync_enable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, false);
}

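/*
 * Sets or clears the TD (time disruption) bit in TMU_RTR_CS_0 to tell
 * the router that its time is being disrupted while the TMU is being
 * reconfigured.
 */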
static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
{
	int ret;
	u32 val;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	if (set)
		val |= TMU_RTR_CS_0_TD;
	else
		val &= ~TMU_RTR_CS_0_TD;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->tmu.cap + TMU_RTR_CS_0, 1);
}

/**
 * tb_switch_tmu_init() - Initialize switch TMU structures
 * @sw: Switch to initialize
 *
 * This function must be called before other TMU related functions to
 * make sure the internal structures are filled in correctly. Does not
 * change any hardware configuration.
 */
int tb_switch_tmu_init(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (tb_switch_is_icm(sw))
		return 0;

	ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
	if (ret > 0)
		sw->tmu.cap = ret;

	tb_switch_for_each_port(sw, port) {
		int cap;

		cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
		if (cap > 0)
			port->cap_tmu = cap;
	}

	ret = tb_switch_tmu_rate_read(sw);
	if (ret < 0)
		return ret;

	sw->tmu.rate = ret;

	sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw);
	if (sw->tmu.has_ucap) {
		tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");

		if (tb_route(sw)) {
			struct tb_port *up = tb_upstream_port(sw);

			sw->tmu.unidirectional =
				tb_port_tmu_is_unidirectional(up);
		}
	} else {
		sw->tmu.unidirectional = false;
	}

	tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw));
	return 0;
}

/**
 * tb_switch_tmu_post_time() - Update switch local time
 * @sw: Switch whose time to update
 *
 * Updates the switch local time using the time posting procedure.
 */
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
	unsigned int post_time_high_offset, post_time_high = 0;
	unsigned int post_local_time_offset, post_time_offset;
	struct tb_switch *root_switch = sw->tb->root_switch;
	u64 hi, mid, lo, local_time, post_time;
	int i, ret, retries = 100;
	u32 gm_local_time[3];

	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_usb4(sw))
		return 0;

	/* Need to be able to read the grand master time */
	if (!root_switch->tmu.cap)
		return 0;

	ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
			 root_switch->tmu.cap + TMU_RTR_CS_1,
			 ARRAY_SIZE(gm_local_time));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
		tb_sw_dbg(root_switch, "local_time[%d]=0x%08x\n", i,
			  gm_local_time[i]);

	/* Convert to nanoseconds (drop fractional part) */
	hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
	mid = gm_local_time[1];
	lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
		TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
	local_time = hi << 48 | mid << 16 | lo;
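	/* local_time now holds hi in bits 63:48, mid in 47:16 and lo in 15:0 */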

	/* Tell the switch that time sync is disrupted for a while */
	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
	post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
	post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;

	/*
	 * Write the Grandmaster time to the Post Local Time registers
	 * of the new switch.
	 */
	ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
			  post_local_time_offset, 2);
	if (ret)
		goto out;

	/*
	 * Have the new switch update its local time by:
	 * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
	 *    the Post Time High register.
	 * 2) writing 0 to the Post Time High register and then waiting
	 *    until the Post Time registers read back as 0, which means
	 *    the time has converged properly.
	 */
	post_time = 0xffffffff00000001ULL;

	ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
	if (ret)
		goto out;

	ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
			  post_time_high_offset, 1);
	if (ret)
		goto out;

	do {
		usleep_range(5, 10);
		ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
				 post_time_offset, 2);
		if (ret)
			goto out;
	} while (--retries && post_time);

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out;
	}

	tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);

out:
	tb_switch_tmu_set_time_disruption(sw, false);
	return ret;
}

/**
 * tb_switch_tmu_disable() - Disable TMU of a switch
 * @sw: Switch whose TMU to disable
 *
 * Turns off the TMU of @sw if it is enabled. If not enabled, does nothing.
 */
int tb_switch_tmu_disable(struct tb_switch *sw)
{
	if (!tb_switch_is_usb4(sw))
		return 0;

	/* Already disabled? */
	if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
		return 0;

	if (tb_route(sw)) {
		bool unidirectional = tb_switch_tmu_hifi_is_enabled(sw, true);
		struct tb_switch *parent = tb_switch_parent(sw);
		struct tb_port *down, *up;
		int ret;

		down = tb_port_at(tb_route(sw), parent);
		up = tb_upstream_port(sw);
		/*
		 * In case of uni-directional time sync, TMU handshake is
		 * initiated by upstream router. In case of bi-directional
		 * time sync, TMU handshake is initiated by downstream router.
		 * Therefore, we change the rate to off in the respective
		 * router.
		 */
		if (unidirectional)
			tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
		else
			tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);

		tb_port_tmu_time_sync_disable(up);
		ret = tb_port_tmu_time_sync_disable(down);
		if (ret)
			return ret;

		if (unidirectional) {
			/* The switch may be unplugged so ignore any errors */
			tb_port_tmu_unidirectional_disable(up);
			ret = tb_port_tmu_unidirectional_disable(down);
			if (ret)
				return ret;
		}
	} else {
		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
	}

	sw->tmu.unidirectional = false;
	sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;

	tb_sw_dbg(sw, "TMU: disabled\n");
	return 0;
}

static void __tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *down, *up;

	down = tb_port_at(tb_route(sw), parent);
	up = tb_upstream_port(sw);
	/*
	 * If any of the steps of setting up bi-directional or
	 * uni-directional TMU mode failed, revert the TMU configuration
	 * back to off mode. Ignore additional failures in the functions
	 * below since the caller already reports a failure.
	 */
	tb_port_tmu_time_sync_disable(down);
	tb_port_tmu_time_sync_disable(up);
	if (unidirectional)
		tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
	else
		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);

	tb_port_tmu_unidirectional_disable(down);
	tb_port_tmu_unidirectional_disable(up);
}

/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_RATE_OFF.
 */
static int __tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);

	ret = tb_port_tmu_unidirectional_disable(up);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_disable(down);
	if (ret)
		goto out;

	ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	__tb_switch_tmu_off(sw, false);
	return ret;
}

/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_RATE_OFF.
 */
static int __tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);
	ret = tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_HIFI);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_unidirectional_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	__tb_switch_tmu_off(sw, true);
	return ret;
}

static int tb_switch_tmu_hifi_enable(struct tb_switch *sw)
{
	bool unidirectional = sw->tmu.unidirectional_request;
	int ret;

	if (unidirectional && !sw->tmu.has_ucap)
		return -EOPNOTSUPP;

	if (!tb_switch_is_usb4(sw))
		return 0;

	if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
		return 0;

	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	if (tb_route(sw)) {
		/* The only supported mode changes are OFF to HiFi-Uni/HiFi-BiDir */
		if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) {
			if (unidirectional)
				ret = __tb_switch_tmu_enable_unidirectional(sw);
			else
				ret = __tb_switch_tmu_enable_bidirectional(sw);
			if (ret)
				return ret;
		}
		sw->tmu.unidirectional = unidirectional;
	} else {
		/*
		 * Host router port configurations are written as part of
		 * the configuration of the downstream port of the parent of
		 * the child node - see above. Here only the host router's
		 * rate configuration is written.
		 */
		ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
		if (ret)
			return ret;
	}

	sw->tmu.rate = TB_SWITCH_TMU_RATE_HIFI;

	tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
	return tb_switch_tmu_set_time_disruption(sw, false);
}

/**
 * tb_switch_tmu_enable() - Enable TMU on a router
 * @sw: Router whose TMU to enable
 *
 * Enables the TMU of a router in uni-directional or bi-directional HiFi
 * mode. tb_switch_tmu_configure() must be called before this function to
 * select the HiFi rate and the directionality (uni-directional or
 * bi-directional). In both modes all tunneling should work.
 * Uni-directional mode is required for CLx (Link Low-Power) to work.
 */
int tb_switch_tmu_enable(struct tb_switch *sw)
{
	if (sw->tmu.rate_request == TB_SWITCH_TMU_RATE_NORMAL)
		return -EOPNOTSUPP;

	return tb_switch_tmu_hifi_enable(sw);
}

/**
 * tb_switch_tmu_configure() - Configure the TMU rate and directionality
 * @sw: Router whose mode to change
 * @rate: Rate to configure Off/LowRes/HiFi
 * @unidirectional: If uni-directional (bi-directional otherwise)
 *
 * Selects the rate of the TMU and directionality (uni-directional or
 * bi-directional). Must be called before tb_switch_tmu_enable().
 */
void tb_switch_tmu_configure(struct tb_switch *sw,
			     enum tb_switch_tmu_rate rate, bool unidirectional)
{
	sw->tmu.unidirectional_request = unidirectional;
	sw->tmu.rate_request = rate;
}
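
/*
 * Illustrative call sequence (a sketch, not copied from an in-tree
 * caller): the rate and directionality are requested first and then
 * committed to the hardware with tb_switch_tmu_enable().
 *
 *	tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
 *	ret = tb_switch_tmu_enable(sw);
 *	if (ret)
 *		tb_sw_warn(sw, "failed to enable TMU\n");
 */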