xref: /openbmc/linux/drivers/thunderbolt/tmu.c (revision 3084b48f)
1cf29b9afSRajmohan Mani // SPDX-License-Identifier: GPL-2.0
2cf29b9afSRajmohan Mani /*
3cf29b9afSRajmohan Mani  * Thunderbolt Time Management Unit (TMU) support
4cf29b9afSRajmohan Mani  *
5cf29b9afSRajmohan Mani  * Copyright (C) 2019, Intel Corporation
6cf29b9afSRajmohan Mani  * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
7cf29b9afSRajmohan Mani  *	    Rajmohan Mani <rajmohan.mani@intel.com>
8cf29b9afSRajmohan Mani  */
9cf29b9afSRajmohan Mani 
10cf29b9afSRajmohan Mani #include <linux/delay.h>
11cf29b9afSRajmohan Mani 
12cf29b9afSRajmohan Mani #include "tb.h"
13cf29b9afSRajmohan Mani 
14b017a46dSGil Fine static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
15b017a46dSGil Fine 					 enum tb_switch_tmu_rate rate)
16b017a46dSGil Fine {
17b017a46dSGil Fine 	u32 freq_meas_wind[2] = { 30, 800 };
18b017a46dSGil Fine 	u32 avg_const[2] = { 4, 8 };
19b017a46dSGil Fine 	u32 freq, avg, val;
20b017a46dSGil Fine 	int ret;
21b017a46dSGil Fine 
22b017a46dSGil Fine 	if (rate == TB_SWITCH_TMU_RATE_NORMAL) {
23b017a46dSGil Fine 		freq = freq_meas_wind[0];
24b017a46dSGil Fine 		avg = avg_const[0];
25b017a46dSGil Fine 	} else if (rate == TB_SWITCH_TMU_RATE_HIFI) {
26b017a46dSGil Fine 		freq = freq_meas_wind[1];
27b017a46dSGil Fine 		avg = avg_const[1];
28b017a46dSGil Fine 	} else {
29b017a46dSGil Fine 		return 0;
30b017a46dSGil Fine 	}
31b017a46dSGil Fine 
32b017a46dSGil Fine 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
33b017a46dSGil Fine 			 sw->tmu.cap + TMU_RTR_CS_0, 1);
34b017a46dSGil Fine 	if (ret)
35b017a46dSGil Fine 		return ret;
36b017a46dSGil Fine 
37b017a46dSGil Fine 	val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK;
38b017a46dSGil Fine 	val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq);
39b017a46dSGil Fine 
40b017a46dSGil Fine 	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
41b017a46dSGil Fine 			  sw->tmu.cap + TMU_RTR_CS_0, 1);
42b017a46dSGil Fine 	if (ret)
43b017a46dSGil Fine 		return ret;
44b017a46dSGil Fine 
45b017a46dSGil Fine 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
46b017a46dSGil Fine 			 sw->tmu.cap + TMU_RTR_CS_15, 1);
47b017a46dSGil Fine 	if (ret)
48b017a46dSGil Fine 		return ret;
49b017a46dSGil Fine 
50b017a46dSGil Fine 	val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK &
51b017a46dSGil Fine 		~TMU_RTR_CS_15_DELAY_AVG_MASK &
52b017a46dSGil Fine 		~TMU_RTR_CS_15_OFFSET_AVG_MASK &
53b017a46dSGil Fine 		~TMU_RTR_CS_15_ERROR_AVG_MASK;
54b017a46dSGil Fine 	val |=  FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) |
55b017a46dSGil Fine 		FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) |
56b017a46dSGil Fine 		FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
57b017a46dSGil Fine 		FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);
58b017a46dSGil Fine 
59b017a46dSGil Fine 	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
60b017a46dSGil Fine 			   sw->tmu.cap + TMU_RTR_CS_15, 1);
61b017a46dSGil Fine }
62b017a46dSGil Fine 
63cf29b9afSRajmohan Mani static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw)
64cf29b9afSRajmohan Mani {
65cf29b9afSRajmohan Mani 	bool root_switch = !tb_route(sw);
66cf29b9afSRajmohan Mani 
67cf29b9afSRajmohan Mani 	switch (sw->tmu.rate) {
68cf29b9afSRajmohan Mani 	case TB_SWITCH_TMU_RATE_OFF:
69cf29b9afSRajmohan Mani 		return "off";
70cf29b9afSRajmohan Mani 
71cf29b9afSRajmohan Mani 	case TB_SWITCH_TMU_RATE_HIFI:
72cf29b9afSRajmohan Mani 		/* Root switch does not have upstream directionality */
73cf29b9afSRajmohan Mani 		if (root_switch)
74cf29b9afSRajmohan Mani 			return "HiFi";
75cf29b9afSRajmohan Mani 		if (sw->tmu.unidirectional)
76cf29b9afSRajmohan Mani 			return "uni-directional, HiFi";
77cf29b9afSRajmohan Mani 		return "bi-directional, HiFi";
78cf29b9afSRajmohan Mani 
79cf29b9afSRajmohan Mani 	case TB_SWITCH_TMU_RATE_NORMAL:
80cf29b9afSRajmohan Mani 		if (root_switch)
81cf29b9afSRajmohan Mani 			return "normal";
82cf29b9afSRajmohan Mani 		return "uni-directional, normal";
83cf29b9afSRajmohan Mani 
84cf29b9afSRajmohan Mani 	default:
85cf29b9afSRajmohan Mani 		return "unknown";
86cf29b9afSRajmohan Mani 	}
87cf29b9afSRajmohan Mani }
88cf29b9afSRajmohan Mani 
89cf29b9afSRajmohan Mani static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw)
90cf29b9afSRajmohan Mani {
91cf29b9afSRajmohan Mani 	int ret;
92cf29b9afSRajmohan Mani 	u32 val;
93cf29b9afSRajmohan Mani 
94cf29b9afSRajmohan Mani 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
95cf29b9afSRajmohan Mani 			 sw->tmu.cap + TMU_RTR_CS_0, 1);
96cf29b9afSRajmohan Mani 	if (ret)
97cf29b9afSRajmohan Mani 		return false;
98cf29b9afSRajmohan Mani 
99cf29b9afSRajmohan Mani 	return !!(val & TMU_RTR_CS_0_UCAP);
100cf29b9afSRajmohan Mani }
101cf29b9afSRajmohan Mani 
102cf29b9afSRajmohan Mani static int tb_switch_tmu_rate_read(struct tb_switch *sw)
103cf29b9afSRajmohan Mani {
104cf29b9afSRajmohan Mani 	int ret;
105cf29b9afSRajmohan Mani 	u32 val;
106cf29b9afSRajmohan Mani 
107cf29b9afSRajmohan Mani 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
108cf29b9afSRajmohan Mani 			 sw->tmu.cap + TMU_RTR_CS_3, 1);
109cf29b9afSRajmohan Mani 	if (ret)
110cf29b9afSRajmohan Mani 		return ret;
111cf29b9afSRajmohan Mani 
112cf29b9afSRajmohan Mani 	val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
113cf29b9afSRajmohan Mani 	return val;
114cf29b9afSRajmohan Mani }
115cf29b9afSRajmohan Mani 
116cf29b9afSRajmohan Mani static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
117cf29b9afSRajmohan Mani {
118cf29b9afSRajmohan Mani 	int ret;
119cf29b9afSRajmohan Mani 	u32 val;
120cf29b9afSRajmohan Mani 
121cf29b9afSRajmohan Mani 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
122cf29b9afSRajmohan Mani 			 sw->tmu.cap + TMU_RTR_CS_3, 1);
123cf29b9afSRajmohan Mani 	if (ret)
124cf29b9afSRajmohan Mani 		return ret;
125cf29b9afSRajmohan Mani 
126cf29b9afSRajmohan Mani 	val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
127cf29b9afSRajmohan Mani 	val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
128cf29b9afSRajmohan Mani 
129cf29b9afSRajmohan Mani 	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
130cf29b9afSRajmohan Mani 			   sw->tmu.cap + TMU_RTR_CS_3, 1);
131cf29b9afSRajmohan Mani }
132cf29b9afSRajmohan Mani 
133cf29b9afSRajmohan Mani static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
134cf29b9afSRajmohan Mani 			     u32 value)
135cf29b9afSRajmohan Mani {
136cf29b9afSRajmohan Mani 	u32 data;
137cf29b9afSRajmohan Mani 	int ret;
138cf29b9afSRajmohan Mani 
139cf29b9afSRajmohan Mani 	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
140cf29b9afSRajmohan Mani 	if (ret)
141cf29b9afSRajmohan Mani 		return ret;
142cf29b9afSRajmohan Mani 
143cf29b9afSRajmohan Mani 	data &= ~mask;
144cf29b9afSRajmohan Mani 	data |= value;
145cf29b9afSRajmohan Mani 
146cf29b9afSRajmohan Mani 	return tb_port_write(port, &data, TB_CFG_PORT,
147cf29b9afSRajmohan Mani 			     port->cap_tmu + offset, 1);
148cf29b9afSRajmohan Mani }
149cf29b9afSRajmohan Mani 
150cf29b9afSRajmohan Mani static int tb_port_tmu_set_unidirectional(struct tb_port *port,
151cf29b9afSRajmohan Mani 					  bool unidirectional)
152cf29b9afSRajmohan Mani {
153cf29b9afSRajmohan Mani 	u32 val;
154cf29b9afSRajmohan Mani 
155cf29b9afSRajmohan Mani 	if (!port->sw->tmu.has_ucap)
156cf29b9afSRajmohan Mani 		return 0;
157cf29b9afSRajmohan Mani 
158cf29b9afSRajmohan Mani 	val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
159cf29b9afSRajmohan Mani 	return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
160cf29b9afSRajmohan Mani }
161cf29b9afSRajmohan Mani 
/* Clear the adapter UDM bit, i.e. switch to bi-directional time sync */
static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, false);
}
166cf29b9afSRajmohan Mani 
/* Set the adapter UDM bit, i.e. switch to uni-directional time sync */
static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, true);
}
171a28ec0e1SGil Fine 
172cf29b9afSRajmohan Mani static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
173cf29b9afSRajmohan Mani {
174cf29b9afSRajmohan Mani 	int ret;
175cf29b9afSRajmohan Mani 	u32 val;
176cf29b9afSRajmohan Mani 
177cf29b9afSRajmohan Mani 	ret = tb_port_read(port, &val, TB_CFG_PORT,
178cf29b9afSRajmohan Mani 			   port->cap_tmu + TMU_ADP_CS_3, 1);
179cf29b9afSRajmohan Mani 	if (ret)
180cf29b9afSRajmohan Mani 		return false;
181cf29b9afSRajmohan Mani 
182cf29b9afSRajmohan Mani 	return val & TMU_ADP_CS_3_UDM;
183cf29b9afSRajmohan Mani }
184cf29b9afSRajmohan Mani 
/*
 * Set or clear the DTS bit of the adapter. NOTE(review): DTS appears to
 * be a "disable time sync" bit — setting it (@time_sync == true) stops
 * time sync on this adapter, which is why the disable/enable wrappers
 * below pass seemingly inverted values. Confirm against the USB4 spec
 * before "fixing" the polarity.
 */
static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
{
	u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;

	return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
}
191a28ec0e1SGil Fine 
/* Disable time sync on @port by setting the DTS (disable) bit */
static int tb_port_tmu_time_sync_disable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, true);
}
196a28ec0e1SGil Fine 
/* Enable time sync on @port by clearing the DTS (disable) bit */
static int tb_port_tmu_time_sync_enable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, false);
}
201a28ec0e1SGil Fine 
/*
 * Set or clear the time disruption (TD) flag of the router. While set
 * it tells the hardware that time sync is disrupted for a while (used
 * around the time posting sequence below).
 */
static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
{
	u32 val, offset, bit;
	int ret;

	if (tb_switch_is_usb4(sw)) {
		/* USB4 routers carry the TD bit in the TMU router config */
		offset = sw->tmu.cap + TMU_RTR_CS_0;
		bit = TMU_RTR_CS_0_TD;
	} else {
		/* Pre-USB4 routers use a vendor specific capability */
		offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
		bit = TB_TIME_VSEC_3_CS_26_TD;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	if (set)
		val |= bit;
	else
		val &= ~bit;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
}
226cf29b9afSRajmohan Mani 
/**
 * tb_switch_tmu_init() - Initialize switch TMU structures
 * @sw: Switch to initialize
 *
 * This function must be called before other TMU related functions to
 * make sure the internal structures are filled in correctly. Does not
 * change any hardware configuration.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_switch_tmu_init(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	/* Firmware (ICM) managed switches are left alone */
	if (tb_switch_is_icm(sw))
		return 0;

	/* Cache the router level TMU capability offset */
	ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
	if (ret > 0)
		sw->tmu.cap = ret;

	/* Likewise cache the per-adapter TMU capability offsets */
	tb_switch_for_each_port(sw, port) {
		int cap;

		cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
		if (cap > 0)
			port->cap_tmu = cap;
	}

	/* Snapshot the current hardware rate into the structure */
	ret = tb_switch_tmu_rate_read(sw);
	if (ret < 0)
		return ret;

	sw->tmu.rate = ret;

	sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw);
	if (sw->tmu.has_ucap) {
		tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");

		/* Only non-root routers have an upstream port to query */
		if (tb_route(sw)) {
			struct tb_port *up = tb_upstream_port(sw);

			sw->tmu.unidirectional =
				tb_port_tmu_is_unidirectional(up);
		}
	} else {
		sw->tmu.unidirectional = false;
	}

	tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw));
	return 0;
}
278cf29b9afSRajmohan Mani 
/**
 * tb_switch_tmu_post_time() - Update switch local time
 * @sw: Switch whose time to update
 *
 * Updates switch local time using time posting procedure: the
 * grandmaster (root switch) time is written to @sw and the hardware is
 * asked to converge to it.
 *
 * Return: %0 on success (or when there is nothing to do), negative
 * errno otherwise.
 */
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
	unsigned int post_time_high_offset, post_time_high = 0;
	unsigned int post_local_time_offset, post_time_offset;
	struct tb_switch *root_switch = sw->tb->root_switch;
	u64 hi, mid, lo, local_time, post_time;
	int i, ret, retries = 100;
	u32 gm_local_time[3];

	/* The root switch is the grandmaster itself; nothing to post */
	if (!tb_route(sw))
		return 0;

	/* Time posting is only done for USB4 routers */
	if (!tb_switch_is_usb4(sw))
		return 0;

	/* Need to be able to read the grand master time */
	if (!root_switch->tmu.cap)
		return 0;

	/* Grandmaster local time is three consecutive dwords */
	ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
			 root_switch->tmu.cap + TMU_RTR_CS_1,
			 ARRAY_SIZE(gm_local_time));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
		tb_sw_dbg(root_switch, "local_time[%d]=0x%08x\n", i,
			  gm_local_time[i]);

	/* Convert to nanoseconds (drop fractional part) */
	hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
	mid = gm_local_time[1];
	lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
		TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
	local_time = hi << 48 | mid << 16 | lo;

	/* Tell the switch that time sync is disrupted for a while */
	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
	post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
	post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;

	/*
	 * Write the Grandmaster time to the Post Local Time registers
	 * of the new switch.
	 */
	ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
			  post_local_time_offset, 2);
	if (ret)
		goto out;

	/*
	 * Have the new switch update its local time by:
	 * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
	 * Post Time High register.
	 * 2) write 0 to Post Time High register and then wait for
	 * the completion of the post_time register becomes 0.
	 * This means the time has been converged properly.
	 */
	post_time = 0xffffffff00000001ULL;

	ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
	if (ret)
		goto out;

	ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
			  post_time_high_offset, 1);
	if (ret)
		goto out;

	/* Poll until the hardware clears post_time (100 x 5-10 us, ~1 ms) */
	do {
		usleep_range(5, 10);
		ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
				 post_time_offset, 2);
		if (ret)
			goto out;
	} while (--retries && post_time);

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out;
	}

	tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);

out:
	/* Best effort clear; the primary error (if any) is already in ret */
	tb_switch_tmu_set_time_disruption(sw, false);
	return ret;
}
377cf29b9afSRajmohan Mani 
/**
 * tb_switch_tmu_disable() - Disable TMU of a switch
 * @sw: Switch whose TMU to disable
 *
 * Turns off TMU of @sw if it is enabled. If not enabled does nothing.
 *
 * Return: %0 on success, negative errno otherwise.
 */
int tb_switch_tmu_disable(struct tb_switch *sw)
{
	/*
	 * No need to disable TMU on devices that don't support CLx since
	 * on these devices e.g. Alpine Ridge and earlier, the TMU mode
	 * HiFi bi-directional is enabled by default and we don't change it.
	 */
	if (!tb_switch_is_clx_supported(sw))
		return 0;

	/* Already disabled? */
	if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
		return 0;


	if (tb_route(sw)) {
		bool unidirectional = sw->tmu.unidirectional;
		struct tb_switch *parent = tb_switch_parent(sw);
		struct tb_port *down, *up;
		int ret;

		down = tb_port_at(tb_route(sw), parent);
		up = tb_upstream_port(sw);
		/*
		 * In case of uni-directional time sync, TMU handshake is
		 * initiated by upstream router. In case of bi-directional
		 * time sync, TMU handshake is initiated by downstream router.
		 * We change downstream router's rate to off for both uni/bidir
		 * cases although it is needed only for the bi-directional mode.
		 * We avoid changing upstream router's mode since it might
		 * have another downstream router plugged, that is set to
		 * uni-directional mode and we don't want to change its TMU
		 * mode.
		 */
		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);

		/* Stop time sync on both sides of the link */
		tb_port_tmu_time_sync_disable(up);
		ret = tb_port_tmu_time_sync_disable(down);
		if (ret)
			return ret;

		if (unidirectional) {
			/* The switch may be unplugged so ignore any errors */
			tb_port_tmu_unidirectional_disable(up);
			ret = tb_port_tmu_unidirectional_disable(down);
			if (ret)
				return ret;
		}
	} else {
		/* Host router: simply turn the rate off */
		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
	}

	sw->tmu.unidirectional = false;
	sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;

	tb_sw_dbg(sw, "TMU: disabled\n");
	return 0;
}
442cf29b9afSRajmohan Mani 
/*
 * Roll the TMU configuration of @sw back to the off state after a
 * failed enable attempt. Errors from the individual steps are ignored
 * on purpose since the caller already reports a failure.
 */
static void __tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *down, *up;

	down = tb_port_at(tb_route(sw), parent);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when setting
	 * bi-directional or uni-directional TMU mode, get back to the TMU
	 * configurations in off mode. In case of additional failures in
	 * the functions below, ignore them since the caller shall already
	 * report a failure.
	 */
	tb_port_tmu_time_sync_disable(down);
	tb_port_tmu_time_sync_disable(up);
	/* In uni-directional mode the rate is driven by the parent router */
	if (unidirectional)
		tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
	else
		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);

	/* Restore mode parameters for the previously recorded rate */
	tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
	tb_port_tmu_unidirectional_disable(down);
	tb_port_tmu_unidirectional_disable(up);
}
468a28ec0e1SGil Fine 
/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_RATE_OFF. Enables bi-directional HiFi time sync on the
 * link between @sw and its parent. On failure the configuration is
 * rolled back to the off state.
 */
static int __tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);

	/* Nothing configured yet, so a plain return is enough here */
	ret = tb_port_tmu_unidirectional_disable(up);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_disable(down);
	if (ret)
		goto out;

	ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	/* Partial configuration happened; undo it */
	__tb_switch_tmu_off(sw, false);
	return ret;
}
508a28ec0e1SGil Fine 
50943f977bcSGil Fine static int tb_switch_tmu_objection_mask(struct tb_switch *sw)
51043f977bcSGil Fine {
51143f977bcSGil Fine 	u32 val;
51243f977bcSGil Fine 	int ret;
51343f977bcSGil Fine 
51443f977bcSGil Fine 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
51543f977bcSGil Fine 			 sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
51643f977bcSGil Fine 	if (ret)
51743f977bcSGil Fine 		return ret;
51843f977bcSGil Fine 
51943f977bcSGil Fine 	val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;
52043f977bcSGil Fine 
52143f977bcSGil Fine 	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
52243f977bcSGil Fine 			   sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
52343f977bcSGil Fine }
52443f977bcSGil Fine 
52543f977bcSGil Fine static int tb_switch_tmu_unidirectional_enable(struct tb_switch *sw)
52643f977bcSGil Fine {
52743f977bcSGil Fine 	struct tb_port *up = tb_upstream_port(sw);
52843f977bcSGil Fine 
52943f977bcSGil Fine 	return tb_port_tmu_write(up, TMU_ADP_CS_6,
53043f977bcSGil Fine 				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
53143f977bcSGil Fine 				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK);
53243f977bcSGil Fine }
53343f977bcSGil Fine 
/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_RATE_OFF. Enables uni-directional time sync at the
 * requested rate; in this mode the rate is programmed into the parent
 * router. On a partial failure the configuration is rolled back.
 */
static int __tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);
	/* Uni-directional: the parent router drives the packet rate */
	ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request);
	if (ret)
		return ret;

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_unidirectional_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	/* Partial configuration happened; undo it */
	__tb_switch_tmu_off(sw, true);
	return ret;
}
576a28ec0e1SGil Fine 
/*
 * Restore the TMU configuration recorded in sw->tmu (rate and
 * directionality) after a failed mode change attempt.
 */
static void __tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *down, *up;

	down = tb_port_at(tb_route(sw), parent);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when change mode,
	 * get back to the TMU configurations in previous mode.
	 * In case of additional failures in the functions below,
	 * ignore them since the caller shall already report a failure.
	 */
	tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional);
	/* The rate owner depends on the *requested* directionality */
	if (sw->tmu.unidirectional_request)
		tb_switch_tmu_rate_write(parent, sw->tmu.rate);
	else
		tb_switch_tmu_rate_write(sw, sw->tmu.rate);

	tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
	tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional);
}
599b017a46dSGil Fine 
/*
 * Change the TMU mode of an already-enabled router to the requested
 * rate/directionality (sw->tmu.*_request). Failures after the first
 * step roll the configuration back via __tb_switch_tmu_change_mode_prev().
 */
static int __tb_switch_tmu_change_mode(struct tb_switch *sw)
{
	struct tb_switch *parent = tb_switch_parent(sw);
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_port_at(tb_route(sw), parent);
	ret = tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional_request);
	if (ret)
		goto out;

	/* In uni-directional mode the parent router drives the rate */
	if (sw->tmu.unidirectional_request)
		ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request);
	else
		ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
	/*
	 * NOTE(review): the two plain returns below skip the rollback
	 * even though the down adapter was already reconfigured above —
	 * possibly these should be "goto out"; confirm intent.
	 */
	if (ret)
		return ret;

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
	if (ret)
		return ret;

	ret = tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	return 0;

out:
	__tb_switch_tmu_change_mode_prev(sw);
	return ret;
}
641b017a46dSGil Fine 
642b017a46dSGil Fine /**
643b017a46dSGil Fine  * tb_switch_tmu_enable() - Enable TMU on a router
644b017a46dSGil Fine  * @sw: Router whose TMU to enable
645b017a46dSGil Fine  *
646b017a46dSGil Fine  * Enables TMU of a router to be in uni-directional Normal/HiFi
647b017a46dSGil Fine  * or bi-directional HiFi mode. Calling tb_switch_tmu_configure() is required
648b017a46dSGil Fine  * before calling this function, to select the mode Normal/HiFi and
649b017a46dSGil Fine  * directionality (uni-directional/bi-directional).
650b017a46dSGil Fine  * In HiFi mode all tunneling should work. In Normal mode, DP tunneling can't
651b017a46dSGil Fine  * work. Uni-directional mode is required for CLx (Link Low-Power) to work.
652b017a46dSGil Fine  */
653b017a46dSGil Fine int tb_switch_tmu_enable(struct tb_switch *sw)
654a28ec0e1SGil Fine {
655a28ec0e1SGil Fine 	bool unidirectional = sw->tmu.unidirectional_request;
656a28ec0e1SGil Fine 	int ret;
657a28ec0e1SGil Fine 
658a28ec0e1SGil Fine 	if (unidirectional && !sw->tmu.has_ucap)
659a28ec0e1SGil Fine 		return -EOPNOTSUPP;
660a28ec0e1SGil Fine 
66143f977bcSGil Fine 	/*
66243f977bcSGil Fine 	 * No need to enable TMU on devices that don't support CLx since on
66343f977bcSGil Fine 	 * these devices e.g. Alpine Ridge and earlier, the TMU mode HiFi
66443f977bcSGil Fine 	 * bi-directional is enabled by default.
66543f977bcSGil Fine 	 */
66643f977bcSGil Fine 	if (!tb_switch_is_clx_supported(sw))
667cf29b9afSRajmohan Mani 		return 0;
668cf29b9afSRajmohan Mani 
669b017a46dSGil Fine 	if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
670cf29b9afSRajmohan Mani 		return 0;
671cf29b9afSRajmohan Mani 
67243f977bcSGil Fine 	if (tb_switch_is_titan_ridge(sw) && unidirectional) {
673b017a46dSGil Fine 		/*
674b017a46dSGil Fine 		 * Titan Ridge supports CL0s and CL1 only. CL0s and CL1 are
675b017a46dSGil Fine 		 * enabled and supported together.
676b017a46dSGil Fine 		 */
677b017a46dSGil Fine 		if (!tb_switch_is_clx_enabled(sw, TB_CL1))
67843f977bcSGil Fine 			return -EOPNOTSUPP;
67943f977bcSGil Fine 
68043f977bcSGil Fine 		ret = tb_switch_tmu_objection_mask(sw);
68143f977bcSGil Fine 		if (ret)
68243f977bcSGil Fine 			return ret;
68343f977bcSGil Fine 
68443f977bcSGil Fine 		ret = tb_switch_tmu_unidirectional_enable(sw);
68543f977bcSGil Fine 		if (ret)
68643f977bcSGil Fine 			return ret;
68743f977bcSGil Fine 	}
68843f977bcSGil Fine 
689cf29b9afSRajmohan Mani 	ret = tb_switch_tmu_set_time_disruption(sw, true);
690cf29b9afSRajmohan Mani 	if (ret)
691cf29b9afSRajmohan Mani 		return ret;
692cf29b9afSRajmohan Mani 
693a28ec0e1SGil Fine 	if (tb_route(sw)) {
694b017a46dSGil Fine 		/*
695b017a46dSGil Fine 		 * The used mode changes are from OFF to
696b017a46dSGil Fine 		 * HiFi-Uni/HiFi-BiDir/Normal-Uni or from Normal-Uni to
697b017a46dSGil Fine 		 * HiFi-Uni.
698b017a46dSGil Fine 		 */
699a28ec0e1SGil Fine 		if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) {
700a28ec0e1SGil Fine 			if (unidirectional)
701a28ec0e1SGil Fine 				ret = __tb_switch_tmu_enable_unidirectional(sw);
702a28ec0e1SGil Fine 			else
703a28ec0e1SGil Fine 				ret = __tb_switch_tmu_enable_bidirectional(sw);
704cf29b9afSRajmohan Mani 			if (ret)
705cf29b9afSRajmohan Mani 				return ret;
706b017a46dSGil Fine 		} else if (sw->tmu.rate == TB_SWITCH_TMU_RATE_NORMAL) {
707b017a46dSGil Fine 			ret = __tb_switch_tmu_change_mode(sw);
708b017a46dSGil Fine 			if (ret)
709b017a46dSGil Fine 				return ret;
710a28ec0e1SGil Fine 		}
711a28ec0e1SGil Fine 		sw->tmu.unidirectional = unidirectional;
712cf29b9afSRajmohan Mani 	} else {
713a28ec0e1SGil Fine 		/*
714a28ec0e1SGil Fine 		 * Host router port configurations are written as
715a28ec0e1SGil Fine 		 * part of configurations for downstream port of the parent
716a28ec0e1SGil Fine 		 * of the child node - see above.
717a28ec0e1SGil Fine 		 * Here only the host router' rate configuration is written.
718a28ec0e1SGil Fine 		 */
719b017a46dSGil Fine 		ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
720cf29b9afSRajmohan Mani 		if (ret)
721cf29b9afSRajmohan Mani 			return ret;
722cf29b9afSRajmohan Mani 	}
723cf29b9afSRajmohan Mani 
724b017a46dSGil Fine 	sw->tmu.rate = sw->tmu.rate_request;
725cf29b9afSRajmohan Mani 
726a28ec0e1SGil Fine 	tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
727cf29b9afSRajmohan Mani 	return tb_switch_tmu_set_time_disruption(sw, false);
728cf29b9afSRajmohan Mani }
729a28ec0e1SGil Fine 
730a28ec0e1SGil Fine /**
731a28ec0e1SGil Fine  * tb_switch_tmu_configure() - Configure the TMU rate and directionality
732a28ec0e1SGil Fine  * @sw: Router whose mode to change
733b4e08d5dSGil Fine  * @rate: Rate to configure Off/Normal/HiFi
734a28ec0e1SGil Fine  * @unidirectional: If uni-directional (bi-directional otherwise)
735a28ec0e1SGil Fine  *
736a28ec0e1SGil Fine  * Selects the rate of the TMU and directionality (uni-directional or
737a28ec0e1SGil Fine  * bi-directional). Must be called before tb_switch_tmu_enable().
738a28ec0e1SGil Fine  */
739a28ec0e1SGil Fine void tb_switch_tmu_configure(struct tb_switch *sw,
740a28ec0e1SGil Fine 			     enum tb_switch_tmu_rate rate, bool unidirectional)
741a28ec0e1SGil Fine {
742a28ec0e1SGil Fine 	sw->tmu.unidirectional_request = unidirectional;
743a28ec0e1SGil Fine 	sw->tmu.rate_request = rate;
744a28ec0e1SGil Fine }
745*3084b48fSGil Fine 
746*3084b48fSGil Fine static int tb_switch_tmu_config_enable(struct device *dev, void *rate)
747*3084b48fSGil Fine {
748*3084b48fSGil Fine 	if (tb_is_switch(dev)) {
749*3084b48fSGil Fine 		struct tb_switch *sw = tb_to_switch(dev);
750*3084b48fSGil Fine 
751*3084b48fSGil Fine 		tb_switch_tmu_configure(sw, *(enum tb_switch_tmu_rate *)rate,
752*3084b48fSGil Fine 					tb_switch_is_clx_enabled(sw, TB_CL1));
753*3084b48fSGil Fine 		if (tb_switch_tmu_enable(sw))
754*3084b48fSGil Fine 			tb_sw_dbg(sw, "fail switching TMU mode for 1st depth router\n");
755*3084b48fSGil Fine 	}
756*3084b48fSGil Fine 
757*3084b48fSGil Fine 	return 0;
758*3084b48fSGil Fine }
759*3084b48fSGil Fine 
760*3084b48fSGil Fine /**
761*3084b48fSGil Fine  * tb_switch_enable_tmu_1st_child - Configure and enable TMU for 1st chidren
762*3084b48fSGil Fine  * @sw: The router to configure and enable it's children TMU
763*3084b48fSGil Fine  * @rate: Rate of the TMU to configure the router's chidren to
764*3084b48fSGil Fine  *
765*3084b48fSGil Fine  * Configures and enables the TMU mode of 1st depth children of the specified
766*3084b48fSGil Fine  * router to the specified rate.
767*3084b48fSGil Fine  */
768*3084b48fSGil Fine void tb_switch_enable_tmu_1st_child(struct tb_switch *sw,
769*3084b48fSGil Fine 				    enum tb_switch_tmu_rate rate)
770*3084b48fSGil Fine {
771*3084b48fSGil Fine 	device_for_each_child(&sw->dev, &rate,
772*3084b48fSGil Fine 			      tb_switch_tmu_config_enable);
773*3084b48fSGil Fine }
774