xref: /openbmc/linux/drivers/thunderbolt/tmu.c (revision c437dcb1)
1cf29b9afSRajmohan Mani // SPDX-License-Identifier: GPL-2.0
2cf29b9afSRajmohan Mani /*
3cf29b9afSRajmohan Mani  * Thunderbolt Time Management Unit (TMU) support
4cf29b9afSRajmohan Mani  *
5cf29b9afSRajmohan Mani  * Copyright (C) 2019, Intel Corporation
6cf29b9afSRajmohan Mani  * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
7cf29b9afSRajmohan Mani  *	    Rajmohan Mani <rajmohan.mani@intel.com>
8cf29b9afSRajmohan Mani  */
9cf29b9afSRajmohan Mani 
10cf29b9afSRajmohan Mani #include <linux/delay.h>
11cf29b9afSRajmohan Mani 
12cf29b9afSRajmohan Mani #include "tb.h"
13cf29b9afSRajmohan Mani 
14b017a46dSGil Fine static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
15b017a46dSGil Fine 					 enum tb_switch_tmu_rate rate)
16b017a46dSGil Fine {
17b017a46dSGil Fine 	u32 freq_meas_wind[2] = { 30, 800 };
18b017a46dSGil Fine 	u32 avg_const[2] = { 4, 8 };
19b017a46dSGil Fine 	u32 freq, avg, val;
20b017a46dSGil Fine 	int ret;
21b017a46dSGil Fine 
22b017a46dSGil Fine 	if (rate == TB_SWITCH_TMU_RATE_NORMAL) {
23b017a46dSGil Fine 		freq = freq_meas_wind[0];
24b017a46dSGil Fine 		avg = avg_const[0];
25b017a46dSGil Fine 	} else if (rate == TB_SWITCH_TMU_RATE_HIFI) {
26b017a46dSGil Fine 		freq = freq_meas_wind[1];
27b017a46dSGil Fine 		avg = avg_const[1];
28b017a46dSGil Fine 	} else {
29b017a46dSGil Fine 		return 0;
30b017a46dSGil Fine 	}
31b017a46dSGil Fine 
32b017a46dSGil Fine 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
33b017a46dSGil Fine 			 sw->tmu.cap + TMU_RTR_CS_0, 1);
34b017a46dSGil Fine 	if (ret)
35b017a46dSGil Fine 		return ret;
36b017a46dSGil Fine 
37b017a46dSGil Fine 	val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK;
38b017a46dSGil Fine 	val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq);
39b017a46dSGil Fine 
40b017a46dSGil Fine 	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
41b017a46dSGil Fine 			  sw->tmu.cap + TMU_RTR_CS_0, 1);
42b017a46dSGil Fine 	if (ret)
43b017a46dSGil Fine 		return ret;
44b017a46dSGil Fine 
45b017a46dSGil Fine 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
46b017a46dSGil Fine 			 sw->tmu.cap + TMU_RTR_CS_15, 1);
47b017a46dSGil Fine 	if (ret)
48b017a46dSGil Fine 		return ret;
49b017a46dSGil Fine 
50b017a46dSGil Fine 	val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK &
51b017a46dSGil Fine 		~TMU_RTR_CS_15_DELAY_AVG_MASK &
52b017a46dSGil Fine 		~TMU_RTR_CS_15_OFFSET_AVG_MASK &
53b017a46dSGil Fine 		~TMU_RTR_CS_15_ERROR_AVG_MASK;
54b017a46dSGil Fine 	val |=  FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) |
55b017a46dSGil Fine 		FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) |
56b017a46dSGil Fine 		FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
57b017a46dSGil Fine 		FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);
58b017a46dSGil Fine 
59b017a46dSGil Fine 	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
60b017a46dSGil Fine 			   sw->tmu.cap + TMU_RTR_CS_15, 1);
61b017a46dSGil Fine }
62b017a46dSGil Fine 
63cf29b9afSRajmohan Mani static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw)
64cf29b9afSRajmohan Mani {
65cf29b9afSRajmohan Mani 	bool root_switch = !tb_route(sw);
66cf29b9afSRajmohan Mani 
67cf29b9afSRajmohan Mani 	switch (sw->tmu.rate) {
68cf29b9afSRajmohan Mani 	case TB_SWITCH_TMU_RATE_OFF:
69cf29b9afSRajmohan Mani 		return "off";
70cf29b9afSRajmohan Mani 
71cf29b9afSRajmohan Mani 	case TB_SWITCH_TMU_RATE_HIFI:
72cf29b9afSRajmohan Mani 		/* Root switch does not have upstream directionality */
73cf29b9afSRajmohan Mani 		if (root_switch)
74cf29b9afSRajmohan Mani 			return "HiFi";
75cf29b9afSRajmohan Mani 		if (sw->tmu.unidirectional)
76cf29b9afSRajmohan Mani 			return "uni-directional, HiFi";
77cf29b9afSRajmohan Mani 		return "bi-directional, HiFi";
78cf29b9afSRajmohan Mani 
79cf29b9afSRajmohan Mani 	case TB_SWITCH_TMU_RATE_NORMAL:
80cf29b9afSRajmohan Mani 		if (root_switch)
81cf29b9afSRajmohan Mani 			return "normal";
82cf29b9afSRajmohan Mani 		return "uni-directional, normal";
83cf29b9afSRajmohan Mani 
84cf29b9afSRajmohan Mani 	default:
85cf29b9afSRajmohan Mani 		return "unknown";
86cf29b9afSRajmohan Mani 	}
87cf29b9afSRajmohan Mani }
88cf29b9afSRajmohan Mani 
89cf29b9afSRajmohan Mani static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw)
90cf29b9afSRajmohan Mani {
91cf29b9afSRajmohan Mani 	int ret;
92cf29b9afSRajmohan Mani 	u32 val;
93cf29b9afSRajmohan Mani 
94cf29b9afSRajmohan Mani 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
95cf29b9afSRajmohan Mani 			 sw->tmu.cap + TMU_RTR_CS_0, 1);
96cf29b9afSRajmohan Mani 	if (ret)
97cf29b9afSRajmohan Mani 		return false;
98cf29b9afSRajmohan Mani 
99cf29b9afSRajmohan Mani 	return !!(val & TMU_RTR_CS_0_UCAP);
100cf29b9afSRajmohan Mani }
101cf29b9afSRajmohan Mani 
102cf29b9afSRajmohan Mani static int tb_switch_tmu_rate_read(struct tb_switch *sw)
103cf29b9afSRajmohan Mani {
104cf29b9afSRajmohan Mani 	int ret;
105cf29b9afSRajmohan Mani 	u32 val;
106cf29b9afSRajmohan Mani 
107cf29b9afSRajmohan Mani 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
108cf29b9afSRajmohan Mani 			 sw->tmu.cap + TMU_RTR_CS_3, 1);
109cf29b9afSRajmohan Mani 	if (ret)
110cf29b9afSRajmohan Mani 		return ret;
111cf29b9afSRajmohan Mani 
112cf29b9afSRajmohan Mani 	val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
113cf29b9afSRajmohan Mani 	return val;
114cf29b9afSRajmohan Mani }
115cf29b9afSRajmohan Mani 
116cf29b9afSRajmohan Mani static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
117cf29b9afSRajmohan Mani {
118cf29b9afSRajmohan Mani 	int ret;
119cf29b9afSRajmohan Mani 	u32 val;
120cf29b9afSRajmohan Mani 
121cf29b9afSRajmohan Mani 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
122cf29b9afSRajmohan Mani 			 sw->tmu.cap + TMU_RTR_CS_3, 1);
123cf29b9afSRajmohan Mani 	if (ret)
124cf29b9afSRajmohan Mani 		return ret;
125cf29b9afSRajmohan Mani 
126cf29b9afSRajmohan Mani 	val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
127cf29b9afSRajmohan Mani 	val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
128cf29b9afSRajmohan Mani 
129cf29b9afSRajmohan Mani 	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
130cf29b9afSRajmohan Mani 			   sw->tmu.cap + TMU_RTR_CS_3, 1);
131cf29b9afSRajmohan Mani }
132cf29b9afSRajmohan Mani 
133cf29b9afSRajmohan Mani static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
134cf29b9afSRajmohan Mani 			     u32 value)
135cf29b9afSRajmohan Mani {
136cf29b9afSRajmohan Mani 	u32 data;
137cf29b9afSRajmohan Mani 	int ret;
138cf29b9afSRajmohan Mani 
139cf29b9afSRajmohan Mani 	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
140cf29b9afSRajmohan Mani 	if (ret)
141cf29b9afSRajmohan Mani 		return ret;
142cf29b9afSRajmohan Mani 
143cf29b9afSRajmohan Mani 	data &= ~mask;
144cf29b9afSRajmohan Mani 	data |= value;
145cf29b9afSRajmohan Mani 
146cf29b9afSRajmohan Mani 	return tb_port_write(port, &data, TB_CFG_PORT,
147cf29b9afSRajmohan Mani 			     port->cap_tmu + offset, 1);
148cf29b9afSRajmohan Mani }
149cf29b9afSRajmohan Mani 
150cf29b9afSRajmohan Mani static int tb_port_tmu_set_unidirectional(struct tb_port *port,
151cf29b9afSRajmohan Mani 					  bool unidirectional)
152cf29b9afSRajmohan Mani {
153cf29b9afSRajmohan Mani 	u32 val;
154cf29b9afSRajmohan Mani 
155cf29b9afSRajmohan Mani 	if (!port->sw->tmu.has_ucap)
156cf29b9afSRajmohan Mani 		return 0;
157cf29b9afSRajmohan Mani 
158cf29b9afSRajmohan Mani 	val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
159cf29b9afSRajmohan Mani 	return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
160cf29b9afSRajmohan Mani }
161cf29b9afSRajmohan Mani 
/* Put the adapter back to bi-directional mode (clear UDM) */
static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, false);
}
166cf29b9afSRajmohan Mani 
/* Switch the adapter to uni-directional mode (set UDM) */
static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, true);
}
171a28ec0e1SGil Fine 
172cf29b9afSRajmohan Mani static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
173cf29b9afSRajmohan Mani {
174cf29b9afSRajmohan Mani 	int ret;
175cf29b9afSRajmohan Mani 	u32 val;
176cf29b9afSRajmohan Mani 
177cf29b9afSRajmohan Mani 	ret = tb_port_read(port, &val, TB_CFG_PORT,
178cf29b9afSRajmohan Mani 			   port->cap_tmu + TMU_ADP_CS_3, 1);
179cf29b9afSRajmohan Mani 	if (ret)
180cf29b9afSRajmohan Mani 		return false;
181cf29b9afSRajmohan Mani 
182cf29b9afSRajmohan Mani 	return val & TMU_ADP_CS_3_UDM;
183cf29b9afSRajmohan Mani }
184cf29b9afSRajmohan Mani 
/*
 * Set or clear the DTS bit of the adapter. Note the inverted polarity:
 * judging by the wrappers below, DTS appears to be a "disable time
 * sync" bit, so @time_sync == true SETS the bit (sync disabled) and
 * false clears it (sync enabled).
 */
static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
{
	u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;

	return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
}
191a28ec0e1SGil Fine 
/* Disable time sync on the adapter by setting the DTS (disable) bit */
static int tb_port_tmu_time_sync_disable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, true);
}
196a28ec0e1SGil Fine 
/* Enable time sync on the adapter by clearing the DTS (disable) bit */
static int tb_port_tmu_time_sync_enable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, false);
}
201a28ec0e1SGil Fine 
202cf29b9afSRajmohan Mani static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
203cf29b9afSRajmohan Mani {
20423ccd21cSGil Fine 	u32 val, offset, bit;
205cf29b9afSRajmohan Mani 	int ret;
206cf29b9afSRajmohan Mani 
20723ccd21cSGil Fine 	if (tb_switch_is_usb4(sw)) {
20823ccd21cSGil Fine 		offset = sw->tmu.cap + TMU_RTR_CS_0;
20923ccd21cSGil Fine 		bit = TMU_RTR_CS_0_TD;
21023ccd21cSGil Fine 	} else {
21123ccd21cSGil Fine 		offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
21223ccd21cSGil Fine 		bit = TB_TIME_VSEC_3_CS_26_TD;
21323ccd21cSGil Fine 	}
21423ccd21cSGil Fine 
21523ccd21cSGil Fine 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
216cf29b9afSRajmohan Mani 	if (ret)
217cf29b9afSRajmohan Mani 		return ret;
218cf29b9afSRajmohan Mani 
219cf29b9afSRajmohan Mani 	if (set)
22023ccd21cSGil Fine 		val |= bit;
221cf29b9afSRajmohan Mani 	else
22223ccd21cSGil Fine 		val &= ~bit;
223cf29b9afSRajmohan Mani 
22423ccd21cSGil Fine 	return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
225cf29b9afSRajmohan Mani }
226cf29b9afSRajmohan Mani 
/**
 * tb_switch_tmu_init() - Initialize switch TMU structures
 * @sw: Switch to initialize
 *
 * This function must be called before other TMU related functions to
 * make sure the internal structures are filled in correctly. Does not
 * change any hardware configuration.
 */
int tb_switch_tmu_init(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	/* NOTE(review): ICM managed switches are skipped — presumably the
	 * firmware connection manager owns TMU configuration there.
	 */
	if (tb_switch_is_icm(sw))
		return 0;

	/* Cache the router TMU capability offset (0 means not found) */
	ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
	if (ret > 0)
		sw->tmu.cap = ret;

	/* Cache the per-port TMU adapter capability offsets */
	tb_switch_for_each_port(sw, port) {
		int cap;

		cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
		if (cap > 0)
			port->cap_tmu = cap;
	}

	/* Record the rate currently programmed in the hardware */
	ret = tb_switch_tmu_rate_read(sw);
	if (ret < 0)
		return ret;

	sw->tmu.rate = ret;

	sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw);
	if (sw->tmu.has_ucap) {
		tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");

		/* Only non-root routers have an upstream port to query */
		if (tb_route(sw)) {
			struct tb_port *up = tb_upstream_port(sw);

			sw->tmu.unidirectional =
				tb_port_tmu_is_unidirectional(up);
		}
	} else {
		sw->tmu.unidirectional = false;
	}

	tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw));
	return 0;
}
278cf29b9afSRajmohan Mani 
/**
 * tb_switch_tmu_post_time() - Update switch local time
 * @sw: Switch whose time to update
 *
 * Updates switch local time using the time posting procedure: reads
 * the grandmaster (root switch) time and posts it to @sw, then waits
 * until the router reports the posted time has been consumed.
 */
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
	unsigned int post_time_high_offset, post_time_high = 0;
	unsigned int post_local_time_offset, post_time_offset;
	struct tb_switch *root_switch = sw->tb->root_switch;
	u64 hi, mid, lo, local_time, post_time;
	int i, ret, retries = 100;
	u32 gm_local_time[3];

	/* Time posting only applies to non-root USB4 routers */
	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_usb4(sw))
		return 0;

	/* Need to be able to read the grand master time */
	if (!root_switch->tmu.cap)
		return 0;

	/* Read the 3-dword grandmaster local time from the root router */
	ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
			 root_switch->tmu.cap + TMU_RTR_CS_1,
			 ARRAY_SIZE(gm_local_time));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
		tb_sw_dbg(root_switch, "local_time[%d]=0x%08x\n", i,
			  gm_local_time[i]);

	/* Convert to nanoseconds (drop fractional part) */
	hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
	mid = gm_local_time[1];
	lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
		TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
	local_time = hi << 48 | mid << 16 | lo;

	/* Tell the switch that time sync is disrupted for a while */
	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
	post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
	post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;

	/*
	 * Write the Grandmaster time to the Post Local Time registers
	 * of the new switch.
	 */
	ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
			  post_local_time_offset, 2);
	if (ret)
		goto out;

	/*
	 * Have the new switch update its local time by:
	 * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
	 * Post Time High register.
	 * 2) write 0 to Post Time High register and then wait for
	 * the completion of the post_time register becomes 0.
	 * This means the time has been converged properly.
	 */
	post_time = 0xffffffff00000001ULL;

	ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
	if (ret)
		goto out;

	ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
			  post_time_high_offset, 1);
	if (ret)
		goto out;

	/* Poll (up to ~100 iterations) until post_time reads back as 0 */
	do {
		usleep_range(5, 10);
		ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
				 post_time_offset, 2);
		if (ret)
			goto out;
	} while (--retries && post_time);

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out;
	}

	tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);

out:
	/* Always clear the time disruption flag, even on failure */
	tb_switch_tmu_set_time_disruption(sw, false);
	return ret;
}
377cf29b9afSRajmohan Mani 
/**
 * tb_switch_tmu_disable() - Disable TMU of a switch
 * @sw: Switch whose TMU to disable
 *
 * Turns off TMU of @sw if it is enabled. If not enabled does nothing.
 */
int tb_switch_tmu_disable(struct tb_switch *sw)
{
	/*
	 * No need to disable TMU on devices that don't support CLx since
	 * on these devices e.g. Alpine Ridge and earlier, the TMU mode
	 * HiFi bi-directional is enabled by default and we don't change it.
	 */
	if (!tb_switch_is_clx_supported(sw))
		return 0;

	/* Already disabled? */
	if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
		return 0;

	if (tb_route(sw)) {
		bool unidirectional = sw->tmu.unidirectional;
		struct tb_port *down, *up;
		int ret;

		down = tb_switch_downstream_port(sw);
		up = tb_upstream_port(sw);
		/*
		 * In case of uni-directional time sync, TMU handshake is
		 * initiated by upstream router. In case of bi-directional
		 * time sync, TMU handshake is initiated by downstream router.
		 * We change downstream router's rate to off for both uni/bidir
		 * cases although it is needed only for the bi-directional mode.
		 * We avoid changing upstream router's mode since it might
		 * have another downstream router plugged, that is set to
		 * uni-directional mode and we don't want to change its TMU
		 * mode.
		 */
		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);

		/* Upstream side errors are ignored (switch may be gone) */
		tb_port_tmu_time_sync_disable(up);
		ret = tb_port_tmu_time_sync_disable(down);
		if (ret)
			return ret;

		if (unidirectional) {
			/* The switch may be unplugged so ignore any errors */
			tb_port_tmu_unidirectional_disable(up);
			ret = tb_port_tmu_unidirectional_disable(down);
			if (ret)
				return ret;
		}
	} else {
		/* Host router: only the rate needs to be turned off */
		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
	}

	sw->tmu.unidirectional = false;
	sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;

	tb_sw_dbg(sw, "TMU: disabled\n");
	return 0;
}
440cf29b9afSRajmohan Mani 
/*
 * Roll back a partially applied TMU enable sequence: disable time sync
 * on both adapters, turn the rate off and restore the mode parameters.
 */
static void tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional)
{
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when setting
	 * bi-directional or uni-directional TMU mode, get back to the TMU
	 * configurations in off mode. In case of additional failures in
	 * the functions below, ignore them since the caller shall already
	 * report a failure.
	 */
	tb_port_tmu_time_sync_disable(down);
	tb_port_tmu_time_sync_disable(up);
	/* In uni-directional mode the rate lives on the parent router */
	if (unidirectional)
		tb_switch_tmu_rate_write(tb_switch_parent(sw),
					 TB_SWITCH_TMU_RATE_OFF);
	else
		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);

	tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
	tb_port_tmu_unidirectional_disable(down);
	tb_port_tmu_unidirectional_disable(up);
}
466a28ec0e1SGil Fine 
/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_RATE_OFF. Enables bi-directional time sync on the link
 * between @sw and its parent; bi-directional mode is always HiFi rate.
 */
static int tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Both adapters must be bi-directional before raising the rate */
	ret = tb_port_tmu_unidirectional_disable(up);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_disable(down);
	if (ret)
		goto out;

	ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	/* Restore the off configuration; caller reports the failure */
	tb_switch_tmu_off(sw, false);
	return ret;
}
505a28ec0e1SGil Fine 
50643f977bcSGil Fine static int tb_switch_tmu_objection_mask(struct tb_switch *sw)
50743f977bcSGil Fine {
50843f977bcSGil Fine 	u32 val;
50943f977bcSGil Fine 	int ret;
51043f977bcSGil Fine 
51143f977bcSGil Fine 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
51243f977bcSGil Fine 			 sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
51343f977bcSGil Fine 	if (ret)
51443f977bcSGil Fine 		return ret;
51543f977bcSGil Fine 
51643f977bcSGil Fine 	val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;
51743f977bcSGil Fine 
51843f977bcSGil Fine 	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
51943f977bcSGil Fine 			   sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
52043f977bcSGil Fine }
52143f977bcSGil Fine 
/*
 * Set the disable-TMU-objection bits on the upstream adapter. Used
 * together with tb_switch_tmu_objection_mask() when enabling
 * uni-directional mode on Titan Ridge (see tb_switch_tmu_enable()).
 */
static int tb_switch_tmu_unidirectional_enable(struct tb_switch *sw)
{
	struct tb_port *up = tb_upstream_port(sw);

	return tb_port_tmu_write(up, TMU_ADP_CS_6,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK);
}
53043f977bcSGil Fine 
/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_RATE_OFF. Enables uni-directional time sync at the
 * requested rate on the link between @sw and its parent.
 */
static int tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);
	/*
	 * In uni-directional mode the handshake is initiated by the
	 * upstream (parent) router, so the rate is programmed there.
	 */
	ret = tb_switch_tmu_rate_write(tb_switch_parent(sw),
				       sw->tmu.rate_request);
	if (ret)
		return ret;

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_unidirectional_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	/* Restore the off configuration; caller reports the failure */
	tb_switch_tmu_off(sw, true);
	return ret;
}
573a28ec0e1SGil Fine 
/*
 * Restore the previous TMU mode (rate, mode parameters and
 * directionality recorded in sw->tmu) after a failed mode change.
 */
static void tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
{
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when changing the
	 * mode, get back to the TMU configuration of the previous mode.
	 * In case of additional failures in the functions below, ignore
	 * them since the caller shall already report a failure.
	 */
	tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional);
	/* The rate lives on the parent router in uni-directional mode */
	if (sw->tmu.unidirectional_request)
		tb_switch_tmu_rate_write(tb_switch_parent(sw), sw->tmu.rate);
	else
		tb_switch_tmu_rate_write(sw, sw->tmu.rate);

	tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
	tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional);
}
595b017a46dSGil Fine 
596*c437dcb1SMika Westerberg static int tb_switch_tmu_change_mode(struct tb_switch *sw)
597b017a46dSGil Fine {
598b017a46dSGil Fine 	struct tb_port *up, *down;
599b017a46dSGil Fine 	int ret;
600b017a46dSGil Fine 
601b017a46dSGil Fine 	up = tb_upstream_port(sw);
6027ce54221SGil Fine 	down = tb_switch_downstream_port(sw);
603b017a46dSGil Fine 	ret = tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional_request);
604b017a46dSGil Fine 	if (ret)
605b017a46dSGil Fine 		goto out;
606b017a46dSGil Fine 
607b017a46dSGil Fine 	if (sw->tmu.unidirectional_request)
6087ce54221SGil Fine 		ret = tb_switch_tmu_rate_write(tb_switch_parent(sw),
6097ce54221SGil Fine 					       sw->tmu.rate_request);
610b017a46dSGil Fine 	else
611b017a46dSGil Fine 		ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
612b017a46dSGil Fine 	if (ret)
613b017a46dSGil Fine 		return ret;
614b017a46dSGil Fine 
615b017a46dSGil Fine 	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
616b017a46dSGil Fine 	if (ret)
617b017a46dSGil Fine 		return ret;
618b017a46dSGil Fine 
619b017a46dSGil Fine 	ret = tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional_request);
620b017a46dSGil Fine 	if (ret)
621b017a46dSGil Fine 		goto out;
622b017a46dSGil Fine 
623b017a46dSGil Fine 	ret = tb_port_tmu_time_sync_enable(down);
624b017a46dSGil Fine 	if (ret)
625b017a46dSGil Fine 		goto out;
626b017a46dSGil Fine 
627b017a46dSGil Fine 	ret = tb_port_tmu_time_sync_enable(up);
628b017a46dSGil Fine 	if (ret)
629b017a46dSGil Fine 		goto out;
630b017a46dSGil Fine 
631b017a46dSGil Fine 	return 0;
632b017a46dSGil Fine 
633b017a46dSGil Fine out:
634*c437dcb1SMika Westerberg 	tb_switch_tmu_change_mode_prev(sw);
635b017a46dSGil Fine 	return ret;
636b017a46dSGil Fine }
637b017a46dSGil Fine 
638b017a46dSGil Fine /**
639b017a46dSGil Fine  * tb_switch_tmu_enable() - Enable TMU on a router
640b017a46dSGil Fine  * @sw: Router whose TMU to enable
641b017a46dSGil Fine  *
642b017a46dSGil Fine  * Enables TMU of a router to be in uni-directional Normal/HiFi
643b017a46dSGil Fine  * or bi-directional HiFi mode. Calling tb_switch_tmu_configure() is required
644b017a46dSGil Fine  * before calling this function, to select the mode Normal/HiFi and
645b017a46dSGil Fine  * directionality (uni-directional/bi-directional).
646b017a46dSGil Fine  * In HiFi mode all tunneling should work. In Normal mode, DP tunneling can't
647b017a46dSGil Fine  * work. Uni-directional mode is required for CLx (Link Low-Power) to work.
648b017a46dSGil Fine  */
649b017a46dSGil Fine int tb_switch_tmu_enable(struct tb_switch *sw)
650a28ec0e1SGil Fine {
651a28ec0e1SGil Fine 	bool unidirectional = sw->tmu.unidirectional_request;
652a28ec0e1SGil Fine 	int ret;
653a28ec0e1SGil Fine 
654a28ec0e1SGil Fine 	if (unidirectional && !sw->tmu.has_ucap)
655a28ec0e1SGil Fine 		return -EOPNOTSUPP;
656a28ec0e1SGil Fine 
65743f977bcSGil Fine 	/*
65843f977bcSGil Fine 	 * No need to enable TMU on devices that don't support CLx since on
65943f977bcSGil Fine 	 * these devices e.g. Alpine Ridge and earlier, the TMU mode HiFi
66043f977bcSGil Fine 	 * bi-directional is enabled by default.
66143f977bcSGil Fine 	 */
66243f977bcSGil Fine 	if (!tb_switch_is_clx_supported(sw))
663cf29b9afSRajmohan Mani 		return 0;
664cf29b9afSRajmohan Mani 
665b017a46dSGil Fine 	if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request))
666cf29b9afSRajmohan Mani 		return 0;
667cf29b9afSRajmohan Mani 
66843f977bcSGil Fine 	if (tb_switch_is_titan_ridge(sw) && unidirectional) {
669b017a46dSGil Fine 		/*
670b017a46dSGil Fine 		 * Titan Ridge supports CL0s and CL1 only. CL0s and CL1 are
671b017a46dSGil Fine 		 * enabled and supported together.
672b017a46dSGil Fine 		 */
673b017a46dSGil Fine 		if (!tb_switch_is_clx_enabled(sw, TB_CL1))
67443f977bcSGil Fine 			return -EOPNOTSUPP;
67543f977bcSGil Fine 
67643f977bcSGil Fine 		ret = tb_switch_tmu_objection_mask(sw);
67743f977bcSGil Fine 		if (ret)
67843f977bcSGil Fine 			return ret;
67943f977bcSGil Fine 
68043f977bcSGil Fine 		ret = tb_switch_tmu_unidirectional_enable(sw);
68143f977bcSGil Fine 		if (ret)
68243f977bcSGil Fine 			return ret;
68343f977bcSGil Fine 	}
68443f977bcSGil Fine 
685cf29b9afSRajmohan Mani 	ret = tb_switch_tmu_set_time_disruption(sw, true);
686cf29b9afSRajmohan Mani 	if (ret)
687cf29b9afSRajmohan Mani 		return ret;
688cf29b9afSRajmohan Mani 
689a28ec0e1SGil Fine 	if (tb_route(sw)) {
690b017a46dSGil Fine 		/*
691b017a46dSGil Fine 		 * The used mode changes are from OFF to
692b017a46dSGil Fine 		 * HiFi-Uni/HiFi-BiDir/Normal-Uni or from Normal-Uni to
693b017a46dSGil Fine 		 * HiFi-Uni.
694b017a46dSGil Fine 		 */
695a28ec0e1SGil Fine 		if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) {
696a28ec0e1SGil Fine 			if (unidirectional)
697*c437dcb1SMika Westerberg 				ret = tb_switch_tmu_enable_unidirectional(sw);
698a28ec0e1SGil Fine 			else
699*c437dcb1SMika Westerberg 				ret = tb_switch_tmu_enable_bidirectional(sw);
700cf29b9afSRajmohan Mani 			if (ret)
701cf29b9afSRajmohan Mani 				return ret;
702b017a46dSGil Fine 		} else if (sw->tmu.rate == TB_SWITCH_TMU_RATE_NORMAL) {
703*c437dcb1SMika Westerberg 			ret = tb_switch_tmu_change_mode(sw);
704b017a46dSGil Fine 			if (ret)
705b017a46dSGil Fine 				return ret;
706a28ec0e1SGil Fine 		}
707a28ec0e1SGil Fine 		sw->tmu.unidirectional = unidirectional;
708cf29b9afSRajmohan Mani 	} else {
709a28ec0e1SGil Fine 		/*
710a28ec0e1SGil Fine 		 * Host router port configurations are written as
711a28ec0e1SGil Fine 		 * part of configurations for downstream port of the parent
712a28ec0e1SGil Fine 		 * of the child node - see above.
713a28ec0e1SGil Fine 		 * Here only the host router' rate configuration is written.
714a28ec0e1SGil Fine 		 */
715b017a46dSGil Fine 		ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
716cf29b9afSRajmohan Mani 		if (ret)
717cf29b9afSRajmohan Mani 			return ret;
718cf29b9afSRajmohan Mani 	}
719cf29b9afSRajmohan Mani 
720b017a46dSGil Fine 	sw->tmu.rate = sw->tmu.rate_request;
721cf29b9afSRajmohan Mani 
722a28ec0e1SGil Fine 	tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
723cf29b9afSRajmohan Mani 	return tb_switch_tmu_set_time_disruption(sw, false);
724cf29b9afSRajmohan Mani }
725a28ec0e1SGil Fine 
726a28ec0e1SGil Fine /**
727a28ec0e1SGil Fine  * tb_switch_tmu_configure() - Configure the TMU rate and directionality
728a28ec0e1SGil Fine  * @sw: Router whose mode to change
729b4e08d5dSGil Fine  * @rate: Rate to configure Off/Normal/HiFi
730a28ec0e1SGil Fine  * @unidirectional: If uni-directional (bi-directional otherwise)
731a28ec0e1SGil Fine  *
732a28ec0e1SGil Fine  * Selects the rate of the TMU and directionality (uni-directional or
733a28ec0e1SGil Fine  * bi-directional). Must be called before tb_switch_tmu_enable().
734a28ec0e1SGil Fine  */
735a28ec0e1SGil Fine void tb_switch_tmu_configure(struct tb_switch *sw,
736a28ec0e1SGil Fine 			     enum tb_switch_tmu_rate rate, bool unidirectional)
737a28ec0e1SGil Fine {
738a28ec0e1SGil Fine 	sw->tmu.unidirectional_request = unidirectional;
739a28ec0e1SGil Fine 	sw->tmu.rate_request = rate;
740a28ec0e1SGil Fine }
7413084b48fSGil Fine 
7423084b48fSGil Fine static int tb_switch_tmu_config_enable(struct device *dev, void *rate)
7433084b48fSGil Fine {
7443084b48fSGil Fine 	if (tb_is_switch(dev)) {
7453084b48fSGil Fine 		struct tb_switch *sw = tb_to_switch(dev);
7463084b48fSGil Fine 
7473084b48fSGil Fine 		tb_switch_tmu_configure(sw, *(enum tb_switch_tmu_rate *)rate,
7483084b48fSGil Fine 					tb_switch_is_clx_enabled(sw, TB_CL1));
7493084b48fSGil Fine 		if (tb_switch_tmu_enable(sw))
7503084b48fSGil Fine 			tb_sw_dbg(sw, "fail switching TMU mode for 1st depth router\n");
7513084b48fSGil Fine 	}
7523084b48fSGil Fine 
7533084b48fSGil Fine 	return 0;
7543084b48fSGil Fine }
7553084b48fSGil Fine 
7563084b48fSGil Fine /**
7573084b48fSGil Fine  * tb_switch_enable_tmu_1st_child - Configure and enable TMU for 1st chidren
7583084b48fSGil Fine  * @sw: The router to configure and enable it's children TMU
7593084b48fSGil Fine  * @rate: Rate of the TMU to configure the router's chidren to
7603084b48fSGil Fine  *
7613084b48fSGil Fine  * Configures and enables the TMU mode of 1st depth children of the specified
7623084b48fSGil Fine  * router to the specified rate.
7633084b48fSGil Fine  */
7643084b48fSGil Fine void tb_switch_enable_tmu_1st_child(struct tb_switch *sw,
7653084b48fSGil Fine 				    enum tb_switch_tmu_rate rate)
7663084b48fSGil Fine {
7673084b48fSGil Fine 	device_for_each_child(&sw->dev, &rate,
7683084b48fSGil Fine 			      tb_switch_tmu_config_enable);
7693084b48fSGil Fine }
770