// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt Time Management Unit (TMU) support
 *
 * Copyright (C) 2019, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *	    Rajmohan Mani <rajmohan.mani@intel.com>
 */

#include <linux/delay.h>

#include "tb.h"

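/*
 * TS packet interval for each mode; 0 means the TMU is off. The values
 * appear to be in microseconds (an assumption based on the 16 us HiFi
 * interval), with LowRes using a 1 ms interval.
 */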
static const unsigned int tmu_rates[] = {
	[TB_SWITCH_TMU_MODE_OFF] = 0,
	[TB_SWITCH_TMU_MODE_LOWRES] = 1000,
	[TB_SWITCH_TMU_MODE_HIFI_UNI] = 16,
	[TB_SWITCH_TMU_MODE_HIFI_BI] = 16,
	[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = 16,
};

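/*
 * Per-mode TMU parameters: the frequency measurement window and
 * averaging constants are used by all modes; the replication and
 * direction switch parameters are used only by the enhanced
 * uni-directional mode.
 */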
static const struct {
	unsigned int freq_meas_window;
	unsigned int avg_const;
	unsigned int delta_avg_const;
	unsigned int repl_timeout;
	unsigned int repl_threshold;
	unsigned int repl_n;
	unsigned int dirswitch_n;
} tmu_params[] = {
	[TB_SWITCH_TMU_MODE_OFF] = { },
	[TB_SWITCH_TMU_MODE_LOWRES] = { 30, 4, },
	[TB_SWITCH_TMU_MODE_HIFI_UNI] = { 800, 8, },
	[TB_SWITCH_TMU_MODE_HIFI_BI] = { 800, 8, },
	[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = {
		800, 4, 0, 3125, 25, 128, 255,
	},
};

static const char *tmu_mode_name(enum tb_switch_tmu_mode mode)
{
	switch (mode) {
	case TB_SWITCH_TMU_MODE_OFF:
		return "off";
	case TB_SWITCH_TMU_MODE_LOWRES:
		return "uni-directional, LowRes";
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		return "uni-directional, HiFi";
	case TB_SWITCH_TMU_MODE_HIFI_BI:
		return "bi-directional, HiFi";
	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
		return "enhanced uni-directional, MedRes";
	default:
		return "unknown";
	}
}

static bool tb_switch_tmu_enhanced_is_supported(const struct tb_switch *sw)
{
	return usb4_switch_version(sw) > 1;
}

static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
					 enum tb_switch_tmu_mode mode)
{
	u32 freq, avg, val;
	int ret;

	freq = tmu_params[mode].freq_meas_window;
	avg = tmu_params[mode].avg_const;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK;
	val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK &
		~TMU_RTR_CS_15_DELAY_AVG_MASK &
		~TMU_RTR_CS_15_OFFSET_AVG_MASK &
		~TMU_RTR_CS_15_ERROR_AVG_MASK;
	val |=  FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
		FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->tmu.cap + TMU_RTR_CS_15, 1);
	if (ret)
		return ret;

	if (tb_switch_tmu_enhanced_is_supported(sw)) {
		u32 delta_avg = tmu_params[mode].delta_avg_const;

		ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
				 sw->tmu.cap + TMU_RTR_CS_18, 1);
		if (ret)
			return ret;

		val &= ~TMU_RTR_CS_18_DELTA_AVG_CONST_MASK;
		val |= FIELD_PREP(TMU_RTR_CS_18_DELTA_AVG_CONST_MASK, delta_avg);

		ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
				  sw->tmu.cap + TMU_RTR_CS_18, 1);
	}

	return ret;
}

static bool tb_switch_tmu_ucap_is_supported(struct tb_switch *sw)
{
	int ret;
	u32 val;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_0, 1);
	if (ret)
		return false;

	return !!(val & TMU_RTR_CS_0_UCAP);
}

static int tb_switch_tmu_rate_read(struct tb_switch *sw)
{
	int ret;
	u32 val;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_3, 1);
	if (ret)
		return ret;

	val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
	return val;
}

static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
{
	int ret;
	u32 val;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->tmu.cap + TMU_RTR_CS_3, 1);
	if (ret)
		return ret;

	val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
	val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
			   sw->tmu.cap + TMU_RTR_CS_3, 1);
}

static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
			     u32 value)
{
	u32 data;
	int ret;

	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
	if (ret)
		return ret;

	data &= ~mask;
	data |= value;

	return tb_port_write(port, &data, TB_CFG_PORT,
			     port->cap_tmu + offset, 1);
}

static int tb_port_tmu_set_unidirectional(struct tb_port *port,
					  bool unidirectional)
{
	u32 val;

	if (!port->sw->tmu.has_ucap)
		return 0;

	val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
	return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
}

static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, false);
}

static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, true);
}

static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_3, 1);
	if (ret)
		return false;

	return val & TMU_ADP_CS_3_UDM;
}

static bool tb_port_tmu_is_enhanced(struct tb_port *port)
{
	int ret;
	u32 val;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return false;

	return val & TMU_ADP_CS_8_EUDM;
}

/* Can be called for non-v2 lane adapters too */
static int tb_port_tmu_enhanced_enable(struct tb_port *port, bool enable)
{
	int ret;
	u32 val;

	if (!tb_switch_tmu_enhanced_is_supported(port->sw))
		return 0;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return ret;

	if (enable)
		val |= TMU_ADP_CS_8_EUDM;
	else
		val &= ~TMU_ADP_CS_8_EUDM;

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_tmu + TMU_ADP_CS_8, 1);
}

static int tb_port_set_tmu_mode_params(struct tb_port *port,
				       enum tb_switch_tmu_mode mode)
{
	u32 repl_timeout, repl_threshold, repl_n, dirswitch_n, val;
	int ret;

	repl_timeout = tmu_params[mode].repl_timeout;
	repl_threshold = tmu_params[mode].repl_threshold;
	repl_n = tmu_params[mode].repl_n;
	dirswitch_n = tmu_params[mode].dirswitch_n;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return ret;

	val &= ~TMU_ADP_CS_8_REPL_TIMEOUT_MASK;
	val &= ~TMU_ADP_CS_8_REPL_THRESHOLD_MASK;
	val |= FIELD_PREP(TMU_ADP_CS_8_REPL_TIMEOUT_MASK, repl_timeout);
	val |= FIELD_PREP(TMU_ADP_CS_8_REPL_THRESHOLD_MASK, repl_threshold);

	ret = tb_port_write(port, &val, TB_CFG_PORT,
			    port->cap_tmu + TMU_ADP_CS_8, 1);
	if (ret)
		return ret;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_9, 1);
	if (ret)
		return ret;

	val &= ~TMU_ADP_CS_9_REPL_N_MASK;
	val &= ~TMU_ADP_CS_9_DIRSWITCH_N_MASK;
	val |= FIELD_PREP(TMU_ADP_CS_9_REPL_N_MASK, repl_n);
	val |= FIELD_PREP(TMU_ADP_CS_9_DIRSWITCH_N_MASK, dirswitch_n);

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_tmu + TMU_ADP_CS_9, 1);
}

/* Can be called for non-v2 lane adapters too */
static int tb_port_tmu_rate_write(struct tb_port *port, int rate)
{
	int ret;
	u32 val;

	if (!tb_switch_tmu_enhanced_is_supported(port->sw))
		return 0;

	ret = tb_port_read(port, &val, TB_CFG_PORT,
			   port->cap_tmu + TMU_ADP_CS_9, 1);
	if (ret)
		return ret;

	val &= ~TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK;
	val |= FIELD_PREP(TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK, rate);

	return tb_port_write(port, &val, TB_CFG_PORT,
			     port->cap_tmu + TMU_ADP_CS_9, 1);
}

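/*
 * Note that TMU_ADP_CS_6_DTS is a "disable time sync" bit: the helpers
 * below set it when disabling time sync and clear it when enabling,
 * which is why the boolean looks inverted at first glance.
 */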
static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
{
	u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;

	return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
}

static int tb_port_tmu_time_sync_disable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, true);
}

static int tb_port_tmu_time_sync_enable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, false);
}

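/*
 * The time disruption (TD) bit tells the router that its local time is
 * about to be changed. USB4 routers expose it in the router TMU
 * capability (TMU_RTR_CS_0); pre-USB4 routers (here, Titan Ridge) use a
 * vendor specific capability instead.
 */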
static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
{
	u32 val, offset, bit;
	int ret;

	if (tb_switch_is_usb4(sw)) {
		offset = sw->tmu.cap + TMU_RTR_CS_0;
		bit = TMU_RTR_CS_0_TD;
	} else {
		offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
		bit = TB_TIME_VSEC_3_CS_26_TD;
	}

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
	if (ret)
		return ret;

	if (set)
		val |= bit;
	else
		val &= ~bit;

	return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
}

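/* Figure out the current TMU mode from what the hardware registers tell us */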
static int tmu_mode_init(struct tb_switch *sw)
{
	bool enhanced, ucap;
	int ret, rate;

	ucap = tb_switch_tmu_ucap_is_supported(sw);
	if (ucap)
		tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
	enhanced = tb_switch_tmu_enhanced_is_supported(sw);
	if (enhanced)
		tb_sw_dbg(sw, "TMU: supports enhanced uni-directional mode\n");

	ret = tb_switch_tmu_rate_read(sw);
	if (ret < 0)
		return ret;
	rate = ret;

	/* Off by default */
	sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;

	if (tb_route(sw)) {
		struct tb_port *up = tb_upstream_port(sw);

		if (enhanced && tb_port_tmu_is_enhanced(up)) {
			sw->tmu.mode = TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI;
		} else if (ucap && tb_port_tmu_is_unidirectional(up)) {
			if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
				sw->tmu.mode = TB_SWITCH_TMU_MODE_LOWRES;
			else if (tmu_rates[TB_SWITCH_TMU_MODE_HIFI_UNI] == rate)
				sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
		} else if (rate) {
			sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
		}
	} else if (rate) {
		sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
	}

	/* Update the initial request to match the current mode */
	sw->tmu.mode_request = sw->tmu.mode;
	sw->tmu.has_ucap = ucap;

	return 0;
}

/**
 * tb_switch_tmu_init() - Initialize switch TMU structures
 * @sw: Switch to initialize
 *
 * This function must be called before other TMU related functions to
 * make sure the internal structures are filled in correctly. Does not
 * change any hardware configuration.
 */
int tb_switch_tmu_init(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	if (tb_switch_is_icm(sw))
		return 0;

	ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
	if (ret > 0)
		sw->tmu.cap = ret;

	tb_switch_for_each_port(sw, port) {
		int cap;

		cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
		if (cap > 0)
			port->cap_tmu = cap;
	}

	ret = tmu_mode_init(sw);
	if (ret)
		return ret;

	tb_sw_dbg(sw, "TMU: current mode: %s\n", tmu_mode_name(sw->tmu.mode));
	return 0;
}

/**
 * tb_switch_tmu_post_time() - Update switch local time
 * @sw: Switch whose time to update
 *
 * Updates switch local time using the time posting procedure.
 */
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
	unsigned int post_time_high_offset, post_time_high = 0;
	unsigned int post_local_time_offset, post_time_offset;
	struct tb_switch *root_switch = sw->tb->root_switch;
	u64 hi, mid, lo, local_time, post_time;
	int i, ret, retries = 100;
	u32 gm_local_time[3];

	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_usb4(sw))
		return 0;

	/* Need to be able to read the grand master time */
	if (!root_switch->tmu.cap)
		return 0;

	ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
			 root_switch->tmu.cap + TMU_RTR_CS_1,
			 ARRAY_SIZE(gm_local_time));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
		tb_sw_dbg(root_switch, "TMU: local_time[%d]=0x%08x\n", i,
			  gm_local_time[i]);

	/* Convert to nanoseconds (drop fractional part) */
	hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
	mid = gm_local_time[1];
	lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
		TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
	local_time = hi << 48 | mid << 16 | lo;

	/* Tell the switch that time sync is disrupted for a while */
	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
	post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
	post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;

	/*
	 * Write the Grandmaster time to the Post Local Time registers
	 * of the new switch.
	 */
	ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
			  post_local_time_offset, 2);
	if (ret)
		goto out;

	/*
	 * Have the new switch update its local time by:
	 * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
	 *    the Post Time High register.
	 * 2) writing 0 to the Post Time High register and then waiting
	 *    until the Post Time register becomes 0, which means the time
	 *    has converged properly.
	 */
	post_time = 0xffffffff00000001ULL;

	ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
	if (ret)
		goto out;

	ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
			  post_time_high_offset, 1);
	if (ret)
		goto out;

	do {
		usleep_range(5, 10);
		ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
				 post_time_offset, 2);
		if (ret)
			goto out;
	} while (--retries && post_time);

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out;
	}

	tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);

out:
	tb_switch_tmu_set_time_disruption(sw, false);
	return ret;
}

static int disable_enhanced(struct tb_port *up, struct tb_port *down)
{
	int ret;

	/*
	 * The router may already have been disconnected, so ignore
	 * errors on the upstream port.
	 */
	tb_port_tmu_rate_write(up, 0);
	tb_port_tmu_enhanced_enable(up, false);

	ret = tb_port_tmu_rate_write(down, 0);
	if (ret)
		return ret;
	return tb_port_tmu_enhanced_enable(down, false);
}

/**
 * tb_switch_tmu_disable() - Disable TMU of a switch
 * @sw: Switch whose TMU to disable
 *
 * Turns off TMU of @sw if it is enabled. If not enabled, does nothing.
 */
int tb_switch_tmu_disable(struct tb_switch *sw)
{
	/* Already disabled? */
	if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF)
		return 0;

	if (tb_route(sw)) {
		struct tb_port *down, *up;
		int ret;

		down = tb_switch_downstream_port(sw);
		up = tb_upstream_port(sw);
		/*
		 * In case of uni-directional time sync, the TMU handshake
		 * is initiated by the upstream router; in case of
		 * bi-directional time sync, by the downstream router. We
		 * change the downstream router's rate to off for both the
		 * uni- and bi-directional cases although it is only
		 * needed for the bi-directional mode. We avoid changing
		 * the upstream router's mode since it might have another
		 * downstream router plugged in that is set to
		 * uni-directional mode, and we don't want to change its
		 * TMU mode.
		 */
		tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);

		tb_port_tmu_time_sync_disable(up);
		ret = tb_port_tmu_time_sync_disable(down);
		if (ret)
			return ret;

		switch (sw->tmu.mode) {
		case TB_SWITCH_TMU_MODE_LOWRES:
		case TB_SWITCH_TMU_MODE_HIFI_UNI:
			/* The switch may be unplugged so ignore any errors */
			tb_port_tmu_unidirectional_disable(up);
			ret = tb_port_tmu_unidirectional_disable(down);
			if (ret)
				return ret;
			break;

		case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
			ret = disable_enhanced(up, down);
			if (ret)
				return ret;
			break;

		default:
			break;
		}
	} else {
		tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
	}

	sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;

	tb_sw_dbg(sw, "TMU: disabled\n");
	return 0;
}

/* Called only when there is a failure enabling the requested mode */
static void tb_switch_tmu_off(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[TB_SWITCH_TMU_MODE_OFF];
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when setting
	 * bi-directional or uni-directional TMU mode, fall back to the
	 * off-mode TMU configuration. In case of additional failures in
	 * the functions below, ignore them since the caller shall
	 * already report a failure.
	 */
	tb_port_tmu_time_sync_disable(down);
	tb_port_tmu_time_sync_disable(up);

	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		break;
	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
		disable_enhanced(up, down);
		break;
	default:
		break;
	}

	/* Always set the rate to 0 */
	tb_switch_tmu_rate_write(sw, rate);

	tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);
	tb_port_tmu_unidirectional_disable(down);
	tb_port_tmu_unidirectional_disable(up);
}

/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_MODE_OFF.
 */
static int tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	ret = tb_port_tmu_unidirectional_disable(up);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_disable(down);
	if (ret)
		goto out;

	ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_HIFI_BI]);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	tb_switch_tmu_off(sw);
	return ret;
}

/* Only needed for Titan Ridge */
static int tb_switch_tmu_disable_objections(struct tb_switch *sw)
{
	struct tb_port *up = tb_upstream_port(sw);
	u32 val;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
	if (ret)
		return ret;

	val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
	if (ret)
		return ret;

	return tb_port_tmu_write(up, TMU_ADP_CS_6,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 |
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2);
}

/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_MODE_OFF.
 */
static int tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);
	ret = tb_switch_tmu_rate_write(tb_switch_parent(sw),
				       tmu_rates[sw->tmu.mode_request]);
	if (ret)
		return ret;

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_unidirectional_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	tb_switch_tmu_off(sw);
	return ret;
}

/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_MODE_OFF.
 */
static int tb_switch_tmu_enable_enhanced(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode_request];
	struct tb_port *up, *down;
	int ret;

	/* Router specific parameters first */
	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		return ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	ret = tb_port_set_tmu_mode_params(up, sw->tmu.mode_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_rate_write(up, rate);
	if (ret)
		goto out;

	ret = tb_port_tmu_enhanced_enable(up, true);
	if (ret)
		goto out;

	ret = tb_port_set_tmu_mode_params(down, sw->tmu.mode_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_rate_write(down, rate);
	if (ret)
		goto out;

	ret = tb_port_tmu_enhanced_enable(down, true);
	if (ret)
		goto out;

	return 0;

out:
	tb_switch_tmu_off(sw);
	return ret;
}

static void tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode];
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when changing the
	 * mode, get back to the TMU configuration of the previous mode.
	 * In case of additional failures in the functions below, ignore
	 * them since the caller shall already report a failure.
	 */
	switch (sw->tmu.mode) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		tb_port_tmu_set_unidirectional(down, true);
		tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		tb_port_tmu_set_unidirectional(down, false);
		tb_switch_tmu_rate_write(sw, rate);
		break;

	default:
		break;
	}

	tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);

	switch (sw->tmu.mode) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		tb_port_tmu_set_unidirectional(up, true);
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		tb_port_tmu_set_unidirectional(up, false);
		break;

	default:
		break;
	}
}

static int tb_switch_tmu_change_mode(struct tb_switch *sw)
{
	unsigned int rate = tmu_rates[sw->tmu.mode_request];
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Program the upstream router downstream facing lane adapter */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		ret = tb_port_tmu_set_unidirectional(down, true);
		if (ret)
			goto out;
		ret = tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
		if (ret)
			goto out;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		ret = tb_port_tmu_set_unidirectional(down, false);
		if (ret)
			goto out;
		ret = tb_switch_tmu_rate_write(sw, rate);
		if (ret)
			goto out;
		break;

	default:
		/* Not allowed to change to modes other than the above */
		return -EINVAL;
	}

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
	if (ret)
		return ret;

	/* Program the new mode and the downstream router lane adapter */
	switch (sw->tmu.mode_request) {
	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		ret = tb_port_tmu_set_unidirectional(up, true);
		if (ret)
			goto out;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		ret = tb_port_tmu_set_unidirectional(up, false);
		if (ret)
			goto out;
		break;

	default:
		/* Not allowed to change to modes other than the above */
		return -EINVAL;
	}

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	return 0;

out:
	tb_switch_tmu_change_mode_prev(sw);
	return ret;
}

/**
 * tb_switch_tmu_enable() - Enable TMU on a router
 * @sw: Router whose TMU to enable
 *
 * Enables TMU of a router in the mode previously selected with
 * tb_switch_tmu_configure(). Calling tb_switch_tmu_configure() is
 * required before calling this function.
 */
int tb_switch_tmu_enable(struct tb_switch *sw)
{
	int ret;

	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	if (tb_switch_is_titan_ridge(sw) &&
	    (sw->tmu.mode_request == TB_SWITCH_TMU_MODE_LOWRES ||
	     sw->tmu.mode_request == TB_SWITCH_TMU_MODE_HIFI_UNI)) {
		ret = tb_switch_tmu_disable_objections(sw);
		if (ret)
			return ret;
	}

	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	if (tb_route(sw)) {
		/*
		 * The supported mode changes are from OFF to any of the
		 * modes, or between the LowRes, HiFi-Uni and HiFi-Bi
		 * modes.
		 */
		if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF) {
			switch (sw->tmu.mode_request) {
			case TB_SWITCH_TMU_MODE_LOWRES:
			case TB_SWITCH_TMU_MODE_HIFI_UNI:
				ret = tb_switch_tmu_enable_unidirectional(sw);
				break;

			case TB_SWITCH_TMU_MODE_HIFI_BI:
				ret = tb_switch_tmu_enable_bidirectional(sw);
				break;
			case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
				ret = tb_switch_tmu_enable_enhanced(sw);
				break;
			default:
				ret = -EINVAL;
				break;
			}
		} else if (sw->tmu.mode == TB_SWITCH_TMU_MODE_LOWRES ||
			   sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_UNI ||
			   sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_BI) {
			ret = tb_switch_tmu_change_mode(sw);
		} else {
			ret = -EINVAL;
		}
	} else {
		/*
		 * Host router port configurations are written as part of
		 * the configuration of the downstream port of the parent
		 * of the child node - see above. Here only the host
		 * router's rate configuration is written.
		 */
		ret = tb_switch_tmu_rate_write(sw, tmu_rates[sw->tmu.mode_request]);
	}

	if (ret) {
		tb_sw_warn(sw, "TMU: failed to enable mode %s: %d\n",
			   tmu_mode_name(sw->tmu.mode_request), ret);
	} else {
		sw->tmu.mode = sw->tmu.mode_request;
		tb_sw_dbg(sw, "TMU: mode set to: %s\n", tmu_mode_name(sw->tmu.mode));
	}

	return tb_switch_tmu_set_time_disruption(sw, false);
}

/**
 * tb_switch_tmu_configure() - Configure the TMU mode
 * @sw: Router whose mode to change
 * @mode: Mode to configure
 *
 * Selects the TMU mode that is enabled when tb_switch_tmu_enable() is
 * next called.
 *
 * Returns %0 on success and negative errno otherwise. Specifically
 * returns %-EOPNOTSUPP if the requested mode is not possible (not
 * supported by the router and/or topology).
 */
int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode)
{
	switch (mode) {
	case TB_SWITCH_TMU_MODE_OFF:
		break;

	case TB_SWITCH_TMU_MODE_LOWRES:
	case TB_SWITCH_TMU_MODE_HIFI_UNI:
		if (!sw->tmu.has_ucap)
			return -EOPNOTSUPP;
		break;

	case TB_SWITCH_TMU_MODE_HIFI_BI:
		break;

	case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: {
		const struct tb_switch *parent_sw = tb_switch_parent(sw);

		if (!parent_sw || !tb_switch_tmu_enhanced_is_supported(parent_sw))
			return -EOPNOTSUPP;
		if (!tb_switch_tmu_enhanced_is_supported(sw))
			return -EOPNOTSUPP;

		break;
	}

	default:
		tb_sw_warn(sw, "TMU: unsupported mode %u\n", mode);
		return -EINVAL;
	}

	if (sw->tmu.mode_request != mode) {
		tb_sw_dbg(sw, "TMU: mode change %s -> %s requested\n",
			  tmu_mode_name(sw->tmu.mode), tmu_mode_name(mode));
		sw->tmu.mode_request = mode;
	}

	return 0;
}
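
/*
 * Typical usage from a connection manager is to pick the mode first and
 * then enable it, for example (a sketch; error handling and locking
 * follow the caller's conventions):
 *
 *	ret = tb_switch_tmu_configure(sw, TB_SWITCH_TMU_MODE_HIFI_BI);
 *	if (!ret)
 *		ret = tb_switch_tmu_enable(sw);
 */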