xref: /openbmc/linux/drivers/thunderbolt/tmu.c (revision cb625ec6)
1cf29b9afSRajmohan Mani // SPDX-License-Identifier: GPL-2.0
2cf29b9afSRajmohan Mani /*
3cf29b9afSRajmohan Mani  * Thunderbolt Time Management Unit (TMU) support
4cf29b9afSRajmohan Mani  *
5cf29b9afSRajmohan Mani  * Copyright (C) 2019, Intel Corporation
6cf29b9afSRajmohan Mani  * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
7cf29b9afSRajmohan Mani  *	    Rajmohan Mani <rajmohan.mani@intel.com>
8cf29b9afSRajmohan Mani  */
9cf29b9afSRajmohan Mani 
10cf29b9afSRajmohan Mani #include <linux/delay.h>
11cf29b9afSRajmohan Mani 
12cf29b9afSRajmohan Mani #include "tb.h"
13cf29b9afSRajmohan Mani 
14b017a46dSGil Fine static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
15b017a46dSGil Fine 					 enum tb_switch_tmu_rate rate)
16b017a46dSGil Fine {
17b017a46dSGil Fine 	u32 freq_meas_wind[2] = { 30, 800 };
18b017a46dSGil Fine 	u32 avg_const[2] = { 4, 8 };
19b017a46dSGil Fine 	u32 freq, avg, val;
20b017a46dSGil Fine 	int ret;
21b017a46dSGil Fine 
22b017a46dSGil Fine 	if (rate == TB_SWITCH_TMU_RATE_NORMAL) {
23b017a46dSGil Fine 		freq = freq_meas_wind[0];
24b017a46dSGil Fine 		avg = avg_const[0];
25b017a46dSGil Fine 	} else if (rate == TB_SWITCH_TMU_RATE_HIFI) {
26b017a46dSGil Fine 		freq = freq_meas_wind[1];
27b017a46dSGil Fine 		avg = avg_const[1];
28b017a46dSGil Fine 	} else {
29b017a46dSGil Fine 		return 0;
30b017a46dSGil Fine 	}
31b017a46dSGil Fine 
32b017a46dSGil Fine 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
33b017a46dSGil Fine 			 sw->tmu.cap + TMU_RTR_CS_0, 1);
34b017a46dSGil Fine 	if (ret)
35b017a46dSGil Fine 		return ret;
36b017a46dSGil Fine 
37b017a46dSGil Fine 	val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK;
38b017a46dSGil Fine 	val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq);
39b017a46dSGil Fine 
40b017a46dSGil Fine 	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
41b017a46dSGil Fine 			  sw->tmu.cap + TMU_RTR_CS_0, 1);
42b017a46dSGil Fine 	if (ret)
43b017a46dSGil Fine 		return ret;
44b017a46dSGil Fine 
45b017a46dSGil Fine 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
46b017a46dSGil Fine 			 sw->tmu.cap + TMU_RTR_CS_15, 1);
47b017a46dSGil Fine 	if (ret)
48b017a46dSGil Fine 		return ret;
49b017a46dSGil Fine 
50b017a46dSGil Fine 	val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK &
51b017a46dSGil Fine 		~TMU_RTR_CS_15_DELAY_AVG_MASK &
52b017a46dSGil Fine 		~TMU_RTR_CS_15_OFFSET_AVG_MASK &
53b017a46dSGil Fine 		~TMU_RTR_CS_15_ERROR_AVG_MASK;
54b017a46dSGil Fine 	val |=  FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) |
55b017a46dSGil Fine 		FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) |
56b017a46dSGil Fine 		FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
57b017a46dSGil Fine 		FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);
58b017a46dSGil Fine 
59b017a46dSGil Fine 	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
60b017a46dSGil Fine 			   sw->tmu.cap + TMU_RTR_CS_15, 1);
61b017a46dSGil Fine }
62b017a46dSGil Fine 
63cf29b9afSRajmohan Mani static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw)
64cf29b9afSRajmohan Mani {
65cf29b9afSRajmohan Mani 	bool root_switch = !tb_route(sw);
66cf29b9afSRajmohan Mani 
67cf29b9afSRajmohan Mani 	switch (sw->tmu.rate) {
68cf29b9afSRajmohan Mani 	case TB_SWITCH_TMU_RATE_OFF:
69cf29b9afSRajmohan Mani 		return "off";
70cf29b9afSRajmohan Mani 
71cf29b9afSRajmohan Mani 	case TB_SWITCH_TMU_RATE_HIFI:
72cf29b9afSRajmohan Mani 		/* Root switch does not have upstream directionality */
73cf29b9afSRajmohan Mani 		if (root_switch)
74cf29b9afSRajmohan Mani 			return "HiFi";
75cf29b9afSRajmohan Mani 		if (sw->tmu.unidirectional)
76cf29b9afSRajmohan Mani 			return "uni-directional, HiFi";
77cf29b9afSRajmohan Mani 		return "bi-directional, HiFi";
78cf29b9afSRajmohan Mani 
79cf29b9afSRajmohan Mani 	case TB_SWITCH_TMU_RATE_NORMAL:
80cf29b9afSRajmohan Mani 		if (root_switch)
81cf29b9afSRajmohan Mani 			return "normal";
82cf29b9afSRajmohan Mani 		return "uni-directional, normal";
83cf29b9afSRajmohan Mani 
84cf29b9afSRajmohan Mani 	default:
85cf29b9afSRajmohan Mani 		return "unknown";
86cf29b9afSRajmohan Mani 	}
87cf29b9afSRajmohan Mani }
88cf29b9afSRajmohan Mani 
89cf29b9afSRajmohan Mani static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw)
90cf29b9afSRajmohan Mani {
91cf29b9afSRajmohan Mani 	int ret;
92cf29b9afSRajmohan Mani 	u32 val;
93cf29b9afSRajmohan Mani 
94cf29b9afSRajmohan Mani 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
95cf29b9afSRajmohan Mani 			 sw->tmu.cap + TMU_RTR_CS_0, 1);
96cf29b9afSRajmohan Mani 	if (ret)
97cf29b9afSRajmohan Mani 		return false;
98cf29b9afSRajmohan Mani 
99cf29b9afSRajmohan Mani 	return !!(val & TMU_RTR_CS_0_UCAP);
100cf29b9afSRajmohan Mani }
101cf29b9afSRajmohan Mani 
102cf29b9afSRajmohan Mani static int tb_switch_tmu_rate_read(struct tb_switch *sw)
103cf29b9afSRajmohan Mani {
104cf29b9afSRajmohan Mani 	int ret;
105cf29b9afSRajmohan Mani 	u32 val;
106cf29b9afSRajmohan Mani 
107cf29b9afSRajmohan Mani 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
108cf29b9afSRajmohan Mani 			 sw->tmu.cap + TMU_RTR_CS_3, 1);
109cf29b9afSRajmohan Mani 	if (ret)
110cf29b9afSRajmohan Mani 		return ret;
111cf29b9afSRajmohan Mani 
112cf29b9afSRajmohan Mani 	val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
113cf29b9afSRajmohan Mani 	return val;
114cf29b9afSRajmohan Mani }
115cf29b9afSRajmohan Mani 
116cf29b9afSRajmohan Mani static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
117cf29b9afSRajmohan Mani {
118cf29b9afSRajmohan Mani 	int ret;
119cf29b9afSRajmohan Mani 	u32 val;
120cf29b9afSRajmohan Mani 
121cf29b9afSRajmohan Mani 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
122cf29b9afSRajmohan Mani 			 sw->tmu.cap + TMU_RTR_CS_3, 1);
123cf29b9afSRajmohan Mani 	if (ret)
124cf29b9afSRajmohan Mani 		return ret;
125cf29b9afSRajmohan Mani 
126cf29b9afSRajmohan Mani 	val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
127cf29b9afSRajmohan Mani 	val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
128cf29b9afSRajmohan Mani 
129cf29b9afSRajmohan Mani 	return tb_sw_write(sw, &val, TB_CFG_SWITCH,
130cf29b9afSRajmohan Mani 			   sw->tmu.cap + TMU_RTR_CS_3, 1);
131cf29b9afSRajmohan Mani }
132cf29b9afSRajmohan Mani 
133cf29b9afSRajmohan Mani static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
134cf29b9afSRajmohan Mani 			     u32 value)
135cf29b9afSRajmohan Mani {
136cf29b9afSRajmohan Mani 	u32 data;
137cf29b9afSRajmohan Mani 	int ret;
138cf29b9afSRajmohan Mani 
139cf29b9afSRajmohan Mani 	ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
140cf29b9afSRajmohan Mani 	if (ret)
141cf29b9afSRajmohan Mani 		return ret;
142cf29b9afSRajmohan Mani 
143cf29b9afSRajmohan Mani 	data &= ~mask;
144cf29b9afSRajmohan Mani 	data |= value;
145cf29b9afSRajmohan Mani 
146cf29b9afSRajmohan Mani 	return tb_port_write(port, &data, TB_CFG_PORT,
147cf29b9afSRajmohan Mani 			     port->cap_tmu + offset, 1);
148cf29b9afSRajmohan Mani }
149cf29b9afSRajmohan Mani 
150cf29b9afSRajmohan Mani static int tb_port_tmu_set_unidirectional(struct tb_port *port,
151cf29b9afSRajmohan Mani 					  bool unidirectional)
152cf29b9afSRajmohan Mani {
153cf29b9afSRajmohan Mani 	u32 val;
154cf29b9afSRajmohan Mani 
155cf29b9afSRajmohan Mani 	if (!port->sw->tmu.has_ucap)
156cf29b9afSRajmohan Mani 		return 0;
157cf29b9afSRajmohan Mani 
158cf29b9afSRajmohan Mani 	val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
159cf29b9afSRajmohan Mani 	return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
160cf29b9afSRajmohan Mani }
161cf29b9afSRajmohan Mani 
/* Clear the UDM bit, putting the adapter into bi-directional mode */
static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, false);
}
166cf29b9afSRajmohan Mani 
/* Set the UDM bit, putting the adapter into uni-directional mode */
static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
{
	return tb_port_tmu_set_unidirectional(port, true);
}
171a28ec0e1SGil Fine 
172cf29b9afSRajmohan Mani static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
173cf29b9afSRajmohan Mani {
174cf29b9afSRajmohan Mani 	int ret;
175cf29b9afSRajmohan Mani 	u32 val;
176cf29b9afSRajmohan Mani 
177cf29b9afSRajmohan Mani 	ret = tb_port_read(port, &val, TB_CFG_PORT,
178cf29b9afSRajmohan Mani 			   port->cap_tmu + TMU_ADP_CS_3, 1);
179cf29b9afSRajmohan Mani 	if (ret)
180cf29b9afSRajmohan Mani 		return false;
181cf29b9afSRajmohan Mani 
182cf29b9afSRajmohan Mani 	return val & TMU_ADP_CS_3_UDM;
183cf29b9afSRajmohan Mani }
184cf29b9afSRajmohan Mani 
/*
 * Set or clear the DTS bit in TMU_ADP_CS_6 of the adapter.
 *
 * NOTE(review): judging by the _disable()/_enable() wrappers, DTS is
 * presumably a "Disable Time Sync" bit, i.e. @time_sync == true SETS
 * the bit and thus disables sync. The parameter name reads inverted —
 * confirm against the USB4 spec before changing anything here.
 */
static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
{
	u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;

	return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
}
191a28ec0e1SGil Fine 
/* Disable time sync on @port (true sets the DTS "disable" bit) */
static int tb_port_tmu_time_sync_disable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, true);
}
196a28ec0e1SGil Fine 
/* Enable time sync on @port (false clears the DTS "disable" bit) */
static int tb_port_tmu_time_sync_enable(struct tb_port *port)
{
	return tb_port_tmu_time_sync(port, false);
}
201a28ec0e1SGil Fine 
202cf29b9afSRajmohan Mani static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
203cf29b9afSRajmohan Mani {
20423ccd21cSGil Fine 	u32 val, offset, bit;
205cf29b9afSRajmohan Mani 	int ret;
206cf29b9afSRajmohan Mani 
20723ccd21cSGil Fine 	if (tb_switch_is_usb4(sw)) {
20823ccd21cSGil Fine 		offset = sw->tmu.cap + TMU_RTR_CS_0;
20923ccd21cSGil Fine 		bit = TMU_RTR_CS_0_TD;
21023ccd21cSGil Fine 	} else {
21123ccd21cSGil Fine 		offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
21223ccd21cSGil Fine 		bit = TB_TIME_VSEC_3_CS_26_TD;
21323ccd21cSGil Fine 	}
21423ccd21cSGil Fine 
21523ccd21cSGil Fine 	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
216cf29b9afSRajmohan Mani 	if (ret)
217cf29b9afSRajmohan Mani 		return ret;
218cf29b9afSRajmohan Mani 
219cf29b9afSRajmohan Mani 	if (set)
22023ccd21cSGil Fine 		val |= bit;
221cf29b9afSRajmohan Mani 	else
22223ccd21cSGil Fine 		val &= ~bit;
223cf29b9afSRajmohan Mani 
22423ccd21cSGil Fine 	return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
225cf29b9afSRajmohan Mani }
226cf29b9afSRajmohan Mani 
/**
 * tb_switch_tmu_init() - Initialize switch TMU structures
 * @sw: Switch to initialize
 *
 * This function must be called before other TMU related functions to
 * make sure the internal structures are filled in correctly. Does not
 * change any hardware configuration.
 */
int tb_switch_tmu_init(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	/* In ICM (firmware connection manager) mode leave TMU alone */
	if (tb_switch_is_icm(sw))
		return 0;

	/* Cache the router TMU capability offset */
	ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
	if (ret > 0)
		sw->tmu.cap = ret;

	/* Cache the per-adapter TMU capability offsets */
	tb_switch_for_each_port(sw, port) {
		int cap;

		cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
		if (cap > 0)
			port->cap_tmu = cap;
	}

	/* Record the rate currently programmed in the hardware */
	ret = tb_switch_tmu_rate_read(sw);
	if (ret < 0)
		return ret;

	sw->tmu.rate = ret;

	sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw);
	if (sw->tmu.has_ucap) {
		tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");

		/* Only non-host routers have an upstream adapter to check */
		if (tb_route(sw)) {
			struct tb_port *up = tb_upstream_port(sw);

			sw->tmu.unidirectional =
				tb_port_tmu_is_unidirectional(up);
		}
	} else {
		sw->tmu.unidirectional = false;
	}

	tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw));
	return 0;
}
278cf29b9afSRajmohan Mani 
/**
 * tb_switch_tmu_post_time() - Update switch local time
 * @sw: Switch whose time to update
 *
 * Updates switch local time using time posting procedure: reads the
 * grandmaster (host router) time and posts it to @sw, then waits for
 * the router to signal convergence. Only applies to USB4 routers that
 * have a route (i.e. not the host router itself).
 */
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
	unsigned int post_time_high_offset, post_time_high = 0;
	unsigned int post_local_time_offset, post_time_offset;
	struct tb_switch *root_switch = sw->tb->root_switch;
	u64 hi, mid, lo, local_time, post_time;
	int i, ret, retries = 100;
	u32 gm_local_time[3];

	/* The host router itself is the time source — nothing to post */
	if (!tb_route(sw))
		return 0;

	if (!tb_switch_is_usb4(sw))
		return 0;

	/* Need to be able to read the grand master time */
	if (!root_switch->tmu.cap)
		return 0;

	/* Read the 96-bit (3 x 32-bit) grandmaster local time */
	ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
			 root_switch->tmu.cap + TMU_RTR_CS_1,
			 ARRAY_SIZE(gm_local_time));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
		tb_sw_dbg(root_switch, "TMU: local_time[%d]=0x%08x\n", i,
			  gm_local_time[i]);

	/* Convert to nanoseconds (drop fractional part) */
	hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
	mid = gm_local_time[1];
	lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
		TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
	local_time = hi << 48 | mid << 16 | lo;

	/* Tell the switch that time sync is disrupted for a while */
	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
	post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
	post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;

	/*
	 * Write the Grandmaster time to the Post Local Time registers
	 * of the new switch.
	 */
	ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
			  post_local_time_offset, 2);
	if (ret)
		goto out;

	/*
	 * Have the new switch update its local time by:
	 * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
	 * Post Time High register.
	 * 2) write 0 to Post Time High register and then wait for
	 * the completion of the post_time register becomes 0.
	 * This means the time has been converged properly.
	 */
	post_time = 0xffffffff00000001ULL;

	ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
	if (ret)
		goto out;

	ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
			  post_time_high_offset, 1);
	if (ret)
		goto out;

	/* Poll until the router clears post_time, bounded by 100 tries */
	do {
		usleep_range(5, 10);
		ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
				 post_time_offset, 2);
		if (ret)
			goto out;
	} while (--retries && post_time);

	if (!retries) {
		ret = -ETIMEDOUT;
		goto out;
	}

	tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);

out:
	/* Always clear the disruption bit, even on error paths */
	tb_switch_tmu_set_time_disruption(sw, false);
	return ret;
}
377cf29b9afSRajmohan Mani 
/**
 * tb_switch_tmu_disable() - Disable TMU of a switch
 * @sw: Switch whose TMU to disable
 *
 * Turns off TMU of @sw if it is enabled. If not enabled does nothing.
 */
int tb_switch_tmu_disable(struct tb_switch *sw)
{
	/* Already disabled? */
	if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
		return 0;

	if (tb_route(sw)) {
		bool unidirectional = sw->tmu.unidirectional;
		struct tb_port *down, *up;
		int ret;

		down = tb_switch_downstream_port(sw);
		up = tb_upstream_port(sw);
		/*
		 * In case of uni-directional time sync, TMU handshake is
		 * initiated by upstream router. In case of bi-directional
		 * time sync, TMU handshake is initiated by downstream router.
		 * We change downstream router's rate to off for both uni/bidir
		 * cases although it is needed only for the bi-directional mode.
		 * We avoid changing upstream router's mode since it might
		 * have another downstream router plugged, that is set to
		 * uni-directional mode and we don't want to change its TMU
		 * mode.
		 */
		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);

		/* Upstream error is deliberately ignored; only down checked */
		tb_port_tmu_time_sync_disable(up);
		ret = tb_port_tmu_time_sync_disable(down);
		if (ret)
			return ret;

		if (unidirectional) {
			/* The switch may be unplugged so ignore any errors */
			tb_port_tmu_unidirectional_disable(up);
			ret = tb_port_tmu_unidirectional_disable(down);
			if (ret)
				return ret;
		}
	} else {
		/* Host router: only the rate needs to be turned off */
		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
	}

	sw->tmu.unidirectional = false;
	sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;

	tb_sw_dbg(sw, "TMU: disabled\n");
	return 0;
}
432cf29b9afSRajmohan Mani 
/* Roll TMU configuration back to the "off" state after a failed enable */
static void tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional)
{
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when setting
	 * bi-directional or uni-directional TMU mode, get back to the TMU
	 * configurations in off mode. In case of additional failures in
	 * the functions below, ignore them since the caller shall already
	 * report a failure.
	 */
	tb_port_tmu_time_sync_disable(down);
	tb_port_tmu_time_sync_disable(up);
	/* In uni-directional mode the rate is programmed in the parent */
	if (unidirectional)
		tb_switch_tmu_rate_write(tb_switch_parent(sw),
					 TB_SWITCH_TMU_RATE_OFF);
	else
		tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);

	/* Restore the mode parameters for the currently recorded rate */
	tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
	tb_port_tmu_unidirectional_disable(down);
	tb_port_tmu_unidirectional_disable(up);
}
458a28ec0e1SGil Fine 
/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_RATE_OFF. Enables bi-directional time sync on the link
 * between @sw and its parent; this path always programs the HiFi rate.
 */
static int tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);

	/* Make sure neither side of the link is in uni-directional mode */
	ret = tb_port_tmu_unidirectional_disable(up);
	if (ret)
		return ret;

	ret = tb_port_tmu_unidirectional_disable(down);
	if (ret)
		goto out;

	ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	/* Undo the partial configuration; errors here are ignored */
	tb_switch_tmu_off(sw, false);
	return ret;
}
497a28ec0e1SGil Fine 
/* Only needed for Titan Ridge */
static int tb_switch_tmu_disable_objections(struct tb_switch *sw)
{
	struct tb_port *up = tb_upstream_port(sw);
	u32 val;
	int ret;

	ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
			 sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
	if (ret)
		return ret;

	/* Clear the TMU objection bits in the vendor specific register */
	val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;

	ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
			  sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
	if (ret)
		return ret;

	/* Disable TMU objections for CL1 and CL2 on the upstream adapter */
	return tb_port_tmu_write(up, TMU_ADP_CS_6,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 |
				 TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2);
}
52243f977bcSGil Fine 
/*
 * This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_RATE_OFF. Enables uni-directional time sync at the
 * requested rate; in this mode the rate is programmed in the parent
 * router, not in @sw itself.
 */
static int tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);
	ret = tb_switch_tmu_rate_write(tb_switch_parent(sw),
				       sw->tmu.rate_request);
	if (ret)
		return ret;

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
	if (ret)
		return ret;

	/* Upstream adapter first, then downstream */
	ret = tb_port_tmu_unidirectional_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	ret = tb_port_tmu_unidirectional_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	return 0;

out:
	/* Undo the partial configuration; errors here are ignored */
	tb_switch_tmu_off(sw, true);
	return ret;
}
565a28ec0e1SGil Fine 
/* Roll a failed mode change back to the previous TMU configuration */
static void tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
{
	struct tb_port *down, *up;

	down = tb_switch_downstream_port(sw);
	up = tb_upstream_port(sw);
	/*
	 * In case of any failure in one of the steps when change mode,
	 * get back to the TMU configurations in previous mode.
	 * In case of additional failures in the functions below,
	 * ignore them since the caller shall already report a failure.
	 */
	tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional);
	/* The rate lives in the parent when the request was uni-directional */
	if (sw->tmu.unidirectional_request)
		tb_switch_tmu_rate_write(tb_switch_parent(sw), sw->tmu.rate);
	else
		tb_switch_tmu_rate_write(sw, sw->tmu.rate);

	tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
	tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional);
}
587b017a46dSGil Fine 
/*
 * Switch from the current TMU mode to the requested one without going
 * through the off state (e.g. Normal-Uni to HiFi-Uni).
 */
static int tb_switch_tmu_change_mode(struct tb_switch *sw)
{
	struct tb_port *up, *down;
	int ret;

	up = tb_upstream_port(sw);
	down = tb_switch_downstream_port(sw);
	ret = tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional_request);
	if (ret)
		goto out;

	/* Rate goes to the parent for uni-directional, to @sw for bidir */
	if (sw->tmu.unidirectional_request)
		ret = tb_switch_tmu_rate_write(tb_switch_parent(sw),
					       sw->tmu.rate_request);
	else
		ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
	/*
	 * NOTE(review): the two "return ret" paths below skip the
	 * rollback at the "out" label even though the downstream
	 * adapter's direction was already changed above — verify
	 * whether they should be "goto out" instead.
	 */
	if (ret)
		return ret;

	ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
	if (ret)
		return ret;

	ret = tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional_request);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(down);
	if (ret)
		goto out;

	ret = tb_port_tmu_time_sync_enable(up);
	if (ret)
		goto out;

	return 0;

out:
	tb_switch_tmu_change_mode_prev(sw);
	return ret;
}
629b017a46dSGil Fine 
/**
 * tb_switch_tmu_enable() - Enable TMU on a router
 * @sw: Router whose TMU to enable
 *
 * Enables TMU of a router to be in uni-directional Normal/HiFi or
 * bi-directional HiFi mode. Calling tb_switch_tmu_configure() is
 * required before calling this function.
 */
int tb_switch_tmu_enable(struct tb_switch *sw)
{
	bool unidirectional = sw->tmu.unidirectional_request;
	int ret;

	/* Nothing to do if the requested mode is already active */
	if (tb_switch_tmu_is_enabled(sw))
		return 0;

	if (tb_switch_is_titan_ridge(sw) && unidirectional) {
		ret = tb_switch_tmu_disable_objections(sw);
		if (ret)
			return ret;
	}

	ret = tb_switch_tmu_set_time_disruption(sw, true);
	if (ret)
		return ret;

	/*
	 * NOTE(review): the error returns below leave the time
	 * disruption bit set — verify whether it should be cleared on
	 * these paths as well.
	 */
	if (tb_route(sw)) {
		/*
		 * The used mode changes are from OFF to
		 * HiFi-Uni/HiFi-BiDir/Normal-Uni or from Normal-Uni to
		 * HiFi-Uni.
		 */
		if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) {
			if (unidirectional)
				ret = tb_switch_tmu_enable_unidirectional(sw);
			else
				ret = tb_switch_tmu_enable_bidirectional(sw);
			if (ret)
				return ret;
		} else if (sw->tmu.rate == TB_SWITCH_TMU_RATE_NORMAL) {
			ret = tb_switch_tmu_change_mode(sw);
			if (ret)
				return ret;
		}
		sw->tmu.unidirectional = unidirectional;
	} else {
		/*
		 * Host router port configurations are written as
		 * part of configurations for downstream port of the parent
		 * of the child node - see above.
		 * Here only the host router' rate configuration is written.
		 */
		ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
		if (ret)
			return ret;
	}

	sw->tmu.rate = sw->tmu.rate_request;

	tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
	return tb_switch_tmu_set_time_disruption(sw, false);
}
692a28ec0e1SGil Fine 
693a28ec0e1SGil Fine /**
694a28ec0e1SGil Fine  * tb_switch_tmu_configure() - Configure the TMU rate and directionality
695a28ec0e1SGil Fine  * @sw: Router whose mode to change
696b4e08d5dSGil Fine  * @rate: Rate to configure Off/Normal/HiFi
697a28ec0e1SGil Fine  * @unidirectional: If uni-directional (bi-directional otherwise)
698a28ec0e1SGil Fine  *
699a28ec0e1SGil Fine  * Selects the rate of the TMU and directionality (uni-directional or
700a28ec0e1SGil Fine  * bi-directional). Must be called before tb_switch_tmu_enable().
701ef34add8SMika Westerberg  *
702ef34add8SMika Westerberg  * Returns %0 in success and negative errno otherwise.
703a28ec0e1SGil Fine  */
704ef34add8SMika Westerberg int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_rate rate,
705ef34add8SMika Westerberg 			    bool unidirectional)
706a28ec0e1SGil Fine {
707ef34add8SMika Westerberg 	if (unidirectional && !sw->tmu.has_ucap)
708ef34add8SMika Westerberg 		return -EINVAL;
709ef34add8SMika Westerberg 
710a28ec0e1SGil Fine 	sw->tmu.unidirectional_request = unidirectional;
711a28ec0e1SGil Fine 	sw->tmu.rate_request = rate;
712ef34add8SMika Westerberg 	return 0;
713a28ec0e1SGil Fine }
714