1 /*
2  * ff-stream.c - a part of driver for RME Fireface series
3  *
4  * Copyright (c) 2015-2017 Takashi Sakamoto
5  *
6  * Licensed under the terms of the GNU General Public License, version 2.
7  */
8 
9 #include "ff.h"
10 
11 #define CALLBACK_TIMEOUT_MS	200
12 
13 int snd_ff_stream_get_multiplier_mode(enum cip_sfc sfc,
14 				      enum snd_ff_stream_mode *mode)
15 {
16 	static const enum snd_ff_stream_mode modes[] = {
17 		[CIP_SFC_32000] = SND_FF_STREAM_MODE_LOW,
18 		[CIP_SFC_44100] = SND_FF_STREAM_MODE_LOW,
19 		[CIP_SFC_48000] = SND_FF_STREAM_MODE_LOW,
20 		[CIP_SFC_88200] = SND_FF_STREAM_MODE_MID,
21 		[CIP_SFC_96000] = SND_FF_STREAM_MODE_MID,
22 		[CIP_SFC_176400] = SND_FF_STREAM_MODE_HIGH,
23 		[CIP_SFC_192000] = SND_FF_STREAM_MODE_HIGH,
24 	};
25 
26 	if (sfc >= CIP_SFC_COUNT)
27 		return -EINVAL;
28 
29 	*mode = modes[sfc];
30 
31 	return 0;
32 }
33 
/*
 * Fireface 400 manages its isochronous channel number in a 3 bit field.
 * Therefore, an isochronous channel between 0 and 7 can be allocated.
 */
38 static int keep_resources(struct snd_ff *ff, unsigned int rate)
39 {
40 	enum snd_ff_stream_mode mode;
41 	int i;
42 	int err;
43 
44 	for (i = 0; i < CIP_SFC_COUNT; ++i) {
45 		if (amdtp_rate_table[i] == rate)
46 			break;
47 	}
48 	if (i == CIP_SFC_COUNT)
49 		return -EINVAL;
50 
51 	err = snd_ff_stream_get_multiplier_mode(i, &mode);
52 	if (err < 0)
53 		return err;
54 
55 	/* Keep resources for in-stream. */
56 	err = amdtp_ff_set_parameters(&ff->tx_stream, rate,
57 				      ff->spec->pcm_capture_channels[mode]);
58 	if (err < 0)
59 		return err;
60 	ff->tx_resources.channels_mask = 0x00000000000000ffuLL;
61 	err = fw_iso_resources_allocate(&ff->tx_resources,
62 			amdtp_stream_get_max_payload(&ff->tx_stream),
63 			fw_parent_device(ff->unit)->max_speed);
64 	if (err < 0)
65 		return err;
66 
67 	/* Keep resources for out-stream. */
68 	err = amdtp_ff_set_parameters(&ff->rx_stream, rate,
69 				      ff->spec->pcm_playback_channels[mode]);
70 	if (err < 0)
71 		return err;
72 	ff->rx_resources.channels_mask = 0x00000000000000ffuLL;
73 	err = fw_iso_resources_allocate(&ff->rx_resources,
74 			amdtp_stream_get_max_payload(&ff->rx_stream),
75 			fw_parent_device(ff->unit)->max_speed);
76 	if (err < 0)
77 		fw_iso_resources_free(&ff->tx_resources);
78 
79 	return err;
80 }
81 
82 static void release_resources(struct snd_ff *ff)
83 {
84 	fw_iso_resources_free(&ff->tx_resources);
85 	fw_iso_resources_free(&ff->rx_resources);
86 }
87 
88 static int switch_fetching_mode(struct snd_ff *ff, bool enable)
89 {
90 	unsigned int count;
91 	__le32 *reg;
92 	int i;
93 	int err;
94 
95 	count = 0;
96 	for (i = 0; i < SND_FF_STREAM_MODE_COUNT; ++i)
97 		count = max(count, ff->spec->pcm_playback_channels[i]);
98 
99 	reg = kcalloc(count, sizeof(__le32), GFP_KERNEL);
100 	if (!reg)
101 		return -ENOMEM;
102 
103 	if (!enable) {
104 		/*
105 		 * Each quadlet is corresponding to data channels in a data
106 		 * blocks in reverse order. Precisely, quadlets for available
107 		 * data channels should be enabled. Here, I take second best
108 		 * to fetch PCM frames from all of data channels regardless of
109 		 * stf.
110 		 */
111 		for (i = 0; i < count; ++i)
112 			reg[i] = cpu_to_le32(0x00000001);
113 	}
114 
115 	err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
116 				 SND_FF_REG_FETCH_PCM_FRAMES, reg,
117 				 sizeof(__le32) * count, 0);
118 	kfree(reg);
119 	return err;
120 }
121 
/*
 * End the current session on the device via the model-specific protocol,
 * then write the frame fetching register with enable=false (see
 * switch_fetching_mode()); its return value is intentionally ignored here.
 */
static inline void finish_session(struct snd_ff *ff)
{
	ff->spec->protocol->finish_session(ff);
	switch_fetching_mode(ff, false);
}
127 
128 static int init_stream(struct snd_ff *ff, enum amdtp_stream_direction dir)
129 {
130 	int err;
131 	struct fw_iso_resources *resources;
132 	struct amdtp_stream *stream;
133 
134 	if (dir == AMDTP_IN_STREAM) {
135 		resources = &ff->tx_resources;
136 		stream = &ff->tx_stream;
137 	} else {
138 		resources = &ff->rx_resources;
139 		stream = &ff->rx_stream;
140 	}
141 
142 	err = fw_iso_resources_init(resources, ff->unit);
143 	if (err < 0)
144 		return err;
145 
146 	err = amdtp_ff_init(stream, ff->unit, dir);
147 	if (err < 0)
148 		fw_iso_resources_destroy(resources);
149 
150 	return err;
151 }
152 
153 static void destroy_stream(struct snd_ff *ff, enum amdtp_stream_direction dir)
154 {
155 	if (dir == AMDTP_IN_STREAM) {
156 		amdtp_stream_destroy(&ff->tx_stream);
157 		fw_iso_resources_destroy(&ff->tx_resources);
158 	} else {
159 		amdtp_stream_destroy(&ff->rx_stream);
160 		fw_iso_resources_destroy(&ff->rx_resources);
161 	}
162 }
163 
164 int snd_ff_stream_init_duplex(struct snd_ff *ff)
165 {
166 	int err;
167 
168 	err = init_stream(ff, AMDTP_OUT_STREAM);
169 	if (err < 0)
170 		goto end;
171 
172 	err = init_stream(ff, AMDTP_IN_STREAM);
173 	if (err < 0)
174 		destroy_stream(ff, AMDTP_OUT_STREAM);
175 end:
176 	return err;
177 }
178 
/*
 * Destroy both streams and their isochronous resources. This function
 * should be called before starting streams or after stopping streams.
 */
void snd_ff_stream_destroy_duplex(struct snd_ff *ff)
{
	destroy_stream(ff, AMDTP_IN_STREAM);
	destroy_stream(ff, AMDTP_OUT_STREAM);
}
188 
189 int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate)
190 {
191 	unsigned int curr_rate;
192 	enum snd_ff_clock_src src;
193 	int err;
194 
195 	if (ff->substreams_counter == 0)
196 		return 0;
197 
198 	err = snd_ff_transaction_get_clock(ff, &curr_rate, &src);
199 	if (err < 0)
200 		return err;
201 	if (curr_rate != rate ||
202 	    amdtp_streaming_error(&ff->tx_stream) ||
203 	    amdtp_streaming_error(&ff->rx_stream)) {
204 		finish_session(ff);
205 
206 		amdtp_stream_stop(&ff->tx_stream);
207 		amdtp_stream_stop(&ff->rx_stream);
208 
209 		release_resources(ff);
210 	}
211 
212 	/*
213 	 * Regardless of current source of clock signal, drivers transfer some
214 	 * packets. Then, the device transfers packets.
215 	 */
216 	if (!amdtp_stream_running(&ff->rx_stream)) {
217 		err = keep_resources(ff, rate);
218 		if (err < 0)
219 			goto error;
220 
221 		err = ff->spec->protocol->begin_session(ff, rate);
222 		if (err < 0)
223 			goto error;
224 
225 		err = amdtp_stream_start(&ff->rx_stream,
226 					 ff->rx_resources.channel,
227 					 fw_parent_device(ff->unit)->max_speed);
228 		if (err < 0)
229 			goto error;
230 
231 		if (!amdtp_stream_wait_callback(&ff->rx_stream,
232 						CALLBACK_TIMEOUT_MS)) {
233 			err = -ETIMEDOUT;
234 			goto error;
235 		}
236 
237 		err = switch_fetching_mode(ff, true);
238 		if (err < 0)
239 			goto error;
240 	}
241 
242 	if (!amdtp_stream_running(&ff->tx_stream)) {
243 		err = amdtp_stream_start(&ff->tx_stream,
244 					 ff->tx_resources.channel,
245 					 fw_parent_device(ff->unit)->max_speed);
246 		if (err < 0)
247 			goto error;
248 
249 		if (!amdtp_stream_wait_callback(&ff->tx_stream,
250 						CALLBACK_TIMEOUT_MS)) {
251 			err = -ETIMEDOUT;
252 			goto error;
253 		}
254 	}
255 
256 	return 0;
257 error:
258 	amdtp_stream_stop(&ff->tx_stream);
259 	amdtp_stream_stop(&ff->rx_stream);
260 
261 	finish_session(ff);
262 	release_resources(ff);
263 
264 	return err;
265 }
266 
/*
 * Stop both streams, finish the device session and release isochronous
 * resources, but only once the last substream user has gone away.
 */
void snd_ff_stream_stop_duplex(struct snd_ff *ff)
{
	/* Keep streaming while any substream user remains. */
	if (ff->substreams_counter > 0)
		return;

	amdtp_stream_stop(&ff->tx_stream);
	amdtp_stream_stop(&ff->rx_stream);
	finish_session(ff);
	release_resources(ff);
}
277 
/*
 * Abort and stop both streams, then update the isochronous resource
 * allocations. NOTE(review): fw_iso_resources_update() suggests this runs
 * after a bus reset — confirm against the caller.
 */
void snd_ff_stream_update_duplex(struct snd_ff *ff)
{
	/* The device discontinues to transfer packets. */
	amdtp_stream_pcm_abort(&ff->tx_stream);
	amdtp_stream_stop(&ff->tx_stream);

	amdtp_stream_pcm_abort(&ff->rx_stream);
	amdtp_stream_stop(&ff->rx_stream);

	/* Re-register the reserved isochronous resources. */
	fw_iso_resources_update(&ff->tx_resources);
	fw_iso_resources_update(&ff->rx_resources);
}
290 
/* Flag a lock-state change and wake any waiter on the hwdep wait queue. */
void snd_ff_stream_lock_changed(struct snd_ff *ff)
{
	ff->dev_lock_changed = true;
	wake_up(&ff->hwdep_wait);
}
296 
297 int snd_ff_stream_lock_try(struct snd_ff *ff)
298 {
299 	int err;
300 
301 	spin_lock_irq(&ff->lock);
302 
303 	/* user land lock this */
304 	if (ff->dev_lock_count < 0) {
305 		err = -EBUSY;
306 		goto end;
307 	}
308 
309 	/* this is the first time */
310 	if (ff->dev_lock_count++ == 0)
311 		snd_ff_stream_lock_changed(ff);
312 	err = 0;
313 end:
314 	spin_unlock_irq(&ff->lock);
315 	return err;
316 }
317 
318 void snd_ff_stream_lock_release(struct snd_ff *ff)
319 {
320 	spin_lock_irq(&ff->lock);
321 
322 	if (WARN_ON(ff->dev_lock_count <= 0))
323 		goto end;
324 	if (--ff->dev_lock_count == 0)
325 		snd_ff_stream_lock_changed(ff);
326 end:
327 	spin_unlock_irq(&ff->lock);
328 }
329