1 /*
2  * ff-stream.c - a part of driver for RME Fireface series
3  *
4  * Copyright (c) 2015-2017 Takashi Sakamoto
5  *
6  * Licensed under the terms of the GNU General Public License, version 2.
7  */
8 
9 #include "ff.h"
10 
11 #define CALLBACK_TIMEOUT_MS	200
12 
13 static int get_rate_mode(unsigned int rate, unsigned int *mode)
14 {
15 	int i;
16 
17 	for (i = 0; i < CIP_SFC_COUNT; i++) {
18 		if (amdtp_rate_table[i] == rate)
19 			break;
20 	}
21 
22 	if (i == CIP_SFC_COUNT)
23 		return -EINVAL;
24 
25 	*mode = ((int)i - 1) / 2;
26 
27 	return 0;
28 }
29 
30 /*
31  * Fireface 400 manages isochronous channel number in 3 bit field. Therefore,
32  * we can allocate between 0 and 7 channel.
33  */
34 static int keep_resources(struct snd_ff *ff, unsigned int rate)
35 {
36 	int mode;
37 	int err;
38 
39 	err = get_rate_mode(rate, &mode);
40 	if (err < 0)
41 		return err;
42 
43 	/* Keep resources for in-stream. */
44 	err = amdtp_ff_set_parameters(&ff->tx_stream, rate,
45 				      ff->spec->pcm_capture_channels[mode]);
46 	if (err < 0)
47 		return err;
48 	ff->tx_resources.channels_mask = 0x00000000000000ffuLL;
49 	err = fw_iso_resources_allocate(&ff->tx_resources,
50 			amdtp_stream_get_max_payload(&ff->tx_stream),
51 			fw_parent_device(ff->unit)->max_speed);
52 	if (err < 0)
53 		return err;
54 
55 	/* Keep resources for out-stream. */
56 	err = amdtp_ff_set_parameters(&ff->rx_stream, rate,
57 				      ff->spec->pcm_playback_channels[mode]);
58 	if (err < 0)
59 		return err;
60 	ff->rx_resources.channels_mask = 0x00000000000000ffuLL;
61 	err = fw_iso_resources_allocate(&ff->rx_resources,
62 			amdtp_stream_get_max_payload(&ff->rx_stream),
63 			fw_parent_device(ff->unit)->max_speed);
64 	if (err < 0)
65 		fw_iso_resources_free(&ff->tx_resources);
66 
67 	return err;
68 }
69 
/* Release the isochronous resources reserved by keep_resources(). */
static void release_resources(struct snd_ff *ff)
{
	fw_iso_resources_free(&ff->tx_resources);
	fw_iso_resources_free(&ff->rx_resources);
}
75 
76 static int switch_fetching_mode(struct snd_ff *ff, bool enable)
77 {
78 	unsigned int count;
79 	__le32 *reg;
80 	int i;
81 	int err;
82 
83 	count = 0;
84 	for (i = 0; i < SND_FF_STREAM_MODES; ++i)
85 		count = max(count, ff->spec->pcm_playback_channels[i]);
86 
87 	reg = kcalloc(count, sizeof(__le32), GFP_KERNEL);
88 	if (!reg)
89 		return -ENOMEM;
90 
91 	if (!enable) {
92 		/*
93 		 * Each quadlet is corresponding to data channels in a data
94 		 * blocks in reverse order. Precisely, quadlets for available
95 		 * data channels should be enabled. Here, I take second best
96 		 * to fetch PCM frames from all of data channels regardless of
97 		 * stf.
98 		 */
99 		for (i = 0; i < count; ++i)
100 			reg[i] = cpu_to_le32(0x00000001);
101 	}
102 
103 	err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
104 				 SND_FF_REG_FETCH_PCM_FRAMES, reg,
105 				 sizeof(__le32) * count, 0);
106 	kfree(reg);
107 	return err;
108 }
109 
/*
 * End the protocol-specific session on the device, then stop it from
 * fetching PCM frames. The protocol hook runs first by design.
 */
static inline void finish_session(struct snd_ff *ff)
{
	ff->spec->protocol->finish_session(ff);
	switch_fetching_mode(ff, false);
}
115 
116 static int init_stream(struct snd_ff *ff, enum amdtp_stream_direction dir)
117 {
118 	int err;
119 	struct fw_iso_resources *resources;
120 	struct amdtp_stream *stream;
121 
122 	if (dir == AMDTP_IN_STREAM) {
123 		resources = &ff->tx_resources;
124 		stream = &ff->tx_stream;
125 	} else {
126 		resources = &ff->rx_resources;
127 		stream = &ff->rx_stream;
128 	}
129 
130 	err = fw_iso_resources_init(resources, ff->unit);
131 	if (err < 0)
132 		return err;
133 
134 	err = amdtp_ff_init(stream, ff->unit, dir);
135 	if (err < 0)
136 		fw_iso_resources_destroy(resources);
137 
138 	return err;
139 }
140 
141 static void destroy_stream(struct snd_ff *ff, enum amdtp_stream_direction dir)
142 {
143 	if (dir == AMDTP_IN_STREAM) {
144 		amdtp_stream_destroy(&ff->tx_stream);
145 		fw_iso_resources_destroy(&ff->tx_resources);
146 	} else {
147 		amdtp_stream_destroy(&ff->rx_stream);
148 		fw_iso_resources_destroy(&ff->rx_resources);
149 	}
150 }
151 
152 int snd_ff_stream_init_duplex(struct snd_ff *ff)
153 {
154 	int err;
155 
156 	err = init_stream(ff, AMDTP_OUT_STREAM);
157 	if (err < 0)
158 		goto end;
159 
160 	err = init_stream(ff, AMDTP_IN_STREAM);
161 	if (err < 0)
162 		destroy_stream(ff, AMDTP_OUT_STREAM);
163 end:
164 	return err;
165 }
166 
167 /*
168  * This function should be called before starting streams or after stopping
169  * streams.
170  */
void snd_ff_stream_destroy_duplex(struct snd_ff *ff)
{
	destroy_stream(ff, AMDTP_IN_STREAM);
	destroy_stream(ff, AMDTP_OUT_STREAM);
}
176 
/*
 * Start both streams at the given rate if any substream is in use. If the
 * device clock differs from the requested rate, or a streaming error was
 * detected, the running session is torn down first and rebuilt from scratch.
 * Returns 0 on success or a negative error code; on error everything is
 * stopped and resources are released.
 */
int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate)
{
	unsigned int curr_rate;
	enum snd_ff_clock_src src;
	int err;

	/* Nothing to start until at least one substream is opened. */
	if (ff->substreams_counter == 0)
		return 0;

	err = snd_ff_transaction_get_clock(ff, &curr_rate, &src);
	if (err < 0)
		return err;
	if (curr_rate != rate ||
	    amdtp_streaming_error(&ff->tx_stream) ||
	    amdtp_streaming_error(&ff->rx_stream)) {
		/* Tear down the whole session so it can restart cleanly. */
		finish_session(ff);

		amdtp_stream_stop(&ff->tx_stream);
		amdtp_stream_stop(&ff->rx_stream);

		release_resources(ff);
	}

	/*
	 * Regardless of current source of clock signal, drivers transfer some
	 * packets. Then, the device transfers packets.
	 */
	if (!amdtp_stream_running(&ff->rx_stream)) {
		err = keep_resources(ff, rate);
		if (err < 0)
			goto error;

		/* Protocol-specific session setup (registers, channels). */
		err = ff->spec->protocol->begin_session(ff, rate);
		if (err < 0)
			goto error;

		err = amdtp_stream_start(&ff->rx_stream,
					 ff->rx_resources.channel,
					 fw_parent_device(ff->unit)->max_speed);
		if (err < 0)
			goto error;

		/* Wait for the first isochronous callback to confirm flow. */
		if (!amdtp_stream_wait_callback(&ff->rx_stream,
						CALLBACK_TIMEOUT_MS)) {
			err = -ETIMEDOUT;
			goto error;
		}

		/* Only now allow the device to fetch PCM frames. */
		err = switch_fetching_mode(ff, true);
		if (err < 0)
			goto error;
	}

	/* The device starts transmitting once the out-stream is running. */
	if (!amdtp_stream_running(&ff->tx_stream)) {
		err = amdtp_stream_start(&ff->tx_stream,
					 ff->tx_resources.channel,
					 fw_parent_device(ff->unit)->max_speed);
		if (err < 0)
			goto error;

		if (!amdtp_stream_wait_callback(&ff->tx_stream,
						CALLBACK_TIMEOUT_MS)) {
			err = -ETIMEDOUT;
			goto error;
		}
	}

	return 0;
error:
	/* Unwind everything; the next call starts from a clean state. */
	amdtp_stream_stop(&ff->tx_stream);
	amdtp_stream_stop(&ff->rx_stream);

	finish_session(ff);
	release_resources(ff);

	return err;
}
254 
/* Stop both streams, but only once the last substream has been closed. */
void snd_ff_stream_stop_duplex(struct snd_ff *ff)
{
	if (ff->substreams_counter > 0)
		return;

	amdtp_stream_stop(&ff->tx_stream);
	amdtp_stream_stop(&ff->rx_stream);
	finish_session(ff);
	release_resources(ff);
}
265 
/*
 * Handle a bus reset: abort running PCM substreams, stop the streams and
 * re-register the previously allocated isochronous resources with the new
 * bus generation.
 */
void snd_ff_stream_update_duplex(struct snd_ff *ff)
{
	/* The device discontinue to transfer packets.  */
	amdtp_stream_pcm_abort(&ff->tx_stream);
	amdtp_stream_stop(&ff->tx_stream);

	amdtp_stream_pcm_abort(&ff->rx_stream);
	amdtp_stream_stop(&ff->rx_stream);

	fw_iso_resources_update(&ff->tx_resources);
	fw_iso_resources_update(&ff->rx_resources);
}
278 
/* Flag a lock-state change and wake tasks blocked on the hwdep queue. */
void snd_ff_stream_lock_changed(struct snd_ff *ff)
{
	ff->dev_lock_changed = true;
	wake_up(&ff->hwdep_wait);
}
284 
285 int snd_ff_stream_lock_try(struct snd_ff *ff)
286 {
287 	int err;
288 
289 	spin_lock_irq(&ff->lock);
290 
291 	/* user land lock this */
292 	if (ff->dev_lock_count < 0) {
293 		err = -EBUSY;
294 		goto end;
295 	}
296 
297 	/* this is the first time */
298 	if (ff->dev_lock_count++ == 0)
299 		snd_ff_stream_lock_changed(ff);
300 	err = 0;
301 end:
302 	spin_unlock_irq(&ff->lock);
303 	return err;
304 }
305 
306 void snd_ff_stream_lock_release(struct snd_ff *ff)
307 {
308 	spin_lock_irq(&ff->lock);
309 
310 	if (WARN_ON(ff->dev_lock_count <= 0))
311 		goto end;
312 	if (--ff->dev_lock_count == 0)
313 		snd_ff_stream_lock_changed(ff);
314 end:
315 	spin_unlock_irq(&ff->lock);
316 }
317