/*
 * ff-stream.c - a part of the driver for RME Fireface series
 *
 * Copyright (c) 2015-2017 Takashi Sakamoto
 *
 * Licensed under the terms of the GNU General Public License, version 2.
 */

#include "ff.h"

#define CALLBACK_TIMEOUT_MS	200

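/*
 * Map a sampling frequency code to the stream mode, which decides how many
 * data channels each data block carries.
 */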
int snd_ff_stream_get_multiplier_mode(enum cip_sfc sfc,
				      enum snd_ff_stream_mode *mode)
{
	static const enum snd_ff_stream_mode modes[] = {
		[CIP_SFC_32000] = SND_FF_STREAM_MODE_LOW,
		[CIP_SFC_44100] = SND_FF_STREAM_MODE_LOW,
		[CIP_SFC_48000] = SND_FF_STREAM_MODE_LOW,
		[CIP_SFC_88200] = SND_FF_STREAM_MODE_MID,
		[CIP_SFC_96000] = SND_FF_STREAM_MODE_MID,
		[CIP_SFC_176400] = SND_FF_STREAM_MODE_HIGH,
		[CIP_SFC_192000] = SND_FF_STREAM_MODE_HIGH,
	};

	if (sfc >= CIP_SFC_COUNT)
		return -EINVAL;

	*mode = modes[sfc];

	return 0;
}

static void release_resources(struct snd_ff *ff)
{
	fw_iso_resources_free(&ff->tx_resources);
	fw_iso_resources_free(&ff->rx_resources);
}

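/*
 * Enable or disable fetching of PCM frames by writing one quadlet per
 * playback data channel to SND_FF_REG_FETCH_PCM_FRAMES; zero-filled quadlets
 * are written to enable fetching, quadlets of 0x00000001 to disable it.
 */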
static int switch_fetching_mode(struct snd_ff *ff, bool enable)
{
	unsigned int count;
	__le32 *reg;
	int i;
	int err;

	count = 0;
	for (i = 0; i < SND_FF_STREAM_MODE_COUNT; ++i)
		count = max(count, ff->spec->pcm_playback_channels[i]);

	reg = kcalloc(count, sizeof(__le32), GFP_KERNEL);
	if (!reg)
		return -ENOMEM;

	if (!enable) {
		/*
		 * Each quadlet corresponds to one data channel in a data
		 * block, in reverse order. Strictly, only the quadlets for
		 * the data channels available at the current sampling
		 * transfer frequency (stf) need to be handled. As a second
		 * best, operate on the quadlets for all of the data channels
		 * regardless of stf.
		 */
		for (i = 0; i < count; ++i)
			reg[i] = cpu_to_le32(0x00000001);
	}

	err = snd_fw_transaction(ff->unit, TCODE_WRITE_BLOCK_REQUEST,
				 SND_FF_REG_FETCH_PCM_FRAMES, reg,
				 sizeof(__le32) * count, 0);
	kfree(reg);
	return err;
}

static inline void finish_session(struct snd_ff *ff)
{
	ff->spec->protocol->finish_session(ff);
	switch_fetching_mode(ff, false);
}

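/*
 * Allocate isochronous resources and initialize the AMDTP stream for one
 * direction; tx is what the device transmits (capture), rx is what it
 * receives (playback).
 */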
static int init_stream(struct snd_ff *ff, enum amdtp_stream_direction dir)
{
	int err;
	struct fw_iso_resources *resources;
	struct amdtp_stream *stream;

	if (dir == AMDTP_IN_STREAM) {
		resources = &ff->tx_resources;
		stream = &ff->tx_stream;
	} else {
		resources = &ff->rx_resources;
		stream = &ff->rx_stream;
	}

	err = fw_iso_resources_init(resources, ff->unit);
	if (err < 0)
		return err;

	err = amdtp_ff_init(stream, ff->unit, dir);
	if (err < 0)
		fw_iso_resources_destroy(resources);

	return err;
}

static void destroy_stream(struct snd_ff *ff, enum amdtp_stream_direction dir)
{
	if (dir == AMDTP_IN_STREAM) {
		amdtp_stream_destroy(&ff->tx_stream);
		fw_iso_resources_destroy(&ff->tx_resources);
	} else {
		amdtp_stream_destroy(&ff->rx_stream);
		fw_iso_resources_destroy(&ff->rx_resources);
	}
}

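/* Initialize the streams and isochronous resources for both directions. */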
int snd_ff_stream_init_duplex(struct snd_ff *ff)
{
	int err;

	err = init_stream(ff, AMDTP_OUT_STREAM);
	if (err < 0)
		goto end;

	err = init_stream(ff, AMDTP_IN_STREAM);
	if (err < 0)
		destroy_stream(ff, AMDTP_OUT_STREAM);
end:
	return err;
}

/*
 * This function should be called before starting streams or after stopping
 * streams.
 */
void snd_ff_stream_destroy_duplex(struct snd_ff *ff)
{
	destroy_stream(ff, AMDTP_IN_STREAM);
	destroy_stream(ff, AMDTP_OUT_STREAM);
}

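/*
 * Start both streams for the given sampling rate: restart the session when
 * the rate differs from the device's current rate or a streaming error
 * occurred, compute the stream parameters, begin a session, start the rx
 * stream, enable frame fetching, then start the tx stream.
 */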
int snd_ff_stream_start_duplex(struct snd_ff *ff, unsigned int rate)
{
	unsigned int curr_rate;
	enum snd_ff_clock_src src;
	int err;

	if (ff->substreams_counter == 0)
		return 0;

	err = snd_ff_transaction_get_clock(ff, &curr_rate, &src);
	if (err < 0)
		return err;
	if (curr_rate != rate ||
	    amdtp_streaming_error(&ff->tx_stream) ||
	    amdtp_streaming_error(&ff->rx_stream)) {
		finish_session(ff);

		amdtp_stream_stop(&ff->tx_stream);
		amdtp_stream_stop(&ff->rx_stream);

		release_resources(ff);
	}

	/*
	 * Regardless of the current source of the clock signal, the driver
	 * transfers some packets first. Then the device starts to transfer
	 * packets.
	 */
	if (!amdtp_stream_running(&ff->rx_stream)) {
		enum snd_ff_stream_mode mode;
		int i;

		for (i = 0; i < CIP_SFC_COUNT; ++i) {
			if (amdtp_rate_table[i] == rate)
				break;
		}
		if (i >= CIP_SFC_COUNT)
			return -EINVAL;

		err = snd_ff_stream_get_multiplier_mode(i, &mode);
		if (err < 0)
			return err;

		err = amdtp_ff_set_parameters(&ff->tx_stream, rate,
					ff->spec->pcm_capture_channels[mode]);
		if (err < 0)
			return err;

		err = amdtp_ff_set_parameters(&ff->rx_stream, rate,
					ff->spec->pcm_playback_channels[mode]);
		if (err < 0)
			return err;

		err = ff->spec->protocol->begin_session(ff, rate);
		if (err < 0)
			goto error;

		err = amdtp_stream_start(&ff->rx_stream,
					 ff->rx_resources.channel,
					 fw_parent_device(ff->unit)->max_speed);
		if (err < 0)
			goto error;

		if (!amdtp_stream_wait_callback(&ff->rx_stream,
						CALLBACK_TIMEOUT_MS)) {
			err = -ETIMEDOUT;
			goto error;
		}

		err = switch_fetching_mode(ff, true);
		if (err < 0)
			goto error;
	}

	if (!amdtp_stream_running(&ff->tx_stream)) {
		err = amdtp_stream_start(&ff->tx_stream,
					 ff->tx_resources.channel,
					 fw_parent_device(ff->unit)->max_speed);
		if (err < 0)
			goto error;

		if (!amdtp_stream_wait_callback(&ff->tx_stream,
						CALLBACK_TIMEOUT_MS)) {
			err = -ETIMEDOUT;
			goto error;
		}
	}

	return 0;
error:
	amdtp_stream_stop(&ff->tx_stream);
	amdtp_stream_stop(&ff->rx_stream);

	finish_session(ff);
	release_resources(ff);

	return err;
}

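/* Stop both streams and finish the session once no substream is left. */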
void snd_ff_stream_stop_duplex(struct snd_ff *ff)
{
	if (ff->substreams_counter > 0)
		return;

	amdtp_stream_stop(&ff->tx_stream);
	amdtp_stream_stop(&ff->rx_stream);
	finish_session(ff);
	release_resources(ff);
}

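/*
 * Handle a change of bus topology (typically a bus reset): abort PCM
 * substreams, stop both streams, and update the allocated isochronous
 * resources.
 */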
void snd_ff_stream_update_duplex(struct snd_ff *ff)
{
	/* The device discontinues transferring packets. */
	amdtp_stream_pcm_abort(&ff->tx_stream);
	amdtp_stream_stop(&ff->tx_stream);

	amdtp_stream_pcm_abort(&ff->rx_stream);
	amdtp_stream_stop(&ff->rx_stream);

	fw_iso_resources_update(&ff->tx_resources);
	fw_iso_resources_update(&ff->rx_resources);
}

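/* Notify user land, via the hwdep wait queue, that the lock state changed. */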
void snd_ff_stream_lock_changed(struct snd_ff *ff)
{
	ff->dev_lock_changed = true;
	wake_up(&ff->hwdep_wait);
}

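/*
 * Try to claim the device for streaming. A negative dev_lock_count means
 * user land has locked the device, so return -EBUSY.
 */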
int snd_ff_stream_lock_try(struct snd_ff *ff)
{
	int err;

	spin_lock_irq(&ff->lock);

	/* user land already locks this */
	if (ff->dev_lock_count < 0) {
		err = -EBUSY;
		goto end;
	}

	/* this is the first time */
	if (ff->dev_lock_count++ == 0)
		snd_ff_stream_lock_changed(ff);
	err = 0;
end:
	spin_unlock_irq(&ff->lock);
	return err;
}

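/* Drop one reference taken by snd_ff_stream_lock_try(). */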
void snd_ff_stream_lock_release(struct snd_ff *ff)
{
	spin_lock_irq(&ff->lock);

	if (WARN_ON(ff->dev_lock_count <= 0))
		goto end;
	if (--ff->dev_lock_count == 0)
		snd_ff_stream_lock_changed(ff);
end:
	spin_unlock_irq(&ff->lock);
}