// SPDX-License-Identifier: GPL-2.0-only
/*
 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
 * with Common Isochronous Packet (IEC 61883-1) headers
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "amdtp-stream.h"

#define TICKS_PER_CYCLE		3072
#define CYCLES_PER_SECOND	8000
#define TICKS_PER_SECOND	(TICKS_PER_CYCLE * CYCLES_PER_SECOND)

#define OHCI_SECOND_MODULUS	8
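// In the timestamps of 1394 OHCI isochronous contexts, only the lower 3 bits
// of the second field are available, so the cycle counts handled here wrap
// around every OHCI_SECOND_MODULUS (8) seconds.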

/* Always support Linux tracing subsystem. */
#define CREATE_TRACE_POINTS
#include "amdtp-stream-trace.h"

#define TRANSFER_DELAY_TICKS	0x2e00 /* 479.17 microseconds */

/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT	16
#define TAG_NO_CIP_HEADER	0
#define TAG_CIP			1

// Common Isochronous Packet (CIP) header parameters. Use a two-quadlet CIP header when supported.
#define CIP_HEADER_QUADLETS	2
#define CIP_EOH_SHIFT		31
#define CIP_EOH			(1u << CIP_EOH_SHIFT)
#define CIP_EOH_MASK		0x80000000
#define CIP_SID_SHIFT		24
#define CIP_SID_MASK		0x3f000000
#define CIP_DBS_MASK		0x00ff0000
#define CIP_DBS_SHIFT		16
#define CIP_SPH_MASK		0x00000400
#define CIP_SPH_SHIFT		10
#define CIP_DBC_MASK		0x000000ff
#define CIP_FMT_SHIFT		24
#define CIP_FMT_MASK		0x3f000000
#define CIP_FDF_MASK		0x00ff0000
#define CIP_FDF_SHIFT		16
#define CIP_FDF_NO_DATA		0xff
#define CIP_SYT_MASK		0x0000ffff
#define CIP_SYT_NO_INFO		0xffff
#define CIP_SYT_CYCLE_MODULUS	16
#define CIP_NO_DATA		((CIP_FDF_NO_DATA << CIP_FDF_SHIFT) | CIP_SYT_NO_INFO)

#define CIP_HEADER_SIZE		(sizeof(__be32) * CIP_HEADER_QUADLETS)
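
// As reflected by the masks and shifts above, the two-quadlet CIP header
// handled by this module has this layout (IEC 61883-1):
//   quadlet 0: EOH=0 | SID (6 bits) | DBS (8 bits) | FN/QPC (unused here) | SPH | DBC (8 bits)
//   quadlet 1: EOH=1 | FMT (6 bits) | FDF (8 bits) | SYT (16 bits)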

/* Audio and Music transfer protocol specific parameters */
#define CIP_FMT_AM		0x10
#define AMDTP_FDF_NO_DATA	0xff

// For iso header and tstamp.
#define IR_CTX_HEADER_DEFAULT_QUADLETS	2
// Add nothing.
#define IR_CTX_HEADER_SIZE_NO_CIP	(sizeof(__be32) * IR_CTX_HEADER_DEFAULT_QUADLETS)
// Add a two-quadlet CIP header.
#define IR_CTX_HEADER_SIZE_CIP		(IR_CTX_HEADER_SIZE_NO_CIP + CIP_HEADER_SIZE)
#define HEADER_TSTAMP_MASK	0x0000ffff

#define IT_PKT_HEADER_SIZE_CIP		CIP_HEADER_SIZE
#define IT_PKT_HEADER_SIZE_NO_CIP	0 // Nothing.

// The initial firmware of OXFW970 can postpone transmission of packets while finishing an
// asynchronous transaction. This module tolerates up to 5 skipped cycles to avoid buffer
// overrun. If the actual device skips more cycles, this module stops the packet streaming.
#define IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES	5

static void pcm_period_work(struct work_struct *work);

/**
 * amdtp_stream_init - initialize an AMDTP stream structure
 * @s: the AMDTP stream to initialize
 * @unit: the target of the stream
 * @dir: the direction of stream
 * @flags: the details of the streaming protocol, consisting of cip_flags enumeration constants
 * @fmt: the value of fmt field in CIP header
 * @process_ctx_payloads: callback handler to process payloads of isoc context
 * @protocol_size: the size to newly allocate for protocol-specific data
 */
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
		      enum amdtp_stream_direction dir, unsigned int flags,
		      unsigned int fmt,
		      amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
		      unsigned int protocol_size)
{
	if (process_ctx_payloads == NULL)
		return -EINVAL;

	s->protocol = kzalloc(protocol_size, GFP_KERNEL);
	if (!s->protocol)
		return -ENOMEM;

	s->unit = unit;
	s->direction = dir;
	s->flags = flags;
	s->context = ERR_PTR(-1);
	mutex_init(&s->mutex);
	INIT_WORK(&s->period_work, pcm_period_work);
	s->packet_index = 0;

	init_waitqueue_head(&s->ready_wait);

	s->fmt = fmt;
	s->process_ctx_payloads = process_ctx_payloads;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);

/**
 * amdtp_stream_destroy - free stream resources
 * @s: the AMDTP stream to destroy
 */
void amdtp_stream_destroy(struct amdtp_stream *s)
{
	/* Not initialized. */
	if (s->protocol == NULL)
		return;

	WARN_ON(amdtp_stream_running(s));
	kfree(s->protocol);
	mutex_destroy(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_destroy);

const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  = 8,
	[CIP_SFC_44100]  = 8,
	[CIP_SFC_48000]  = 8,
	[CIP_SFC_88200]  = 16,
	[CIP_SFC_96000]  = 16,
	[CIP_SFC_176400] = 32,
	[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);

const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  = 32000,
	[CIP_SFC_44100]  = 44100,
	[CIP_SFC_48000]  = 48000,
	[CIP_SFC_88200]  = 88200,
	[CIP_SFC_96000]  = 96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);

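// Refine the period/buffer size in frames so that it aligns to the largest
// SYT_INTERVAL among the sampling rates still permitted by the runtime.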
static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
				    struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *s = hw_param_interval(params, rule->var);
	const struct snd_interval *r =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval t = {0};
	unsigned int step = 0;
	int i;

	for (i = 0; i < CIP_SFC_COUNT; ++i) {
		if (snd_interval_test(r, amdtp_rate_table[i]))
			step = max(step, amdtp_syt_intervals[i]);
	}

	t.min = roundup(s->min, step);
	t.max = rounddown(s->max, step);
	t.integer = 1;

	return snd_interval_refine(s, &t);
}

/**
 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
 * @s: the AMDTP stream, which must be initialized.
 * @runtime: the PCM substream runtime
 */
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
					struct snd_pcm_runtime *runtime)
{
	struct snd_pcm_hardware *hw = &runtime->hw;
	unsigned int ctx_header_size;
	unsigned int maximum_usec_per_period;
	int err;

	hw->info = SNDRV_PCM_INFO_BLOCK_TRANSFER |
		   SNDRV_PCM_INFO_INTERLEAVED |
		   SNDRV_PCM_INFO_JOINT_DUPLEX |
		   SNDRV_PCM_INFO_MMAP |
		   SNDRV_PCM_INFO_MMAP_VALID |
		   SNDRV_PCM_INFO_NO_PERIOD_WAKEUP;

	hw->periods_min = 2;
	hw->periods_max = UINT_MAX;

	/* bytes for a frame */
	hw->period_bytes_min = 4 * hw->channels_max;

	/* Just to prevent from allocating too many pages. */
	hw->period_bytes_max = hw->period_bytes_min * 2048;
	hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;

	// The Linux driver for 1394 OHCI controllers voluntarily flushes an isoc
	// context when the total size of accumulated context headers reaches
	// PAGE_SIZE. This kicks work for the isoc context and brings a callback
	// in the middle of scheduled interrupts.
	// Although AMDTP streams in the same domain use the same events per
	// IRQ, use the largest size of context header between IT/IR contexts.
	// Here, the size of the context header in the IR context is used for
	// both contexts.
	if (!(s->flags & CIP_NO_HEADER))
		ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
	else
		ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
	maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
				  CYCLES_PER_SECOND / ctx_header_size;

	// In IEC 61883-6, one isoc packet can transfer events up to the value
	// of the syt interval. This comes from the interval of isoc cycle. As a
	// 1394 OHCI controller can generate a hardware IRQ per isoc packet, the
	// interval is 125 usec.
	// However, there are two modes of transmission in IEC 61883-6: blocking
	// and non-blocking. In blocking mode, the sequence of isoc packets
	// includes 'empty' or 'NODATA' packets which include no events. In
	// non-blocking mode, the number of events per packet is variable up to
	// the syt interval.
	// Due to the above protocol design, the minimum PCM frames per
	// interrupt should be double the value of the syt interval, thus it is
	// 250 usec.
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   250, maximum_usec_per_period);
	if (err < 0)
		goto end;

	/* Non-Blocking stream has no more constraints */
	if (!(s->flags & CIP_BLOCKING))
		goto end;

	/*
	 * One AMDTP packet can include some frames. In blocking mode, the
	 * number equals SYT_INTERVAL. So the number is 8, 16 or 32,
	 * depending on its sampling rate. For accurate period interrupts, it's
	 * preferable to align period/buffer sizes to the current SYT_INTERVAL.
	 */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
end:
	return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);

/**
 * amdtp_stream_set_parameters - set stream parameters
 * @s: the AMDTP stream to configure
 * @rate: the sample rate
 * @data_block_quadlets: the size of a data block in quadlet unit
 * @pcm_frame_multiplier: the multiplier to compute the number of PCM frames from the number of
 *			  AMDTP events
 *
 * The parameters must be set before the stream is started, and must not be
 * changed while the stream is running.
 */
int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
				unsigned int data_block_quadlets, unsigned int pcm_frame_multiplier)
{
	unsigned int sfc;

	for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
		if (amdtp_rate_table[sfc] == rate)
			break;
	}
	if (sfc == ARRAY_SIZE(amdtp_rate_table))
		return -EINVAL;

	s->sfc = sfc;
	s->data_block_quadlets = data_block_quadlets;
	s->syt_interval = amdtp_syt_intervals[sfc];

	// default buffering in the device.
	s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;

	// additional buffering needed to adjust for no-data packets.
	if (s->flags & CIP_BLOCKING)
		s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;

	s->pcm_frame_multiplier = pcm_frame_multiplier;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);

// The CIP header is processed in the context header, apart from the context payload.
static int amdtp_stream_get_max_ctx_payload_size(struct amdtp_stream *s)
{
	unsigned int multiplier;

	if (s->flags & CIP_JUMBO_PAYLOAD)
		multiplier = IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES;
	else
		multiplier = 1;

	return s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
}

/**
 * amdtp_stream_get_max_payload - get the stream's packet size
 * @s: the AMDTP stream
 *
 * This function must not be called before the stream has been configured
 * with amdtp_stream_set_parameters().
 */
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
	unsigned int cip_header_size;

	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = CIP_HEADER_SIZE;
	else
		cip_header_size = 0;

	return cip_header_size + amdtp_stream_get_max_ctx_payload_size(s);
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);

/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * This function should be called from the PCM device's .prepare callback.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	cancel_work_sync(&s->period_work);
	s->pcm_buffer_pointer = 0;
	s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);

#define prev_packet_desc(s, desc) \
	list_prev_entry_circular(desc, &s->packet_descs_list, link)

static void pool_blocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
				      unsigned int size, unsigned int pos, unsigned int count)
{
	const unsigned int syt_interval = s->syt_interval;
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = descs + pos;

		if (desc->syt_offset != CIP_SYT_NO_INFO)
			desc->data_blocks = syt_interval;
		else
			desc->data_blocks = 0;

		pos = (pos + 1) % size;
	}
}

static void pool_ideal_nonblocking_data_blocks(struct amdtp_stream *s, struct seq_desc *descs,
					       unsigned int size, unsigned int pos,
					       unsigned int count)
{
	const enum cip_sfc sfc = s->sfc;
	unsigned int state = s->ctx_data.rx.data_block_state;
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = descs + pos;

		if (!cip_sfc_is_base_44100(sfc)) {
			// Sample_rate / 8000 is an integer, and precomputed.
			desc->data_blocks = state;
		} else {
			unsigned int phase = state;

			/*
			 * This calculates the number of data blocks per packet so that
			 * 1) the overall rate is correct and exactly synchronized to
			 *    the bus clock, and
			 * 2) packets with a rounded-up number of blocks occur as early
			 *    as possible in the sequence (to prevent underruns of the
			 *    device's buffer).
			 */
			if (sfc == CIP_SFC_44100)
				/* 6 6 5 6 5 6 5 ... */
				desc->data_blocks = 5 + ((phase & 1) ^ (phase == 0 || phase >= 40));
			else
				/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
				desc->data_blocks = 11 * (sfc >> 1) + (phase == 0);
			if (++phase >= (80 >> (sfc >> 1)))
				phase = 0;
			state = phase;
		}

		pos = (pos + 1) % size;
	}

	s->ctx_data.rx.data_block_state = state;
}

static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
					 unsigned int *syt_offset_state, enum cip_sfc sfc)
{
	unsigned int syt_offset;

	if (*last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(sfc))
			syt_offset = *last_syt_offset + *syt_offset_state;
		else {
			/*
			 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
			 *   n * SYT_INTERVAL * 24576000 / sample_rate
			 * Modulo TICKS_PER_CYCLE, the difference between successive
			 * elements is about 1386.23. Rounding the results of this
			 * formula to the SYT precision results in a sequence of
			 * differences that begins with:
			 *   1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
			 * This code generates _exactly_ the same sequence.
			 */
			unsigned int phase = *syt_offset_state;
			unsigned int index = phase % 13;

			syt_offset = *last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			*syt_offset_state = phase;
		}
	} else
		syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
	*last_syt_offset = syt_offset;

	if (syt_offset >= TICKS_PER_CYCLE)
		syt_offset = CIP_SYT_NO_INFO;

	return syt_offset;
}

static void pool_ideal_syt_offsets(struct amdtp_stream *s, struct seq_desc *descs,
				   unsigned int size, unsigned int pos, unsigned int count)
{
	const enum cip_sfc sfc = s->sfc;
	unsigned int last = s->ctx_data.rx.last_syt_offset;
	unsigned int state = s->ctx_data.rx.syt_offset_state;
	int i;

	for (i = 0; i < count; ++i) {
		struct seq_desc *desc = descs + pos;

		desc->syt_offset = calculate_syt_offset(&last, &state, sfc);

		pos = (pos + 1) % size;
	}

	s->ctx_data.rx.last_syt_offset = last;
	s->ctx_data.rx.syt_offset_state = state;
}

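// In the SYT field of a CIP header, the upper 4 bits carry the low 4 bits of
// the intended cycle count and the lower 12 bits carry the offset in ticks
// within that cycle; the code below unpacks the field accordingly.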
static unsigned int compute_syt_offset(unsigned int syt, unsigned int cycle,
				       unsigned int transfer_delay)
{
	unsigned int cycle_lo = (cycle % CYCLES_PER_SECOND) & 0x0f;
	unsigned int syt_cycle_lo = (syt & 0xf000) >> 12;
	unsigned int syt_offset;

	// Round up.
	if (syt_cycle_lo < cycle_lo)
		syt_cycle_lo += CIP_SYT_CYCLE_MODULUS;
	syt_cycle_lo -= cycle_lo;

	// Subtract the transfer delay so that the synchronization offset is not
	// so large at transmission.
	syt_offset = syt_cycle_lo * TICKS_PER_CYCLE + (syt & 0x0fff);
	if (syt_offset < transfer_delay)
		syt_offset += CIP_SYT_CYCLE_MODULUS * TICKS_PER_CYCLE;

	return syt_offset - transfer_delay;
}

// Both the producer and the consumer of the queue run on the same clock of the
// IEEE 1394 bus. Additionally, the sequence of tx packets is strictly checked
// for any discontinuity before filling entries in the queue, so the
// calculation is safe even though it looks fragile with respect to overrun.
static unsigned int calculate_cached_cycle_count(struct amdtp_stream *s, unsigned int head)
{
	const unsigned int cache_size = s->ctx_data.tx.cache.size;
	unsigned int cycles = s->ctx_data.tx.cache.pos;

	if (cycles < head)
		cycles += cache_size;
	cycles -= head;

	return cycles;
}

static void cache_seq(struct amdtp_stream *s, const struct pkt_desc *src, unsigned int desc_count)
{
	const unsigned int transfer_delay = s->transfer_delay;
	const unsigned int cache_size = s->ctx_data.tx.cache.size;
	struct seq_desc *cache = s->ctx_data.tx.cache.descs;
	unsigned int cache_pos = s->ctx_data.tx.cache.pos;
	bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
	int i;

	for (i = 0; i < desc_count; ++i) {
		struct seq_desc *dst = cache + cache_pos;

		if (aware_syt && src->syt != CIP_SYT_NO_INFO)
			dst->syt_offset = compute_syt_offset(src->syt, src->cycle, transfer_delay);
		else
			dst->syt_offset = CIP_SYT_NO_INFO;
		dst->data_blocks = src->data_blocks;

		cache_pos = (cache_pos + 1) % cache_size;
		src = amdtp_stream_next_packet_desc(s, src);
	}

	s->ctx_data.tx.cache.pos = cache_pos;
}

static void pool_ideal_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
				 unsigned int pos, unsigned int count)
{
	pool_ideal_syt_offsets(s, descs, size, pos, count);

	if (s->flags & CIP_BLOCKING)
		pool_blocking_data_blocks(s, descs, size, pos, count);
	else
		pool_ideal_nonblocking_data_blocks(s, descs, size, pos, count);
}

static void pool_replayed_seq(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
			      unsigned int pos, unsigned int count)
{
	struct amdtp_stream *target = s->ctx_data.rx.replay_target;
	const struct seq_desc *cache = target->ctx_data.tx.cache.descs;
	const unsigned int cache_size = target->ctx_data.tx.cache.size;
	unsigned int cache_pos = s->ctx_data.rx.cache_pos;
	int i;

	for (i = 0; i < count; ++i) {
		descs[pos] = cache[cache_pos];
		cache_pos = (cache_pos + 1) % cache_size;
		pos = (pos + 1) % size;
	}

	s->ctx_data.rx.cache_pos = cache_pos;
}

static void pool_seq_descs(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
			   unsigned int pos, unsigned int count)
{
	struct amdtp_domain *d = s->domain;
	void (*pool_seq_descs)(struct amdtp_stream *s, struct seq_desc *descs, unsigned int size,
			       unsigned int pos, unsigned int count);

	if (!d->replay.enable || !s->ctx_data.rx.replay_target) {
		pool_seq_descs = pool_ideal_seq_descs;
	} else {
		if (!d->replay.on_the_fly) {
			pool_seq_descs = pool_replayed_seq;
		} else {
			struct amdtp_stream *tx = s->ctx_data.rx.replay_target;
			const unsigned int cache_size = tx->ctx_data.tx.cache.size;
			const unsigned int cache_pos = s->ctx_data.rx.cache_pos;
			unsigned int cached_cycles = calculate_cached_cycle_count(tx, cache_pos);

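			// For on-the-fly replay, fall back to the ideal sequence
			// until the tx cache holds enough cycles to replay without
			// overtaking the producer.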
			if (cached_cycles > count && cached_cycles > cache_size / 2)
				pool_seq_descs = pool_replayed_seq;
			else
				pool_seq_descs = pool_ideal_seq_descs;
		}
	}

	pool_seq_descs(s, descs, size, pos, count);
}

static void update_pcm_pointers(struct amdtp_stream *s,
				struct snd_pcm_substream *pcm,
				unsigned int frames)
{
	unsigned int ptr;

	ptr = s->pcm_buffer_pointer + frames;
	if (ptr >= pcm->runtime->buffer_size)
		ptr -= pcm->runtime->buffer_size;
	WRITE_ONCE(s->pcm_buffer_pointer, ptr);

	s->pcm_period_pointer += frames;
	if (s->pcm_period_pointer >= pcm->runtime->period_size) {
		s->pcm_period_pointer -= pcm->runtime->period_size;

		// When no period wakeup is requested, the program in the user process should
		// periodically check the status of the intermediate buffer associated with the
		// PCM substream to process PCM frames in the buffer, instead of receiving
		// notifications of elapsed periods by poll wait.
		if (!pcm->runtime->no_period_wakeup)
			queue_work(system_highpri_wq, &s->period_work);
	}
}

static void pcm_period_work(struct work_struct *work)
{
	struct amdtp_stream *s = container_of(work, struct amdtp_stream,
					      period_work);
	struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);

	if (pcm)
		snd_pcm_period_elapsed(pcm);
}

static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
			bool sched_irq)
{
	int err;

	params->interrupt = sched_irq;
	params->tag = s->tag;
	params->sy = 0;

	err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
				   s->buffer.packets[s->packet_index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		goto end;
	}

	if (++s->packet_index >= s->queue_size)
		s->packet_index = 0;
end:
	return err;
}

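// A packet with neither header nor payload is marked as 'skip'; the controller
// then transmits nothing in the corresponding isochronous cycle.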
static inline int queue_out_packet(struct amdtp_stream *s,
				   struct fw_iso_packet *params, bool sched_irq)
{
	params->skip =
		!!(params->header_length == 0 && params->payload_length == 0);
	return queue_packet(s, params, sched_irq);
}

static inline int queue_in_packet(struct amdtp_stream *s,
				  struct fw_iso_packet *params)
{
	// Queue one packet for IR context.
	params->header_length = s->ctx_data.tx.ctx_header_size;
	params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
	params->skip = false;
	return queue_packet(s, params, false);
}

static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
				unsigned int data_block_counter, unsigned int syt)
{
	cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
				    (s->data_block_quadlets << CIP_DBS_SHIFT) |
				    ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
				    data_block_counter);
	cip_header[1] = cpu_to_be32(CIP_EOH |
				    ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
				    ((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
				    (syt & CIP_SYT_MASK));
}

static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
				struct fw_iso_packet *params, unsigned int header_length,
				unsigned int data_blocks,
				unsigned int data_block_counter,
				unsigned int syt, unsigned int index, u32 curr_cycle_time)
{
	unsigned int payload_length;
	__be32 *cip_header;

	payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
	params->payload_length = payload_length;

	if (header_length > 0) {
		cip_header = (__be32 *)params->header;
		generate_cip_header(s, cip_header, data_block_counter, syt);
		params->header_length = header_length;
	} else {
		cip_header = NULL;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length + header_length, data_blocks,
			   data_block_counter, s->packet_index, index, curr_cycle_time);
}

static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
			    unsigned int payload_length,
			    unsigned int *data_blocks,
			    unsigned int *data_block_counter, unsigned int *syt)
{
	u32 cip_header[2];
	unsigned int sph;
	unsigned int fmt;
	unsigned int fdf;
	unsigned int dbc;
	bool lost;

	cip_header[0] = be32_to_cpu(buf[0]);
	cip_header[1] = be32_to_cpu(buf[1]);

	/*
	 * This module supports 'Two-quadlet CIP header with SYT field'.
	 * For convenience, also check whether the FMT field is AM824 or not.
	 */
	if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
	     ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
	    (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
		dev_info_ratelimited(&s->unit->device,
				     "Invalid CIP header for AMDTP: %08X:%08X\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Check whether the protocol is valid or not. */
	sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
	fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
	if (sph != s->sph || fmt != s->fmt) {
		dev_info_ratelimited(&s->unit->device,
				     "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Calculate data blocks */
	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_length == 0 || (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		*data_blocks = 0;
	} else {
		unsigned int data_block_quadlets =
			(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
		/* avoid division by zero */
		if (data_block_quadlets == 0) {
			dev_err(&s->unit->device,
				"Detect invalid value in dbs field: %08X\n",
				cip_header[0]);
			return -EPROTO;
		}
		if (s->flags & CIP_WRONG_DBS)
			data_block_quadlets = s->data_block_quadlets;

		*data_blocks = payload_length / sizeof(__be32) / data_block_quadlets;
	}

	/* Check data block counter continuity */
	dbc = cip_header[0] & CIP_DBC_MASK;
	if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
	    *data_block_counter != UINT_MAX)
		dbc = *data_block_counter;

	if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
	    *data_block_counter == UINT_MAX) {
		lost = false;
	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		lost = dbc != *data_block_counter;
	} else {
		unsigned int dbc_interval;

		if (!(s->flags & CIP_DBC_IS_PAYLOAD_QUADLETS)) {
			if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
				dbc_interval = s->ctx_data.tx.dbc_interval;
			else
				dbc_interval = *data_blocks;
		} else {
			dbc_interval = payload_length / sizeof(__be32);
		}

		lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
	}

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			*data_block_counter, dbc);
		return -EIO;
	}

	*data_block_counter = dbc;

	if (!(s->flags & CIP_UNAWARE_SYT))
		*syt = cip_header[1] & CIP_SYT_MASK;

	return 0;
}

static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
			       const __be32 *ctx_header,
			       unsigned int *data_blocks,
			       unsigned int *data_block_counter,
			       unsigned int *syt, unsigned int packet_index, unsigned int index,
			       u32 curr_cycle_time)
{
	unsigned int payload_length;
	const __be32 *cip_header;
	unsigned int cip_header_size;

	payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;

	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = CIP_HEADER_SIZE;
	else
		cip_header_size = 0;

	if (payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
		dev_err(&s->unit->device,
			"Detect jumbo payload: %04x %04x\n",
			payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
		return -EIO;
	}

	if (cip_header_size > 0) {
		if (payload_length >= cip_header_size) {
			int err;

			cip_header = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;
			err = check_cip_header(s, cip_header, payload_length - cip_header_size,
					       data_blocks, data_block_counter, syt);
			if (err < 0)
				return err;
		} else {
			// Handle the cycle as one in which an empty packet arrived.
			cip_header = NULL;
			*data_blocks = 0;
			*syt = 0;
		}
	} else {
		cip_header = NULL;
		*data_blocks = payload_length / sizeof(__be32) / s->data_block_quadlets;
		*syt = 0;

		if (*data_block_counter == UINT_MAX)
			*data_block_counter = 0;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length, *data_blocks,
			   *data_block_counter, packet_index, index, curr_cycle_time);

	return 0;
}

// In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the second. On
// the other hand, in the DMA descriptors of 1394 OHCI, 3 bits are used to represent it.
// Thus, via the Linux FireWire subsystem, we can get the 3 bits for the second.
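// For example, a timestamp of 0x6123 decodes to second = 3 and cycle = 0x123,
// thus 3 * 8000 + 291 = 24291.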
static inline u32 compute_ohci_iso_ctx_cycle_count(u32 tstamp)
{
	return (((tstamp >> 13) & 0x07) * CYCLES_PER_SECOND) + (tstamp & 0x1fff);
}

static inline u32 compute_ohci_cycle_count(__be32 ctx_header_tstamp)
{
	u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
	return compute_ohci_iso_ctx_cycle_count(tstamp);
}

static inline u32 increment_ohci_cycle_count(u32 cycle, unsigned int addend)
{
	cycle += addend;
	if (cycle >= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND)
		cycle -= OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;
	return cycle;
}

static inline u32 decrement_ohci_cycle_count(u32 minuend, u32 subtrahend)
{
	if (minuend < subtrahend)
		minuend += OHCI_SECOND_MODULUS * CYCLES_PER_SECOND;

	return minuend - subtrahend;
}

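// Order two cycle counts in the modulo arithmetic of OHCI_SECOND_MODULUS *
// CYCLES_PER_SECOND, assuming the counts are within half of the modulus of
// each other.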
static int compare_ohci_cycle_count(u32 lval, u32 rval)
{
	if (lval == rval)
		return 0;
	else if (lval < rval && rval - lval < OHCI_SECOND_MODULUS * CYCLES_PER_SECOND / 2)
		return -1;
	else
		return 1;
}

// Align to the actual cycle count for the packet which is going to be scheduled.
// This module queues the same number of isochronous cycles as the queue size to
// skip isochronous cycles, therefore it's OK to just increment the cycle by the
// queue size for the scheduled cycle.
static inline u32 compute_ohci_it_cycle(const __be32 ctx_header_tstamp,
					unsigned int queue_size)
{
	u32 cycle = compute_ohci_cycle_count(ctx_header_tstamp);
	return increment_ohci_cycle_count(cycle, queue_size);
}

static int generate_tx_packet_descs(struct amdtp_stream *s, struct pkt_desc *desc,
				    const __be32 *ctx_header, unsigned int packet_count,
				    unsigned int *desc_count)
{
	unsigned int next_cycle = s->next_cycle;
	unsigned int dbc = s->data_block_counter;
	unsigned int packet_index = s->packet_index;
	unsigned int queue_size = s->queue_size;
	u32 curr_cycle_time = 0;
	int i;
	int err;

	if (trace_amdtp_packet_enabled())
		(void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);

	*desc_count = 0;
	for (i = 0; i < packet_count; ++i) {
		unsigned int cycle;
		bool lost;
		unsigned int data_blocks;
		unsigned int syt;

		cycle = compute_ohci_cycle_count(ctx_header[1]);
		lost = (next_cycle != cycle);
		if (lost) {
			if (s->flags & CIP_NO_HEADER) {
				// Fireface skips transmission just for an isoc cycle
				// corresponding to an empty packet.
				unsigned int prev_cycle = next_cycle;

				next_cycle = increment_ohci_cycle_count(next_cycle, 1);
				lost = (next_cycle != cycle);
				if (!lost) {
					// Prepare a description for the skipped cycle for
					// sequence replay.
					desc->cycle = prev_cycle;
					desc->syt = 0;
					desc->data_blocks = 0;
					desc->data_block_counter = dbc;
					desc->ctx_payload = NULL;
					desc = amdtp_stream_next_packet_desc(s, desc);
					++(*desc_count);
				}
			} else if (s->flags & CIP_JUMBO_PAYLOAD) {
				// OXFW970 skips transmission for several isoc cycles during
				// an asynchronous transaction, which makes sequence replay
				// impossible.
				unsigned int safe_cycle = increment_ohci_cycle_count(next_cycle,
							IR_JUMBO_PAYLOAD_MAX_SKIP_CYCLES);
				lost = (compare_ohci_cycle_count(safe_cycle, cycle) < 0);
			}
			if (lost) {
				dev_err(&s->unit->device, "Detect discontinuity of cycle: %d %d\n",
					next_cycle, cycle);
				return -EIO;
			}
		}

		err = parse_ir_ctx_header(s, cycle, ctx_header, &data_blocks, &dbc, &syt,
					  packet_index, i, curr_cycle_time);
		if (err < 0)
			return err;

		desc->cycle = cycle;
		desc->syt = syt;
		desc->data_blocks = data_blocks;
		desc->data_block_counter = dbc;
		desc->ctx_payload = s->buffer.packets[packet_index].buffer;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		next_cycle = increment_ohci_cycle_count(next_cycle, 1);
		desc = amdtp_stream_next_packet_desc(s, desc);
		++(*desc_count);
		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
		packet_index = (packet_index + 1) % queue_size;
	}

	s->next_cycle = next_cycle;
	s->data_block_counter = dbc;

	return 0;
}

static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
				unsigned int transfer_delay)
{
	unsigned int syt;

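	// Pack the low 4 bits of the target cycle and the 12-bit intra-cycle
	// offset into the 16-bit SYT field; the inverse of compute_syt_offset().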
	syt_offset += transfer_delay;
	syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
	      (syt_offset % TICKS_PER_CYCLE);
	return syt & CIP_SYT_MASK;
}

static void generate_rx_packet_descs(struct amdtp_stream *s, struct pkt_desc *desc,
				     const __be32 *ctx_header, unsigned int packet_count)
{
	struct seq_desc *seq_descs = s->ctx_data.rx.seq.descs;
	unsigned int seq_size = s->ctx_data.rx.seq.size;
	unsigned int seq_pos = s->ctx_data.rx.seq.pos;
	unsigned int dbc = s->data_block_counter;
	bool aware_syt = !(s->flags & CIP_UNAWARE_SYT);
	int i;

	pool_seq_descs(s, seq_descs, seq_size, seq_pos, packet_count);

	for (i = 0; i < packet_count; ++i) {
		unsigned int index = (s->packet_index + i) % s->queue_size;
		const struct seq_desc *seq = seq_descs + seq_pos;

		desc->cycle = compute_ohci_it_cycle(*ctx_header, s->queue_size);

		if (aware_syt && seq->syt_offset != CIP_SYT_NO_INFO)
			desc->syt = compute_syt(seq->syt_offset, desc->cycle, s->transfer_delay);
		else
			desc->syt = CIP_SYT_NO_INFO;

		desc->data_blocks = seq->data_blocks;

		if (s->flags & CIP_DBC_IS_END_EVENT)
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->data_block_counter = dbc;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->ctx_payload = s->buffer.packets[index].buffer;

		seq_pos = (seq_pos + 1) % seq_size;
		desc = amdtp_stream_next_packet_desc(s, desc);

		++ctx_header;
	}

	s->data_block_counter = dbc;
	s->ctx_data.rx.seq.pos = seq_pos;
}

static inline void cancel_stream(struct amdtp_stream *s)
{
	s->packet_index = -1;
	if (in_softirq())
		amdtp_stream_pcm_abort(s);
	WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}

static snd_pcm_sframes_t compute_pcm_extra_delay(struct amdtp_stream *s,
						 const struct pkt_desc *desc, unsigned int count)
{
	unsigned int data_block_count = 0;
	u32 latest_cycle;
	u32 cycle_time;
	u32 curr_cycle;
	u32 cycle_gap;
	int i, err;

	if (count == 0)
		goto end;

	// Forward to the latest record.
	for (i = 0; i < count - 1; ++i)
		desc = amdtp_stream_next_packet_desc(s, desc);
	latest_cycle = desc->cycle;

	err = fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &cycle_time);
	if (err < 0)
		goto end;

	// Compute the cycle count with the lower 3 bits of the second field and the cycle
	// field, like the timestamp format of a 1394 OHCI isochronous context.
	curr_cycle = compute_ohci_iso_ctx_cycle_count((cycle_time >> 12) & 0x0000ffff);

	if (s->direction == AMDTP_IN_STREAM) {
		// NOTE: The AMDTP packet descriptor should be for a past isochronous cycle
		// since it corresponds to an arrived isochronous packet.
		if (compare_ohci_cycle_count(latest_cycle, curr_cycle) > 0)
			goto end;
		cycle_gap = decrement_ohci_cycle_count(curr_cycle, latest_cycle);

		// NOTE: estimate the delay by the recent history of arrived AMDTP packets.
		// The estimated value expectedly corresponds to a few packets (0-2) since
		// the packet which arrived at the most recent isochronous cycle has already
		// been processed.
		for (i = 0; i < cycle_gap; ++i) {
			desc = amdtp_stream_next_packet_desc(s, desc);
			data_block_count += desc->data_blocks;
		}
	} else {
		// NOTE: The AMDTP packet descriptor should be for a future isochronous cycle
		// since it was already scheduled.
		if (compare_ohci_cycle_count(latest_cycle, curr_cycle) < 0)
			goto end;
		cycle_gap = decrement_ohci_cycle_count(latest_cycle, curr_cycle);

		// NOTE: use the history of scheduled packets.
		for (i = 0; i < cycle_gap; ++i) {
			data_block_count += desc->data_blocks;
			desc = prev_packet_desc(s, desc);
		}
	}
end:
	return data_block_count * s->pcm_frame_multiplier;
}

static void process_ctx_payloads(struct amdtp_stream *s,
				 const struct pkt_desc *desc,
				 unsigned int count)
{
	struct snd_pcm_substream *pcm;
	int i;

	pcm = READ_ONCE(s->pcm);
	s->process_ctx_payloads(s, desc, count, pcm);

	if (pcm) {
		unsigned int data_block_count = 0;

		pcm->runtime->delay = compute_pcm_extra_delay(s, desc, count);

		for (i = 0; i < count; ++i) {
			data_block_count += desc->data_blocks;
			desc = amdtp_stream_next_packet_desc(s, desc);
		}

		update_pcm_pointers(s, pcm, data_block_count * s->pcm_frame_multiplier);
	}
}

static void process_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			       void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	const unsigned int events_per_period = d->events_per_period;
	unsigned int event_count = s->ctx_data.rx.event_count;
	struct pkt_desc *desc = s->packet_descs_cursor;
	unsigned int pkt_header_length;
	unsigned int packets;
	u32 curr_cycle_time;
	bool need_hw_irq;
	int i;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in the buffer and check for XRUN.
	packets = header_length / sizeof(*ctx_header);

	generate_rx_packet_descs(s, desc, ctx_header, packets);

	process_ctx_payloads(s, desc, packets);

	if (!(s->flags & CIP_NO_HEADER))
		pkt_header_length = IT_PKT_HEADER_SIZE_CIP;
	else
		pkt_header_length = 0;

	if (s == d->irq_target) {
		// In NO_PERIOD_WAKEUP mode, the packets for all IT/IR contexts are processed
		// by the tasks of the user process operating the ALSA PCM character device by
		// calling ioctl(2) with some requests, instead of by the scheduled hardware
		// IRQ of an IT context.
		struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
		need_hw_irq = !pcm || !pcm->runtime->no_period_wakeup;
	} else {
		need_hw_irq = false;
	}

	if (trace_amdtp_packet_enabled())
		(void)fw_card_read_cycle_time(fw_parent_device(s->unit)->card, &curr_cycle_time);

	for (i = 0; i < packets; ++i) {
		struct {
			struct fw_iso_packet params;
			__be32 header[CIP_HEADER_QUADLETS];
		} template = { {0}, {0} };
		bool sched_irq = false;

		build_it_pkt_header(s, desc->cycle, &template.params, pkt_header_length,
				    desc->data_blocks, desc->data_block_counter,
				    desc->syt, i, curr_cycle_time);

		if (s == s->domain->irq_target) {
			event_count += desc->data_blocks;
			if (event_count >= events_per_period) {
				event_count -= events_per_period;
				sched_irq = need_hw_irq;
			}
		}

		if (queue_out_packet(s, &template.params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}

		desc = amdtp_stream_next_packet_desc(s, desc);
	}

	s->ctx_data.rx.event_count = event_count;
	s->packet_descs_cursor = desc;
}

static void skip_rx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			    void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	unsigned int packets;
	unsigned int cycle;
	int i;

	if (s->packet_index < 0)
		return;

	packets = header_length / sizeof(*ctx_header);

	cycle = compute_ohci_it_cycle(ctx_header[packets - 1], s->queue_size);
	s->next_cycle = increment_ohci_cycle_count(cycle, 1);

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {
			.header_length = 0,
			.payload_length = 0,
		};
		bool sched_irq = (s == d->irq_target && i == packets - 1);

		if (queue_out_packet(s, &params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}
	}
}

static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
				void *header, void *private_data);

static void process_rx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
					      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header = header;
	const unsigned int queue_size = s->queue_size;
	unsigned int packets;
	unsigned int offset;

	if (s->packet_index < 0)
		return;

	packets = header_length / sizeof(*ctx_header);

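	// Count the packets queued for cycles earlier than the cycle on which the
	// domain starts processing; they are handed to skip_rx_packets() below.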
	offset = 0;
	while (offset < packets) {
		unsigned int cycle = compute_ohci_it_cycle(ctx_header[offset], queue_size);

		if (compare_ohci_cycle_count(cycle, d->processing_cycle.rx_start) >= 0)
			break;

		++offset;
	}

	if (offset > 0) {
		unsigned int length = sizeof(*ctx_header) * offset;

		skip_rx_packets(context, tstamp, length, ctx_header, private_data);
		if (amdtp_streaming_error(s))
			return;

		ctx_header += offset;
		header_length -= length;
	}

	if (offset < packets) {
		s->ready_processing = true;
		wake_up(&s->ready_wait);

		if (d->replay.enable)
			s->ctx_data.rx.cache_pos = 0;

		process_rx_packets(context, tstamp, header_length, ctx_header, private_data);
		if (amdtp_streaming_error(s))
			return;

		if (s == d->irq_target)
			s->context->callback.sc = irq_target_callback;
		else
			s->context->callback.sc = process_rx_packets;
	}
}

static void process_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			       void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	__be32 *ctx_header = header;
	struct pkt_desc *desc = s->packet_descs_cursor;
	unsigned int packet_count;
	unsigned int desc_count;
	int i;
	int err;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in the buffer and check for XRUN.
	packet_count = header_length / s->ctx_data.tx.ctx_header_size;

	desc_count = 0;
	err = generate_tx_packet_descs(s, desc, ctx_header, packet_count, &desc_count);
	if (err < 0) {
		if (err != -EAGAIN) {
			cancel_stream(s);
			return;
		}
	} else {
		struct amdtp_domain *d = s->domain;

		process_ctx_payloads(s, desc, desc_count);

		if (d->replay.enable)
			cache_seq(s, desc, desc_count);

		for (i = 0; i < desc_count; ++i)
			desc = amdtp_stream_next_packet_desc(s, desc);
		s->packet_descs_cursor = desc;
	}

	for (i = 0; i < packet_count; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}

static void drop_tx_packets(struct fw_iso_context *context, u32 tstamp, size_t header_length,
			    void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	unsigned int packets;
	unsigned int cycle;
	int i;

	if (s->packet_index < 0)
		return;

	packets = header_length / s->ctx_data.tx.ctx_header_size;

	ctx_header += (packets - 1) * s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
	cycle = compute_ohci_cycle_count(ctx_header[1]);
	s->next_cycle = increment_ohci_cycle_count(cycle, 1);

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}

static void process_tx_packets_intermediately(struct fw_iso_context *context, u32 tstamp,
					      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header;
	unsigned int packets;
	unsigned int offset;

	if (s->packet_index < 0)
		return;

	packets = header_length / s->ctx_data.tx.ctx_header_size;

	offset = 0;
	ctx_header = header;
	while (offset < packets) {
		unsigned int cycle = compute_ohci_cycle_count(ctx_header[1]);

		if (compare_ohci_cycle_count(cycle, d->processing_cycle.tx_start) >= 0)
			break;

		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
		++offset;
	}

	ctx_header = header;

	if (offset > 0) {
		size_t length = s->ctx_data.tx.ctx_header_size * offset;

		drop_tx_packets(context, tstamp, length, ctx_header, s);
		if (amdtp_streaming_error(s))
			return;

		ctx_header += length / sizeof(*ctx_header);
		header_length -= length;
	}

	if (offset < packets) {
		s->ready_processing = true;
		wake_up(&s->ready_wait);

		process_tx_packets(context, tstamp, header_length, ctx_header, s);
		if (amdtp_streaming_error(s))
			return;

		context->callback.sc = process_tx_packets;
	}
}

static void drop_tx_packets_initially(struct fw_iso_context *context, u32 tstamp,
				      size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	__be32 *ctx_header;
	unsigned int count;
	unsigned int events;
	int i;

	if (s->packet_index < 0)
		return;

	count = header_length / s->ctx_data.tx.ctx_header_size;

	// Attempt to detect any event in the batch of packets.
	events = 0;
	ctx_header = header;
	for (i = 0; i < count; ++i) {
		unsigned int payload_quads =
			(be32_to_cpu(*ctx_header) >> ISO_DATA_LENGTH_SHIFT) / sizeof(__be32);
		unsigned int data_blocks;

		if (s->flags & CIP_NO_HEADER) {
			data_blocks = payload_quads / s->data_block_quadlets;
		} else {
			__be32 *cip_headers = ctx_header + IR_CTX_HEADER_DEFAULT_QUADLETS;

			if (payload_quads < CIP_HEADER_QUADLETS) {
				data_blocks = 0;
			} else {
				payload_quads -= CIP_HEADER_QUADLETS;

				if (s->flags & CIP_UNAWARE_SYT) {
					data_blocks = payload_quads / s->data_block_quadlets;
				} else {
					u32 cip1 = be32_to_cpu(cip_headers[1]);

					// A NODATA packet can include data blocks, but they
					// are not available as events.
					if ((cip1 & CIP_NO_DATA) == CIP_NO_DATA)
						data_blocks = 0;
					else
						data_blocks = payload_quads / s->data_block_quadlets;
				}
			}
		}

		events += data_blocks;

		ctx_header += s->ctx_data.tx.ctx_header_size / sizeof(__be32);
	}

	drop_tx_packets(context, tstamp, header_length, header, s);

	if (events > 0)
		s->ctx_data.tx.event_starts = true;

	// Decide the cycle count to begin processing the content of packets in IR contexts.
	{
		unsigned int stream_count = 0;
		unsigned int event_starts_count = 0;
		unsigned int cycle = UINT_MAX;

		list_for_each_entry(s, &d->streams, list) {
			if (s->direction == AMDTP_IN_STREAM) {
				++stream_count;
				if (s->ctx_data.tx.event_starts)
					++event_starts_count;
			}
		}

		if (stream_count == event_starts_count) {
			unsigned int next_cycle;

			list_for_each_entry(s, &d->streams, list) {
				if (s->direction != AMDTP_IN_STREAM)
					continue;

				next_cycle = increment_ohci_cycle_count(s->next_cycle,
								d->processing_cycle.tx_init_skip);
				if (cycle == UINT_MAX ||
				    compare_ohci_cycle_count(next_cycle, cycle) > 0)
					cycle = next_cycle;

				s->context->callback.sc = process_tx_packets_intermediately;
			}

			d->processing_cycle.tx_start = cycle;
		}
	}
}

static void process_ctxs_in_domain(struct amdtp_domain *d)
{
	struct amdtp_stream *s;

	list_for_each_entry(s, &d->streams, list) {
		if (s != d->irq_target && amdtp_stream_running(s))
			fw_iso_context_flush_completions(s->context);

		if (amdtp_streaming_error(s))
			goto error;
	}

	return;
error:
	if (amdtp_stream_running(d->irq_target))
		cancel_stream(d->irq_target);

	list_for_each_entry(s, &d->streams, list) {
		if (amdtp_stream_running(s))
			cancel_stream(s);
	}
}

static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, size_t header_length,
				void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	process_rx_packets(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);
}

static void irq_target_callback_intermediately(struct fw_iso_context *context, u32 tstamp,
					       size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	process_rx_packets_intermediately(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);
}

static void irq_target_callback_skip(struct fw_iso_context *context, u32 tstamp,
				     size_t header_length, void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;
	bool ready_to_start;

	skip_rx_packets(context, tstamp, header_length, header, private_data);
	process_ctxs_in_domain(d);

	if (d->replay.enable && !d->replay.on_the_fly) {
		unsigned int rx_count = 0;
		unsigned int rx_ready_count = 0;
		struct amdtp_stream *rx;

		list_for_each_entry(rx, &d->streams, list) {
			struct amdtp_stream *tx;
			unsigned int cached_cycles;

			if (rx->direction != AMDTP_OUT_STREAM)
				continue;
			++rx_count;

			tx = rx->ctx_data.rx.replay_target;
			cached_cycles = calculate_cached_cycle_count(tx, 0);
			if (cached_cycles > tx->ctx_data.tx.cache.size / 2)
				++rx_ready_count;
		}

		ready_to_start = (rx_count == rx_ready_count);
	} else {
		ready_to_start = true;
	}

	// Decide the cycle count to begin processing the content of packets in IT contexts.
	// All of the IT contexts are expected to have started and received callbacks by the
	// time we reach here.
	if (ready_to_start) {
		unsigned int cycle = s->next_cycle;
		list_for_each_entry(s, &d->streams, list) {
			if (s->direction != AMDTP_OUT_STREAM)
				continue;

			if (compare_ohci_cycle_count(s->next_cycle, cycle) > 0)
				cycle = s->next_cycle;

			if (s == d->irq_target)
				s->context->callback.sc = irq_target_callback_intermediately;
			else
				s->context->callback.sc = process_rx_packets_intermediately;
		}

		d->processing_cycle.rx_start = cycle;
	}
}
1624
// This is executed only once. For an in-stream, the first packet has arrived. For an out-stream,
// the driver is prepared to transmit the first packet.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	struct amdtp_domain *d = s->domain;

	if (s->direction == AMDTP_IN_STREAM) {
		context->callback.sc = drop_tx_packets_initially;
	} else {
		if (s == d->irq_target)
			context->callback.sc = irq_target_callback_skip;
		else
			context->callback.sc = skip_rx_packets;
	}

	context->callback.sc(context, tstamp, header_length, header, s);
}

/**
 * amdtp_stream_start - start transferring packets
 * @s: the AMDTP stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 * @queue_size: the number of packets in the queue.
 * @idle_irq_interval: the interval at which to queue packets with a scheduled IRQ during the
 *		       initial state.
 *
 * The stream cannot be started until it has been configured with
 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
 * device can be started.
 */
static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
			      unsigned int queue_size, unsigned int idle_irq_interval)
{
	bool is_irq_target = (s == s->domain->irq_target);
	unsigned int ctx_header_size;
	unsigned int max_ctx_payload_size;
	enum dma_data_direction dir;
	struct pkt_desc *descs;
	int i, type, tag, err;

	mutex_lock(&s->mutex);

	if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1))) {
		err = -EBADFD;
		goto err_unlock;
	}

	if (s->direction == AMDTP_IN_STREAM) {
		// NOTE: IT context should be used for constant IRQ.
		if (is_irq_target) {
			err = -EINVAL;
			goto err_unlock;
		}

		s->data_block_counter = UINT_MAX;
	} else {
		s->data_block_counter = 0;
	}

	// initialize packet buffer.
	if (s->direction == AMDTP_IN_STREAM) {
		dir = DMA_FROM_DEVICE;
		type = FW_ISO_CONTEXT_RECEIVE;
		if (!(s->flags & CIP_NO_HEADER))
			ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
		else
			ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
	} else {
		dir = DMA_TO_DEVICE;
		type = FW_ISO_CONTEXT_TRANSMIT;
		ctx_header_size = 0;	// No effect for IT context.
	}
	max_ctx_payload_size = amdtp_stream_get_max_ctx_payload_size(s);

	err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size, max_ctx_payload_size, dir);
	if (err < 0)
		goto err_unlock;
	s->queue_size = queue_size;

	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
					   type, channel, speed, ctx_header_size,
					   amdtp_stream_first_callback, s);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}

	amdtp_stream_update(s);

	if (s->direction == AMDTP_IN_STREAM) {
		s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
		s->ctx_data.tx.ctx_header_size = ctx_header_size;
		s->ctx_data.tx.event_starts = false;

		if (s->domain->replay.enable) {
			// struct fw_iso_context.drop_overflow_headers is false, therefore it's
			// possible to cache more than expected.
			s->ctx_data.tx.cache.size = max_t(unsigned int, s->syt_interval * 2,
							  queue_size * 3 / 2);
			s->ctx_data.tx.cache.pos = 0;
			s->ctx_data.tx.cache.descs = kcalloc(s->ctx_data.tx.cache.size,
					sizeof(*s->ctx_data.tx.cache.descs), GFP_KERNEL);
			if (!s->ctx_data.tx.cache.descs) {
				err = -ENOMEM;
				goto err_context;
			}
		}
	} else {
		static const struct {
			unsigned int data_block;
			unsigned int syt_offset;
		} *entry, initial_state[] = {
			[CIP_SFC_32000]  = {  4, 3072 },
			[CIP_SFC_48000]  = {  6, 1024 },
			[CIP_SFC_96000]  = { 12, 1024 },
			[CIP_SFC_192000] = { 24, 1024 },
			[CIP_SFC_44100]  = {  0,   67 },
			[CIP_SFC_88200]  = {  0,   67 },
			[CIP_SFC_176400] = {  0,   67 },
		};

		s->ctx_data.rx.seq.descs = kcalloc(queue_size, sizeof(*s->ctx_data.rx.seq.descs), GFP_KERNEL);
		if (!s->ctx_data.rx.seq.descs) {
			err = -ENOMEM;
			goto err_context;
		}
		s->ctx_data.rx.seq.size = queue_size;
		s->ctx_data.rx.seq.pos = 0;

		entry = &initial_state[s->sfc];
		s->ctx_data.rx.data_block_state = entry->data_block;
		s->ctx_data.rx.syt_offset_state = entry->syt_offset;
		s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE;

		s->ctx_data.rx.event_count = 0;
	}

	if (s->flags & CIP_NO_HEADER)
		s->tag = TAG_NO_CIP_HEADER;
	else
		s->tag = TAG_CIP;

	// NOTE: When operating without hardIRQ/softIRQ, applications tend to issue ioctl requests
	// for the runtime of the PCM substream at an interval equivalent to the size of the PCM
	// buffer. This can wrap around the queue of AMDTP packet descriptors, losing a small
	// amount of history. To be safe, keep 8 more elements in the queue, equivalent to 1 ms.
	descs = kcalloc(s->queue_size + 8, sizeof(*descs), GFP_KERNEL);
	if (!descs) {
		err = -ENOMEM;
		goto err_context;
	}
	s->packet_descs = descs;

	INIT_LIST_HEAD(&s->packet_descs_list);
	for (i = 0; i < s->queue_size; ++i) {
		INIT_LIST_HEAD(&descs->link);
		list_add_tail(&descs->link, &s->packet_descs_list);
		++descs;
	}
	s->packet_descs_cursor = list_first_entry(&s->packet_descs_list, struct pkt_desc, link);

	s->packet_index = 0;
	do {
		struct fw_iso_packet params;

		if (s->direction == AMDTP_IN_STREAM) {
			err = queue_in_packet(s, &params);
		} else {
			bool sched_irq = false;

			params.header_length = 0;
			params.payload_length = 0;

			if (is_irq_target) {
				sched_irq = !((s->packet_index + 1) %
					      idle_irq_interval);
			}

			err = queue_out_packet(s, &params, sched_irq);
		}
		if (err < 0)
			goto err_pkt_descs;
	} while (s->packet_index > 0);

	/* NOTE: TAG1 matches CIP. This only affects the in-stream. */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->ready_processing = false;
	err = fw_iso_context_start(s->context, -1, 0, tag);
	if (err < 0)
		goto err_pkt_descs;

	mutex_unlock(&s->mutex);

	return 0;
err_pkt_descs:
	kfree(s->packet_descs);
	s->packet_descs = NULL;
err_context:
	if (s->direction == AMDTP_OUT_STREAM) {
		kfree(s->ctx_data.rx.seq.descs);
	} else {
		if (s->domain->replay.enable)
			kfree(s->ctx_data.tx.cache.descs);
	}
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}

/**
 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames.
 */
unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
					      struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	if (irq_target && amdtp_stream_running(irq_target)) {
		// Use the work item to prevent an AB/BA deadlock in the competition for the
		// substream lock: fw_iso_context_flush_completions() acquires the lock via
		// ohci_flush_iso_completions(), while process_rx_packets() in amdtp-stream
		// attempts to acquire the same lock via snd_pcm_period_elapsed().
		if (current_work() != &s->period_work)
			fw_iso_context_flush_completions(irq_target->context);
	}

	return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);
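
// Example: a minimal, hedged sketch of a PCM .pointer callback built on this helper. The
// "example_card" type and its "domain"/"rx_stream" members are hypothetical; they only
// illustrate the calling convention.
//
//	static snd_pcm_uframes_t pcm_pointer(struct snd_pcm_substream *substream)
//	{
//		struct example_card *card = substream->private_data;
//
//		return amdtp_domain_stream_pcm_pointer(&card->domain, &card->rx_stream);
//	}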

/**
 * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transfers the PCM frames
 *
 * Returns zero always.
 */
int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	// Process isochronous packets for the most recent isochronous cycle to handle
	// queued PCM frames.
	if (irq_target && amdtp_stream_running(irq_target))
		fw_iso_context_flush_completions(irq_target->context);

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);
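
// Example: a hedged sketch of the matching PCM .ack callback; as above, "example_card" and its
// members are hypothetical.
//
//	static int pcm_ack(struct snd_pcm_substream *substream)
//	{
//		struct example_card *card = substream->private_data;
//
//		return amdtp_domain_stream_pcm_ack(&card->domain, &card->rx_stream);
//	}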

/**
 * amdtp_stream_update - update the stream after a bus reset
 * @s: the AMDTP stream
 */
void amdtp_stream_update(struct amdtp_stream *s)
{
	/* Precomputing. */
	WRITE_ONCE(s->source_node_id_field,
		   (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
}
EXPORT_SYMBOL(amdtp_stream_update);
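
// Example: a hedged sketch of a driver's bus-reset handling, calling this helper so the SID
// field of outgoing CIP headers picks up the new node ID; "example_card" is hypothetical.
//
//	static void example_bus_update(struct example_card *card)
//	{
//		amdtp_stream_update(&card->rx_stream);
//		amdtp_stream_update(&card->tx_stream);
//	}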

/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream must be stopped before the stream
 * itself can be stopped.
 */
static void amdtp_stream_stop(struct amdtp_stream *s)
{
	mutex_lock(&s->mutex);

	if (!amdtp_stream_running(s)) {
		mutex_unlock(&s->mutex);
		return;
	}

	cancel_work_sync(&s->period_work);
	fw_iso_context_stop(s->context);
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
	iso_packets_buffer_destroy(&s->buffer, s->unit);
	kfree(s->packet_descs);
	s->packet_descs = NULL;

	if (s->direction == AMDTP_OUT_STREAM) {
		kfree(s->ctx_data.rx.seq.descs);
	} else {
		if (s->domain->replay.enable)
			kfree(s->ctx_data.tx.cache.descs);
	}

	mutex_unlock(&s->mutex);
}

/**
 * amdtp_stream_pcm_abort - abort the running PCM device
 * @s: the AMDTP stream about to be stopped
 *
 * If the isochronous stream needs to be stopped asynchronously, call this
 * function first to stop the PCM device.
 */
void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
	struct snd_pcm_substream *pcm;

	pcm = READ_ONCE(s->pcm);
	if (pcm)
		snd_pcm_stop_xrun(pcm);
}
EXPORT_SYMBOL(amdtp_stream_pcm_abort);
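
// Example: a hedged sketch of the asynchronous stop path described above: abort the PCM devices
// first, then stop the isochronous contexts. "example_card" is hypothetical.
//
//	static void example_async_stop(struct example_card *card)
//	{
//		amdtp_stream_pcm_abort(&card->rx_stream);
//		amdtp_stream_pcm_abort(&card->tx_stream);
//		amdtp_domain_stop(&card->domain);
//	}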

/**
 * amdtp_domain_init - initialize an AMDTP domain structure
 * @d: the AMDTP domain to initialize.
 */
int amdtp_domain_init(struct amdtp_domain *d)
{
	INIT_LIST_HEAD(&d->streams);

	d->events_per_period = 0;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_init);

/**
 * amdtp_domain_destroy - destroy an AMDTP domain structure
 * @d: the AMDTP domain to destroy.
 */
void amdtp_domain_destroy(struct amdtp_domain *d)
{
	// At present nothing to do.
	return;
}
EXPORT_SYMBOL_GPL(amdtp_domain_destroy);
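
// Example: a hedged sketch of the domain lifecycle; a driver would typically pair these calls
// in its probe and remove paths. "example_card" is hypothetical.
//
//	static int example_probe(struct example_card *card)
//	{
//		return amdtp_domain_init(&card->domain);
//	}
//
//	static void example_remove(struct example_card *card)
//	{
//		amdtp_domain_destroy(&card->domain);
//	}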

/**
 * amdtp_domain_add_stream - register isoc context into the domain.
 * @d: the AMDTP domain.
 * @s: the AMDTP stream.
 * @channel: the isochronous channel on the bus.
 * @speed: firewire speed code.
 */
int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
			    int channel, int speed)
{
	struct amdtp_stream *tmp;

	list_for_each_entry(tmp, &d->streams, list) {
		if (s == tmp)
			return -EBUSY;
	}

	list_add(&s->list, &d->streams);

	s->channel = channel;
	s->speed = speed;
	s->domain = d;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);
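
// Example: a hedged sketch of registering one tx and one rx stream with the domain before
// starting it. The channel numbers are placeholders; a real driver obtains them from
// isochronous resource allocation. "example_card" is hypothetical.
//
//	static int example_add_streams(struct example_card *card)
//	{
//		int err;
//
//		err = amdtp_domain_add_stream(&card->domain, &card->tx_stream, 0, SCODE_400);
//		if (err < 0)
//			return err;
//
//		return amdtp_domain_add_stream(&card->domain, &card->rx_stream, 1, SCODE_400);
//	}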

// Make the reference from an rx stream to a tx stream for sequence replay. When the number of tx
// streams is less than the number of rx streams, the first tx stream is selected for the rest
// (see the worked example after this function).
static int make_association(struct amdtp_domain *d)
{
	unsigned int dst_index = 0;
	struct amdtp_stream *rx;

	// Make association to replay target.
	list_for_each_entry(rx, &d->streams, list) {
		if (rx->direction == AMDTP_OUT_STREAM) {
			unsigned int src_index = 0;
			struct amdtp_stream *tx = NULL;
			struct amdtp_stream *s;

			list_for_each_entry(s, &d->streams, list) {
				if (s->direction == AMDTP_IN_STREAM) {
					if (dst_index == src_index) {
						tx = s;
						break;
					}

					++src_index;
				}
			}
			if (!tx) {
				// Select the first entry.
				list_for_each_entry(s, &d->streams, list) {
					if (s->direction == AMDTP_IN_STREAM) {
						tx = s;
						break;
					}
				}
				// No target is available to replay sequence.
				if (!tx)
					return -EINVAL;
			}

			rx->ctx_data.rx.replay_target = tx;

			++dst_index;
		}
	}

	return 0;
}
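
// Worked example (editorial sketch, not from the original source): with two tx streams and
// three rx streams registered in the domain, the loop above associates:
//
//	rx[0]->ctx_data.rx.replay_target = tx[0];
//	rx[1]->ctx_data.rx.replay_target = tx[1];
//	rx[2]->ctx_data.rx.replay_target = tx[0];	// falls back to the first tx stream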

/**
 * amdtp_domain_start - start sending packets for isoc contexts in the domain.
 * @d: the AMDTP domain.
 * @tx_init_skip_cycles: the number of cycles to skip processing packets at the initial stage of
 *			 IR contexts.
 * @replay_seq: whether to replay the sequence of packets in the IR context for the sequence of
 *		packets in the IT context.
 * @replay_on_the_fly: transfer rx packets according to the nominal frequency, then begin to
 *		       replay according to the arrival of events in tx packets.
 */
int amdtp_domain_start(struct amdtp_domain *d, unsigned int tx_init_skip_cycles, bool replay_seq,
		       bool replay_on_the_fly)
{
	unsigned int events_per_buffer = d->events_per_buffer;
	unsigned int events_per_period = d->events_per_period;
	unsigned int queue_size;
	struct amdtp_stream *s;
	bool found = false;
	int err;

	if (replay_seq) {
		err = make_association(d);
		if (err < 0)
			return err;
	}
	d->replay.enable = replay_seq;
	d->replay.on_the_fly = replay_on_the_fly;

	// Select an IT context as IRQ target.
	list_for_each_entry(s, &d->streams, list) {
		if (s->direction == AMDTP_OUT_STREAM) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENXIO;
	d->irq_target = s;

	d->processing_cycle.tx_init_skip = tx_init_skip_cycles;

	// This is the case in which AMDTP streams in the domain run just for a MIDI
	// substream. Use the number of events equivalent to 10 msec as the
	// interval of hardware IRQ.
	if (events_per_period == 0)
		events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
	if (events_per_buffer == 0)
		events_per_buffer = events_per_period * 3;

	queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
				  amdtp_rate_table[d->irq_target->sfc]);

	list_for_each_entry(s, &d->streams, list) {
		unsigned int idle_irq_interval = 0;

		if (s->direction == AMDTP_OUT_STREAM && s == d->irq_target) {
			idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
							 amdtp_rate_table[d->irq_target->sfc]);
		}

		// Starts immediately, but the DMA context actually starts several hundred cycles later.
		err = amdtp_stream_start(s, s->channel, s->speed, queue_size, idle_irq_interval);
		if (err < 0)
			goto error;
	}

	return 0;
error:
	list_for_each_entry(s, &d->streams, list)
		amdtp_stream_stop(s);
	return err;
}
EXPORT_SYMBOL_GPL(amdtp_domain_start);
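
// Worked example (a sketch under assumed parameters, not from the original source): at 48.0 kHz
// (amdtp_rate_table[sfc] == 48000) with events_per_period and events_per_buffer left at zero,
// the fallbacks above give events_per_period = 48000 / 100 = 480 events (10 msec) and
// events_per_buffer = 3 * 480 = 1440 events, hence:
//
//	queue_size = DIV_ROUND_UP(8000 * 1440, 48000) = 240 packets (30 msec of cycles)
//	idle_irq_interval = DIV_ROUND_UP(8000 * 480, 48000) = 80 cycles (10 msec)
//
// A hedged usage sketch without sequence replay (the "card" variable is hypothetical):
//
//	err = amdtp_domain_start(&card->domain, 0, false, false);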

/**
 * amdtp_domain_stop - stop sending packets for isoc contexts in the same domain.
 * @d: the AMDTP domain to which the isoc contexts belong.
 */
void amdtp_domain_stop(struct amdtp_domain *d)
{
	struct amdtp_stream *s, *next;

	if (d->irq_target)
		amdtp_stream_stop(d->irq_target);

	list_for_each_entry_safe(s, next, &d->streams, list) {
		list_del(&s->list);

		if (s != d->irq_target)
			amdtp_stream_stop(s);
	}

	d->events_per_period = 0;
	d->irq_target = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);
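
// Example: a hedged sketch of the matching shutdown path; a driver would typically stop the
// domain before destroying it. "example_card" is hypothetical.
//
//	static void example_shutdown(struct example_card *card)
//	{
//		amdtp_domain_stop(&card->domain);
//		amdtp_domain_destroy(&card->domain);
//	}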