1 /* 2 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams 3 * with Common Isochronous Packet (IEC 61883-1) headers 4 * 5 * Copyright (c) Clemens Ladisch <clemens@ladisch.de> 6 * Licensed under the terms of the GNU General Public License, version 2. 7 */ 8 9 #include <linux/device.h> 10 #include <linux/err.h> 11 #include <linux/firewire.h> 12 #include <linux/module.h> 13 #include <linux/slab.h> 14 #include <sound/pcm.h> 15 #include <sound/pcm_params.h> 16 #include "amdtp-stream.h" 17 18 #define TICKS_PER_CYCLE 3072 19 #define CYCLES_PER_SECOND 8000 20 #define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND) 21 22 /* Always support Linux tracing subsystem. */ 23 #define CREATE_TRACE_POINTS 24 #include "amdtp-stream-trace.h" 25 26 #define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 microseconds */ 27 28 /* isochronous header parameters */ 29 #define ISO_DATA_LENGTH_SHIFT 16 30 #define TAG_NO_CIP_HEADER 0 31 #define TAG_CIP 1 32 33 /* common isochronous packet header parameters */ 34 #define CIP_EOH_SHIFT 31 35 #define CIP_EOH (1u << CIP_EOH_SHIFT) 36 #define CIP_EOH_MASK 0x80000000 37 #define CIP_SID_SHIFT 24 38 #define CIP_SID_MASK 0x3f000000 39 #define CIP_DBS_MASK 0x00ff0000 40 #define CIP_DBS_SHIFT 16 41 #define CIP_SPH_MASK 0x00000400 42 #define CIP_SPH_SHIFT 10 43 #define CIP_DBC_MASK 0x000000ff 44 #define CIP_FMT_SHIFT 24 45 #define CIP_FMT_MASK 0x3f000000 46 #define CIP_FDF_MASK 0x00ff0000 47 #define CIP_FDF_SHIFT 16 48 #define CIP_SYT_MASK 0x0000ffff 49 #define CIP_SYT_NO_INFO 0xffff 50 51 /* Audio and Music transfer protocol specific parameters */ 52 #define CIP_FMT_AM 0x10 53 #define AMDTP_FDF_NO_DATA 0xff 54 55 /* TODO: make these configurable */ 56 #define INTERRUPT_INTERVAL 16 57 #define QUEUE_LENGTH 48 58 59 #define IR_HEADER_SIZE 8 // For header and timestamp. 60 #define OUT_PACKET_HEADER_SIZE 0 61 #define HEADER_TSTAMP_MASK 0x0000ffff 62 63 static void pcm_period_tasklet(unsigned long data); 64 65 /** 66 * amdtp_stream_init - initialize an AMDTP stream structure 67 * @s: the AMDTP stream to initialize 68 * @unit: the target of the stream 69 * @dir: the direction of stream 70 * @flags: the packet transmission method to use 71 * @fmt: the value of fmt field in CIP header 72 * @process_data_blocks: callback handler to process data blocks 73 * @protocol_size: the size to allocate newly for protocol 74 */ 75 int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit, 76 enum amdtp_stream_direction dir, enum cip_flags flags, 77 unsigned int fmt, 78 amdtp_stream_process_data_blocks_t process_data_blocks, 79 unsigned int protocol_size) 80 { 81 if (process_data_blocks == NULL) 82 return -EINVAL; 83 84 s->protocol = kzalloc(protocol_size, GFP_KERNEL); 85 if (!s->protocol) 86 return -ENOMEM; 87 88 s->unit = unit; 89 s->direction = dir; 90 s->flags = flags; 91 s->context = ERR_PTR(-1); 92 mutex_init(&s->mutex); 93 tasklet_init(&s->period_tasklet, pcm_period_tasklet, (unsigned long)s); 94 s->packet_index = 0; 95 96 init_waitqueue_head(&s->callback_wait); 97 s->callbacked = false; 98 99 s->fmt = fmt; 100 s->process_data_blocks = process_data_blocks; 101 102 return 0; 103 } 104 EXPORT_SYMBOL(amdtp_stream_init); 105 106 /** 107 * amdtp_stream_destroy - free stream resources 108 * @s: the AMDTP stream to destroy 109 */ 110 void amdtp_stream_destroy(struct amdtp_stream *s) 111 { 112 /* Not initialized. */ 113 if (s->protocol == NULL) 114 return; 115 116 WARN_ON(amdtp_stream_running(s)); 117 kfree(s->protocol); 118 mutex_destroy(&s->mutex); 119 } 120 EXPORT_SYMBOL(amdtp_stream_destroy); 121 122 const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = { 123 [CIP_SFC_32000] = 8, 124 [CIP_SFC_44100] = 8, 125 [CIP_SFC_48000] = 8, 126 [CIP_SFC_88200] = 16, 127 [CIP_SFC_96000] = 16, 128 [CIP_SFC_176400] = 32, 129 [CIP_SFC_192000] = 32, 130 }; 131 EXPORT_SYMBOL(amdtp_syt_intervals); 132 133 const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = { 134 [CIP_SFC_32000] = 32000, 135 [CIP_SFC_44100] = 44100, 136 [CIP_SFC_48000] = 48000, 137 [CIP_SFC_88200] = 88200, 138 [CIP_SFC_96000] = 96000, 139 [CIP_SFC_176400] = 176400, 140 [CIP_SFC_192000] = 192000, 141 }; 142 EXPORT_SYMBOL(amdtp_rate_table); 143 144 static int apply_constraint_to_size(struct snd_pcm_hw_params *params, 145 struct snd_pcm_hw_rule *rule) 146 { 147 struct snd_interval *s = hw_param_interval(params, rule->var); 148 const struct snd_interval *r = 149 hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE); 150 struct snd_interval t = {0}; 151 unsigned int step = 0; 152 int i; 153 154 for (i = 0; i < CIP_SFC_COUNT; ++i) { 155 if (snd_interval_test(r, amdtp_rate_table[i])) 156 step = max(step, amdtp_syt_intervals[i]); 157 } 158 159 t.min = roundup(s->min, step); 160 t.max = rounddown(s->max, step); 161 t.integer = 1; 162 163 return snd_interval_refine(s, &t); 164 } 165 166 /** 167 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream 168 * @s: the AMDTP stream, which must be initialized. 169 * @runtime: the PCM substream runtime 170 */ 171 int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s, 172 struct snd_pcm_runtime *runtime) 173 { 174 struct snd_pcm_hardware *hw = &runtime->hw; 175 int err; 176 177 hw->info = SNDRV_PCM_INFO_BATCH | 178 SNDRV_PCM_INFO_BLOCK_TRANSFER | 179 SNDRV_PCM_INFO_INTERLEAVED | 180 SNDRV_PCM_INFO_JOINT_DUPLEX | 181 SNDRV_PCM_INFO_MMAP | 182 SNDRV_PCM_INFO_MMAP_VALID; 183 184 /* SNDRV_PCM_INFO_BATCH */ 185 hw->periods_min = 2; 186 hw->periods_max = UINT_MAX; 187 188 /* bytes for a frame */ 189 hw->period_bytes_min = 4 * hw->channels_max; 190 191 /* Just to prevent from allocating much pages. */ 192 hw->period_bytes_max = hw->period_bytes_min * 2048; 193 hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min; 194 195 /* 196 * Currently firewire-lib processes 16 packets in one software 197 * interrupt callback. This equals to 2msec but actually the 198 * interval of the interrupts has a jitter. 199 * Additionally, even if adding a constraint to fit period size to 200 * 2msec, actual calculated frames per period doesn't equal to 2msec, 201 * depending on sampling rate. 202 * Anyway, the interval to call snd_pcm_period_elapsed() cannot 2msec. 203 * Here let us use 5msec for safe period interrupt. 204 */ 205 err = snd_pcm_hw_constraint_minmax(runtime, 206 SNDRV_PCM_HW_PARAM_PERIOD_TIME, 207 5000, UINT_MAX); 208 if (err < 0) 209 goto end; 210 211 /* Non-Blocking stream has no more constraints */ 212 if (!(s->flags & CIP_BLOCKING)) 213 goto end; 214 215 /* 216 * One AMDTP packet can include some frames. In blocking mode, the 217 * number equals to SYT_INTERVAL. So the number is 8, 16 or 32, 218 * depending on its sampling rate. For accurate period interrupt, it's 219 * preferrable to align period/buffer sizes to current SYT_INTERVAL. 220 */ 221 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 222 apply_constraint_to_size, NULL, 223 SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 224 SNDRV_PCM_HW_PARAM_RATE, -1); 225 if (err < 0) 226 goto end; 227 err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 228 apply_constraint_to_size, NULL, 229 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 230 SNDRV_PCM_HW_PARAM_RATE, -1); 231 if (err < 0) 232 goto end; 233 end: 234 return err; 235 } 236 EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints); 237 238 /** 239 * amdtp_stream_set_parameters - set stream parameters 240 * @s: the AMDTP stream to configure 241 * @rate: the sample rate 242 * @data_block_quadlets: the size of a data block in quadlet unit 243 * 244 * The parameters must be set before the stream is started, and must not be 245 * changed while the stream is running. 246 */ 247 int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate, 248 unsigned int data_block_quadlets) 249 { 250 unsigned int sfc; 251 252 for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) { 253 if (amdtp_rate_table[sfc] == rate) 254 break; 255 } 256 if (sfc == ARRAY_SIZE(amdtp_rate_table)) 257 return -EINVAL; 258 259 s->sfc = sfc; 260 s->data_block_quadlets = data_block_quadlets; 261 s->syt_interval = amdtp_syt_intervals[sfc]; 262 263 /* default buffering in the device */ 264 s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE; 265 if (s->flags & CIP_BLOCKING) 266 /* additional buffering needed to adjust for no-data packets */ 267 s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate; 268 269 return 0; 270 } 271 EXPORT_SYMBOL(amdtp_stream_set_parameters); 272 273 /** 274 * amdtp_stream_get_max_payload - get the stream's packet size 275 * @s: the AMDTP stream 276 * 277 * This function must not be called before the stream has been configured 278 * with amdtp_stream_set_parameters(). 279 */ 280 unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s) 281 { 282 unsigned int multiplier = 1; 283 unsigned int header_size = 0; 284 285 if (s->flags & CIP_JUMBO_PAYLOAD) 286 multiplier = 5; 287 if (!(s->flags & CIP_NO_HEADER)) 288 header_size = 8; 289 290 return header_size + 291 s->syt_interval * s->data_block_quadlets * 4 * multiplier; 292 } 293 EXPORT_SYMBOL(amdtp_stream_get_max_payload); 294 295 /** 296 * amdtp_stream_pcm_prepare - prepare PCM device for running 297 * @s: the AMDTP stream 298 * 299 * This function should be called from the PCM device's .prepare callback. 300 */ 301 void amdtp_stream_pcm_prepare(struct amdtp_stream *s) 302 { 303 tasklet_kill(&s->period_tasklet); 304 s->pcm_buffer_pointer = 0; 305 s->pcm_period_pointer = 0; 306 } 307 EXPORT_SYMBOL(amdtp_stream_pcm_prepare); 308 309 static unsigned int calculate_data_blocks(struct amdtp_stream *s, 310 unsigned int syt) 311 { 312 unsigned int phase, data_blocks; 313 314 /* Blocking mode. */ 315 if (s->flags & CIP_BLOCKING) { 316 /* This module generate empty packet for 'no data'. */ 317 if (syt == CIP_SYT_NO_INFO) 318 data_blocks = 0; 319 else 320 data_blocks = s->syt_interval; 321 /* Non-blocking mode. */ 322 } else { 323 if (!cip_sfc_is_base_44100(s->sfc)) { 324 /* Sample_rate / 8000 is an integer, and precomputed. */ 325 data_blocks = s->data_block_state; 326 } else { 327 phase = s->data_block_state; 328 329 /* 330 * This calculates the number of data blocks per packet so that 331 * 1) the overall rate is correct and exactly synchronized to 332 * the bus clock, and 333 * 2) packets with a rounded-up number of blocks occur as early 334 * as possible in the sequence (to prevent underruns of the 335 * device's buffer). 336 */ 337 if (s->sfc == CIP_SFC_44100) 338 /* 6 6 5 6 5 6 5 ... */ 339 data_blocks = 5 + ((phase & 1) ^ 340 (phase == 0 || phase >= 40)); 341 else 342 /* 12 11 11 11 11 ... or 23 22 22 22 22 ... */ 343 data_blocks = 11 * (s->sfc >> 1) + (phase == 0); 344 if (++phase >= (80 >> (s->sfc >> 1))) 345 phase = 0; 346 s->data_block_state = phase; 347 } 348 } 349 350 return data_blocks; 351 } 352 353 static unsigned int calculate_syt(struct amdtp_stream *s, 354 unsigned int cycle) 355 { 356 unsigned int syt_offset, phase, index, syt; 357 358 if (s->last_syt_offset < TICKS_PER_CYCLE) { 359 if (!cip_sfc_is_base_44100(s->sfc)) 360 syt_offset = s->last_syt_offset + s->syt_offset_state; 361 else { 362 /* 363 * The time, in ticks, of the n'th SYT_INTERVAL sample is: 364 * n * SYT_INTERVAL * 24576000 / sample_rate 365 * Modulo TICKS_PER_CYCLE, the difference between successive 366 * elements is about 1386.23. Rounding the results of this 367 * formula to the SYT precision results in a sequence of 368 * differences that begins with: 369 * 1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ... 370 * This code generates _exactly_ the same sequence. 371 */ 372 phase = s->syt_offset_state; 373 index = phase % 13; 374 syt_offset = s->last_syt_offset; 375 syt_offset += 1386 + ((index && !(index & 3)) || 376 phase == 146); 377 if (++phase >= 147) 378 phase = 0; 379 s->syt_offset_state = phase; 380 } 381 } else 382 syt_offset = s->last_syt_offset - TICKS_PER_CYCLE; 383 s->last_syt_offset = syt_offset; 384 385 if (syt_offset < TICKS_PER_CYCLE) { 386 syt_offset += s->transfer_delay; 387 syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12; 388 syt += syt_offset % TICKS_PER_CYCLE; 389 390 return syt & CIP_SYT_MASK; 391 } else { 392 return CIP_SYT_NO_INFO; 393 } 394 } 395 396 static void update_pcm_pointers(struct amdtp_stream *s, 397 struct snd_pcm_substream *pcm, 398 unsigned int frames) 399 { 400 unsigned int ptr; 401 402 ptr = s->pcm_buffer_pointer + frames; 403 if (ptr >= pcm->runtime->buffer_size) 404 ptr -= pcm->runtime->buffer_size; 405 WRITE_ONCE(s->pcm_buffer_pointer, ptr); 406 407 s->pcm_period_pointer += frames; 408 if (s->pcm_period_pointer >= pcm->runtime->period_size) { 409 s->pcm_period_pointer -= pcm->runtime->period_size; 410 tasklet_hi_schedule(&s->period_tasklet); 411 } 412 } 413 414 static void pcm_period_tasklet(unsigned long data) 415 { 416 struct amdtp_stream *s = (void *)data; 417 struct snd_pcm_substream *pcm = READ_ONCE(s->pcm); 418 419 if (pcm) 420 snd_pcm_period_elapsed(pcm); 421 } 422 423 static int queue_packet(struct amdtp_stream *s, unsigned int header_length, 424 unsigned int payload_length) 425 { 426 struct fw_iso_packet p = {0}; 427 int err = 0; 428 429 if (IS_ERR(s->context)) 430 goto end; 431 432 p.interrupt = IS_ALIGNED(s->packet_index + 1, INTERRUPT_INTERVAL); 433 p.tag = s->tag; 434 p.header_length = header_length; 435 if (payload_length > 0) 436 p.payload_length = payload_length; 437 else 438 p.skip = true; 439 err = fw_iso_context_queue(s->context, &p, &s->buffer.iso_buffer, 440 s->buffer.packets[s->packet_index].offset); 441 if (err < 0) { 442 dev_err(&s->unit->device, "queueing error: %d\n", err); 443 goto end; 444 } 445 446 if (++s->packet_index >= QUEUE_LENGTH) 447 s->packet_index = 0; 448 end: 449 return err; 450 } 451 452 static inline int queue_out_packet(struct amdtp_stream *s, 453 unsigned int payload_length) 454 { 455 return queue_packet(s, OUT_PACKET_HEADER_SIZE, payload_length); 456 } 457 458 static inline int queue_in_packet(struct amdtp_stream *s) 459 { 460 return queue_packet(s, IR_HEADER_SIZE, s->max_payload_length); 461 } 462 463 static int handle_out_packet(struct amdtp_stream *s, 464 unsigned int payload_length, unsigned int cycle, 465 unsigned int index) 466 { 467 __be32 *buffer; 468 unsigned int syt; 469 unsigned int data_blocks; 470 unsigned int pcm_frames; 471 struct snd_pcm_substream *pcm; 472 473 buffer = s->buffer.packets[s->packet_index].buffer; 474 syt = calculate_syt(s, cycle); 475 data_blocks = calculate_data_blocks(s, syt); 476 pcm_frames = s->process_data_blocks(s, buffer + 2, data_blocks, &syt); 477 478 if (s->flags & CIP_DBC_IS_END_EVENT) 479 s->data_block_counter = 480 (s->data_block_counter + data_blocks) & 0xff; 481 482 buffer[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) | 483 (s->data_block_quadlets << CIP_DBS_SHIFT) | 484 ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) | 485 s->data_block_counter); 486 buffer[1] = cpu_to_be32(CIP_EOH | 487 ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) | 488 ((s->fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) | 489 (syt & CIP_SYT_MASK)); 490 491 if (!(s->flags & CIP_DBC_IS_END_EVENT)) 492 s->data_block_counter = 493 (s->data_block_counter + data_blocks) & 0xff; 494 payload_length = 8 + data_blocks * 4 * s->data_block_quadlets; 495 496 trace_out_packet(s, cycle, buffer, payload_length, data_blocks, index); 497 498 if (queue_out_packet(s, payload_length) < 0) 499 return -EIO; 500 501 pcm = READ_ONCE(s->pcm); 502 if (pcm && pcm_frames > 0) 503 update_pcm_pointers(s, pcm, pcm_frames); 504 505 /* No need to return the number of handled data blocks. */ 506 return 0; 507 } 508 509 static int handle_out_packet_without_header(struct amdtp_stream *s, 510 unsigned int payload_length, unsigned int cycle, 511 unsigned int index) 512 { 513 __be32 *buffer; 514 unsigned int syt; 515 unsigned int data_blocks; 516 unsigned int pcm_frames; 517 struct snd_pcm_substream *pcm; 518 519 buffer = s->buffer.packets[s->packet_index].buffer; 520 syt = calculate_syt(s, cycle); 521 data_blocks = calculate_data_blocks(s, syt); 522 pcm_frames = s->process_data_blocks(s, buffer, data_blocks, &syt); 523 s->data_block_counter = (s->data_block_counter + data_blocks) & 0xff; 524 525 payload_length = data_blocks * 4 * s->data_block_quadlets; 526 527 trace_out_packet_without_header(s, cycle, payload_length, data_blocks, 528 index); 529 530 if (queue_out_packet(s, payload_length) < 0) 531 return -EIO; 532 533 pcm = READ_ONCE(s->pcm); 534 if (pcm && pcm_frames > 0) 535 update_pcm_pointers(s, pcm, pcm_frames); 536 537 /* No need to return the number of handled data blocks. */ 538 return 0; 539 } 540 541 static int handle_in_packet(struct amdtp_stream *s, 542 unsigned int payload_length, unsigned int cycle, 543 unsigned int index) 544 { 545 __be32 *buffer; 546 u32 cip_header[2]; 547 unsigned int sph, fmt, fdf, syt; 548 unsigned int data_block_quadlets, data_block_counter, dbc_interval; 549 unsigned int data_blocks; 550 struct snd_pcm_substream *pcm; 551 unsigned int pcm_frames; 552 bool lost; 553 554 buffer = s->buffer.packets[s->packet_index].buffer; 555 cip_header[0] = be32_to_cpu(buffer[0]); 556 cip_header[1] = be32_to_cpu(buffer[1]); 557 558 /* 559 * This module supports 'Two-quadlet CIP header with SYT field'. 560 * For convenience, also check FMT field is AM824 or not. 561 */ 562 if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) || 563 ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) && 564 (!(s->flags & CIP_HEADER_WITHOUT_EOH))) { 565 dev_info_ratelimited(&s->unit->device, 566 "Invalid CIP header for AMDTP: %08X:%08X\n", 567 cip_header[0], cip_header[1]); 568 data_blocks = 0; 569 pcm_frames = 0; 570 goto end; 571 } 572 573 /* Check valid protocol or not. */ 574 sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT; 575 fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT; 576 if (sph != s->sph || fmt != s->fmt) { 577 dev_info_ratelimited(&s->unit->device, 578 "Detect unexpected protocol: %08x %08x\n", 579 cip_header[0], cip_header[1]); 580 data_blocks = 0; 581 pcm_frames = 0; 582 goto end; 583 } 584 585 /* Calculate data blocks */ 586 fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT; 587 if (payload_length < 12 || 588 (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) { 589 data_blocks = 0; 590 } else { 591 data_block_quadlets = 592 (cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT; 593 /* avoid division by zero */ 594 if (data_block_quadlets == 0) { 595 dev_err(&s->unit->device, 596 "Detect invalid value in dbs field: %08X\n", 597 cip_header[0]); 598 return -EPROTO; 599 } 600 if (s->flags & CIP_WRONG_DBS) 601 data_block_quadlets = s->data_block_quadlets; 602 603 data_blocks = (payload_length / 4 - 2) / 604 data_block_quadlets; 605 } 606 607 /* Check data block counter continuity */ 608 data_block_counter = cip_header[0] & CIP_DBC_MASK; 609 if (data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) && 610 s->data_block_counter != UINT_MAX) 611 data_block_counter = s->data_block_counter; 612 613 if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && 614 data_block_counter == s->tx_first_dbc) || 615 s->data_block_counter == UINT_MAX) { 616 lost = false; 617 } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) { 618 lost = data_block_counter != s->data_block_counter; 619 } else { 620 if (data_blocks > 0 && s->tx_dbc_interval > 0) 621 dbc_interval = s->tx_dbc_interval; 622 else 623 dbc_interval = data_blocks; 624 625 lost = data_block_counter != 626 ((s->data_block_counter + dbc_interval) & 0xff); 627 } 628 629 if (lost) { 630 dev_err(&s->unit->device, 631 "Detect discontinuity of CIP: %02X %02X\n", 632 s->data_block_counter, data_block_counter); 633 return -EIO; 634 } 635 636 trace_in_packet(s, cycle, buffer, payload_length, data_blocks, index); 637 638 syt = be32_to_cpu(buffer[1]) & CIP_SYT_MASK; 639 pcm_frames = s->process_data_blocks(s, buffer + 2, data_blocks, &syt); 640 641 if (s->flags & CIP_DBC_IS_END_EVENT) 642 s->data_block_counter = data_block_counter; 643 else 644 s->data_block_counter = 645 (data_block_counter + data_blocks) & 0xff; 646 end: 647 if (queue_in_packet(s) < 0) 648 return -EIO; 649 650 pcm = READ_ONCE(s->pcm); 651 if (pcm && pcm_frames > 0) 652 update_pcm_pointers(s, pcm, pcm_frames); 653 654 return 0; 655 } 656 657 static int handle_in_packet_without_header(struct amdtp_stream *s, 658 unsigned int payload_length, unsigned int cycle, 659 unsigned int index) 660 { 661 __be32 *buffer; 662 unsigned int data_blocks; 663 struct snd_pcm_substream *pcm; 664 unsigned int pcm_frames; 665 666 buffer = s->buffer.packets[s->packet_index].buffer; 667 data_blocks = payload_length / sizeof(__be32) / s->data_block_quadlets; 668 669 trace_in_packet_without_header(s, cycle, payload_length, data_blocks, 670 index); 671 672 pcm_frames = s->process_data_blocks(s, buffer, data_blocks, NULL); 673 s->data_block_counter = (s->data_block_counter + data_blocks) & 0xff; 674 675 if (queue_in_packet(s) < 0) 676 return -EIO; 677 678 pcm = READ_ONCE(s->pcm); 679 if (pcm && pcm_frames > 0) 680 update_pcm_pointers(s, pcm, pcm_frames); 681 682 return 0; 683 } 684 685 /* 686 * In CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent second. On 687 * the other hand, in DMA descriptors of 1394 OHCI, 3 bits are used to represent 688 * it. Thus, via Linux firewire subsystem, we can get the 3 bits for second. 689 */ 690 static inline u32 compute_cycle_count(u32 tstamp) 691 { 692 return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff); 693 } 694 695 static inline u32 increment_cycle_count(u32 cycle, unsigned int addend) 696 { 697 cycle += addend; 698 if (cycle >= 8 * CYCLES_PER_SECOND) 699 cycle -= 8 * CYCLES_PER_SECOND; 700 return cycle; 701 } 702 703 static void out_stream_callback(struct fw_iso_context *context, u32 tstamp, 704 size_t header_length, void *header, 705 void *private_data) 706 { 707 struct amdtp_stream *s = private_data; 708 unsigned int i, packets = header_length / 4; 709 u32 cycle; 710 711 if (s->packet_index < 0) 712 return; 713 714 cycle = compute_cycle_count(tstamp); 715 716 /* Align to actual cycle count for the last packet. */ 717 cycle = increment_cycle_count(cycle, QUEUE_LENGTH - packets); 718 719 for (i = 0; i < packets; ++i) { 720 cycle = increment_cycle_count(cycle, 1); 721 if (s->handle_packet(s, 0, cycle, i) < 0) { 722 s->packet_index = -1; 723 if (in_interrupt()) 724 amdtp_stream_pcm_abort(s); 725 WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN); 726 return; 727 } 728 } 729 730 fw_iso_context_queue_flush(s->context); 731 } 732 733 static void in_stream_callback(struct fw_iso_context *context, u32 tstamp, 734 size_t header_length, void *header, 735 void *private_data) 736 { 737 struct amdtp_stream *s = private_data; 738 unsigned int i, packets; 739 unsigned int payload_length, max_payload_length; 740 __be32 *ctx_header = header; 741 742 if (s->packet_index < 0) 743 return; 744 745 /* The number of packets in buffer */ 746 packets = header_length / IR_HEADER_SIZE; 747 748 /* For buffer-over-run prevention. */ 749 max_payload_length = s->max_payload_length; 750 751 for (i = 0; i < packets; i++) { 752 u32 iso_header = be32_to_cpu(ctx_header[0]); 753 unsigned int cycle; 754 755 tstamp = be32_to_cpu(ctx_header[1]) & HEADER_TSTAMP_MASK; 756 cycle = compute_cycle_count(tstamp); 757 758 /* The number of bytes in this packet */ 759 payload_length = iso_header >> ISO_DATA_LENGTH_SHIFT; 760 if (payload_length > max_payload_length) { 761 dev_err(&s->unit->device, 762 "Detect jumbo payload: %04x %04x\n", 763 payload_length, max_payload_length); 764 break; 765 } 766 767 if (s->handle_packet(s, payload_length, cycle, i) < 0) 768 break; 769 770 ctx_header += IR_HEADER_SIZE / sizeof(__be32); 771 } 772 773 /* Queueing error or detecting invalid payload. */ 774 if (i < packets) { 775 s->packet_index = -1; 776 if (in_interrupt()) 777 amdtp_stream_pcm_abort(s); 778 WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN); 779 return; 780 } 781 782 fw_iso_context_queue_flush(s->context); 783 } 784 785 /* this is executed one time */ 786 static void amdtp_stream_first_callback(struct fw_iso_context *context, 787 u32 tstamp, size_t header_length, 788 void *header, void *private_data) 789 { 790 struct amdtp_stream *s = private_data; 791 __be32 *ctx_header = header; 792 u32 cycle; 793 unsigned int packets; 794 795 /* 796 * For in-stream, first packet has come. 797 * For out-stream, prepared to transmit first packet 798 */ 799 s->callbacked = true; 800 wake_up(&s->callback_wait); 801 802 if (s->direction == AMDTP_IN_STREAM) { 803 tstamp = be32_to_cpu(ctx_header[1]) & HEADER_TSTAMP_MASK; 804 cycle = compute_cycle_count(tstamp); 805 806 context->callback.sc = in_stream_callback; 807 if (s->flags & CIP_NO_HEADER) 808 s->handle_packet = handle_in_packet_without_header; 809 else 810 s->handle_packet = handle_in_packet; 811 } else { 812 packets = header_length / 4; 813 cycle = compute_cycle_count(tstamp); 814 cycle = increment_cycle_count(cycle, QUEUE_LENGTH - packets); 815 context->callback.sc = out_stream_callback; 816 if (s->flags & CIP_NO_HEADER) 817 s->handle_packet = handle_out_packet_without_header; 818 else 819 s->handle_packet = handle_out_packet; 820 } 821 822 s->start_cycle = cycle; 823 824 context->callback.sc(context, tstamp, header_length, header, s); 825 } 826 827 /** 828 * amdtp_stream_start - start transferring packets 829 * @s: the AMDTP stream to start 830 * @channel: the isochronous channel on the bus 831 * @speed: firewire speed code 832 * 833 * The stream cannot be started until it has been configured with 834 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI 835 * device can be started. 836 */ 837 int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed) 838 { 839 static const struct { 840 unsigned int data_block; 841 unsigned int syt_offset; 842 } initial_state[] = { 843 [CIP_SFC_32000] = { 4, 3072 }, 844 [CIP_SFC_48000] = { 6, 1024 }, 845 [CIP_SFC_96000] = { 12, 1024 }, 846 [CIP_SFC_192000] = { 24, 1024 }, 847 [CIP_SFC_44100] = { 0, 67 }, 848 [CIP_SFC_88200] = { 0, 67 }, 849 [CIP_SFC_176400] = { 0, 67 }, 850 }; 851 unsigned int header_size; 852 enum dma_data_direction dir; 853 int type, tag, err; 854 855 mutex_lock(&s->mutex); 856 857 if (WARN_ON(amdtp_stream_running(s) || 858 (s->data_block_quadlets < 1))) { 859 err = -EBADFD; 860 goto err_unlock; 861 } 862 863 if (s->direction == AMDTP_IN_STREAM) 864 s->data_block_counter = UINT_MAX; 865 else 866 s->data_block_counter = 0; 867 s->data_block_state = initial_state[s->sfc].data_block; 868 s->syt_offset_state = initial_state[s->sfc].syt_offset; 869 s->last_syt_offset = TICKS_PER_CYCLE; 870 871 /* initialize packet buffer */ 872 if (s->direction == AMDTP_IN_STREAM) { 873 dir = DMA_FROM_DEVICE; 874 type = FW_ISO_CONTEXT_RECEIVE; 875 header_size = IR_HEADER_SIZE; 876 } else { 877 dir = DMA_TO_DEVICE; 878 type = FW_ISO_CONTEXT_TRANSMIT; 879 header_size = OUT_PACKET_HEADER_SIZE; 880 } 881 err = iso_packets_buffer_init(&s->buffer, s->unit, QUEUE_LENGTH, 882 amdtp_stream_get_max_payload(s), dir); 883 if (err < 0) 884 goto err_unlock; 885 886 s->context = fw_iso_context_create(fw_parent_device(s->unit)->card, 887 type, channel, speed, header_size, 888 amdtp_stream_first_callback, s); 889 if (IS_ERR(s->context)) { 890 err = PTR_ERR(s->context); 891 if (err == -EBUSY) 892 dev_err(&s->unit->device, 893 "no free stream on this controller\n"); 894 goto err_buffer; 895 } 896 897 amdtp_stream_update(s); 898 899 if (s->direction == AMDTP_IN_STREAM) 900 s->max_payload_length = amdtp_stream_get_max_payload(s); 901 902 if (s->flags & CIP_NO_HEADER) 903 s->tag = TAG_NO_CIP_HEADER; 904 else 905 s->tag = TAG_CIP; 906 907 s->packet_index = 0; 908 do { 909 if (s->direction == AMDTP_IN_STREAM) 910 err = queue_in_packet(s); 911 else 912 err = queue_out_packet(s, 0); 913 if (err < 0) 914 goto err_context; 915 } while (s->packet_index > 0); 916 917 /* NOTE: TAG1 matches CIP. This just affects in stream. */ 918 tag = FW_ISO_CONTEXT_MATCH_TAG1; 919 if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER)) 920 tag |= FW_ISO_CONTEXT_MATCH_TAG0; 921 922 s->callbacked = false; 923 err = fw_iso_context_start(s->context, -1, 0, tag); 924 if (err < 0) 925 goto err_context; 926 927 mutex_unlock(&s->mutex); 928 929 return 0; 930 931 err_context: 932 fw_iso_context_destroy(s->context); 933 s->context = ERR_PTR(-1); 934 err_buffer: 935 iso_packets_buffer_destroy(&s->buffer, s->unit); 936 err_unlock: 937 mutex_unlock(&s->mutex); 938 939 return err; 940 } 941 EXPORT_SYMBOL(amdtp_stream_start); 942 943 /** 944 * amdtp_stream_pcm_pointer - get the PCM buffer position 945 * @s: the AMDTP stream that transports the PCM data 946 * 947 * Returns the current buffer position, in frames. 948 */ 949 unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s) 950 { 951 /* 952 * This function is called in software IRQ context of period_tasklet or 953 * process context. 954 * 955 * When the software IRQ context was scheduled by software IRQ context 956 * of IR/IT contexts, queued packets were already handled. Therefore, 957 * no need to flush the queue in buffer anymore. 958 * 959 * When the process context reach here, some packets will be already 960 * queued in the buffer. These packets should be handled immediately 961 * to keep better granularity of PCM pointer. 962 * 963 * Later, the process context will sometimes schedules software IRQ 964 * context of the period_tasklet. Then, no need to flush the queue by 965 * the same reason as described for IR/IT contexts. 966 */ 967 if (!in_interrupt() && amdtp_stream_running(s)) 968 fw_iso_context_flush_completions(s->context); 969 970 return READ_ONCE(s->pcm_buffer_pointer); 971 } 972 EXPORT_SYMBOL(amdtp_stream_pcm_pointer); 973 974 /** 975 * amdtp_stream_pcm_ack - acknowledge queued PCM frames 976 * @s: the AMDTP stream that transfers the PCM frames 977 * 978 * Returns zero always. 979 */ 980 int amdtp_stream_pcm_ack(struct amdtp_stream *s) 981 { 982 /* 983 * Process isochronous packets for recent isochronous cycle to handle 984 * queued PCM frames. 985 */ 986 if (amdtp_stream_running(s)) 987 fw_iso_context_flush_completions(s->context); 988 989 return 0; 990 } 991 EXPORT_SYMBOL(amdtp_stream_pcm_ack); 992 993 /** 994 * amdtp_stream_update - update the stream after a bus reset 995 * @s: the AMDTP stream 996 */ 997 void amdtp_stream_update(struct amdtp_stream *s) 998 { 999 /* Precomputing. */ 1000 WRITE_ONCE(s->source_node_id_field, 1001 (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK); 1002 } 1003 EXPORT_SYMBOL(amdtp_stream_update); 1004 1005 /** 1006 * amdtp_stream_stop - stop sending packets 1007 * @s: the AMDTP stream to stop 1008 * 1009 * All PCM and MIDI devices of the stream must be stopped before the stream 1010 * itself can be stopped. 1011 */ 1012 void amdtp_stream_stop(struct amdtp_stream *s) 1013 { 1014 mutex_lock(&s->mutex); 1015 1016 if (!amdtp_stream_running(s)) { 1017 mutex_unlock(&s->mutex); 1018 return; 1019 } 1020 1021 tasklet_kill(&s->period_tasklet); 1022 fw_iso_context_stop(s->context); 1023 fw_iso_context_destroy(s->context); 1024 s->context = ERR_PTR(-1); 1025 iso_packets_buffer_destroy(&s->buffer, s->unit); 1026 1027 s->callbacked = false; 1028 1029 mutex_unlock(&s->mutex); 1030 } 1031 EXPORT_SYMBOL(amdtp_stream_stop); 1032 1033 /** 1034 * amdtp_stream_pcm_abort - abort the running PCM device 1035 * @s: the AMDTP stream about to be stopped 1036 * 1037 * If the isochronous stream needs to be stopped asynchronously, call this 1038 * function first to stop the PCM device. 1039 */ 1040 void amdtp_stream_pcm_abort(struct amdtp_stream *s) 1041 { 1042 struct snd_pcm_substream *pcm; 1043 1044 pcm = READ_ONCE(s->pcm); 1045 if (pcm) 1046 snd_pcm_stop_xrun(pcm); 1047 } 1048 EXPORT_SYMBOL(amdtp_stream_pcm_abort); 1049