/*
 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
 * with Common Isochronous Packet (IEC 61883-1) headers
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 * Licensed under the terms of the GNU General Public License, version 2.
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "amdtp-stream.h"

#define TICKS_PER_CYCLE		3072
#define CYCLES_PER_SECOND	8000
#define TICKS_PER_SECOND	(TICKS_PER_CYCLE * CYCLES_PER_SECOND)

/* Always support the Linux tracing subsystem. */
#define CREATE_TRACE_POINTS
#include "amdtp-stream-trace.h"

#define TRANSFER_DELAY_TICKS	0x2e00 /* 479.17 microseconds */

/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT	16
#define TAG_NO_CIP_HEADER	0
#define TAG_CIP			1

/* common isochronous packet header parameters */
#define CIP_EOH_SHIFT		31
#define CIP_EOH			(1u << CIP_EOH_SHIFT)
#define CIP_EOH_MASK		0x80000000
#define CIP_SID_SHIFT		24
#define CIP_SID_MASK		0x3f000000
#define CIP_DBS_MASK		0x00ff0000
#define CIP_DBS_SHIFT		16
#define CIP_SPH_MASK		0x00000400
#define CIP_SPH_SHIFT		10
#define CIP_DBC_MASK		0x000000ff
#define CIP_FMT_SHIFT		24
#define CIP_FMT_MASK		0x3f000000
#define CIP_FDF_MASK		0x00ff0000
#define CIP_FDF_SHIFT		16
#define CIP_SYT_MASK		0x0000ffff
#define CIP_SYT_NO_INFO		0xffff

/* Audio and Music transfer protocol specific parameters */
#define CIP_FMT_AM		0x10
#define AMDTP_FDF_NO_DATA	0xff

/* TODO: make these configurable */
#define INTERRUPT_INTERVAL	16
#define QUEUE_LENGTH		48

#define IR_HEADER_SIZE		8	// For header and timestamp.
#define OUT_PACKET_HEADER_SIZE	0
#define HEADER_TSTAMP_MASK	0x0000ffff

static void pcm_period_tasklet(unsigned long data);

/**
 * amdtp_stream_init - initialize an AMDTP stream structure
 * @s: the AMDTP stream to initialize
 * @unit: the target of the stream
 * @dir: the direction of the stream
 * @flags: the packet transmission method to use
 * @fmt: the value of the fmt field in the CIP header
 * @process_data_blocks: callback handler to process data blocks
 * @protocol_size: the size to allocate for protocol-specific data
 */
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
		      enum amdtp_stream_direction dir, enum cip_flags flags,
		      unsigned int fmt,
		      amdtp_stream_process_data_blocks_t process_data_blocks,
		      unsigned int protocol_size)
{
	if (process_data_blocks == NULL)
		return -EINVAL;

	s->protocol = kzalloc(protocol_size, GFP_KERNEL);
	if (!s->protocol)
		return -ENOMEM;

	s->unit = unit;
	s->direction = dir;
	s->flags = flags;
	s->context = ERR_PTR(-1);
	mutex_init(&s->mutex);
	tasklet_init(&s->period_tasklet, pcm_period_tasklet, (unsigned long)s);
	s->packet_index = 0;

	init_waitqueue_head(&s->callback_wait);
	s->callbacked = false;

	s->fmt = fmt;
	s->process_data_blocks = process_data_blocks;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);
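/*
 * A rough sketch of the call sequence expected from a unit driver
 * (hypothetical code for illustration only; the callback name
 * process_am824_blocks and struct my_protocol are made up, and error
 * handling is omitted):
 *
 *	err = amdtp_stream_init(&s, unit, AMDTP_OUT_STREAM, CIP_BLOCKING,
 *				CIP_FMT_AM, process_am824_blocks,
 *				sizeof(struct my_protocol));
 *	err = amdtp_stream_set_parameters(&s, 48000, data_block_quadlets);
 *	err = amdtp_stream_start(&s, isoc_channel, SCODE_400);
 *	...
 *	amdtp_stream_stop(&s);
 *	amdtp_stream_destroy(&s);
 */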
/**
 * amdtp_stream_destroy - free stream resources
 * @s: the AMDTP stream to destroy
 */
void amdtp_stream_destroy(struct amdtp_stream *s)
{
	/* Not initialized. */
	if (s->protocol == NULL)
		return;

	WARN_ON(amdtp_stream_running(s));
	kfree(s->protocol);
	mutex_destroy(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_destroy);

const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  = 8,
	[CIP_SFC_44100]  = 8,
	[CIP_SFC_48000]  = 8,
	[CIP_SFC_88200]  = 16,
	[CIP_SFC_96000]  = 16,
	[CIP_SFC_176400] = 32,
	[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);

const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  = 32000,
	[CIP_SFC_44100]  = 44100,
	[CIP_SFC_48000]  = 48000,
	[CIP_SFC_88200]  = 88200,
	[CIP_SFC_96000]  = 96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);

static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
				    struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *s = hw_param_interval(params, rule->var);
	const struct snd_interval *r =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval t = {0};
	unsigned int step = 0;
	int i;

	for (i = 0; i < CIP_SFC_COUNT; ++i) {
		if (snd_interval_test(r, amdtp_rate_table[i]))
			step = max(step, amdtp_syt_intervals[i]);
	}

	t.min = roundup(s->min, step);
	t.max = rounddown(s->max, step);
	t.integer = 1;

	return snd_interval_refine(s, &t);
}
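/*
 * Worked example for the rule above (numbers chosen for illustration):
 * if the rate interval still allows both 48000 and 96000, step becomes
 * max(8, 16) = 16 frames, so a requested period-size interval of
 * [100, 1000] frames is refined to [roundup(100, 16), rounddown(1000, 16)]
 * = [112, 992]. The same refinement is applied to the buffer size by
 * amdtp_stream_add_pcm_hw_constraints() below.
 */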
/**
 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
 * @s: the AMDTP stream, which must be initialized.
 * @runtime: the PCM substream runtime
 */
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
					struct snd_pcm_runtime *runtime)
{
	struct snd_pcm_hardware *hw = &runtime->hw;
	int err;

	hw->info = SNDRV_PCM_INFO_BATCH |
		   SNDRV_PCM_INFO_BLOCK_TRANSFER |
		   SNDRV_PCM_INFO_INTERLEAVED |
		   SNDRV_PCM_INFO_JOINT_DUPLEX |
		   SNDRV_PCM_INFO_MMAP |
		   SNDRV_PCM_INFO_MMAP_VALID;

	/* SNDRV_PCM_INFO_BATCH */
	hw->periods_min = 2;
	hw->periods_max = UINT_MAX;

	/* bytes for a frame */
	hw->period_bytes_min = 4 * hw->channels_max;

	/* Just to prevent from allocating too many pages. */
	hw->period_bytes_max = hw->period_bytes_min * 2048;
	hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;

	/*
	 * Currently firewire-lib processes 16 packets in one software
	 * interrupt callback. This equals 2 msec, but the actual interval of
	 * the interrupts has jitter.
	 * Additionally, even if a constraint is added to fit the period size
	 * to 2 msec, the calculated frames per period do not correspond to
	 * exactly 2 msec, depending on the sampling rate.
	 * Anyway, the interval at which snd_pcm_period_elapsed() is called
	 * cannot be exactly 2 msec. Use 5 msec here for a safe period
	 * interrupt.
	 */
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   5000, UINT_MAX);
	if (err < 0)
		goto end;

	/* Non-blocking streams have no more constraints. */
	if (!(s->flags & CIP_BLOCKING))
		goto end;

	/*
	 * One AMDTP packet can include some frames. In blocking mode, the
	 * number equals SYT_INTERVAL, i.e. 8, 16 or 32, depending on the
	 * sampling rate. For an accurate period interrupt, it's preferable
	 * to align the period/buffer sizes to the current SYT_INTERVAL.
	 */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
end:
	return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);

/**
 * amdtp_stream_set_parameters - set stream parameters
 * @s: the AMDTP stream to configure
 * @rate: the sample rate
 * @data_block_quadlets: the size of a data block in quadlet units
 *
 * The parameters must be set before the stream is started, and must not be
 * changed while the stream is running.
 */
int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
				unsigned int data_block_quadlets)
{
	unsigned int sfc;

	for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
		if (amdtp_rate_table[sfc] == rate)
			break;
	}
	if (sfc == ARRAY_SIZE(amdtp_rate_table))
		return -EINVAL;

	s->sfc = sfc;
	s->data_block_quadlets = data_block_quadlets;
	s->syt_interval = amdtp_syt_intervals[sfc];

	/* default buffering in the device */
	s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
	if (s->flags & CIP_BLOCKING)
		/* additional buffering needed to adjust for no-data packets */
		s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);

/**
 * amdtp_stream_get_max_payload - get the stream's packet size
 * @s: the AMDTP stream
 *
 * This function must not be called before the stream has been configured
 * with amdtp_stream_set_parameters().
 */
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
	unsigned int multiplier = 1;
	unsigned int header_size = 0;

	if (s->flags & CIP_JUMBO_PAYLOAD)
		multiplier = 5;
	if (!(s->flags & CIP_NO_HEADER))
		header_size = 8;

	return header_size +
	       s->syt_interval * s->data_block_quadlets * 4 * multiplier;
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);
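/*
 * Worked example for the maximum payload (numbers chosen for illustration):
 * a blocking stream at 48.0 kHz has syt_interval = 8; with, say, 10 quadlets
 * per data block the maximum payload is 8 (CIP header) + 8 * 10 * 4 = 328
 * bytes per packet, and the data portion grows fivefold when
 * CIP_JUMBO_PAYLOAD is set.
 */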
/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * This function should be called from the PCM device's .prepare callback.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	tasklet_kill(&s->period_tasklet);
	s->pcm_buffer_pointer = 0;
	s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);

static unsigned int calculate_data_blocks(struct amdtp_stream *s,
					  unsigned int syt)
{
	unsigned int phase, data_blocks;

	/* Blocking mode. */
	if (s->flags & CIP_BLOCKING) {
		/* This module generates an empty packet for 'no data'. */
		if (syt == CIP_SYT_NO_INFO)
			data_blocks = 0;
		else
			data_blocks = s->syt_interval;
	/* Non-blocking mode. */
	} else {
		if (!cip_sfc_is_base_44100(s->sfc)) {
			/* Sample_rate / 8000 is an integer, and precomputed. */
			data_blocks = s->data_block_state;
		} else {
			phase = s->data_block_state;

			/*
			 * This calculates the number of data blocks per
			 * packet so that
			 * 1) the overall rate is correct and exactly
			 *    synchronized to the bus clock, and
			 * 2) packets with a rounded-up number of blocks occur
			 *    as early as possible in the sequence (to prevent
			 *    underruns of the device's buffer).
			 */
			if (s->sfc == CIP_SFC_44100)
				/* 6 6 5 6 5 6 5 ... */
				data_blocks = 5 + ((phase & 1) ^
						   (phase == 0 || phase >= 40));
			else
				/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
				data_blocks = 11 * (s->sfc >> 1) + (phase == 0);
			if (++phase >= (80 >> (s->sfc >> 1)))
				phase = 0;
			s->data_block_state = phase;
		}
	}

	return data_blocks;
}

static unsigned int calculate_syt(struct amdtp_stream *s,
				  unsigned int cycle)
{
	unsigned int syt_offset, phase, index, syt;

	if (s->last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(s->sfc))
			syt_offset = s->last_syt_offset + s->syt_offset_state;
		else {
			/*
			 * The time, in ticks, of the n'th SYT_INTERVAL sample
			 * is:
			 *   n * SYT_INTERVAL * 24576000 / sample_rate
			 * Modulo TICKS_PER_CYCLE, the difference between
			 * successive elements is about 1386.23. Rounding the
			 * results of this formula to the SYT precision results
			 * in a sequence of differences that begins with:
			 *   1386 1386 1387 1386 1386 1386 1387 1386 1386 ...
			 * This code generates _exactly_ the same sequence.
			 */
			phase = s->syt_offset_state;
			index = phase % 13;
			syt_offset = s->last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			s->syt_offset_state = phase;
		}
	} else
		syt_offset = s->last_syt_offset - TICKS_PER_CYCLE;
	s->last_syt_offset = syt_offset;

	if (syt_offset < TICKS_PER_CYCLE) {
		syt_offset += s->transfer_delay;
		syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12;
		syt += syt_offset % TICKS_PER_CYCLE;

		return syt & CIP_SYT_MASK;
	} else {
		return CIP_SYT_NO_INFO;
	}
}
static void update_pcm_pointers(struct amdtp_stream *s,
				struct snd_pcm_substream *pcm,
				unsigned int frames)
{
	unsigned int ptr;

	ptr = s->pcm_buffer_pointer + frames;
	if (ptr >= pcm->runtime->buffer_size)
		ptr -= pcm->runtime->buffer_size;
	WRITE_ONCE(s->pcm_buffer_pointer, ptr);

	s->pcm_period_pointer += frames;
	if (s->pcm_period_pointer >= pcm->runtime->period_size) {
		s->pcm_period_pointer -= pcm->runtime->period_size;
		tasklet_hi_schedule(&s->period_tasklet);
	}
}

static void pcm_period_tasklet(unsigned long data)
{
	struct amdtp_stream *s = (void *)data;
	struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);

	if (pcm)
		snd_pcm_period_elapsed(pcm);
}

static int queue_packet(struct amdtp_stream *s, unsigned int header_length,
			unsigned int payload_length)
{
	struct fw_iso_packet p = {0};
	int err = 0;

	if (IS_ERR(s->context))
		goto end;

	p.interrupt = IS_ALIGNED(s->packet_index + 1, INTERRUPT_INTERVAL);
	p.tag = s->tag;
	p.header_length = header_length;
	if (payload_length > 0)
		p.payload_length = payload_length;
	else
		p.skip = true;
	err = fw_iso_context_queue(s->context, &p, &s->buffer.iso_buffer,
				   s->buffer.packets[s->packet_index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		goto end;
	}

	if (++s->packet_index >= QUEUE_LENGTH)
		s->packet_index = 0;
end:
	return err;
}

static inline int queue_out_packet(struct amdtp_stream *s,
				   unsigned int payload_length)
{
	return queue_packet(s, OUT_PACKET_HEADER_SIZE, payload_length);
}

static inline int queue_in_packet(struct amdtp_stream *s)
{
	return queue_packet(s, IR_HEADER_SIZE, s->max_payload_length);
}

static int handle_out_packet(struct amdtp_stream *s,
			     unsigned int payload_length, unsigned int cycle,
			     unsigned int index)
{
	__be32 *buffer;
	unsigned int syt;
	unsigned int data_blocks;
	unsigned int pcm_frames;
	struct snd_pcm_substream *pcm;

	buffer = s->buffer.packets[s->packet_index].buffer;
	syt = calculate_syt(s, cycle);
	data_blocks = calculate_data_blocks(s, syt);
	pcm_frames = s->process_data_blocks(s, buffer + 2, data_blocks, &syt);

	if (s->flags & CIP_DBC_IS_END_EVENT)
		s->data_block_counter =
				(s->data_block_counter + data_blocks) & 0xff;

	buffer[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
				(s->data_block_quadlets << CIP_DBS_SHIFT) |
				((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
				s->data_block_counter);
	buffer[1] = cpu_to_be32(CIP_EOH |
				((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
				((s->fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
				(syt & CIP_SYT_MASK));

	if (!(s->flags & CIP_DBC_IS_END_EVENT))
		s->data_block_counter =
				(s->data_block_counter + data_blocks) & 0xff;

	payload_length = 8 + data_blocks * 4 * s->data_block_quadlets;

	trace_out_packet(s, cycle, buffer, payload_length, index);

	if (queue_out_packet(s, payload_length) < 0)
		return -EIO;

	pcm = READ_ONCE(s->pcm);
	if (pcm && pcm_frames > 0)
		update_pcm_pointers(s, pcm, pcm_frames);

	/* No need to return the number of handled data blocks. */
	return 0;
}
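/*
 * For reference, the layout of the two CIP quadlets written by
 * handle_out_packet() above, as implied by the masks and shifts defined at
 * the top of this file (bit 31 down to bit 0; unnamed bits left out):
 *
 *	quadlet 0: EOH = 0 | SID (6 bits) | DBS (8 bits) | ... | SPH | ... | DBC (8 bits)
 *	quadlet 1: EOH = 1 | FMT (6 bits) | FDF (8 bits) | SYT (16 bits)
 *
 * SID carries the source node ID, DBS the data block size in quadlets, and
 * DBC the running data block counter.
 */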
static int handle_out_packet_without_header(struct amdtp_stream *s,
			unsigned int payload_length, unsigned int cycle,
			unsigned int index)
{
	__be32 *buffer;
	unsigned int syt;
	unsigned int data_blocks;
	unsigned int pcm_frames;
	struct snd_pcm_substream *pcm;

	buffer = s->buffer.packets[s->packet_index].buffer;
	syt = calculate_syt(s, cycle);
	data_blocks = calculate_data_blocks(s, syt);
	pcm_frames = s->process_data_blocks(s, buffer, data_blocks, &syt);
	s->data_block_counter = (s->data_block_counter + data_blocks) & 0xff;

	payload_length = data_blocks * 4 * s->data_block_quadlets;

	trace_out_packet_without_header(s, cycle, payload_length, data_blocks,
					index);

	if (queue_out_packet(s, payload_length) < 0)
		return -EIO;

	pcm = READ_ONCE(s->pcm);
	if (pcm && pcm_frames > 0)
		update_pcm_pointers(s, pcm, pcm_frames);

	/* No need to return the number of handled data blocks. */
	return 0;
}

static int handle_in_packet(struct amdtp_stream *s,
			    unsigned int payload_length, unsigned int cycle,
			    unsigned int index)
{
	__be32 *buffer;
	u32 cip_header[2];
	unsigned int sph, fmt, fdf, syt;
	unsigned int data_block_quadlets, data_block_counter, dbc_interval;
	unsigned int data_blocks;
	struct snd_pcm_substream *pcm;
	unsigned int pcm_frames;
	bool lost;

	buffer = s->buffer.packets[s->packet_index].buffer;
	cip_header[0] = be32_to_cpu(buffer[0]);
	cip_header[1] = be32_to_cpu(buffer[1]);

	trace_in_packet(s, cycle, cip_header, payload_length, index);

	/*
	 * This module supports a 'two-quadlet CIP header with SYT field'.
	 * For convenience, also check whether the FMT field is AM824 or not.
	 */
	if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
	     ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
	    (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
		dev_info_ratelimited(&s->unit->device,
				"Invalid CIP header for AMDTP: %08X:%08X\n",
				cip_header[0], cip_header[1]);
		data_blocks = 0;
		pcm_frames = 0;
		goto end;
	}

	/* Check whether the protocol is valid or not. */
	sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
	fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
	if (sph != s->sph || fmt != s->fmt) {
		dev_info_ratelimited(&s->unit->device,
				     "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		data_blocks = 0;
		pcm_frames = 0;
		goto end;
	}

	/* Calculate data blocks */
	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_length < 12 ||
	    (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		data_blocks = 0;
	} else {
		data_block_quadlets =
			(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
		/* avoid division by zero */
		if (data_block_quadlets == 0) {
			dev_err(&s->unit->device,
				"Detect invalid value in dbs field: %08X\n",
				cip_header[0]);
			return -EPROTO;
		}
		if (s->flags & CIP_WRONG_DBS)
			data_block_quadlets = s->data_block_quadlets;

		data_blocks = (payload_length / 4 - 2) / data_block_quadlets;
	}

	/* Check data block counter continuity */
	data_block_counter = cip_header[0] & CIP_DBC_MASK;
	if (data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
	    s->data_block_counter != UINT_MAX)
		data_block_counter = s->data_block_counter;

	if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
	     data_block_counter == s->tx_first_dbc) ||
	    s->data_block_counter == UINT_MAX) {
		lost = false;
	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		lost = data_block_counter != s->data_block_counter;
	} else {
		if (data_blocks > 0 && s->tx_dbc_interval > 0)
			dbc_interval = s->tx_dbc_interval;
		else
			dbc_interval = data_blocks;

		lost = data_block_counter !=
		       ((s->data_block_counter + dbc_interval) & 0xff);
	}

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			s->data_block_counter, data_block_counter);
		return -EIO;
	}

	syt = be32_to_cpu(buffer[1]) & CIP_SYT_MASK;
	pcm_frames = s->process_data_blocks(s, buffer + 2, data_blocks, &syt);

	if (s->flags & CIP_DBC_IS_END_EVENT)
		s->data_block_counter = data_block_counter;
	else
		s->data_block_counter =
				(data_block_counter + data_blocks) & 0xff;
end:
	if (queue_in_packet(s) < 0)
		return -EIO;

	pcm = READ_ONCE(s->pcm);
	if (pcm && pcm_frames > 0)
		update_pcm_pointers(s, pcm, pcm_frames);

	return 0;
}
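/*
 * Worked example for the DBC continuity check above (numbers chosen for
 * illustration): with CIP_DBC_IS_END_EVENT clear, a packet whose DBC field
 * is 0x18 is accepted only if the previous packet left s->data_block_counter
 * at 0x18; the counter is then advanced by the number of data blocks in the
 * packet, wrapping at 8 bits (e.g. 0xfc plus 8 blocks gives 0x04).
 */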
static int handle_in_packet_without_header(struct amdtp_stream *s,
			unsigned int payload_length, unsigned int cycle,
			unsigned int index)
{
	__be32 *buffer;
	unsigned int payload_quadlets;
	unsigned int data_blocks;
	struct snd_pcm_substream *pcm;
	unsigned int pcm_frames;

	buffer = s->buffer.packets[s->packet_index].buffer;
	payload_quadlets = payload_length / 4;
	data_blocks = payload_quadlets / s->data_block_quadlets;

	trace_in_packet_without_header(s, cycle, payload_quadlets, data_blocks,
				       index);

	pcm_frames = s->process_data_blocks(s, buffer, data_blocks, NULL);
	s->data_block_counter = (s->data_block_counter + data_blocks) & 0xff;

	if (queue_in_packet(s) < 0)
		return -EIO;

	pcm = READ_ONCE(s->pcm);
	if (pcm && pcm_frames > 0)
		update_pcm_pointers(s, pcm, pcm_frames);

	return 0;
}

/*
 * In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the
 * second. On the other hand, in the DMA descriptors of 1394 OHCI, only 3 bits
 * are used for it. Thus, via the Linux FireWire subsystem, we get just those
 * 3 bits of the second.
 */
static inline u32 compute_cycle_count(u32 tstamp)
{
	return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
}

static inline u32 increment_cycle_count(u32 cycle, unsigned int addend)
{
	cycle += addend;
	if (cycle >= 8 * CYCLES_PER_SECOND)
		cycle -= 8 * CYCLES_PER_SECOND;
	return cycle;
}
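/*
 * Worked example for the helpers above (value chosen for illustration):
 * a timestamp of 0x47ff has second bits (0x47ff >> 13) & 0x07 = 2 and cycle
 * bits 0x47ff & 0x1fff = 2047, so compute_cycle_count() returns
 * 2 * 8000 + 2047 = 18047; increment_cycle_count() then wraps the count
 * modulo 8 * CYCLES_PER_SECOND = 64000.
 */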
static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
				size_t header_length, void *header,
				void *private_data)
{
	struct amdtp_stream *s = private_data;
	unsigned int i, packets = header_length / 4;
	u32 cycle;

	if (s->packet_index < 0)
		return;

	cycle = compute_cycle_count(tstamp);

	/* Align to the actual cycle count for the last packet. */
	cycle = increment_cycle_count(cycle, QUEUE_LENGTH - packets);

	for (i = 0; i < packets; ++i) {
		cycle = increment_cycle_count(cycle, 1);
		if (s->handle_packet(s, 0, cycle, i) < 0) {
			s->packet_index = -1;
			if (in_interrupt())
				amdtp_stream_pcm_abort(s);
			WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
			return;
		}
	}

	fw_iso_context_queue_flush(s->context);
}

static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
			       size_t header_length, void *header,
			       void *private_data)
{
	struct amdtp_stream *s = private_data;
	unsigned int i, packets;
	unsigned int payload_length, max_payload_length;
	__be32 *ctx_header = header;

	if (s->packet_index < 0)
		return;

	/* The number of packets in buffer */
	packets = header_length / IR_HEADER_SIZE;

	/* For buffer-over-run prevention. */
	max_payload_length = s->max_payload_length;

	for (i = 0; i < packets; i++) {
		u32 iso_header = be32_to_cpu(ctx_header[0]);
		unsigned int cycle;

		tstamp = be32_to_cpu(ctx_header[1]) & HEADER_TSTAMP_MASK;
		cycle = compute_cycle_count(tstamp);

		/* The number of bytes in this packet */
		payload_length = iso_header >> ISO_DATA_LENGTH_SHIFT;
		if (payload_length > max_payload_length) {
			dev_err(&s->unit->device,
				"Detect jumbo payload: %04x %04x\n",
				payload_length, max_payload_length);
			break;
		}

		if (s->handle_packet(s, payload_length, cycle, i) < 0)
			break;

		ctx_header += IR_HEADER_SIZE / sizeof(__be32);
	}

	/* Queueing error or detecting invalid payload. */
	if (i < packets) {
		s->packet_index = -1;
		if (in_interrupt())
			amdtp_stream_pcm_abort(s);
		WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
		return;
	}

	fw_iso_context_queue_flush(s->context);
}

/* this is executed one time */
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	__be32 *ctx_header = header;
	u32 cycle;
	unsigned int packets;

	/*
	 * For an in-stream, the first packet has arrived.
	 * For an out-stream, the stream is prepared to transmit its first
	 * packet.
	 */
	s->callbacked = true;
	wake_up(&s->callback_wait);

	if (s->direction == AMDTP_IN_STREAM) {
		tstamp = be32_to_cpu(ctx_header[1]) & HEADER_TSTAMP_MASK;
		cycle = compute_cycle_count(tstamp);

		context->callback.sc = in_stream_callback;
		if (s->flags & CIP_NO_HEADER)
			s->handle_packet = handle_in_packet_without_header;
		else
			s->handle_packet = handle_in_packet;
	} else {
		packets = header_length / 4;
		cycle = compute_cycle_count(tstamp);
		cycle = increment_cycle_count(cycle, QUEUE_LENGTH - packets);
		context->callback.sc = out_stream_callback;
		if (s->flags & CIP_NO_HEADER)
			s->handle_packet = handle_out_packet_without_header;
		else
			s->handle_packet = handle_out_packet;
	}

	s->start_cycle = cycle;

	context->callback.sc(context, tstamp, header_length, header, s);
}

/**
 * amdtp_stream_start - start transferring packets
 * @s: the AMDTP stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 *
 * The stream cannot be started until it has been configured with
 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
 * device can be started.
 */
int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
{
	static const struct {
		unsigned int data_block;
		unsigned int syt_offset;
	} initial_state[] = {
		[CIP_SFC_32000]  = {  4, 3072 },
		[CIP_SFC_48000]  = {  6, 1024 },
		[CIP_SFC_96000]  = { 12, 1024 },
		[CIP_SFC_192000] = { 24, 1024 },
		[CIP_SFC_44100]  = {  0,   67 },
		[CIP_SFC_88200]  = {  0,   67 },
		[CIP_SFC_176400] = {  0,   67 },
	};
	unsigned int header_size;
	enum dma_data_direction dir;
	int type, tag, err;

	mutex_lock(&s->mutex);

	if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1))) {
		err = -EBADFD;
		goto err_unlock;
	}

	if (s->direction == AMDTP_IN_STREAM)
		s->data_block_counter = UINT_MAX;
	else
		s->data_block_counter = 0;
	s->data_block_state = initial_state[s->sfc].data_block;
	s->syt_offset_state = initial_state[s->sfc].syt_offset;
	s->last_syt_offset = TICKS_PER_CYCLE;

	/* initialize packet buffer */
	if (s->direction == AMDTP_IN_STREAM) {
		dir = DMA_FROM_DEVICE;
		type = FW_ISO_CONTEXT_RECEIVE;
		header_size = IR_HEADER_SIZE;
	} else {
		dir = DMA_TO_DEVICE;
		type = FW_ISO_CONTEXT_TRANSMIT;
		header_size = OUT_PACKET_HEADER_SIZE;
	}
	err = iso_packets_buffer_init(&s->buffer, s->unit, QUEUE_LENGTH,
				      amdtp_stream_get_max_payload(s), dir);
	if (err < 0)
		goto err_unlock;

	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
					   type, channel, speed, header_size,
					   amdtp_stream_first_callback, s);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}

	amdtp_stream_update(s);

	if (s->direction == AMDTP_IN_STREAM)
		s->max_payload_length = amdtp_stream_get_max_payload(s);

	if (s->flags & CIP_NO_HEADER)
		s->tag = TAG_NO_CIP_HEADER;
	else
		s->tag = TAG_CIP;

	s->packet_index = 0;
	do {
		if (s->direction == AMDTP_IN_STREAM)
			err = queue_in_packet(s);
		else
			err = queue_out_packet(s, 0);
		if (err < 0)
			goto err_context;
	} while (s->packet_index > 0);

	/* NOTE: TAG1 matches CIP. This only affects the in-stream. */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->callbacked = false;
	err = fw_iso_context_start(s->context, -1, 0, tag);
	if (err < 0)
		goto err_context;

	mutex_unlock(&s->mutex);

	return 0;

err_context:
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}
EXPORT_SYMBOL(amdtp_stream_start);
960 * 961 * When the process context reach here, some packets will be already 962 * queued in the buffer. These packets should be handled immediately 963 * to keep better granularity of PCM pointer. 964 * 965 * Later, the process context will sometimes schedules software IRQ 966 * context of the period_tasklet. Then, no need to flush the queue by 967 * the same reason as described for IR/IT contexts. 968 */ 969 if (!in_interrupt() && amdtp_stream_running(s)) 970 fw_iso_context_flush_completions(s->context); 971 972 return READ_ONCE(s->pcm_buffer_pointer); 973 } 974 EXPORT_SYMBOL(amdtp_stream_pcm_pointer); 975 976 /** 977 * amdtp_stream_pcm_ack - acknowledge queued PCM frames 978 * @s: the AMDTP stream that transfers the PCM frames 979 * 980 * Returns zero always. 981 */ 982 int amdtp_stream_pcm_ack(struct amdtp_stream *s) 983 { 984 /* 985 * Process isochronous packets for recent isochronous cycle to handle 986 * queued PCM frames. 987 */ 988 if (amdtp_stream_running(s)) 989 fw_iso_context_flush_completions(s->context); 990 991 return 0; 992 } 993 EXPORT_SYMBOL(amdtp_stream_pcm_ack); 994 995 /** 996 * amdtp_stream_update - update the stream after a bus reset 997 * @s: the AMDTP stream 998 */ 999 void amdtp_stream_update(struct amdtp_stream *s) 1000 { 1001 /* Precomputing. */ 1002 WRITE_ONCE(s->source_node_id_field, 1003 (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK); 1004 } 1005 EXPORT_SYMBOL(amdtp_stream_update); 1006 1007 /** 1008 * amdtp_stream_stop - stop sending packets 1009 * @s: the AMDTP stream to stop 1010 * 1011 * All PCM and MIDI devices of the stream must be stopped before the stream 1012 * itself can be stopped. 1013 */ 1014 void amdtp_stream_stop(struct amdtp_stream *s) 1015 { 1016 mutex_lock(&s->mutex); 1017 1018 if (!amdtp_stream_running(s)) { 1019 mutex_unlock(&s->mutex); 1020 return; 1021 } 1022 1023 tasklet_kill(&s->period_tasklet); 1024 fw_iso_context_stop(s->context); 1025 fw_iso_context_destroy(s->context); 1026 s->context = ERR_PTR(-1); 1027 iso_packets_buffer_destroy(&s->buffer, s->unit); 1028 1029 s->callbacked = false; 1030 1031 mutex_unlock(&s->mutex); 1032 } 1033 EXPORT_SYMBOL(amdtp_stream_stop); 1034 1035 /** 1036 * amdtp_stream_pcm_abort - abort the running PCM device 1037 * @s: the AMDTP stream about to be stopped 1038 * 1039 * If the isochronous stream needs to be stopped asynchronously, call this 1040 * function first to stop the PCM device. 1041 */ 1042 void amdtp_stream_pcm_abort(struct amdtp_stream *s) 1043 { 1044 struct snd_pcm_substream *pcm; 1045 1046 pcm = READ_ONCE(s->pcm); 1047 if (pcm) 1048 snd_pcm_stop_xrun(pcm); 1049 } 1050 EXPORT_SYMBOL(amdtp_stream_pcm_abort); 1051