// SPDX-License-Identifier: GPL-2.0-only
/*
 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
 * with Common Isochronous Packet (IEC 61883-1) headers
 *
 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include "amdtp-stream.h"

#define TICKS_PER_CYCLE		3072
#define CYCLES_PER_SECOND	8000
#define TICKS_PER_SECOND	(TICKS_PER_CYCLE * CYCLES_PER_SECOND)

#define OHCI_MAX_SECOND		8

/* Always support Linux tracing subsystem. */
#define CREATE_TRACE_POINTS
#include "amdtp-stream-trace.h"

#define TRANSFER_DELAY_TICKS	0x2e00 /* 479.17 microseconds */

/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT	16
#define TAG_NO_CIP_HEADER	0
#define TAG_CIP			1

/* common isochronous packet header parameters */
#define CIP_EOH_SHIFT		31
#define CIP_EOH			(1u << CIP_EOH_SHIFT)
#define CIP_EOH_MASK		0x80000000
#define CIP_SID_SHIFT		24
#define CIP_SID_MASK		0x3f000000
#define CIP_DBS_MASK		0x00ff0000
#define CIP_DBS_SHIFT		16
#define CIP_SPH_MASK		0x00000400
#define CIP_SPH_SHIFT		10
#define CIP_DBC_MASK		0x000000ff
#define CIP_FMT_SHIFT		24
#define CIP_FMT_MASK		0x3f000000
#define CIP_FDF_MASK		0x00ff0000
#define CIP_FDF_SHIFT		16
#define CIP_SYT_MASK		0x0000ffff
#define CIP_SYT_NO_INFO		0xffff

/* Audio and Music transfer protocol specific parameters */
#define CIP_FMT_AM		0x10
#define AMDTP_FDF_NO_DATA	0xff

// For iso header, tstamp and 2 CIP headers.
#define IR_CTX_HEADER_SIZE_CIP		16
// For iso header and tstamp.
#define IR_CTX_HEADER_SIZE_NO_CIP	8
#define HEADER_TSTAMP_MASK	0x0000ffff

#define IT_PKT_HEADER_SIZE_CIP		8 // For 2 CIP headers.
#define IT_PKT_HEADER_SIZE_NO_CIP	0 // Nothing.
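
/*
 * For reference, the two-quadlet CIP header described by the masks and shifts
 * above is laid out roughly as follows (big-endian, bit 31 first):
 *
 *	quadlet 0: EOH=0 | SID (6 bits) | DBS (8 bits) | ... | SPH | ... | DBC (8 bits)
 *	quadlet 1: EOH=1 | FMT (6 bits) | FDF (8 bits) | SYT (16 bits)
 */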

static void pcm_period_work(struct work_struct *work);

/**
 * amdtp_stream_init - initialize an AMDTP stream structure
 * @s: the AMDTP stream to initialize
 * @unit: the target of the stream
 * @dir: the direction of the stream
 * @flags: the packet transmission method to use
 * @fmt: the value of the fmt field in the CIP header
 * @process_ctx_payloads: callback handler to process payloads of isoc context
 * @protocol_size: the size to allocate for protocol-specific data
 */
int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
		      enum amdtp_stream_direction dir, enum cip_flags flags,
		      unsigned int fmt,
		      amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
		      unsigned int protocol_size)
{
	if (process_ctx_payloads == NULL)
		return -EINVAL;

	s->protocol = kzalloc(protocol_size, GFP_KERNEL);
	if (!s->protocol)
		return -ENOMEM;

	s->unit = unit;
	s->direction = dir;
	s->flags = flags;
	s->context = ERR_PTR(-1);
	mutex_init(&s->mutex);
	INIT_WORK(&s->period_work, pcm_period_work);
	s->packet_index = 0;

	init_waitqueue_head(&s->callback_wait);
	s->callbacked = false;

	s->fmt = fmt;
	s->process_ctx_payloads = process_ctx_payloads;

	if (dir == AMDTP_OUT_STREAM)
		s->ctx_data.rx.syt_override = -1;

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);

/**
 * amdtp_stream_destroy - free stream resources
 * @s: the AMDTP stream to destroy
 */
void amdtp_stream_destroy(struct amdtp_stream *s)
{
	/* Not initialized. */
	if (s->protocol == NULL)
		return;

	WARN_ON(amdtp_stream_running(s));
	kfree(s->protocol);
	mutex_destroy(&s->mutex);
}
EXPORT_SYMBOL(amdtp_stream_destroy);

const unsigned int amdtp_syt_intervals[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  8,
	[CIP_SFC_44100]  =  8,
	[CIP_SFC_48000]  =  8,
	[CIP_SFC_88200]  = 16,
	[CIP_SFC_96000]  = 16,
	[CIP_SFC_176400] = 32,
	[CIP_SFC_192000] = 32,
};
EXPORT_SYMBOL(amdtp_syt_intervals);

const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
	[CIP_SFC_32000]  =  32000,
	[CIP_SFC_44100]  =  44100,
	[CIP_SFC_48000]  =  48000,
	[CIP_SFC_88200]  =  88200,
	[CIP_SFC_96000]  =  96000,
	[CIP_SFC_176400] = 176400,
	[CIP_SFC_192000] = 192000,
};
EXPORT_SYMBOL(amdtp_rate_table);

static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
				    struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *s = hw_param_interval(params, rule->var);
	const struct snd_interval *r =
		hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
	struct snd_interval t = {0};
	unsigned int step = 0;
	int i;

	for (i = 0; i < CIP_SFC_COUNT; ++i) {
		if (snd_interval_test(r, amdtp_rate_table[i]))
			step = max(step, amdtp_syt_intervals[i]);
	}

	t.min = roundup(s->min, step);
	t.max = rounddown(s->max, step);
	t.integer = 1;

	return snd_interval_refine(s, &t);
}

/**
 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
 * @s: the AMDTP stream, which must be initialized.
 * @runtime: the PCM substream runtime
 */
int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
					struct snd_pcm_runtime *runtime)
{
	struct snd_pcm_hardware *hw = &runtime->hw;
	unsigned int ctx_header_size;
	unsigned int maximum_usec_per_period;
	int err;

	hw->info = SNDRV_PCM_INFO_BATCH |
		   SNDRV_PCM_INFO_BLOCK_TRANSFER |
		   SNDRV_PCM_INFO_INTERLEAVED |
		   SNDRV_PCM_INFO_JOINT_DUPLEX |
		   SNDRV_PCM_INFO_MMAP |
		   SNDRV_PCM_INFO_MMAP_VALID;

	/* SNDRV_PCM_INFO_BATCH */
	hw->periods_min = 2;
	hw->periods_max = UINT_MAX;

	/* bytes for a frame */
	hw->period_bytes_min = 4 * hw->channels_max;

	/* Just to prevent from allocating too many pages. */
	hw->period_bytes_max = hw->period_bytes_min * 2048;
	hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;

	// The Linux driver for the 1394 OHCI controller voluntarily flushes
	// an isoc context when the total size of accumulated context headers
	// reaches PAGE_SIZE.  This kicks the work for the isoc context and
	// brings a callback in the middle of the scheduled interrupts.
	// Although AMDTP streams in the same domain use the same events per
	// IRQ, use the largest size of context header between IT/IR contexts.
	// Here, the size of the context header for an IR context is used for
	// both contexts.
	if (!(s->flags & CIP_NO_HEADER))
		ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
	else
		ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
	maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
				  CYCLES_PER_SECOND / ctx_header_size;

	// In IEC 61883-6, one isoc packet can transfer events up to the value
	// of the syt interval.  This comes from the interval of the isoc
	// cycle.  As the 1394 OHCI controller can generate a hardware IRQ per
	// isoc packet, the interval is 125 usec.
	// However, there are two modes of transmission in IEC 61883-6:
	// blocking and non-blocking.  In blocking mode, the sequence of isoc
	// packets includes 'empty' or 'NODATA' packets which include no
	// event.  In non-blocking mode, the number of events per packet is
	// variable up to the syt interval.
	// Due to the above protocol design, the minimum PCM frames per
	// interrupt should be double the value of the syt interval, thus it
	// is 250 usec.
	err = snd_pcm_hw_constraint_minmax(runtime,
					   SNDRV_PCM_HW_PARAM_PERIOD_TIME,
					   250, maximum_usec_per_period);
	if (err < 0)
		goto end;

	/* Non-Blocking stream has no more constraints */
	if (!(s->flags & CIP_BLOCKING))
		goto end;

	/*
	 * One AMDTP packet can include some frames. In blocking mode, the
	 * number equals SYT_INTERVAL. So the number is 8, 16 or 32, depending
	 * on its sampling rate. For accurate period interrupts, it is
	 * preferable to align period/buffer sizes to the current SYT_INTERVAL.
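	 * For example, at 48000 Hz the SYT_INTERVAL is 8 frames, so a period
	 * size of 480 frames is already aligned, while one of 500 frames
	 * would be constrained by the rules added below.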
	 */
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
	err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  apply_constraint_to_size, NULL,
				  SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				  SNDRV_PCM_HW_PARAM_RATE, -1);
	if (err < 0)
		goto end;
end:
	return err;
}
EXPORT_SYMBOL(amdtp_stream_add_pcm_hw_constraints);

/**
 * amdtp_stream_set_parameters - set stream parameters
 * @s: the AMDTP stream to configure
 * @rate: the sample rate
 * @data_block_quadlets: the size of a data block in quadlet unit
 *
 * The parameters must be set before the stream is started, and must not be
 * changed while the stream is running.
 */
int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate,
				unsigned int data_block_quadlets)
{
	unsigned int sfc;

	for (sfc = 0; sfc < ARRAY_SIZE(amdtp_rate_table); ++sfc) {
		if (amdtp_rate_table[sfc] == rate)
			break;
	}
	if (sfc == ARRAY_SIZE(amdtp_rate_table))
		return -EINVAL;

	s->sfc = sfc;
	s->data_block_quadlets = data_block_quadlets;
	s->syt_interval = amdtp_syt_intervals[sfc];

	// default buffering in the device.
	if (s->direction == AMDTP_OUT_STREAM) {
		s->ctx_data.rx.transfer_delay =
					TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;

		if (s->flags & CIP_BLOCKING) {
			// additional buffering needed to adjust for no-data
			// packets.
			s->ctx_data.rx.transfer_delay +=
				TICKS_PER_SECOND * s->syt_interval / rate;
		}
	}

	return 0;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);

/**
 * amdtp_stream_get_max_payload - get the stream's packet size
 * @s: the AMDTP stream
 *
 * This function must not be called before the stream has been configured
 * with amdtp_stream_set_parameters().
 */
unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
{
	unsigned int multiplier = 1;
	unsigned int cip_header_size = 0;

	if (s->flags & CIP_JUMBO_PAYLOAD)
		multiplier = 5;
	if (!(s->flags & CIP_NO_HEADER))
		cip_header_size = sizeof(__be32) * 2;

	return cip_header_size +
		s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
}
EXPORT_SYMBOL(amdtp_stream_get_max_payload);

/**
 * amdtp_stream_pcm_prepare - prepare PCM device for running
 * @s: the AMDTP stream
 *
 * This function should be called from the PCM device's .prepare callback.
 */
void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
{
	cancel_work_sync(&s->period_work);
	s->pcm_buffer_pointer = 0;
	s->pcm_period_pointer = 0;
}
EXPORT_SYMBOL(amdtp_stream_pcm_prepare);

static unsigned int calculate_data_blocks(unsigned int *data_block_state,
				bool is_blocking, bool is_no_info,
				unsigned int syt_interval, enum cip_sfc sfc)
{
	unsigned int data_blocks;

	/* Blocking mode. */
	if (is_blocking) {
		/* This module generates an empty packet for 'no data'. */
		if (is_no_info)
			data_blocks = 0;
		else
			data_blocks = syt_interval;
	/* Non-blocking mode. */
	} else {
		if (!cip_sfc_is_base_44100(sfc)) {
			// Sample_rate / 8000 is an integer, and precomputed.
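			// e.g. 32000 Hz -> 4, 48000 Hz -> 6, 96000 Hz -> 12,
			// 192000 Hz -> 24 data blocks per isoc cycle.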
			data_blocks = *data_block_state;
		} else {
			unsigned int phase = *data_block_state;

			/*
			 * This calculates the number of data blocks per packet so that
			 * 1) the overall rate is correct and exactly synchronized to
			 *    the bus clock, and
			 * 2) packets with a rounded-up number of blocks occur as early
			 *    as possible in the sequence (to prevent underruns of the
			 *    device's buffer).
			 */
			if (sfc == CIP_SFC_44100)
				/* 6 6 5 6 5 6 5 ... */
				data_blocks = 5 + ((phase & 1) ^
						   (phase == 0 || phase >= 40));
			else
				/* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
				data_blocks = 11 * (sfc >> 1) + (phase == 0);
			if (++phase >= (80 >> (sfc >> 1)))
				phase = 0;
			*data_block_state = phase;
		}
	}

	return data_blocks;
}

static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
			unsigned int *syt_offset_state, enum cip_sfc sfc)
{
	unsigned int syt_offset;

	if (*last_syt_offset < TICKS_PER_CYCLE) {
		if (!cip_sfc_is_base_44100(sfc))
			syt_offset = *last_syt_offset + *syt_offset_state;
		else {
			/*
			 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
			 *   n * SYT_INTERVAL * 24576000 / sample_rate
			 * Modulo TICKS_PER_CYCLE, the difference between successive
			 * elements is about 1386.23.  Rounding the results of this
			 * formula to the SYT precision results in a sequence of
			 * differences that begins with:
			 *   1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
			 * This code generates _exactly_ the same sequence.
			 */
			unsigned int phase = *syt_offset_state;
			unsigned int index = phase % 13;

			syt_offset = *last_syt_offset;
			syt_offset += 1386 + ((index && !(index & 3)) ||
					      phase == 146);
			if (++phase >= 147)
				phase = 0;
			*syt_offset_state = phase;
		}
	} else
		syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
	*last_syt_offset = syt_offset;

	if (syt_offset >= TICKS_PER_CYCLE)
		syt_offset = CIP_SYT_NO_INFO;

	return syt_offset;
}

static void update_pcm_pointers(struct amdtp_stream *s,
				struct snd_pcm_substream *pcm,
				unsigned int frames)
{
	unsigned int ptr;

	ptr = s->pcm_buffer_pointer + frames;
	if (ptr >= pcm->runtime->buffer_size)
		ptr -= pcm->runtime->buffer_size;
	WRITE_ONCE(s->pcm_buffer_pointer, ptr);

	s->pcm_period_pointer += frames;
	if (s->pcm_period_pointer >= pcm->runtime->period_size) {
		s->pcm_period_pointer -= pcm->runtime->period_size;
		queue_work(system_highpri_wq, &s->period_work);
	}
}

static void pcm_period_work(struct work_struct *work)
{
	struct amdtp_stream *s = container_of(work, struct amdtp_stream,
					      period_work);
	struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);

	if (pcm)
		snd_pcm_period_elapsed(pcm);
}

static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
			bool sched_irq)
{
	int err;

	params->interrupt = sched_irq;
	params->tag = s->tag;
	params->sy = 0;

	err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
				   s->buffer.packets[s->packet_index].offset);
	if (err < 0) {
		dev_err(&s->unit->device, "queueing error: %d\n", err);
		goto end;
	}

	if (++s->packet_index >= s->queue_size)
		s->packet_index = 0;
end:
	return err;
}

static inline int queue_out_packet(struct amdtp_stream *s,
				   struct fw_iso_packet *params, bool sched_irq)
{
	params->skip =
		!!(params->header_length == 0 && params->payload_length == 0);
	return queue_packet(s, params, sched_irq);
}

static inline int queue_in_packet(struct amdtp_stream *s,
				  struct fw_iso_packet *params)
{
	// Queue one packet for IR context.
	params->header_length = s->ctx_data.tx.ctx_header_size;
	params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
	params->skip = false;
	return queue_packet(s, params, false);
}

static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
			unsigned int data_block_counter, unsigned int syt)
{
	cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
				(s->data_block_quadlets << CIP_DBS_SHIFT) |
				((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
				data_block_counter);
	cip_header[1] = cpu_to_be32(CIP_EOH |
			((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
			((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
			(syt & CIP_SYT_MASK));
}

static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
				struct fw_iso_packet *params,
				unsigned int data_blocks,
				unsigned int data_block_counter,
				unsigned int syt, unsigned int index)
{
	unsigned int payload_length;
	__be32 *cip_header;

	payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
	params->payload_length = payload_length;

	if (!(s->flags & CIP_NO_HEADER)) {
		cip_header = (__be32 *)params->header;
		generate_cip_header(s, cip_header, data_block_counter, syt);
		params->header_length = 2 * sizeof(__be32);
		payload_length += params->header_length;
	} else {
		cip_header = NULL;
	}

	trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks,
			   data_block_counter, index);
}

static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
			    unsigned int payload_length,
			    unsigned int *data_blocks,
			    unsigned int *data_block_counter, unsigned int *syt)
{
	u32 cip_header[2];
	unsigned int sph;
	unsigned int fmt;
	unsigned int fdf;
	unsigned int dbc;
	bool lost;

	cip_header[0] = be32_to_cpu(buf[0]);
	cip_header[1] = be32_to_cpu(buf[1]);

	/*
	 * This module supports the 'Two-quadlet CIP header with SYT field'.
	 * For convenience, also check whether the FMT field is AM824 or not.
	 */
	if ((((cip_header[0] & CIP_EOH_MASK) == CIP_EOH) ||
	     ((cip_header[1] & CIP_EOH_MASK) != CIP_EOH)) &&
	    (!(s->flags & CIP_HEADER_WITHOUT_EOH))) {
		dev_info_ratelimited(&s->unit->device,
			"Invalid CIP header for AMDTP: %08X:%08X\n",
			cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Check whether the protocol is valid or not. */
	sph = (cip_header[0] & CIP_SPH_MASK) >> CIP_SPH_SHIFT;
	fmt = (cip_header[1] & CIP_FMT_MASK) >> CIP_FMT_SHIFT;
	if (sph != s->sph || fmt != s->fmt) {
		dev_info_ratelimited(&s->unit->device,
				     "Detect unexpected protocol: %08x %08x\n",
				     cip_header[0], cip_header[1]);
		return -EAGAIN;
	}

	/* Calculate data blocks */
	fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
	if (payload_length < sizeof(__be32) * 2 ||
	    (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
		*data_blocks = 0;
	} else {
		unsigned int data_block_quadlets =
				(cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
		/* avoid division by zero */
		if (data_block_quadlets == 0) {
			dev_err(&s->unit->device,
				"Detect invalid value in dbs field: %08X\n",
				cip_header[0]);
			return -EPROTO;
		}
		if (s->flags & CIP_WRONG_DBS)
			data_block_quadlets = s->data_block_quadlets;

		*data_blocks = (payload_length / sizeof(__be32) - 2) /
							data_block_quadlets;
	}

	/* Check data block counter continuity */
	dbc = cip_header[0] & CIP_DBC_MASK;
	if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
	    *data_block_counter != UINT_MAX)
		dbc = *data_block_counter;

	if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
	    *data_block_counter == UINT_MAX) {
		lost = false;
	} else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
		lost = dbc != *data_block_counter;
	} else {
		unsigned int dbc_interval;

		if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
			dbc_interval = s->ctx_data.tx.dbc_interval;
		else
			dbc_interval = *data_blocks;

		lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
	}

	if (lost) {
		dev_err(&s->unit->device,
			"Detect discontinuity of CIP: %02X %02X\n",
			*data_block_counter, dbc);
		return -EIO;
	}

	*data_block_counter = dbc;

	*syt = cip_header[1] & CIP_SYT_MASK;

	return 0;
}

static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
			       const __be32 *ctx_header,
			       unsigned int *payload_length,
			       unsigned int *data_blocks,
			       unsigned int *data_block_counter,
			       unsigned int *syt, unsigned int index)
{
	const __be32 *cip_header;
	int err;

	*payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
	if (*payload_length > s->ctx_data.tx.ctx_header_size +
					s->ctx_data.tx.max_ctx_payload_length) {
		dev_err(&s->unit->device,
			"Detect jumbo payload: %04x %04x\n",
			*payload_length, s->ctx_data.tx.max_ctx_payload_length);
		return -EIO;
	}

	if (!(s->flags & CIP_NO_HEADER)) {
		cip_header = ctx_header + 2;
		err = check_cip_header(s, cip_header, *payload_length,
				       data_blocks, data_block_counter, syt);
		if (err < 0)
			return err;
	} else {
		cip_header = NULL;
		err = 0;
		*data_blocks = *payload_length / sizeof(__be32) /
			       s->data_block_quadlets;
		*syt = 0;

		if (*data_block_counter == UINT_MAX)
			*data_block_counter = 0;
	}

	trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks,
			   *data_block_counter, index);

	return err;
}

// In the CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent the
// second.  On the other hand, in the DMA descriptors of 1394 OHCI, only 3 bits
// are used to represent it.  Thus, via the Linux FireWire subsystem, we can
// get just those 3 bits of the second.
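// For example, a timestamp of 0x6123 decodes as second = (0x6123 >> 13) & 0x07
// = 3 and cycle = 0x6123 & 0x1fff = 291, which gives a cycle count of
// 3 * 8000 + 291 = 24291.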
static inline u32 compute_cycle_count(__be32 ctx_header_tstamp)
{
	u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
	return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
}

static inline u32 increment_cycle_count(u32 cycle, unsigned int addend)
{
	cycle += addend;
	if (cycle >= OHCI_MAX_SECOND * CYCLES_PER_SECOND)
		cycle -= OHCI_MAX_SECOND * CYCLES_PER_SECOND;
	return cycle;
}

// Align to the actual cycle count for the packet which is going to be
// scheduled.  This module queues the same number of isochronous cycles as the
// queue size to skip isochronous cycles, therefore it's OK to just increment
// the cycle by the queue size for the scheduled cycle.
static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp,
				   unsigned int queue_size)
{
	u32 cycle = compute_cycle_count(ctx_header_tstamp);
	return increment_cycle_count(cycle, queue_size);
}

static int generate_device_pkt_descs(struct amdtp_stream *s,
				     struct pkt_desc *descs,
				     const __be32 *ctx_header,
				     unsigned int packets)
{
	unsigned int dbc = s->data_block_counter;
	int i;
	int err;

	for (i = 0; i < packets; ++i) {
		struct pkt_desc *desc = descs + i;
		unsigned int index = (s->packet_index + i) % s->queue_size;
		unsigned int cycle;
		unsigned int payload_length;
		unsigned int data_blocks;
		unsigned int syt;

		cycle = compute_cycle_count(ctx_header[1]);

		err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length,
					  &data_blocks, &dbc, &syt, i);
		if (err < 0)
			return err;

		desc->cycle = cycle;
		desc->syt = syt;
		desc->data_blocks = data_blocks;
		desc->data_block_counter = dbc;
		desc->ctx_payload = s->buffer.packets[index].buffer;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		ctx_header +=
			s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
	}

	s->data_block_counter = dbc;

	return 0;
}

static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
				unsigned int transfer_delay)
{
	unsigned int syt;

	syt_offset += transfer_delay;
	syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
	      (syt_offset % TICKS_PER_CYCLE);
	return syt & CIP_SYT_MASK;
}

static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
			       const __be32 *ctx_header, unsigned int packets,
			       const struct seq_desc *seq_descs,
			       unsigned int seq_size)
{
	unsigned int dbc = s->data_block_counter;
	unsigned int seq_index = s->ctx_data.rx.seq_index;
	int i;

	for (i = 0; i < packets; ++i) {
		struct pkt_desc *desc = descs + i;
		unsigned int index = (s->packet_index + i) % s->queue_size;
		const struct seq_desc *seq = seq_descs + seq_index;
		unsigned int syt;

		desc->cycle = compute_it_cycle(*ctx_header, s->queue_size);

		syt = seq->syt_offset;
		if (syt != CIP_SYT_NO_INFO) {
			syt = compute_syt(syt, desc->cycle,
					  s->ctx_data.rx.transfer_delay);
		}
		desc->syt = syt;
		desc->data_blocks = seq->data_blocks;

		if (s->flags & CIP_DBC_IS_END_EVENT)
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->data_block_counter = dbc;

		if (!(s->flags & CIP_DBC_IS_END_EVENT))
			dbc = (dbc + desc->data_blocks) & 0xff;

		desc->ctx_payload = s->buffer.packets[index].buffer;

		seq_index = (seq_index + 1) % seq_size;

		++ctx_header;
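		// For an IT context, each context header entry is a single
		// quadlet holding the isoc cycle timestamp, hence the
		// per-packet increment above.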
	}

	s->data_block_counter = dbc;
	s->ctx_data.rx.seq_index = seq_index;
}

static inline void cancel_stream(struct amdtp_stream *s)
{
	s->packet_index = -1;
	if (current_work() == &s->period_work)
		amdtp_stream_pcm_abort(s);
	WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
}

static void process_ctx_payloads(struct amdtp_stream *s,
				 const struct pkt_desc *descs,
				 unsigned int packets)
{
	struct snd_pcm_substream *pcm;
	unsigned int pcm_frames;

	pcm = READ_ONCE(s->pcm);
	pcm_frames = s->process_ctx_payloads(s, descs, packets, pcm);
	if (pcm)
		update_pcm_pointers(s, pcm, pcm_frames);
}

static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
				size_t header_length, void *header,
				void *private_data)
{
	struct amdtp_stream *s = private_data;
	const struct amdtp_domain *d = s->domain;
	const __be32 *ctx_header = header;
	unsigned int events_per_period = s->ctx_data.rx.events_per_period;
	unsigned int event_count = s->ctx_data.rx.event_count;
	unsigned int packets;
	int i;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in the buffer and check for XRUN.
	packets = header_length / sizeof(*ctx_header);

	generate_pkt_descs(s, s->pkt_descs, ctx_header, packets, d->seq_descs,
			   d->seq_size);

	process_ctx_payloads(s, s->pkt_descs, packets);

	for (i = 0; i < packets; ++i) {
		const struct pkt_desc *desc = s->pkt_descs + i;
		unsigned int syt;
		struct {
			struct fw_iso_packet params;
			__be32 header[IT_PKT_HEADER_SIZE_CIP / sizeof(__be32)];
		} template = { {0}, {0} };
		bool sched_irq = false;

		if (s->ctx_data.rx.syt_override < 0)
			syt = desc->syt;
		else
			syt = s->ctx_data.rx.syt_override;

		build_it_pkt_header(s, desc->cycle, &template.params,
				    desc->data_blocks, desc->data_block_counter,
				    syt, i);

		if (s == s->domain->irq_target) {
			event_count += desc->data_blocks;
			if (event_count >= events_per_period) {
				event_count -= events_per_period;
				sched_irq = true;
			}
		}

		if (queue_out_packet(s, &template.params, sched_irq) < 0) {
			cancel_stream(s);
			return;
		}
	}

	s->ctx_data.rx.event_count = event_count;
}

static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
			       size_t header_length, void *header,
			       void *private_data)
{
	struct amdtp_stream *s = private_data;
	__be32 *ctx_header = header;
	unsigned int packets;
	int i;
	int err;

	if (s->packet_index < 0)
		return;

	// Calculate the number of packets in the buffer and check for XRUN.
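	// Each packet of the IR context consumes ctx_header_size bytes of
	// context header (iso header and timestamp, plus two CIP quadlets
	// when CIP headers are in use).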
	packets = header_length / s->ctx_data.tx.ctx_header_size;

	err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets);
	if (err < 0) {
		if (err != -EAGAIN) {
			cancel_stream(s);
			return;
		}
	} else {
		process_ctx_payloads(s, s->pkt_descs, packets);
	}

	for (i = 0; i < packets; ++i) {
		struct fw_iso_packet params = {0};

		if (queue_in_packet(s, &params) < 0) {
			cancel_stream(s);
			return;
		}
	}
}

static void pool_ideal_seq_descs(struct amdtp_domain *d, unsigned int packets)
{
	struct amdtp_stream *irq_target = d->irq_target;
	unsigned int seq_tail = d->seq_tail;
	unsigned int seq_size = d->seq_size;
	unsigned int min_avail;
	struct amdtp_stream *s;

	min_avail = d->seq_size;
	list_for_each_entry(s, &d->streams, list) {
		unsigned int seq_index;
		unsigned int avail;

		if (s->direction == AMDTP_IN_STREAM)
			continue;

		seq_index = s->ctx_data.rx.seq_index;
		avail = d->seq_tail;
		if (seq_index > avail)
			avail += d->seq_size;
		avail -= seq_index;

		if (avail < min_avail)
			min_avail = avail;
	}

	while (min_avail < packets) {
		struct seq_desc *desc = d->seq_descs + seq_tail;

		desc->syt_offset = calculate_syt_offset(&d->last_syt_offset,
					&d->syt_offset_state, irq_target->sfc);
		desc->data_blocks = calculate_data_blocks(&d->data_block_state,
				!!(irq_target->flags & CIP_BLOCKING),
				desc->syt_offset == CIP_SYT_NO_INFO,
				irq_target->syt_interval, irq_target->sfc);

		++seq_tail;
		seq_tail %= seq_size;

		++min_avail;
	}

	d->seq_tail = seq_tail;
}

static void irq_target_callback(struct fw_iso_context *context, u32 tstamp,
				size_t header_length, void *header,
				void *private_data)
{
	struct amdtp_stream *irq_target = private_data;
	struct amdtp_domain *d = irq_target->domain;
	unsigned int packets = header_length / sizeof(__be32);
	struct amdtp_stream *s;

	// Record enough entries, with at least 3 extra cycles.
	pool_ideal_seq_descs(d, packets + 3);

	out_stream_callback(context, tstamp, header_length, header, irq_target);
	if (amdtp_streaming_error(irq_target))
		goto error;

	list_for_each_entry(s, &d->streams, list) {
		if (s != irq_target && amdtp_stream_running(s)) {
			fw_iso_context_flush_completions(s->context);
			if (amdtp_streaming_error(s))
				goto error;
		}
	}

	return;
error:
	if (amdtp_stream_running(irq_target))
		cancel_stream(irq_target);

	list_for_each_entry(s, &d->streams, list) {
		if (amdtp_stream_running(s))
			cancel_stream(s);
	}
}

// This is executed only once.
static void amdtp_stream_first_callback(struct fw_iso_context *context,
					u32 tstamp, size_t header_length,
					void *header, void *private_data)
{
	struct amdtp_stream *s = private_data;
	const __be32 *ctx_header = header;
	u32 cycle;

	/*
	 * For an in-stream, the first packet has arrived.
	 * For an out-stream, the context is prepared to transmit the first
	 * packet.
	 */
	s->callbacked = true;
	wake_up(&s->callback_wait);

	if (s->direction == AMDTP_IN_STREAM) {
		cycle = compute_cycle_count(ctx_header[1]);

		context->callback.sc = in_stream_callback;
	} else {
		cycle = compute_it_cycle(*ctx_header, s->queue_size);

		if (s == s->domain->irq_target)
			context->callback.sc = irq_target_callback;
		else
			context->callback.sc = out_stream_callback;
	}

	s->start_cycle = cycle;

	context->callback.sc(context, tstamp, header_length, header, s);
}

/**
 * amdtp_stream_start - start transferring packets
 * @s: the AMDTP stream to start
 * @channel: the isochronous channel on the bus
 * @speed: firewire speed code
 * @start_cycle: the isochronous cycle to start the context. Start immediately
 *		 if a negative value is given.
 * @queue_size: The number of packets in the queue.
 * @idle_irq_interval: the interval to queue packets during the initial state.
 *
 * The stream cannot be started until it has been configured with
 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
 * device can be started.
 */
static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
			      int start_cycle, unsigned int queue_size,
			      unsigned int idle_irq_interval)
{
	bool is_irq_target = (s == s->domain->irq_target);
	unsigned int ctx_header_size;
	unsigned int max_ctx_payload_size;
	enum dma_data_direction dir;
	int type, tag, err;

	mutex_lock(&s->mutex);

	if (WARN_ON(amdtp_stream_running(s) ||
		    (s->data_block_quadlets < 1))) {
		err = -EBADFD;
		goto err_unlock;
	}

	if (s->direction == AMDTP_IN_STREAM) {
		// NOTE: IT context should be used for constant IRQ.
		if (is_irq_target) {
			err = -EINVAL;
			goto err_unlock;
		}

		s->data_block_counter = UINT_MAX;
	} else {
		s->data_block_counter = 0;
	}

	/* initialize packet buffer */
	if (s->direction == AMDTP_IN_STREAM) {
		dir = DMA_FROM_DEVICE;
		type = FW_ISO_CONTEXT_RECEIVE;
		if (!(s->flags & CIP_NO_HEADER))
			ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
		else
			ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;

		max_ctx_payload_size = amdtp_stream_get_max_payload(s) -
				       ctx_header_size;
	} else {
		dir = DMA_TO_DEVICE;
		type = FW_ISO_CONTEXT_TRANSMIT;
		ctx_header_size = 0;	// No effect for IT context.
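		// For an IT context, the CIP header (when used) is queued as
		// part of the packet payload, so it is subtracted from the
		// maximum context payload size below.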

		max_ctx_payload_size = amdtp_stream_get_max_payload(s);
		if (!(s->flags & CIP_NO_HEADER))
			max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
	}

	err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size,
				      max_ctx_payload_size, dir);
	if (err < 0)
		goto err_unlock;
	s->queue_size = queue_size;

	s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
					   type, channel, speed, ctx_header_size,
					   amdtp_stream_first_callback, s);
	if (IS_ERR(s->context)) {
		err = PTR_ERR(s->context);
		if (err == -EBUSY)
			dev_err(&s->unit->device,
				"no free stream on this controller\n");
		goto err_buffer;
	}

	amdtp_stream_update(s);

	if (s->direction == AMDTP_IN_STREAM) {
		s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
		s->ctx_data.tx.ctx_header_size = ctx_header_size;
	}

	if (s->flags & CIP_NO_HEADER)
		s->tag = TAG_NO_CIP_HEADER;
	else
		s->tag = TAG_CIP;

	s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs),
			       GFP_KERNEL);
	if (!s->pkt_descs) {
		err = -ENOMEM;
		goto err_context;
	}

	s->packet_index = 0;
	do {
		struct fw_iso_packet params;

		if (s->direction == AMDTP_IN_STREAM) {
			err = queue_in_packet(s, &params);
		} else {
			bool sched_irq = false;

			params.header_length = 0;
			params.payload_length = 0;

			if (is_irq_target) {
				sched_irq = !((s->packet_index + 1) %
					      idle_irq_interval);
			}

			err = queue_out_packet(s, &params, sched_irq);
		}
		if (err < 0)
			goto err_pkt_descs;
	} while (s->packet_index > 0);

	/* NOTE: TAG1 matches CIP. This just affects the in-stream. */
	tag = FW_ISO_CONTEXT_MATCH_TAG1;
	if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER))
		tag |= FW_ISO_CONTEXT_MATCH_TAG0;

	s->callbacked = false;
	err = fw_iso_context_start(s->context, start_cycle, 0, tag);
	if (err < 0)
		goto err_pkt_descs;

	mutex_unlock(&s->mutex);

	return 0;
err_pkt_descs:
	kfree(s->pkt_descs);
err_context:
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
err_buffer:
	iso_packets_buffer_destroy(&s->buffer, s->unit);
err_unlock:
	mutex_unlock(&s->mutex);

	return err;
}

/**
 * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transports the PCM data
 *
 * Returns the current buffer position, in frames.
 */
unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
					      struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	if (irq_target && amdtp_stream_running(irq_target)) {
		// This function is called in the software IRQ context of
		// period_work or in the process context.
		//
		// When the software IRQ context was scheduled by the software
		// IRQ context of the IT contexts, queued packets were already
		// handled.  Therefore, there is no need to flush the queue in
		// the buffer anymore.
		//
		// When the process context reaches here, some packets will
		// already be queued in the buffer.  These packets should be
		// handled immediately to keep better granularity of the PCM
		// pointer.
		//
		// Later, the process context will sometimes schedule the
		// software IRQ context of the period_work.
		// In that case, there is no need to flush the queue for the
		// same reason as described above.
		if (current_work() != &s->period_work) {
			// Queued packets should be processed without any
			// kernel preemption to keep the latency against the
			// bus cycle.
			preempt_disable();
			fw_iso_context_flush_completions(irq_target->context);
			preempt_enable();
		}
	}

	return READ_ONCE(s->pcm_buffer_pointer);
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);

/**
 * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
 * @d: the AMDTP domain.
 * @s: the AMDTP stream that transfers the PCM frames
 *
 * Returns zero always.
 */
int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
{
	struct amdtp_stream *irq_target = d->irq_target;

	// Process isochronous packets for the recent isochronous cycle to
	// handle queued PCM frames.
	if (irq_target && amdtp_stream_running(irq_target)) {
		// Queued packets should be processed without any kernel
		// preemption to keep the latency against the bus cycle.
		preempt_disable();
		fw_iso_context_flush_completions(irq_target->context);
		preempt_enable();
	}

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);

/**
 * amdtp_stream_update - update the stream after a bus reset
 * @s: the AMDTP stream
 */
void amdtp_stream_update(struct amdtp_stream *s)
{
	/* Precomputing. */
	WRITE_ONCE(s->source_node_id_field,
		   (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK);
}
EXPORT_SYMBOL(amdtp_stream_update);

/**
 * amdtp_stream_stop - stop sending packets
 * @s: the AMDTP stream to stop
 *
 * All PCM and MIDI devices of the stream must be stopped before the stream
 * itself can be stopped.
 */
static void amdtp_stream_stop(struct amdtp_stream *s)
{
	mutex_lock(&s->mutex);

	if (!amdtp_stream_running(s)) {
		mutex_unlock(&s->mutex);
		return;
	}

	cancel_work_sync(&s->period_work);
	fw_iso_context_stop(s->context);
	fw_iso_context_destroy(s->context);
	s->context = ERR_PTR(-1);
	iso_packets_buffer_destroy(&s->buffer, s->unit);
	kfree(s->pkt_descs);

	s->callbacked = false;

	mutex_unlock(&s->mutex);
}

/**
 * amdtp_stream_pcm_abort - abort the running PCM device
 * @s: the AMDTP stream about to be stopped
 *
 * If the isochronous stream needs to be stopped asynchronously, call this
 * function first to stop the PCM device.
 */
void amdtp_stream_pcm_abort(struct amdtp_stream *s)
{
	struct snd_pcm_substream *pcm;

	pcm = READ_ONCE(s->pcm);
	if (pcm)
		snd_pcm_stop_xrun(pcm);
}
EXPORT_SYMBOL(amdtp_stream_pcm_abort);

/**
 * amdtp_domain_init - initialize an AMDTP domain structure
 * @d: the AMDTP domain to initialize.
 */
int amdtp_domain_init(struct amdtp_domain *d)
{
	INIT_LIST_HEAD(&d->streams);

	d->events_per_period = 0;

	d->seq_descs = NULL;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_init);

/**
 * amdtp_domain_destroy - destroy an AMDTP domain structure
 * @d: the AMDTP domain to destroy.
 */
void amdtp_domain_destroy(struct amdtp_domain *d)
{
	// At present there is nothing to do.
	return;
}
EXPORT_SYMBOL_GPL(amdtp_domain_destroy);

/**
 * amdtp_domain_add_stream - register an isoc context into the domain.
 * @d: the AMDTP domain.
 * @s: the AMDTP stream.
 * @channel: the isochronous channel on the bus.
 * @speed: firewire speed code.
 */
int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
			    int channel, int speed)
{
	struct amdtp_stream *tmp;

	list_for_each_entry(tmp, &d->streams, list) {
		if (s == tmp)
			return -EBUSY;
	}

	list_add(&s->list, &d->streams);

	s->channel = channel;
	s->speed = speed;
	s->domain = d;

	return 0;
}
EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);

static int get_current_cycle_time(struct fw_card *fw_card, int *cur_cycle)
{
	int generation;
	int rcode;
	__be32 reg;
	u32 data;

	// This is a request to the local 1394 OHCI controller and is expected
	// to complete without waiting for any event.
	generation = fw_card->generation;
	smp_rmb();	// node_id vs. generation.
	rcode = fw_run_transaction(fw_card, TCODE_READ_QUADLET_REQUEST,
				   fw_card->node_id, generation, SCODE_100,
				   CSR_REGISTER_BASE + CSR_CYCLE_TIME,
				   &reg, sizeof(reg));
	if (rcode != RCODE_COMPLETE)
		return -EIO;

	data = be32_to_cpu(reg);
	*cur_cycle = data >> 12;

	return 0;
}

/**
 * amdtp_domain_start - start sending packets for the isoc contexts in the domain.
 * @d: the AMDTP domain.
 * @ir_delay_cycle: the cycle delay to start all IR contexts.
 */
int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
{
	static const struct {
		unsigned int data_block;
		unsigned int syt_offset;
	} *entry, initial_state[] = {
		[CIP_SFC_32000]  = {  4, 3072 },
		[CIP_SFC_48000]  = {  6, 1024 },
		[CIP_SFC_96000]  = { 12, 1024 },
		[CIP_SFC_192000] = { 24, 1024 },
		[CIP_SFC_44100]  = {  0,   67 },
		[CIP_SFC_88200]  = {  0,   67 },
		[CIP_SFC_176400] = {  0,   67 },
	};
	unsigned int events_per_buffer = d->events_per_buffer;
	unsigned int events_per_period = d->events_per_period;
	unsigned int idle_irq_interval;
	unsigned int queue_size;
	struct amdtp_stream *s;
	int cycle;
	int err;

	// Select an IT context as the IRQ target.
	list_for_each_entry(s, &d->streams, list) {
		if (s->direction == AMDTP_OUT_STREAM)
			break;
	}
	if (!s)
		return -ENXIO;
	d->irq_target = s;

	// This is the case in which AMDTP streams in the domain run just for
	// a MIDI substream.  Use the number of events equivalent to 10 msec
	// as the interval of the hardware IRQ.
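	// For example, at 48000 Hz this gives 48000 / 100 = 480 events per
	// period.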
	if (events_per_period == 0)
		events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
	if (events_per_buffer == 0)
		events_per_buffer = events_per_period * 3;

	queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
				  amdtp_rate_table[d->irq_target->sfc]);

	d->seq_descs = kcalloc(queue_size, sizeof(*d->seq_descs), GFP_KERNEL);
	if (!d->seq_descs)
		return -ENOMEM;
	d->seq_size = queue_size;
	d->seq_tail = 0;

	entry = &initial_state[s->sfc];
	d->data_block_state = entry->data_block;
	d->syt_offset_state = entry->syt_offset;
	d->last_syt_offset = TICKS_PER_CYCLE;

	if (ir_delay_cycle > 0) {
		struct fw_card *fw_card = fw_parent_device(s->unit)->card;

		err = get_current_cycle_time(fw_card, &cycle);
		if (err < 0)
			goto error;

		// No need to care about overflow in the cycle field because
		// it has enough width.
		cycle += ir_delay_cycle;

		// Round up to the sec field.
		if ((cycle & 0x00001fff) >= CYCLES_PER_SECOND) {
			unsigned int sec;

			// The sec field can overflow.
			sec = (cycle & 0xffffe000) >> 13;
			cycle = (++sec << 13) |
				((cycle & 0x00001fff) / CYCLES_PER_SECOND);
		}

		// In the OHCI 1394 specification, the lower 2 bits are
		// available for the sec field.
		cycle &= 0x00007fff;
	} else {
		cycle = -1;
	}

	list_for_each_entry(s, &d->streams, list) {
		int cycle_match;

		if (s->direction == AMDTP_IN_STREAM) {
			cycle_match = cycle;
		} else {
			// IT context starts immediately.
			cycle_match = -1;
			s->ctx_data.rx.seq_index = 0;
		}

		if (s != d->irq_target) {
			err = amdtp_stream_start(s, s->channel, s->speed,
						 cycle_match, queue_size, 0);
			if (err < 0)
				goto error;
		}
	}

	s = d->irq_target;
	s->ctx_data.rx.events_per_period = events_per_period;
	s->ctx_data.rx.event_count = 0;
	s->ctx_data.rx.seq_index = 0;

	idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
					 amdtp_rate_table[d->irq_target->sfc]);
	err = amdtp_stream_start(s, s->channel, s->speed, -1, queue_size,
				 idle_irq_interval);
	if (err < 0)
		goto error;

	return 0;
error:
	list_for_each_entry(s, &d->streams, list)
		amdtp_stream_stop(s);
	kfree(d->seq_descs);
	d->seq_descs = NULL;
	return err;
}
EXPORT_SYMBOL_GPL(amdtp_domain_start);

/**
 * amdtp_domain_stop - stop sending packets for the isoc contexts in the same domain.
 * @d: the AMDTP domain to which the isoc contexts belong.
 */
void amdtp_domain_stop(struct amdtp_domain *d)
{
	struct amdtp_stream *s, *next;

	if (d->irq_target)
		amdtp_stream_stop(d->irq_target);

	list_for_each_entry_safe(s, next, &d->streams, list) {
		list_del(&s->list);

		if (s != d->irq_target)
			amdtp_stream_stop(s);
	}

	d->events_per_period = 0;
	d->irq_target = NULL;

	kfree(d->seq_descs);
	d->seq_descs = NULL;
}
EXPORT_SYMBOL_GPL(amdtp_domain_stop);
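
/*
 * Usage sketch (illustrative only, not part of this module): a unit driver
 * would typically initialize its streams and a domain, register the streams
 * into the domain, then start and stop the whole domain.  The names
 * my_process_payloads and struct my_protocol below are hypothetical.
 *
 *	struct amdtp_domain domain;
 *	struct amdtp_stream rx_stream;
 *
 *	amdtp_stream_init(&rx_stream, unit, AMDTP_OUT_STREAM, CIP_BLOCKING,
 *			  CIP_FMT_AM, my_process_payloads,
 *			  sizeof(struct my_protocol));
 *	amdtp_stream_set_parameters(&rx_stream, 48000, data_block_quadlets);
 *	amdtp_domain_init(&domain);
 *	amdtp_domain_add_stream(&domain, &rx_stream, isoc_channel, SCODE_400);
 *	amdtp_domain_start(&domain, 0);
 *	...
 *	amdtp_domain_stop(&domain);
 *	amdtp_stream_destroy(&rx_stream);
 */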