/*
 * QEMU model of Xilinx AXI-DMA block.
 *
 * Copyright (c) 2011 Edgar E. Iglesias.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "hw/hw.h"
#include "hw/irq.h"
#include "hw/ptimer.h"
#include "hw/qdev-properties.h"
#include "qemu/log.h"
#include "qemu/module.h"

#include "sysemu/dma.h"
#include "hw/stream.h"
#include "qom/object.h"

#define D(x)

#define TYPE_XILINX_AXI_DMA "xlnx.axi-dma"
#define TYPE_XILINX_AXI_DMA_DATA_STREAM "xilinx-axi-dma-data-stream"
#define TYPE_XILINX_AXI_DMA_CONTROL_STREAM "xilinx-axi-dma-control-stream"

typedef struct XilinxAXIDMA XilinxAXIDMA;
DECLARE_INSTANCE_CHECKER(XilinxAXIDMA, XILINX_AXI_DMA,
                         TYPE_XILINX_AXI_DMA)

typedef struct XilinxAXIDMAStreamSlave XilinxAXIDMAStreamSlave;
DECLARE_INSTANCE_CHECKER(XilinxAXIDMAStreamSlave, XILINX_AXI_DMA_DATA_STREAM,
                         TYPE_XILINX_AXI_DMA_DATA_STREAM)

DECLARE_INSTANCE_CHECKER(XilinxAXIDMAStreamSlave, XILINX_AXI_DMA_CONTROL_STREAM,
                         TYPE_XILINX_AXI_DMA_CONTROL_STREAM)

#define R_DMACR     (0x00 / 4)
#define R_DMASR     (0x04 / 4)
#define R_CURDESC   (0x08 / 4)
#define R_TAILDESC  (0x10 / 4)
#define R_MAX       (0x30 / 4)

#define CONTROL_PAYLOAD_WORDS 5
#define CONTROL_PAYLOAD_SIZE (CONTROL_PAYLOAD_WORDS * (sizeof(uint32_t)))

enum {
    DMACR_RUNSTOP = 1,
    DMACR_TAILPTR_MODE = 2,
    DMACR_RESET = 4
};

enum {
    DMASR_HALTED = 1,
    DMASR_IDLE = 2,
    DMASR_IOC_IRQ = 1 << 12,
    DMASR_DLY_IRQ = 1 << 13,

    DMASR_IRQ_MASK = 7 << 12
};

struct SDesc {
    uint64_t nxtdesc;
    uint64_t buffer_address;
    uint64_t reserved;
    uint32_t control;
    uint32_t status;
    uint8_t app[CONTROL_PAYLOAD_SIZE];
};

enum {
    SDESC_CTRL_EOF = (1 << 26),
    SDESC_CTRL_SOF = (1 << 27),

    SDESC_CTRL_LEN_MASK = (1 << 23) - 1
};

enum {
    SDESC_STATUS_EOF = (1 << 26),
    SDESC_STATUS_SOF_BIT = 27,
    SDESC_STATUS_SOF = (1 << SDESC_STATUS_SOF_BIT),
    SDESC_STATUS_COMPLETE = (1 << 31)
};

struct Stream {
    struct XilinxAXIDMA *dma;
    ptimer_state *ptimer;
    qemu_irq irq;

    int nr;

    bool sof;
    struct SDesc desc;
    unsigned int complete_cnt;
    uint32_t regs[R_MAX];
    uint8_t app[20];
    unsigned char txbuf[16 * 1024];
};
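
/*
 * The DMA exposes two QOM stream-slave sinks, one for data and one for
 * control words.  The attached stream master pushes received payloads into
 * the data sink (S2MM direction) and per-packet application words into the
 * control sink; both sinks only hold a back-pointer to the DMA instance.
 */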
struct XilinxAXIDMAStreamSlave {
    Object parent;

    struct XilinxAXIDMA *dma;
};

struct XilinxAXIDMA {
    SysBusDevice busdev;
    MemoryRegion iomem;
    MemoryRegion *dma_mr;
    AddressSpace as;

    uint32_t freqhz;
    StreamSlave *tx_data_dev;
    StreamSlave *tx_control_dev;
    XilinxAXIDMAStreamSlave rx_data_dev;
    XilinxAXIDMAStreamSlave rx_control_dev;

    struct Stream streams[2];

    StreamCanPushNotifyFn notify;
    void *notify_opaque;
};

/*
 * Helper calls to extract info from descriptors and other trivial
 * state from regs.
 */
static inline int stream_desc_sof(struct SDesc *d)
{
    return d->control & SDESC_CTRL_SOF;
}

static inline int stream_desc_eof(struct SDesc *d)
{
    return d->control & SDESC_CTRL_EOF;
}

static inline int stream_resetting(struct Stream *s)
{
    return !!(s->regs[R_DMACR] & DMACR_RESET);
}

static inline int stream_running(struct Stream *s)
{
    return s->regs[R_DMACR] & DMACR_RUNSTOP;
}

static inline int stream_idle(struct Stream *s)
{
    return !!(s->regs[R_DMASR] & DMASR_IDLE);
}

static void stream_reset(struct Stream *s)
{
    s->regs[R_DMASR] = DMASR_HALTED;  /* starts up halted. */
    s->regs[R_DMACR] = 1 << 16; /* Starts with one in compl threshold. */
    s->sof = true;
}

/* Map an offset addr into a channel index. */
static inline int streamid_from_addr(hwaddr addr)
{
    int sid;

    sid = addr / (0x30);
    sid &= 1;
    return sid;
}

static void stream_desc_load(struct Stream *s, hwaddr addr)
{
    struct SDesc *d = &s->desc;

    address_space_read(&s->dma->as, addr, MEMTXATTRS_UNSPECIFIED, d, sizeof *d);

    /* Convert from LE into host endianness. */
    d->buffer_address = le64_to_cpu(d->buffer_address);
    d->nxtdesc = le64_to_cpu(d->nxtdesc);
    d->control = le32_to_cpu(d->control);
    d->status = le32_to_cpu(d->status);
}

static void stream_desc_store(struct Stream *s, hwaddr addr)
{
    struct SDesc *d = &s->desc;

    /* Convert from host endianness into LE. */
    d->buffer_address = cpu_to_le64(d->buffer_address);
    d->nxtdesc = cpu_to_le64(d->nxtdesc);
    d->control = cpu_to_le32(d->control);
    d->status = cpu_to_le32(d->status);
    address_space_write(&s->dma->as, addr, MEMTXATTRS_UNSPECIFIED,
                        d, sizeof *d);
}

static void stream_update_irq(struct Stream *s)
{
    unsigned int pending, mask, irq;

    pending = s->regs[R_DMASR] & DMASR_IRQ_MASK;
    mask = s->regs[R_DMACR] & DMASR_IRQ_MASK;

    irq = pending & mask;

    qemu_set_irq(s->irq, !!irq);
}

static void stream_reload_complete_cnt(struct Stream *s)
{
    unsigned int comp_th;
    comp_th = (s->regs[R_DMACR] >> 16) & 0xff;
    s->complete_cnt = comp_th;
}

static void timer_hit(void *opaque)
{
    struct Stream *s = opaque;

    stream_reload_complete_cnt(s);
    s->regs[R_DMASR] |= DMASR_DLY_IRQ;
    stream_update_irq(s);
}
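
/*
 * Called once per completed packet: decrement the completion counter loaded
 * from the DMACR IRQ-threshold field and set the IOC interrupt status when
 * it reaches zero.  A non-zero IRQ-delay field in DMACR re-arms the delay
 * timer, so a DLY interrupt fires if no further packets complete in time.
 */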
static void stream_complete(struct Stream *s)
{
    unsigned int comp_delay;

    /* Start the delayed timer. */
    ptimer_transaction_begin(s->ptimer);
    comp_delay = s->regs[R_DMACR] >> 24;
    if (comp_delay) {
        ptimer_stop(s->ptimer);
        ptimer_set_count(s->ptimer, comp_delay);
        ptimer_run(s->ptimer, 1);
    }

    s->complete_cnt--;
    if (s->complete_cnt == 0) {
        /* Raise the IOC irq. */
        s->regs[R_DMASR] |= DMASR_IOC_IRQ;
        stream_reload_complete_cnt(s);
    }
    ptimer_transaction_commit(s->ptimer);
}

static void stream_process_mem2s(struct Stream *s, StreamSlave *tx_data_dev,
                                 StreamSlave *tx_control_dev)
{
    uint32_t prev_d;
    uint32_t txlen;
    uint64_t addr;
    bool eop;

    if (!stream_running(s) || stream_idle(s)) {
        return;
    }

    while (1) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_HALTED;
            break;
        }

        if (stream_desc_sof(&s->desc)) {
            stream_push(tx_control_dev, s->desc.app, sizeof(s->desc.app), true);
        }

        txlen = s->desc.control & SDESC_CTRL_LEN_MASK;

        eop = stream_desc_eof(&s->desc);
        addr = s->desc.buffer_address;
        while (txlen) {
            unsigned int len;

            len = txlen > sizeof s->txbuf ? sizeof s->txbuf : txlen;
            address_space_read(&s->dma->as, addr,
                               MEMTXATTRS_UNSPECIFIED,
                               s->txbuf, len);
            stream_push(tx_data_dev, s->txbuf, len, eop && len == txlen);
            txlen -= len;
            addr += len;
        }

        if (eop) {
            stream_complete(s);
        }

        /* Update the descriptor. */
        s->desc.status = txlen | SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);

        /* Advance. */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }
}
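
/*
 * Write an incoming stream packet into guest memory, walking the S2MM
 * descriptor ring from CURDESC until the data is consumed or the tail
 * descriptor is reached.  Returns the number of bytes accepted.
 */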
static size_t stream_process_s2mem(struct Stream *s, unsigned char *buf,
                                   size_t len, bool eop)
{
    uint32_t prev_d;
    unsigned int rxlen;
    size_t pos = 0;

    if (!stream_running(s) || stream_idle(s)) {
        return 0;
    }

    while (len) {
        stream_desc_load(s, s->regs[R_CURDESC]);

        if (s->desc.status & SDESC_STATUS_COMPLETE) {
            s->regs[R_DMASR] |= DMASR_HALTED;
            break;
        }

        rxlen = s->desc.control & SDESC_CTRL_LEN_MASK;
        if (rxlen > len) {
            /* The remaining data fits in this descriptor's buffer. */
            rxlen = len;
        }

        address_space_write(&s->dma->as, s->desc.buffer_address,
                            MEMTXATTRS_UNSPECIFIED, buf + pos, rxlen);
        len -= rxlen;
        pos += rxlen;

        /* Update the descriptor. */
        if (eop) {
            stream_complete(s);
            memcpy(s->desc.app, s->app, sizeof(s->desc.app));
            s->desc.status |= SDESC_STATUS_EOF;
        }

        s->desc.status |= s->sof << SDESC_STATUS_SOF_BIT;
        s->desc.status |= SDESC_STATUS_COMPLETE;
        stream_desc_store(s, s->regs[R_CURDESC]);
        s->sof = eop;

        /* Advance. */
        prev_d = s->regs[R_CURDESC];
        s->regs[R_CURDESC] = s->desc.nxtdesc;
        if (prev_d == s->regs[R_TAILDESC]) {
            s->regs[R_DMASR] |= DMASR_IDLE;
            break;
        }
    }

    return pos;
}

static void xilinx_axidma_reset(DeviceState *dev)
{
    int i;
    XilinxAXIDMA *s = XILINX_AXI_DMA(dev);

    for (i = 0; i < 2; i++) {
        stream_reset(&s->streams[i]);
    }
}

static size_t
xilinx_axidma_control_stream_push(StreamSlave *obj, unsigned char *buf,
                                  size_t len, bool eop)
{
    XilinxAXIDMAStreamSlave *cs = XILINX_AXI_DMA_CONTROL_STREAM(obj);
    struct Stream *s = &cs->dma->streams[1];

    if (len != CONTROL_PAYLOAD_SIZE) {
        hw_error("AXI DMA requires %d byte control stream payload\n",
                 (int)CONTROL_PAYLOAD_SIZE);
    }

    memcpy(s->app, buf, len);
    return len;
}

static bool
xilinx_axidma_data_stream_can_push(StreamSlave *obj,
                                   StreamCanPushNotifyFn notify,
                                   void *notify_opaque)
{
    XilinxAXIDMAStreamSlave *ds = XILINX_AXI_DMA_DATA_STREAM(obj);
    struct Stream *s = &ds->dma->streams[1];

    if (!stream_running(s) || stream_idle(s)) {
        ds->dma->notify = notify;
        ds->dma->notify_opaque = notify_opaque;
        return false;
    }

    return true;
}

static size_t
xilinx_axidma_data_stream_push(StreamSlave *obj, unsigned char *buf, size_t len,
                               bool eop)
{
    XilinxAXIDMAStreamSlave *ds = XILINX_AXI_DMA_DATA_STREAM(obj);
    struct Stream *s = &ds->dma->streams[1];
    size_t ret;

    ret = stream_process_s2mem(s, buf, len, eop);
    stream_update_irq(s);
    return ret;
}

static uint64_t axidma_read(void *opaque, hwaddr addr,
                            unsigned size)
{
    XilinxAXIDMA *d = opaque;
    struct Stream *s;
    uint32_t r = 0;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
    case R_DMACR:
        /* Simulate one cycle's reset delay. */
        s->regs[addr] &= ~DMACR_RESET;
        r = s->regs[addr];
        break;
    case R_DMASR:
        s->regs[addr] &= 0xffff;
        s->regs[addr] |= (s->complete_cnt & 0xff) << 16;
        s->regs[addr] |= (ptimer_get_count(s->ptimer) & 0xff) << 24;
        r = s->regs[addr];
        break;
    default:
        r = s->regs[addr];
        D(qemu_log("%s ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                   __func__, sid, addr * 4, r));
        break;
    }
    return r;
}
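
/*
 * Register writes: a TAILDESC write kicks off MM2S descriptor processing on
 * channel 0, and any write to the S2MM channel (sid == 1) re-runs a pending
 * can-push notifier so a previously blocked stream master can retry.
 */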
static void axidma_write(void *opaque, hwaddr addr,
                         uint64_t value, unsigned size)
{
    XilinxAXIDMA *d = opaque;
    struct Stream *s;
    int sid;

    sid = streamid_from_addr(addr);
    s = &d->streams[sid];

    addr = addr % 0x30;
    addr >>= 2;
    switch (addr) {
    case R_DMACR:
        /* Tailptr mode is always on. */
        value |= DMACR_TAILPTR_MODE;
        /* Remember our previous reset state. */
        value |= (s->regs[addr] & DMACR_RESET);
        s->regs[addr] = value;

        if (value & DMACR_RESET) {
            stream_reset(s);
        }

        if ((value & 1) && !stream_resetting(s)) {
            /* Start processing. */
            s->regs[R_DMASR] &= ~(DMASR_HALTED | DMASR_IDLE);
        }
        stream_reload_complete_cnt(s);
        break;

    case R_DMASR:
        /* Mask away write to clear irq lines. */
        value &= ~(value & DMASR_IRQ_MASK);
        s->regs[addr] = value;
        break;

    case R_TAILDESC:
        s->regs[addr] = value;
        s->regs[R_DMASR] &= ~DMASR_IDLE; /* Not idle. */
        if (!sid) {
            stream_process_mem2s(s, d->tx_data_dev, d->tx_control_dev);
        }
        break;
    default:
        D(qemu_log("%s: ch=%d addr=" TARGET_FMT_plx " v=%x\n",
                   __func__, sid, addr * 4, (unsigned)value));
        s->regs[addr] = value;
        break;
    }
    if (sid == 1 && d->notify) {
        StreamCanPushNotifyFn notifytmp = d->notify;
        d->notify = NULL;
        notifytmp(d->notify_opaque);
    }
    stream_update_irq(s);
}

static const MemoryRegionOps axidma_ops = {
    .read = axidma_read,
    .write = axidma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void xilinx_axidma_realize(DeviceState *dev, Error **errp)
{
    XilinxAXIDMA *s = XILINX_AXI_DMA(dev);
    XilinxAXIDMAStreamSlave *ds = XILINX_AXI_DMA_DATA_STREAM(&s->rx_data_dev);
    XilinxAXIDMAStreamSlave *cs = XILINX_AXI_DMA_CONTROL_STREAM(
                                                            &s->rx_control_dev);
    int i;

    object_property_add_link(OBJECT(ds), "dma", TYPE_XILINX_AXI_DMA,
                             (Object **)&ds->dma,
                             object_property_allow_set_link,
                             OBJ_PROP_LINK_STRONG);
    object_property_add_link(OBJECT(cs), "dma", TYPE_XILINX_AXI_DMA,
                             (Object **)&cs->dma,
                             object_property_allow_set_link,
                             OBJ_PROP_LINK_STRONG);
    object_property_set_link(OBJECT(ds), "dma", OBJECT(s), &error_abort);
    object_property_set_link(OBJECT(cs), "dma", OBJECT(s), &error_abort);

    for (i = 0; i < 2; i++) {
        struct Stream *st = &s->streams[i];

        st->dma = s;
        st->nr = i;
        st->ptimer = ptimer_init(timer_hit, st, PTIMER_POLICY_DEFAULT);
        ptimer_transaction_begin(st->ptimer);
        ptimer_set_freq(st->ptimer, s->freqhz);
        ptimer_transaction_commit(st->ptimer);
    }

    address_space_init(&s->as,
                       s->dma_mr ? s->dma_mr : get_system_memory(), "dma");
}

static void xilinx_axidma_init(Object *obj)
{
    XilinxAXIDMA *s = XILINX_AXI_DMA(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    object_initialize_child(OBJECT(s), "axistream-connected-target",
                            &s->rx_data_dev, TYPE_XILINX_AXI_DMA_DATA_STREAM);
    object_initialize_child(OBJECT(s), "axistream-control-connected-target",
                            &s->rx_control_dev,
                            TYPE_XILINX_AXI_DMA_CONTROL_STREAM);
    object_property_add_link(obj, "dma", TYPE_MEMORY_REGION,
                             (Object **)&s->dma_mr,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_STRONG);

    sysbus_init_irq(sbd, &s->streams[0].irq);
    sysbus_init_irq(sbd, &s->streams[1].irq);

    memory_region_init_io(&s->iomem, obj, &axidma_ops, s,
                          "xlnx.axi-dma", R_MAX * 4 * 2);
    sysbus_init_mmio(sbd, &s->iomem);
}

static Property axidma_properties[] = {
    DEFINE_PROP_UINT32("freqhz", XilinxAXIDMA, freqhz, 50000000),
    DEFINE_PROP_LINK("axistream-connected", XilinxAXIDMA,
                     tx_data_dev, TYPE_STREAM_SLAVE, StreamSlave *),
    DEFINE_PROP_LINK("axistream-control-connected", XilinxAXIDMA,
                     tx_control_dev, TYPE_STREAM_SLAVE, StreamSlave *),
    DEFINE_PROP_END_OF_LIST(),
};

static void axidma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = xilinx_axidma_realize;
    dc->reset = xilinx_axidma_reset;
    device_class_set_props(dc, axidma_properties);
}

static StreamSlaveClass xilinx_axidma_data_stream_class = {
    .push = xilinx_axidma_data_stream_push,
    .can_push = xilinx_axidma_data_stream_can_push,
};

static StreamSlaveClass xilinx_axidma_control_stream_class = {
    .push = xilinx_axidma_control_stream_push,
};
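
/*
 * Both stream sink types share one class_init; the per-type callbacks are
 * passed in via TypeInfo.class_data and copied into the class here.
 */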
static void xilinx_axidma_stream_class_init(ObjectClass *klass, void *data)
{
    StreamSlaveClass *ssc = STREAM_SLAVE_CLASS(klass);

    ssc->push = ((StreamSlaveClass *)data)->push;
    ssc->can_push = ((StreamSlaveClass *)data)->can_push;
}

static const TypeInfo axidma_info = {
    .name          = TYPE_XILINX_AXI_DMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XilinxAXIDMA),
    .class_init    = axidma_class_init,
    .instance_init = xilinx_axidma_init,
};

static const TypeInfo xilinx_axidma_data_stream_info = {
    .name          = TYPE_XILINX_AXI_DMA_DATA_STREAM,
    .parent        = TYPE_OBJECT,
    .instance_size = sizeof(XilinxAXIDMAStreamSlave),
    .class_init    = xilinx_axidma_stream_class_init,
    .class_data    = &xilinx_axidma_data_stream_class,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SLAVE },
        { }
    }
};

static const TypeInfo xilinx_axidma_control_stream_info = {
    .name          = TYPE_XILINX_AXI_DMA_CONTROL_STREAM,
    .parent        = TYPE_OBJECT,
    .instance_size = sizeof(XilinxAXIDMAStreamSlave),
    .class_init    = xilinx_axidma_stream_class_init,
    .class_data    = &xilinx_axidma_control_stream_class,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_STREAM_SLAVE },
        { }
    }
};

static void xilinx_axidma_register_types(void)
{
    type_register_static(&axidma_info);
    type_register_static(&xilinx_axidma_data_stream_info);
    type_register_static(&xilinx_axidma_control_stream_info);
}

type_init(xilinx_axidma_register_types)