// SPDX-License-Identifier: GPL-2.0
/*
 * core.c - Implementation of core module of MOST Linux driver stack
 *
 * Copyright (C) 2013-2020 Microchip Technology Germany II GmbH & Co. KG
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/sysfs.h>
#include <linux/kthread.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/most.h>

#define MAX_CHANNELS	64
#define STRING_SIZE	80

static struct ida mdev_id;
static int dummy_num_buffers;
static struct list_head comp_list;

struct pipe {
	struct most_component *comp;
	int refs;
	int num_buffers;
};

struct most_channel {
	struct device dev;
	struct completion cleanup;
	atomic_t mbo_ref;
	atomic_t mbo_nq_level;
	u16 channel_id;
	char name[STRING_SIZE];
	bool is_poisoned;
	struct mutex start_mutex; /* channel activation synchronization */
	struct mutex nq_mutex; /* nq thread synchronization */
	int is_starving;
	struct most_interface *iface;
	struct most_channel_config cfg;
	bool keep_mbo;
	bool enqueue_halt;
	struct list_head fifo;
	spinlock_t fifo_lock; /* fifo access synchronization */
	struct list_head halt_fifo;
	struct list_head list;
	struct pipe pipe0;
	struct pipe pipe1;
	struct list_head trash_fifo;
	struct task_struct *hdm_enqueue_task;
	wait_queue_head_t hdm_fifo_wq;
};

#define to_channel(d) container_of(d, struct most_channel, dev)

struct interface_private {
	int dev_id;
	char name[STRING_SIZE];
	struct most_channel *channel[MAX_CHANNELS];
	struct list_head channel_list;
};

static const struct {
	int most_ch_data_type;
	const char *name;
} ch_data_type[] = {
	{ MOST_CH_CONTROL, "control" },
	{ MOST_CH_ASYNC, "async" },
	{ MOST_CH_SYNC, "sync" },
	{ MOST_CH_ISOC, "isoc"},
	{ MOST_CH_ISOC, "isoc_avp"},
};

/**
 * list_pop_mbo - retrieves the first MBO of the list and removes it
 * @ptr: the list head to grab the MBO from.
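 *
 * The list is not locked here; callers are expected to hold the
 * channel's fifo_lock around this macro.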
 */
#define list_pop_mbo(ptr)						\
({									\
	struct mbo *_mbo = list_first_entry(ptr, struct mbo, list);	\
	list_del(&_mbo->list);						\
	_mbo;								\
})

/**
 * most_free_mbo_coherent - free an MBO and its coherent buffer
 * @mbo: most buffer
 */
static void most_free_mbo_coherent(struct mbo *mbo)
{
	struct most_channel *c = mbo->context;
	u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	if (c->iface->dma_free)
		c->iface->dma_free(mbo, coherent_buf_size);
	else
		kfree(mbo->virt_address);
	kfree(mbo);
	if (atomic_sub_and_test(1, &c->mbo_ref))
		complete(&c->cleanup);
}

/**
 * flush_channel_fifos - clear the channel fifos
 * @c: pointer to channel object
 */
static void flush_channel_fifos(struct most_channel *c)
{
	unsigned long flags, hf_flags;
	struct mbo *mbo, *tmp;

	if (list_empty(&c->fifo) && list_empty(&c->halt_fifo))
		return;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	spin_lock_irqsave(&c->fifo_lock, hf_flags);
	list_for_each_entry_safe(mbo, tmp, &c->halt_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, hf_flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, hf_flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, hf_flags);

	if (unlikely((!list_empty(&c->fifo) || !list_empty(&c->halt_fifo))))
		dev_warn(&c->dev, "Channel or trash fifo not empty\n");
}

/**
 * flush_trash_fifo - clear the trash fifo
 * @c: pointer to channel object
 */
static int flush_trash_fifo(struct most_channel *c)
{
	struct mbo *mbo, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_for_each_entry_safe(mbo, tmp, &c->trash_fifo, list) {
		list_del(&mbo->list);
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		most_free_mbo_coherent(mbo);
		spin_lock_irqsave(&c->fifo_lock, flags);
	}
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return 0;
}

static ssize_t available_directions_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	strcpy(buf, "");
	if (c->iface->channel_vector[i].direction & MOST_CH_RX)
		strcat(buf, "rx ");
	if (c->iface->channel_vector[i].direction & MOST_CH_TX)
		strcat(buf, "tx ");
	strcat(buf, "\n");
	return strlen(buf);
}

static ssize_t available_datatypes_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	strcpy(buf, "");
	if (c->iface->channel_vector[i].data_type & MOST_CH_CONTROL)
		strcat(buf, "control ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ASYNC)
		strcat(buf, "async ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_SYNC)
		strcat(buf, "sync ");
	if (c->iface->channel_vector[i].data_type & MOST_CH_ISOC)
		strcat(buf, "isoc ");
	strcat(buf, "\n");
	return strlen(buf);
}

static ssize_t number_of_packet_buffers_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_packet);
}

static ssize_t number_of_stream_buffers_show(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].num_buffers_streaming);
}

static ssize_t size_of_packet_buffer_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_packet);
}

static ssize_t size_of_stream_buffer_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct most_channel *c = to_channel(dev);
	unsigned int i = c->channel_id;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			c->iface->channel_vector[i].buffer_size_streaming);
}

static ssize_t channel_starving_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->is_starving);
}

static ssize_t set_number_of_buffers_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.num_buffers);
}

static ssize_t set_buffer_size_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.buffer_size);
}

static ssize_t set_direction_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct most_channel *c = to_channel(dev);

	if (c->cfg.direction & MOST_CH_TX)
		return snprintf(buf, PAGE_SIZE, "tx\n");
	else if (c->cfg.direction & MOST_CH_RX)
		return snprintf(buf, PAGE_SIZE, "rx\n");
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}

static ssize_t set_datatype_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	int i;
	struct most_channel *c = to_channel(dev);

	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
		if (c->cfg.data_type & ch_data_type[i].most_ch_data_type)
			return snprintf(buf, PAGE_SIZE, "%s",
					ch_data_type[i].name);
	}
	return snprintf(buf, PAGE_SIZE, "unconfigured\n");
}

static ssize_t set_subbuffer_size_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.subbuffer_size);
}

static ssize_t set_packets_per_xact_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.packets_per_xact);
}

static ssize_t set_dbr_size_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct most_channel *c = to_channel(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", c->cfg.dbr_size);
}

#define to_dev_attr(a) container_of(a, struct device_attribute, attr)
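
/*
 * Hide attributes that only apply to a specific type of HDM: set_dbr_size
 * is only meaningful for MediaLB DIM2 interfaces and set_packets_per_xact
 * only for USB interfaces.
 */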
static umode_t channel_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int index)
{
	struct device_attribute *dev_attr = to_dev_attr(attr);
	struct device *dev = kobj_to_dev(kobj);
	struct most_channel *c = to_channel(dev);

	if (!strcmp(dev_attr->attr.name, "set_dbr_size") &&
	    (c->iface->interface != ITYPE_MEDIALB_DIM2))
		return 0;
	if (!strcmp(dev_attr->attr.name, "set_packets_per_xact") &&
	    (c->iface->interface != ITYPE_USB))
		return 0;

	return attr->mode;
}

#define DEV_ATTR(_name) (&dev_attr_##_name.attr)

static DEVICE_ATTR_RO(available_directions);
static DEVICE_ATTR_RO(available_datatypes);
static DEVICE_ATTR_RO(number_of_packet_buffers);
static DEVICE_ATTR_RO(number_of_stream_buffers);
static DEVICE_ATTR_RO(size_of_stream_buffer);
static DEVICE_ATTR_RO(size_of_packet_buffer);
static DEVICE_ATTR_RO(channel_starving);
static DEVICE_ATTR_RO(set_buffer_size);
static DEVICE_ATTR_RO(set_number_of_buffers);
static DEVICE_ATTR_RO(set_direction);
static DEVICE_ATTR_RO(set_datatype);
static DEVICE_ATTR_RO(set_subbuffer_size);
static DEVICE_ATTR_RO(set_packets_per_xact);
static DEVICE_ATTR_RO(set_dbr_size);

static struct attribute *channel_attrs[] = {
	DEV_ATTR(available_directions),
	DEV_ATTR(available_datatypes),
	DEV_ATTR(number_of_packet_buffers),
	DEV_ATTR(number_of_stream_buffers),
	DEV_ATTR(size_of_stream_buffer),
	DEV_ATTR(size_of_packet_buffer),
	DEV_ATTR(channel_starving),
	DEV_ATTR(set_buffer_size),
	DEV_ATTR(set_number_of_buffers),
	DEV_ATTR(set_direction),
	DEV_ATTR(set_datatype),
	DEV_ATTR(set_subbuffer_size),
	DEV_ATTR(set_packets_per_xact),
	DEV_ATTR(set_dbr_size),
	NULL,
};

static struct attribute_group channel_attr_group = {
	.attrs = channel_attrs,
	.is_visible = channel_attr_is_visible,
};

static const struct attribute_group *channel_attr_groups[] = {
	&channel_attr_group,
	NULL,
};

static ssize_t description_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct most_interface *iface = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%s\n", iface->description);
}

static ssize_t interface_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct most_interface *iface = dev_get_drvdata(dev);

	switch (iface->interface) {
	case ITYPE_LOOPBACK:
		return snprintf(buf, PAGE_SIZE, "loopback\n");
	case ITYPE_I2C:
		return snprintf(buf, PAGE_SIZE, "i2c\n");
	case ITYPE_I2S:
		return snprintf(buf, PAGE_SIZE, "i2s\n");
	case ITYPE_TSI:
		return snprintf(buf, PAGE_SIZE, "tsi\n");
	case ITYPE_HBI:
		return snprintf(buf, PAGE_SIZE, "hbi\n");
	case ITYPE_MEDIALB_DIM:
		return snprintf(buf, PAGE_SIZE, "mlb_dim\n");
	case ITYPE_MEDIALB_DIM2:
		return snprintf(buf, PAGE_SIZE, "mlb_dim2\n");
	case ITYPE_USB:
		return snprintf(buf, PAGE_SIZE, "usb\n");
	case ITYPE_PCIE:
		return snprintf(buf, PAGE_SIZE, "pcie\n");
	}
	return snprintf(buf, PAGE_SIZE, "unknown\n");
}

static DEVICE_ATTR_RO(description);
static DEVICE_ATTR_RO(interface);

static struct attribute *interface_attrs[] = {
	DEV_ATTR(description),
	DEV_ATTR(interface),
	NULL,
};

static struct attribute_group interface_attr_group = {
	.attrs = interface_attrs,
};

static const struct attribute_group *interface_attr_groups[] = {
	&interface_attr_group,
	NULL,
};

static struct most_component *match_component(char *name)
{
	struct most_component *comp;

	list_for_each_entry(comp, &comp_list, list) {
		if (!strcmp(comp->name, name))
			return comp;
	}
	return NULL;
}

struct show_links_data {
	int offs;
	char *buf;
};

static int print_links(struct device *dev, void *data)
{
	struct show_links_data *d = data;
	int offs = d->offs;
	char *buf = d->buf;
	struct most_channel *c;
	struct most_interface *iface = dev_get_drvdata(dev);

	list_for_each_entry(c, &iface->p->channel_list, list) {
		if (c->pipe0.comp) {
			offs += scnprintf(buf + offs,
					  PAGE_SIZE - offs,
					  "%s:%s:%s\n",
					  c->pipe0.comp->name,
					  dev_name(iface->dev),
					  dev_name(&c->dev));
		}
		if (c->pipe1.comp) {
			offs += scnprintf(buf + offs,
					  PAGE_SIZE - offs,
					  "%s:%s:%s\n",
					  c->pipe1.comp->name,
					  dev_name(iface->dev),
					  dev_name(&c->dev));
		}
	}
	d->offs = offs;
	return 0;
}

static int most_match(struct device *dev, struct device_driver *drv)
{
	if (!strcmp(dev_name(dev), "most"))
		return 0;
	else
		return 1;
}

static struct bus_type mostbus = {
	.name = "most",
	.match = most_match,
};

static ssize_t links_show(struct device_driver *drv, char *buf)
{
	struct show_links_data d = { .buf = buf };

	bus_for_each_dev(&mostbus, NULL, &d, print_links);
	return d.offs;
}

static ssize_t components_show(struct device_driver *drv, char *buf)
{
	struct most_component *comp;
	int offs = 0;

	list_for_each_entry(comp, &comp_list, list) {
		offs += scnprintf(buf + offs, PAGE_SIZE - offs, "%s\n",
				  comp->name);
	}
	return offs;
}

/**
 * get_channel - get pointer to channel
 * @mdev: name of the device interface
 * @mdev_ch: name of channel
 */
static struct most_channel *get_channel(char *mdev, char *mdev_ch)
{
	struct device *dev = NULL;
	struct most_interface *iface;
	struct most_channel *c, *tmp;

	dev = bus_find_device_by_name(&mostbus, NULL, mdev);
	if (!dev)
		return NULL;
	put_device(dev);
	iface = dev_get_drvdata(dev);
	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
		if (!strcmp(dev_name(&c->dev), mdev_ch))
			return c;
	}
	return NULL;
}

static
inline int link_channel_to_component(struct most_channel *c,
				     struct most_component *comp,
				     char *name,
				     char *comp_param)
{
	int ret;
	struct most_component **comp_ptr;

	if (!c->pipe0.comp)
		comp_ptr = &c->pipe0.comp;
	else if (!c->pipe1.comp)
		comp_ptr = &c->pipe1.comp;
	else
		return -ENOSPC;

	*comp_ptr = comp;
	ret = comp->probe_channel(c->iface, c->channel_id, &c->cfg, name,
				  comp_param);
	if (ret) {
		*comp_ptr = NULL;
		return ret;
	}
	return 0;
}

int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	c->cfg.buffer_size = val;
	return 0;
}

int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	c->cfg.subbuffer_size = val;
	return 0;
}

int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	c->cfg.dbr_size = val;
	return 0;
}

int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	c->cfg.num_buffers = val;
	return 0;
}

int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf)
{
	int i;
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) {
		if (!strcmp(buf, ch_data_type[i].name)) {
			c->cfg.data_type = ch_data_type[i].most_ch_data_type;
			break;
		}
	}

	if (i == ARRAY_SIZE(ch_data_type))
		dev_warn(&c->dev, "Invalid attribute settings\n");
	return 0;
}

int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	if (!strcmp(buf, "dir_rx")) {
		c->cfg.direction = MOST_CH_RX;
	} else if (!strcmp(buf, "rx")) {
		c->cfg.direction = MOST_CH_RX;
	} else if (!strcmp(buf, "dir_tx")) {
		c->cfg.direction = MOST_CH_TX;
	} else if (!strcmp(buf, "tx")) {
		c->cfg.direction = MOST_CH_TX;
	} else {
		dev_err(&c->dev, "Invalid direction\n");
		return -ENODATA;
	}
	return 0;
}

int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);

	if (!c)
		return -ENODEV;
	c->cfg.packets_per_xact = val;
	return 0;
}

int most_cfg_complete(char *comp_name)
{
	struct most_component *comp;

	comp = match_component(comp_name);
	if (!comp)
		return -ENODEV;

	return comp->cfg_complete();
}

int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
		  char *comp_param)
{
	struct most_channel *c = get_channel(mdev, mdev_ch);
	struct most_component *comp = match_component(comp_name);

	if (!c || !comp)
		return -ENODEV;

	return link_channel_to_component(c, comp, link_name, comp_param);
}

int most_remove_link(char *mdev, char *mdev_ch, char *comp_name)
{
	struct most_channel *c;
	struct most_component *comp;

	comp = match_component(comp_name);
	if (!comp)
		return -ENODEV;
	c = get_channel(mdev, mdev_ch);
	if (!c)
		return -ENODEV;

	if (comp->disconnect_channel(c->iface, c->channel_id))
		return -EIO;
	if (c->pipe0.comp == comp)
		c->pipe0.comp = NULL;
	if (c->pipe1.comp == comp)
		c->pipe1.comp = NULL;
	return 0;
}

#define DRV_ATTR(_name) (&driver_attr_##_name.attr)

static DRIVER_ATTR_RO(links);
static DRIVER_ATTR_RO(components);

static struct attribute *mc_attrs[] = {
	DRV_ATTR(links),
	DRV_ATTR(components),
	NULL,
};

static struct attribute_group mc_attr_group = {
	.attrs = mc_attrs,
};

static const struct attribute_group *mc_attr_groups[] = {
	&mc_attr_group,
	NULL,
};

static struct device_driver mostbus_driver = {
	.name = "most_core",
	.bus = &mostbus,
	.groups = mc_attr_groups,
};

static inline void trash_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add(&mbo->list, &c->trash_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
}

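/*
 * hdm_mbo_ready - check whether a buffer is waiting for the HDM
 *
 * Returns true only if enqueueing is not halted (see most_stop_enqueue())
 * and the channel's halt_fifo holds at least one MBO.
 */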
static bool hdm_mbo_ready(struct most_channel *c)
{
	bool empty;

	if (c->enqueue_halt)
		return false;

	spin_lock_irq(&c->fifo_lock);
	empty = list_empty(&c->halt_fifo);
	spin_unlock_irq(&c->fifo_lock);

	return !empty;
}

static void nq_hdm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c = mbo->context;

	spin_lock_irqsave(&c->fifo_lock, flags);
	list_add_tail(&mbo->list, &c->halt_fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	wake_up_interruptible(&c->hdm_fifo_wq);
}

static int hdm_enqueue_thread(void *data)
{
	struct most_channel *c = data;
	struct mbo *mbo;
	int ret;
	typeof(c->iface->enqueue) enqueue = c->iface->enqueue;

	while (likely(!kthread_should_stop())) {
		wait_event_interruptible(c->hdm_fifo_wq,
					 hdm_mbo_ready(c) ||
					 kthread_should_stop());

		mutex_lock(&c->nq_mutex);
		spin_lock_irq(&c->fifo_lock);
		if (unlikely(c->enqueue_halt || list_empty(&c->halt_fifo))) {
			spin_unlock_irq(&c->fifo_lock);
			mutex_unlock(&c->nq_mutex);
			continue;
		}

		mbo = list_pop_mbo(&c->halt_fifo);
		spin_unlock_irq(&c->fifo_lock);

		if (c->cfg.direction == MOST_CH_RX)
			mbo->buffer_length = c->cfg.buffer_size;

		ret = enqueue(mbo->ifp, mbo->hdm_channel_id, mbo);
		mutex_unlock(&c->nq_mutex);

		if (unlikely(ret)) {
			dev_err(&c->dev, "Buffer enqueue failed\n");
			nq_hdm_mbo(mbo);
			c->hdm_enqueue_task = NULL;
			return 0;
		}
	}

	return 0;
}

static int run_enqueue_thread(struct most_channel *c, int channel_id)
{
	struct task_struct *task =
		kthread_run(hdm_enqueue_thread, c, "hdm_fifo_%d",
			    channel_id);

	if (IS_ERR(task))
		return PTR_ERR(task);

	c->hdm_enqueue_task = task;
	return 0;
}

/**
 * arm_mbo - recycle MBO for further usage
 * @mbo: most buffer
 *
 * This puts an MBO back to the list to have it ready for upcoming
 * tx transactions.
 *
 * In case the MBO belongs to a channel that recently has been
 * poisoned, the MBO is scheduled to be trashed.
 * Calls the completion handler of an attached component.
 */
static void arm_mbo(struct mbo *mbo)
{
	unsigned long flags;
	struct most_channel *c;

	c = mbo->context;

	if (c->is_poisoned) {
		trash_mbo(mbo);
		return;
	}

	spin_lock_irqsave(&c->fifo_lock, flags);
	++*mbo->num_buffers_ptr;
	list_add_tail(&mbo->list, &c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	if (c->pipe0.refs && c->pipe0.comp->tx_completion)
		c->pipe0.comp->tx_completion(c->iface, c->channel_id);

	if (c->pipe1.refs && c->pipe1.comp->tx_completion)
		c->pipe1.comp->tx_completion(c->iface, c->channel_id);
}

/**
 * arm_mbo_chain - helper function that arms an MBO chain for the HDM
 * @c: pointer to interface channel
 * @dir: direction of the channel
 * @compl: pointer to completion function
 *
 * This allocates buffer objects including the containing DMA coherent
 * buffer and puts them in the fifo.
 * Buffers of Rx channels are put in the kthread fifo, hence immediately
 * submitted to the HDM.
 *
 * Returns the number of allocated and enqueued MBOs.
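 * If an allocation fails, the buffers allocated so far are freed again and
 * 0 is returned.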
 */
static int arm_mbo_chain(struct most_channel *c, int dir,
			 void (*compl)(struct mbo *))
{
	unsigned int i;
	struct mbo *mbo;
	unsigned long flags;
	u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;

	atomic_set(&c->mbo_nq_level, 0);

	for (i = 0; i < c->cfg.num_buffers; i++) {
		mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
		if (!mbo)
			goto flush_fifos;

		mbo->context = c;
		mbo->ifp = c->iface;
		mbo->hdm_channel_id = c->channel_id;
		if (c->iface->dma_alloc) {
			mbo->virt_address =
				c->iface->dma_alloc(mbo, coherent_buf_size);
		} else {
			mbo->virt_address =
				kzalloc(coherent_buf_size, GFP_KERNEL);
		}
		if (!mbo->virt_address)
			goto release_mbo;

		mbo->complete = compl;
		mbo->num_buffers_ptr = &dummy_num_buffers;
		if (dir == MOST_CH_RX) {
			nq_hdm_mbo(mbo);
			atomic_inc(&c->mbo_nq_level);
		} else {
			spin_lock_irqsave(&c->fifo_lock, flags);
			list_add_tail(&mbo->list, &c->fifo);
			spin_unlock_irqrestore(&c->fifo_lock, flags);
		}
	}
	return c->cfg.num_buffers;

release_mbo:
	kfree(mbo);

flush_fifos:
	flush_channel_fifos(c);
	return 0;
}

/**
 * most_submit_mbo - submits an MBO to fifo
 * @mbo: most buffer
 */
void most_submit_mbo(struct mbo *mbo)
{
	if (WARN_ONCE(!mbo || !mbo->context,
		      "Bad buffer or missing channel reference\n"))
		return;

	nq_hdm_mbo(mbo);
}
EXPORT_SYMBOL_GPL(most_submit_mbo);

/**
 * most_write_completion - write completion handler
 * @mbo: most buffer
 *
 * This recycles the MBO for further usage. In case the channel has been
 * poisoned, the MBO is scheduled to be trashed.
 */
static void most_write_completion(struct mbo *mbo)
{
	struct most_channel *c;

	c = mbo->context;
	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE)))
		trash_mbo(mbo);
	else
		arm_mbo(mbo);
}

int channel_has_mbo(struct most_interface *iface, int id,
		    struct most_component *comp)
{
	struct most_channel *c = iface->p->channel[id];
	unsigned long flags;
	int empty;

	if (unlikely(!c))
		return -EINVAL;

	if (c->pipe0.refs && c->pipe1.refs &&
	    ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
	     (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
		return 0;

	spin_lock_irqsave(&c->fifo_lock, flags);
	empty = list_empty(&c->fifo);
	spin_unlock_irqrestore(&c->fifo_lock, flags);
	return !empty;
}
EXPORT_SYMBOL_GPL(channel_has_mbo);

/**
 * most_get_mbo - get pointer to an MBO of pool
 * @iface: pointer to interface instance
 * @id: channel ID
 * @comp: driver component
 *
 * This attempts to get a free buffer out of the channel fifo.
 * Returns a pointer to MBO on success or NULL otherwise.
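 *
 * A component's TX path typically uses it roughly like this (sketch only;
 * my_comp, data and len are placeholders):
 *
 *	mbo = most_get_mbo(iface, channel_id, &my_comp);
 *	if (mbo) {
 *		memcpy(mbo->virt_address, data, len);
 *		mbo->buffer_length = len;
 *		most_submit_mbo(mbo);
 *	}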
 */
struct mbo *most_get_mbo(struct most_interface *iface, int id,
			 struct most_component *comp)
{
	struct mbo *mbo;
	struct most_channel *c;
	unsigned long flags;
	int *num_buffers_ptr;

	c = iface->p->channel[id];
	if (unlikely(!c))
		return NULL;

	if (c->pipe0.refs && c->pipe1.refs &&
	    ((comp == c->pipe0.comp && c->pipe0.num_buffers <= 0) ||
	     (comp == c->pipe1.comp && c->pipe1.num_buffers <= 0)))
		return NULL;

	if (comp == c->pipe0.comp)
		num_buffers_ptr = &c->pipe0.num_buffers;
	else if (comp == c->pipe1.comp)
		num_buffers_ptr = &c->pipe1.num_buffers;
	else
		num_buffers_ptr = &dummy_num_buffers;

	spin_lock_irqsave(&c->fifo_lock, flags);
	if (list_empty(&c->fifo)) {
		spin_unlock_irqrestore(&c->fifo_lock, flags);
		return NULL;
	}
	mbo = list_pop_mbo(&c->fifo);
	--*num_buffers_ptr;
	spin_unlock_irqrestore(&c->fifo_lock, flags);

	mbo->num_buffers_ptr = num_buffers_ptr;
	mbo->buffer_length = c->cfg.buffer_size;
	return mbo;
}
EXPORT_SYMBOL_GPL(most_get_mbo);

/**
 * most_put_mbo - return buffer to pool
 * @mbo: most buffer
 */
void most_put_mbo(struct mbo *mbo)
{
	struct most_channel *c = mbo->context;

	if (c->cfg.direction == MOST_CH_TX) {
		arm_mbo(mbo);
		return;
	}
	nq_hdm_mbo(mbo);
	atomic_inc(&c->mbo_nq_level);
}
EXPORT_SYMBOL_GPL(most_put_mbo);

/**
 * most_read_completion - read completion handler
 * @mbo: most buffer
 *
 * This function is called by the HDM when data has been received from the
 * hardware and copied to the buffer of the MBO.
 *
 * In case the channel has been poisoned it puts the buffer in the trash queue.
 * Otherwise, it passes the buffer to a component for further processing.
 */
static void most_read_completion(struct mbo *mbo)
{
	struct most_channel *c = mbo->context;

	if (unlikely(c->is_poisoned || (mbo->status == MBO_E_CLOSE))) {
		trash_mbo(mbo);
		return;
	}

	if (mbo->status == MBO_E_INVAL) {
		nq_hdm_mbo(mbo);
		atomic_inc(&c->mbo_nq_level);
		return;
	}

	if (atomic_sub_and_test(1, &c->mbo_nq_level))
		c->is_starving = 1;

	if (c->pipe0.refs && c->pipe0.comp->rx_completion &&
	    c->pipe0.comp->rx_completion(mbo) == 0)
		return;

	if (c->pipe1.refs && c->pipe1.comp->rx_completion &&
	    c->pipe1.comp->rx_completion(mbo) == 0)
		return;

	most_put_mbo(mbo);
}

/**
 * most_start_channel - prepares a channel for communication
 * @iface: pointer to interface instance
 * @id: channel ID
 * @comp: driver component
 *
 * This prepares the channel for usage. Cross-checks whether the
 * channel's been properly configured.
 *
 * Returns 0 on success or error code otherwise.
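 *
 * Only the first component to start a channel triggers the actual hardware
 * configuration, buffer allocation and enqueue thread creation; a second
 * component merely increases the reference count.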
 */
int most_start_channel(struct most_interface *iface, int id,
		       struct most_component *comp)
{
	int num_buffer;
	int ret;
	struct most_channel *c = iface->p->channel[id];

	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->pipe0.refs + c->pipe1.refs > 0)
		goto out; /* already started by another component */

	if (!try_module_get(iface->mod)) {
		dev_err(&c->dev, "Failed to acquire HDM lock\n");
		mutex_unlock(&c->start_mutex);
		return -ENOLCK;
	}

	c->cfg.extra_len = 0;
	if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
		dev_err(&c->dev, "Channel configuration failed. Go check settings...\n");
		ret = -EINVAL;
		goto err_put_module;
	}

	init_waitqueue_head(&c->hdm_fifo_wq);

	if (c->cfg.direction == MOST_CH_RX)
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_read_completion);
	else
		num_buffer = arm_mbo_chain(c, c->cfg.direction,
					   most_write_completion);
	if (unlikely(!num_buffer)) {
		ret = -ENOMEM;
		goto err_put_module;
	}

	ret = run_enqueue_thread(c, id);
	if (ret)
		goto err_put_module;

	c->is_starving = 0;
	c->pipe0.num_buffers = c->cfg.num_buffers / 2;
	c->pipe1.num_buffers = c->cfg.num_buffers - c->pipe0.num_buffers;
	atomic_set(&c->mbo_ref, num_buffer);

out:
	if (comp == c->pipe0.comp)
		c->pipe0.refs++;
	if (comp == c->pipe1.comp)
		c->pipe1.refs++;
	mutex_unlock(&c->start_mutex);
	return 0;

err_put_module:
	module_put(iface->mod);
	mutex_unlock(&c->start_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(most_start_channel);

/**
 * most_stop_channel - stops a running channel
 * @iface: pointer to interface instance
 * @id: channel ID
 * @comp: driver component
 */
int most_stop_channel(struct most_interface *iface, int id,
		      struct most_component *comp)
{
	struct most_channel *c;

	if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
		pr_err("Bad interface or index out of range\n");
		return -EINVAL;
	}
	c = iface->p->channel[id];
	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	if (c->pipe0.refs + c->pipe1.refs >= 2)
		goto out;

	if (c->hdm_enqueue_task)
		kthread_stop(c->hdm_enqueue_task);
	c->hdm_enqueue_task = NULL;

	if (iface->mod)
		module_put(iface->mod);

	c->is_poisoned = true;
	if (c->iface->poison_channel(c->iface, c->channel_id)) {
		dev_err(&c->dev, "Failed to stop channel %d of interface %s\n", c->channel_id,
			c->iface->description);
		mutex_unlock(&c->start_mutex);
		return -EAGAIN;
	}
	flush_trash_fifo(c);
	flush_channel_fifos(c);

#ifdef CMPL_INTERRUPTIBLE
	if (wait_for_completion_interruptible(&c->cleanup)) {
		dev_err(&c->dev, "Interrupted while cleaning up channel %d\n", c->channel_id);
		mutex_unlock(&c->start_mutex);
		return -EINTR;
	}
#else
	wait_for_completion(&c->cleanup);
#endif
	c->is_poisoned = false;

out:
	if (comp == c->pipe0.comp)
		c->pipe0.refs--;
	if (comp == c->pipe1.comp)
		c->pipe1.refs--;
	mutex_unlock(&c->start_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(most_stop_channel);

/**
 * most_register_component - registers a driver component with the core
 * @comp: driver component
 */
int most_register_component(struct most_component *comp)
{
	if (!comp) {
		pr_err("Bad component\n");
		return -EINVAL;
	}
	list_add_tail(&comp->list, &comp_list);
	return 0;
}
EXPORT_SYMBOL_GPL(most_register_component);

static int disconnect_channels(struct device *dev, void *data)
{
	struct most_interface *iface;
	struct most_channel *c, *tmp;
	struct most_component *comp = data;

	iface = dev_get_drvdata(dev);
	list_for_each_entry_safe(c, tmp, &iface->p->channel_list, list) {
		if (c->pipe0.comp == comp || c->pipe1.comp == comp)
			comp->disconnect_channel(c->iface, c->channel_id);
		if (c->pipe0.comp == comp)
			c->pipe0.comp = NULL;
		if (c->pipe1.comp == comp)
			c->pipe1.comp = NULL;
	}
	return 0;
}

/**
 * most_deregister_component - deregisters a driver component with the core
 * @comp: driver component
 */
int most_deregister_component(struct most_component *comp)
{
	if (!comp) {
		pr_err("Bad component\n");
		return -EINVAL;
	}

	bus_for_each_dev(&mostbus, NULL, comp, disconnect_channels);
	list_del(&comp->list);
	return 0;
}
EXPORT_SYMBOL_GPL(most_deregister_component);

static void release_channel(struct device *dev)
{
	struct most_channel *c = to_channel(dev);

	kfree(c);
}

/**
 * most_register_interface - registers an interface with core
 * @iface: device interface
 *
 * Allocates and initializes a new interface instance and all of its channels.
 * Returns 0 on success or a negative error code otherwise.
 */
int most_register_interface(struct most_interface *iface)
{
	unsigned int i;
	int id;
	struct most_channel *c;

	if (!iface || !iface->enqueue || !iface->configure ||
	    !iface->poison_channel || (iface->num_channels > MAX_CHANNELS)) {
		pr_err("Bad interface or channel overflow\n");
		return -EINVAL;
	}

	id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
	if (id < 0) {
		dev_err(iface->dev, "Failed to allocate device ID\n");
		return id;
	}

	iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL);
	if (!iface->p) {
		ida_simple_remove(&mdev_id, id);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&iface->p->channel_list);
	iface->p->dev_id = id;
	strscpy(iface->p->name, iface->description, sizeof(iface->p->name));
	iface->dev->bus = &mostbus;
	iface->dev->groups = interface_attr_groups;
	dev_set_drvdata(iface->dev, iface);
	if (device_register(iface->dev)) {
		dev_err(iface->dev, "Failed to register interface device\n");
		kfree(iface->p);
		put_device(iface->dev);
		ida_simple_remove(&mdev_id, id);
		return -ENOMEM;
	}

	for (i = 0; i < iface->num_channels; i++) {
		const char *name_suffix = iface->channel_vector[i].name_suffix;

		c = kzalloc(sizeof(*c), GFP_KERNEL);
		if (!c)
			goto err_free_resources;
		if (!name_suffix)
			snprintf(c->name, STRING_SIZE, "ch%d", i);
		else
			snprintf(c->name, STRING_SIZE, "%s", name_suffix);
		c->dev.init_name = c->name;
		c->dev.parent = iface->dev;
		c->dev.groups = channel_attr_groups;
		c->dev.release = release_channel;
		iface->p->channel[i] = c;
		c->is_starving = 0;
		c->iface = iface;
		c->channel_id = i;
		c->keep_mbo = false;
		c->enqueue_halt = false;
		c->is_poisoned = false;
		c->cfg.direction = 0;
		c->cfg.data_type = 0;
		c->cfg.num_buffers = 0;
		c->cfg.buffer_size = 0;
		c->cfg.subbuffer_size = 0;
		c->cfg.packets_per_xact = 0;
		spin_lock_init(&c->fifo_lock);
		INIT_LIST_HEAD(&c->fifo);
		INIT_LIST_HEAD(&c->trash_fifo);
		INIT_LIST_HEAD(&c->halt_fifo);
		init_completion(&c->cleanup);
		atomic_set(&c->mbo_ref, 0);
		mutex_init(&c->start_mutex);
		mutex_init(&c->nq_mutex);
		list_add_tail(&c->list, &iface->p->channel_list);
		if (device_register(&c->dev)) {
			dev_err(&c->dev, "Failed to register channel device\n");
			goto err_free_most_channel;
		}
	}
	most_interface_register_notify(iface->description);
	return 0;

err_free_most_channel:
	put_device(&c->dev);

err_free_resources:
	while (i > 0) {
		c = iface->p->channel[--i];
		device_unregister(&c->dev);
	}
	kfree(iface->p);
	device_unregister(iface->dev);
	ida_simple_remove(&mdev_id, id);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(most_register_interface);

/**
 * most_deregister_interface - deregisters an interface with core
 * @iface: device interface
 *
 * Before removing an interface instance from the list, all running
 * channels are stopped and poisoned.
 */
void most_deregister_interface(struct most_interface *iface)
{
	int i;
	struct most_channel *c;

	for (i = 0; i < iface->num_channels; i++) {
		c = iface->p->channel[i];
		if (c->pipe0.comp)
			c->pipe0.comp->disconnect_channel(c->iface,
							  c->channel_id);
		if (c->pipe1.comp)
			c->pipe1.comp->disconnect_channel(c->iface,
							  c->channel_id);
		c->pipe0.comp = NULL;
		c->pipe1.comp = NULL;
		list_del(&c->list);
		device_unregister(&c->dev);
	}

	ida_simple_remove(&mdev_id, iface->p->dev_id);
	kfree(iface->p);
	device_unregister(iface->dev);
}
EXPORT_SYMBOL_GPL(most_deregister_interface);

/**
 * most_stop_enqueue - prevents core from enqueueing MBOs
 * @iface: pointer to interface
 * @id: channel id
 *
 * This is called by an HDM that _cannot_ attend to its duties and
 * would otherwise be overrun by the core. The core will not enqueue
 * any further packets until the flagging HDM calls
 * most_resume_enqueue().
 */
void most_stop_enqueue(struct most_interface *iface, int id)
{
	struct most_channel *c = iface->p->channel[id];

	if (!c)
		return;

	mutex_lock(&c->nq_mutex);
	c->enqueue_halt = true;
	mutex_unlock(&c->nq_mutex);
}
EXPORT_SYMBOL_GPL(most_stop_enqueue);

/**
 * most_resume_enqueue - allow core to enqueue MBOs again
 * @iface: pointer to interface
 * @id: channel id
 *
 * This clears the enqueue halt flag and enqueues all MBOs currently
 * sitting in the wait fifo.
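 *
 * Counterpart to most_stop_enqueue(); wakes the channel's enqueue thread
 * so that buffers waiting in the halt fifo are handed to the HDM again.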
 */
void most_resume_enqueue(struct most_interface *iface, int id)
{
	struct most_channel *c = iface->p->channel[id];

	if (!c)
		return;

	mutex_lock(&c->nq_mutex);
	c->enqueue_halt = false;
	mutex_unlock(&c->nq_mutex);

	wake_up_interruptible(&c->hdm_fifo_wq);
}
EXPORT_SYMBOL_GPL(most_resume_enqueue);

static int __init most_init(void)
{
	int err;

	INIT_LIST_HEAD(&comp_list);
	ida_init(&mdev_id);

	err = bus_register(&mostbus);
	if (err) {
		pr_err("Failed to register most bus\n");
		return err;
	}
	err = driver_register(&mostbus_driver);
	if (err) {
		pr_err("Failed to register core driver\n");
		goto err_unregister_bus;
	}
	configfs_init();
	return 0;

err_unregister_bus:
	bus_unregister(&mostbus);
	return err;
}

static void __exit most_exit(void)
{
	driver_unregister(&mostbus_driver);
	bus_unregister(&mostbus);
	ida_destroy(&mdev_id);
}

subsys_initcall(most_init);
module_exit(most_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");