// SPDX-License-Identifier: GPL-2.0-only
/*
 * cmt_speech.c - HSI CMT speech driver
 *
 * Copyright (C) 2008,2009,2010 Nokia Corporation. All rights reserved.
 *
 * Contact: Kai Vehmanen <kai.vehmanen@nokia.com>
 * Original author: Peter Ujfalusi <peter.ujfalusi@nokia.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>
#include <linux/pm_qos.h>
#include <linux/hsi/hsi.h>
#include <linux/hsi/ssi_protocol.h>
#include <linux/hsi/cs-protocol.h>

#define CS_MMAP_SIZE	PAGE_SIZE

struct char_queue {
	struct list_head	list;
	u32			msg;
};

struct cs_char {
	unsigned int		opened;
	struct hsi_client	*cl;
	struct cs_hsi_iface	*hi;
	struct list_head	chardev_queue;
	struct list_head	dataind_queue;
	int			dataind_pending;
	/* mmap things */
	unsigned long		mmap_base;
	unsigned long		mmap_size;
	spinlock_t		lock;
	struct fasync_struct	*async_queue;
	wait_queue_head_t	wait;
	/* hsi channel ids */
	int			channel_id_cmd;
	int			channel_id_data;
};

#define SSI_CHANNEL_STATE_READING	1
#define SSI_CHANNEL_STATE_WRITING	(1 << 1)
#define SSI_CHANNEL_STATE_POLL		(1 << 2)
#define SSI_CHANNEL_STATE_ERROR		(1 << 3)

#define TARGET_MASK			0xf000000
#define TARGET_REMOTE			(1 << CS_DOMAIN_SHIFT)
#define TARGET_LOCAL			0

/* Number of pre-allocated command buffers */
#define CS_MAX_CMDS			4

/*
 * During data transfers, transactions must be handled
 * within 20ms (fixed value in cmtspeech HSI protocol)
 */
#define CS_QOS_LATENCY_FOR_DATA_USEC	20000

/* Timeout to wait for pending HSI transfers to complete */
#define CS_HSI_TRANSFER_TIMEOUT_MS	500

#define RX_PTR_BOUNDARY_SHIFT		8
#define RX_PTR_MAX_SHIFT		(RX_PTR_BOUNDARY_SHIFT + \
						CS_MAX_BUFFERS_SHIFT)

struct cs_hsi_iface {
	struct hsi_client		*cl;
	struct hsi_client		*master;

	unsigned int			iface_state;
	unsigned int			wakeline_state;
	unsigned int			control_state;
	unsigned int			data_state;

	/* state exposed to application */
	struct cs_mmap_config_block	*mmap_cfg;

	unsigned long			mmap_base;
	unsigned long			mmap_size;

	unsigned int			rx_slot;
	unsigned int			tx_slot;

	/* note: for security reasons, we do not trust the contents of
	 * mmap_cfg, but instead duplicate the variables here */
	unsigned int			buf_size;
	unsigned int			rx_bufs;
	unsigned int			tx_bufs;
	unsigned int			rx_ptr_boundary;
	unsigned int			rx_offsets[CS_MAX_BUFFERS];
	unsigned int			tx_offsets[CS_MAX_BUFFERS];

	/* size of aligned memory blocks */
	unsigned int			slot_size;
	unsigned int			flags;

	struct list_head		cmdqueue;

	struct hsi_msg			*data_rx_msg;
	struct hsi_msg			*data_tx_msg;
	wait_queue_head_t		datawait;

	struct pm_qos_request		pm_qos_req;

	spinlock_t			lock;
};

static struct cs_char cs_char_data;

static void cs_hsi_read_on_control(struct cs_hsi_iface *hi);
static void cs_hsi_read_on_data(struct cs_hsi_iface *hi);

static inline void rx_ptr_shift_too_big(void)
{
	BUILD_BUG_ON((1LLU << RX_PTR_MAX_SHIFT) > UINT_MAX);
}

static void cs_notify(u32 message, struct list_head *head)
{
	struct char_queue *entry;
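	/*
	 * Called from the HSI message callbacks: queue the event and wake
	 * up any readers, or drop it if nobody has the device open.
	 */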
	spin_lock(&cs_char_data.lock);

	if (!cs_char_data.opened) {
		spin_unlock(&cs_char_data.lock);
		goto out;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry) {
		dev_err(&cs_char_data.cl->device,
			"Can't allocate new entry for the queue.\n");
		spin_unlock(&cs_char_data.lock);
		goto out;
	}

	entry->msg = message;
	list_add_tail(&entry->list, head);

	spin_unlock(&cs_char_data.lock);

	wake_up_interruptible(&cs_char_data.wait);
	kill_fasync(&cs_char_data.async_queue, SIGIO, POLL_IN);

out:
	return;
}

static u32 cs_pop_entry(struct list_head *head)
{
	struct char_queue *entry;
	u32 data;

	entry = list_entry(head->next, struct char_queue, list);
	data = entry->msg;
	list_del(&entry->list);
	kfree(entry);

	return data;
}

static void cs_notify_control(u32 message)
{
	cs_notify(message, &cs_char_data.chardev_queue);
}

static void cs_notify_data(u32 message, int maxlength)
{
	cs_notify(message, &cs_char_data.dataind_queue);

	spin_lock(&cs_char_data.lock);
	cs_char_data.dataind_pending++;
	while (cs_char_data.dataind_pending > maxlength &&
				!list_empty(&cs_char_data.dataind_queue)) {
		dev_dbg(&cs_char_data.cl->device,
			"data notification queue overrun (%u entries)\n",
			cs_char_data.dataind_pending);

		cs_pop_entry(&cs_char_data.dataind_queue);
		cs_char_data.dataind_pending--;
	}
	spin_unlock(&cs_char_data.lock);
}

static inline void cs_set_cmd(struct hsi_msg *msg, u32 cmd)
{
	u32 *data = sg_virt(msg->sgt.sgl);
	*data = cmd;
}

static inline u32 cs_get_cmd(struct hsi_msg *msg)
{
	u32 *data = sg_virt(msg->sgt.sgl);
	return *data;
}

static void cs_release_cmd(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	list_add_tail(&msg->link, &hi->cmdqueue);
}

static void cs_cmd_destructor(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	spin_lock(&hi->lock);

	dev_dbg(&cs_char_data.cl->device, "control cmd destructor\n");

	if (hi->iface_state != CS_STATE_CLOSED)
		dev_err(&hi->cl->device, "Cmd flushed while driver active\n");

	if (msg->ttype == HSI_MSG_READ)
		hi->control_state &=
			~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
	else if (msg->ttype == HSI_MSG_WRITE &&
			hi->control_state & SSI_CHANNEL_STATE_WRITING)
		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;

	cs_release_cmd(msg);

	spin_unlock(&hi->lock);
}

static struct hsi_msg *cs_claim_cmd(struct cs_hsi_iface *ssi)
{
	struct hsi_msg *msg;

	BUG_ON(list_empty(&ssi->cmdqueue));

	msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
	list_del(&msg->link);
	msg->destructor = cs_cmd_destructor;

	return msg;
}

static void cs_free_cmds(struct cs_hsi_iface *ssi)
{
	struct hsi_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
		list_del(&msg->link);
		msg->destructor = NULL;
		kfree(sg_virt(msg->sgt.sgl));
		hsi_free_msg(msg);
	}
}

static int cs_alloc_cmds(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;
	u32 *buf;
	unsigned int i;

	INIT_LIST_HEAD(&hi->cmdqueue);

	for (i = 0; i < CS_MAX_CMDS; i++) {
		msg = hsi_alloc_msg(1, GFP_KERNEL);
		if (!msg)
			goto out;
		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
		if (!buf) {
			hsi_free_msg(msg);
			goto out;
		}
		sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
		msg->channel = cs_char_data.channel_id_cmd;
		msg->context = hi;
		list_add_tail(&msg->link, &hi->cmdqueue);
	}

	return 0;

out:
	cs_free_cmds(hi);
	return -ENOMEM;
}

static void cs_hsi_data_destructor(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	const char *dir = (msg->ttype == HSI_MSG_READ) ? "RX" : "TX";

	dev_dbg(&cs_char_data.cl->device, "Freeing data %s message\n", dir);

	spin_lock(&hi->lock);
	if (hi->iface_state != CS_STATE_CLOSED)
		dev_err(&cs_char_data.cl->device,
			"Data %s flush while device active\n", dir);
	if (msg->ttype == HSI_MSG_READ)
		hi->data_state &=
			~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
	else
		hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;

	msg->status = HSI_STATUS_COMPLETED;
	if (unlikely(waitqueue_active(&hi->datawait)))
		wake_up_interruptible(&hi->datawait);

	spin_unlock(&hi->lock);
}

static int cs_hsi_alloc_data(struct cs_hsi_iface *hi)
{
	struct hsi_msg *txmsg, *rxmsg;
	int res = 0;

	rxmsg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!rxmsg) {
		res = -ENOMEM;
		goto out1;
	}
	rxmsg->channel = cs_char_data.channel_id_data;
	rxmsg->destructor = cs_hsi_data_destructor;
	rxmsg->context = hi;

	txmsg = hsi_alloc_msg(1, GFP_KERNEL);
	if (!txmsg) {
		res = -ENOMEM;
		goto out2;
	}
	txmsg->channel = cs_char_data.channel_id_data;
	txmsg->destructor = cs_hsi_data_destructor;
	txmsg->context = hi;

	hi->data_rx_msg = rxmsg;
	hi->data_tx_msg = txmsg;

	return 0;

out2:
	hsi_free_msg(rxmsg);
out1:
	return res;
}

static void cs_hsi_free_data_msg(struct hsi_msg *msg)
{
	WARN_ON(msg->status != HSI_STATUS_COMPLETED &&
					msg->status != HSI_STATUS_ERROR);
	hsi_free_msg(msg);
}

static void cs_hsi_free_data(struct cs_hsi_iface *hi)
{
	cs_hsi_free_data_msg(hi->data_rx_msg);
	cs_hsi_free_data_msg(hi->data_tx_msg);
}

static inline void __cs_hsi_error_pre(struct cs_hsi_iface *hi,
					struct hsi_msg *msg, const char *info,
					unsigned int *state)
{
	spin_lock(&hi->lock);
	dev_err(&hi->cl->device, "HSI %s error, msg %d, state %u\n",
		info, msg->status, *state);
}

static inline void __cs_hsi_error_post(struct cs_hsi_iface *hi)
{
	spin_unlock(&hi->lock);
}

static inline void __cs_hsi_error_read_bits(unsigned int *state)
{
	*state |= SSI_CHANNEL_STATE_ERROR;
	*state &= ~(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL);
}

static inline void __cs_hsi_error_write_bits(unsigned int *state)
{
	*state |= SSI_CHANNEL_STATE_ERROR;
	*state &= ~SSI_CHANNEL_STATE_WRITING;
}

static void cs_hsi_control_read_error(struct cs_hsi_iface *hi,
							struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "control read", &hi->control_state);
	cs_release_cmd(msg);
	__cs_hsi_error_read_bits(&hi->control_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_control_write_error(struct cs_hsi_iface *hi,
							struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "control write", &hi->control_state);
	cs_release_cmd(msg);
	__cs_hsi_error_write_bits(&hi->control_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_data_read_error(struct cs_hsi_iface *hi, struct hsi_msg *msg)
{
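	/* mark the data channel errored and clear any read/poll in progress */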
	__cs_hsi_error_pre(hi, msg, "data read", &hi->data_state);
	__cs_hsi_error_read_bits(&hi->data_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_data_write_error(struct cs_hsi_iface *hi,
							struct hsi_msg *msg)
{
	__cs_hsi_error_pre(hi, msg, "data write", &hi->data_state);
	__cs_hsi_error_write_bits(&hi->data_state);
	__cs_hsi_error_post(hi);
}

static void cs_hsi_read_on_control_complete(struct hsi_msg *msg)
{
	u32 cmd = cs_get_cmd(msg);
	struct cs_hsi_iface *hi = msg->context;

	spin_lock(&hi->lock);
	hi->control_state &= ~SSI_CHANNEL_STATE_READING;
	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&hi->cl->device, "Control RX error detected\n");
		spin_unlock(&hi->lock);
		cs_hsi_control_read_error(hi, msg);
		goto out;
	}
	dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);
	cs_release_cmd(msg);
	if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) {
		struct timespec64 tspec;
		struct cs_timestamp *tstamp =
			&hi->mmap_cfg->tstamp_rx_ctrl;

		ktime_get_ts64(&tspec);

		tstamp->tv_sec = (__u32) tspec.tv_sec;
		tstamp->tv_nsec = (__u32) tspec.tv_nsec;
	}
	spin_unlock(&hi->lock);

	cs_notify_control(cmd);

out:
	cs_hsi_read_on_control(hi);
}

static void cs_hsi_peek_on_control_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	int ret;

	if (msg->status == HSI_STATUS_ERROR) {
		dev_err(&hi->cl->device, "Control peek RX error detected\n");
		cs_hsi_control_read_error(hi, msg);
		return;
	}

	WARN_ON(!(hi->control_state & SSI_CHANNEL_STATE_READING));

	dev_dbg(&hi->cl->device, "Peek on control complete, reading\n");
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_read_on_control_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_control_read_error(hi, msg);
}

static void cs_hsi_read_on_control(struct cs_hsi_iface *hi)
{
	struct hsi_msg *msg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_READING) {
		dev_err(&hi->cl->device, "Control read already pending (%d)\n",
			hi->control_state);
		spin_unlock(&hi->lock);
		return;
	}
	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
		dev_err(&hi->cl->device, "Control read error (%d)\n",
			hi->control_state);
		spin_unlock(&hi->lock);
		return;
	}
	hi->control_state |= SSI_CHANNEL_STATE_READING;
	dev_dbg(&hi->cl->device, "Issuing RX on control\n");
	msg = cs_claim_cmd(hi);
	spin_unlock(&hi->lock);

	msg->sgt.nents = 0;
	msg->complete = cs_hsi_peek_on_control_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_control_read_error(hi, msg);
}

static void cs_hsi_write_on_control_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;

	if (msg->status == HSI_STATUS_COMPLETED) {
		spin_lock(&hi->lock);
		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;
		cs_release_cmd(msg);
		spin_unlock(&hi->lock);
	} else if (msg->status == HSI_STATUS_ERROR) {
		cs_hsi_control_write_error(hi, msg);
	} else {
		dev_err(&hi->cl->device,
			"unexpected status in control write callback %d\n",
			msg->status);
	}
}

static int cs_hsi_write_on_control(struct cs_hsi_iface *hi, u32 message)
{
	struct hsi_msg *msg;
	int ret;

	spin_lock(&hi->lock);
	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
		spin_unlock(&hi->lock);
		return -EIO;
	}
	if (hi->control_state & SSI_CHANNEL_STATE_WRITING) {
		dev_err(&hi->cl->device,
			"Write still pending on control channel.\n");
		spin_unlock(&hi->lock);
		return -EBUSY;
	}
	hi->control_state |= SSI_CHANNEL_STATE_WRITING;
	msg = cs_claim_cmd(hi);
	spin_unlock(&hi->lock);

	cs_set_cmd(msg, message);
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_write_on_control_complete;
	dev_dbg(&hi->cl->device,
		"Sending control message %08X\n", message);
	ret = hsi_async_write(hi->cl, msg);
	if (ret) {
		dev_err(&hi->cl->device,
			"async_write failed with %d\n", ret);
		cs_hsi_control_write_error(hi, msg);
	}

	/*
	 * Make sure control read is always pending when issuing
	 * new control writes. This is needed as the controller
	 * may flush our messages if e.g. the peer device reboots
	 * unexpectedly (and we cannot directly resubmit a new read from
	 * the message destructor; see cs_cmd_destructor()).
	 */
	if (!(hi->control_state & SSI_CHANNEL_STATE_READING)) {
		dev_err(&hi->cl->device, "Restarting control reads\n");
		cs_hsi_read_on_control(hi);
	}

	return 0;
}

static void cs_hsi_read_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	u32 payload;

	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
		cs_hsi_data_read_error(hi, msg);
		return;
	}

	spin_lock(&hi->lock);
	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_READING));
	hi->data_state &= ~SSI_CHANNEL_STATE_READING;
	payload = CS_RX_DATA_RECEIVED;
	payload |= hi->rx_slot;
	hi->rx_slot++;
	hi->rx_slot %= hi->rx_ptr_boundary;
	/* expose current rx ptr in mmap area */
	hi->mmap_cfg->rx_ptr = hi->rx_slot;
	if (unlikely(waitqueue_active(&hi->datawait)))
		wake_up_interruptible(&hi->datawait);
	spin_unlock(&hi->lock);

	cs_notify_data(payload, hi->rx_bufs);
	cs_hsi_read_on_data(hi);
}

static void cs_hsi_peek_on_data_complete(struct hsi_msg *msg)
{
	struct cs_hsi_iface *hi = msg->context;
	u32 *address;
	int ret;

	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
		cs_hsi_data_read_error(hi, msg);
		return;
	}
	if (unlikely(hi->iface_state != CS_STATE_CONFIGURED)) {
		dev_err(&hi->cl->device, "Data received in invalid state\n");
		cs_hsi_data_read_error(hi, msg);
		return;
	}

	spin_lock(&hi->lock);
	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_POLL));
	hi->data_state &= ~SSI_CHANNEL_STATE_POLL;
	hi->data_state |= SSI_CHANNEL_STATE_READING;
	spin_unlock(&hi->lock);

	address = (u32 *)(hi->mmap_base +
				hi->rx_offsets[hi->rx_slot % hi->rx_bufs]);
	sg_init_one(msg->sgt.sgl, address, hi->buf_size);
	msg->sgt.nents = 1;
	msg->complete = cs_hsi_read_on_data_complete;
	ret = hsi_async_read(hi->cl, msg);
	if (ret)
		cs_hsi_data_read_error(hi, msg);
}

/*
 * Read/write transaction is ongoing. Returns false if in
 * SSI_CHANNEL_STATE_POLL state.
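 * (POLL only means a zero-length peek has been issued; no payload
 * transfer is in flight yet.)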
635 */ 636 static inline int cs_state_xfer_active(unsigned int state) 637 { 638 return (state & SSI_CHANNEL_STATE_WRITING) || 639 (state & SSI_CHANNEL_STATE_READING); 640 } 641 642 /* 643 * No pending read/writes 644 */ 645 static inline int cs_state_idle(unsigned int state) 646 { 647 return !(state & ~SSI_CHANNEL_STATE_ERROR); 648 } 649 650 static void cs_hsi_read_on_data(struct cs_hsi_iface *hi) 651 { 652 struct hsi_msg *rxmsg; 653 int ret; 654 655 spin_lock(&hi->lock); 656 if (hi->data_state & 657 (SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL)) { 658 dev_dbg(&hi->cl->device, "Data read already pending (%u)\n", 659 hi->data_state); 660 spin_unlock(&hi->lock); 661 return; 662 } 663 hi->data_state |= SSI_CHANNEL_STATE_POLL; 664 spin_unlock(&hi->lock); 665 666 rxmsg = hi->data_rx_msg; 667 sg_init_one(rxmsg->sgt.sgl, (void *)hi->mmap_base, 0); 668 rxmsg->sgt.nents = 0; 669 rxmsg->complete = cs_hsi_peek_on_data_complete; 670 671 ret = hsi_async_read(hi->cl, rxmsg); 672 if (ret) 673 cs_hsi_data_read_error(hi, rxmsg); 674 } 675 676 static void cs_hsi_write_on_data_complete(struct hsi_msg *msg) 677 { 678 struct cs_hsi_iface *hi = msg->context; 679 680 if (msg->status == HSI_STATUS_COMPLETED) { 681 spin_lock(&hi->lock); 682 hi->data_state &= ~SSI_CHANNEL_STATE_WRITING; 683 if (unlikely(waitqueue_active(&hi->datawait))) 684 wake_up_interruptible(&hi->datawait); 685 spin_unlock(&hi->lock); 686 } else { 687 cs_hsi_data_write_error(hi, msg); 688 } 689 } 690 691 static int cs_hsi_write_on_data(struct cs_hsi_iface *hi, unsigned int slot) 692 { 693 u32 *address; 694 struct hsi_msg *txmsg; 695 int ret; 696 697 spin_lock(&hi->lock); 698 if (hi->iface_state != CS_STATE_CONFIGURED) { 699 dev_err(&hi->cl->device, "Not configured, aborting\n"); 700 ret = -EINVAL; 701 goto error; 702 } 703 if (hi->data_state & SSI_CHANNEL_STATE_ERROR) { 704 dev_err(&hi->cl->device, "HSI error, aborting\n"); 705 ret = -EIO; 706 goto error; 707 } 708 if (hi->data_state & SSI_CHANNEL_STATE_WRITING) { 709 dev_err(&hi->cl->device, "Write pending on data channel.\n"); 710 ret = -EBUSY; 711 goto error; 712 } 713 hi->data_state |= SSI_CHANNEL_STATE_WRITING; 714 spin_unlock(&hi->lock); 715 716 hi->tx_slot = slot; 717 address = (u32 *)(hi->mmap_base + hi->tx_offsets[hi->tx_slot]); 718 txmsg = hi->data_tx_msg; 719 sg_init_one(txmsg->sgt.sgl, address, hi->buf_size); 720 txmsg->complete = cs_hsi_write_on_data_complete; 721 ret = hsi_async_write(hi->cl, txmsg); 722 if (ret) 723 cs_hsi_data_write_error(hi, txmsg); 724 725 return ret; 726 727 error: 728 spin_unlock(&hi->lock); 729 if (ret == -EIO) 730 cs_hsi_data_write_error(hi, hi->data_tx_msg); 731 732 return ret; 733 } 734 735 static unsigned int cs_hsi_get_state(struct cs_hsi_iface *hi) 736 { 737 return hi->iface_state; 738 } 739 740 static int cs_hsi_command(struct cs_hsi_iface *hi, u32 cmd) 741 { 742 int ret = 0; 743 744 local_bh_disable(); 745 switch (cmd & TARGET_MASK) { 746 case TARGET_REMOTE: 747 ret = cs_hsi_write_on_control(hi, cmd); 748 break; 749 case TARGET_LOCAL: 750 if ((cmd & CS_CMD_MASK) == CS_TX_DATA_READY) 751 ret = cs_hsi_write_on_data(hi, cmd & CS_PARAM_MASK); 752 else 753 ret = -EINVAL; 754 break; 755 default: 756 ret = -EINVAL; 757 break; 758 } 759 local_bh_enable(); 760 761 return ret; 762 } 763 764 static void cs_hsi_set_wakeline(struct cs_hsi_iface *hi, bool new_state) 765 { 766 int change = 0; 767 768 spin_lock_bh(&hi->lock); 769 if (hi->wakeline_state != new_state) { 770 hi->wakeline_state = new_state; 771 change = 1; 772 dev_dbg(&hi->cl->device, "setting 
		dev_dbg(&hi->cl->device, "setting wake line to %d (%p)\n",
			new_state, hi->cl);
	}
	spin_unlock_bh(&hi->lock);

	if (change) {
		if (new_state)
			ssip_slave_start_tx(hi->master);
		else
			ssip_slave_stop_tx(hi->master);
	}

	dev_dbg(&hi->cl->device, "wake line set to %d (%p)\n",
		new_state, hi->cl);
}

static void set_buffer_sizes(struct cs_hsi_iface *hi, int rx_bufs, int tx_bufs)
{
	hi->rx_bufs = rx_bufs;
	hi->tx_bufs = tx_bufs;
	hi->mmap_cfg->rx_bufs = rx_bufs;
	hi->mmap_cfg->tx_bufs = tx_bufs;

	if (hi->flags & CS_FEAT_ROLLING_RX_COUNTER) {
		/*
		 * For more robust overrun detection, let the rx
		 * pointer run in range 0..'boundary-1'. Boundary
		 * is a multiple of rx_bufs, and limited in max size
		 * by RX_PTR_MAX_SHIFT to allow for fast ptr-diff
		 * calculation.
		 */
		hi->rx_ptr_boundary = (rx_bufs << RX_PTR_BOUNDARY_SHIFT);
		hi->mmap_cfg->rx_ptr_boundary = hi->rx_ptr_boundary;
	} else {
		hi->rx_ptr_boundary = hi->rx_bufs;
	}
}

static int check_buf_params(struct cs_hsi_iface *hi,
					const struct cs_buffer_config *buf_cfg)
{
	size_t buf_size_aligned = L1_CACHE_ALIGN(buf_cfg->buf_size) *
					(buf_cfg->rx_bufs + buf_cfg->tx_bufs);
	size_t ctrl_size_aligned = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
	int r = 0;

	if (buf_cfg->rx_bufs > CS_MAX_BUFFERS ||
					buf_cfg->tx_bufs > CS_MAX_BUFFERS) {
		r = -EINVAL;
	} else if ((buf_size_aligned + ctrl_size_aligned) >= hi->mmap_size) {
		dev_err(&hi->cl->device,
			"No space for the requested buffer configuration\n");
		r = -ENOBUFS;
	}

	return r;
}

/*
 * Block until pending data transfers have completed.
 */
static int cs_hsi_data_sync(struct cs_hsi_iface *hi)
{
	int r = 0;

	spin_lock_bh(&hi->lock);

	if (!cs_state_xfer_active(hi->data_state)) {
		dev_dbg(&hi->cl->device, "hsi_data_sync break, idle\n");
		goto out;
	}

	for (;;) {
		int s;
		DEFINE_WAIT(wait);

		if (!cs_state_xfer_active(hi->data_state))
			goto out;
		if (signal_pending(current)) {
			r = -ERESTARTSYS;
			goto out;
		}
		/*
		 * prepare_to_wait must be called with hi->lock held
		 * so that callbacks can check for waitqueue_active()
		 */
		prepare_to_wait(&hi->datawait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock_bh(&hi->lock);
		s = schedule_timeout(
			msecs_to_jiffies(CS_HSI_TRANSFER_TIMEOUT_MS));
		spin_lock_bh(&hi->lock);
		finish_wait(&hi->datawait, &wait);
		if (!s) {
			dev_dbg(&hi->cl->device,
				"hsi_data_sync timeout after %d ms\n",
				CS_HSI_TRANSFER_TIMEOUT_MS);
			r = -EIO;
			goto out;
		}
	}

out:
	spin_unlock_bh(&hi->lock);
	dev_dbg(&hi->cl->device, "hsi_data_sync done with res %d\n", r);

	return r;
}

static void cs_hsi_data_enable(struct cs_hsi_iface *hi,
					struct cs_buffer_config *buf_cfg)
{
	unsigned int data_start, i;

	BUG_ON(hi->buf_size == 0);

	set_buffer_sizes(hi, buf_cfg->rx_bufs, buf_cfg->tx_bufs);

	hi->slot_size = L1_CACHE_ALIGN(hi->buf_size);
	dev_dbg(&hi->cl->device,
		"setting slot size to %u, buf size %u, align %u\n",
		hi->slot_size, hi->buf_size, L1_CACHE_BYTES);

	data_start = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
	dev_dbg(&hi->cl->device,
		"setting data start at %u, cfg block %zu, align %u\n",
		data_start, sizeof(*hi->mmap_cfg), L1_CACHE_BYTES);
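	/* data buffers follow the config block: DL (rx) slots first, then UL (tx) */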
	for (i = 0; i < hi->mmap_cfg->rx_bufs; i++) {
		hi->rx_offsets[i] = data_start + i * hi->slot_size;
		hi->mmap_cfg->rx_offsets[i] = hi->rx_offsets[i];
		dev_dbg(&hi->cl->device, "DL buf #%u at %u\n",
			i, hi->rx_offsets[i]);
	}
	for (i = 0; i < hi->mmap_cfg->tx_bufs; i++) {
		hi->tx_offsets[i] = data_start +
			(i + hi->mmap_cfg->rx_bufs) * hi->slot_size;
		hi->mmap_cfg->tx_offsets[i] = hi->tx_offsets[i];
		dev_dbg(&hi->cl->device, "UL buf #%u at %u\n",
			i, hi->tx_offsets[i]);
	}

	hi->iface_state = CS_STATE_CONFIGURED;
}

static void cs_hsi_data_disable(struct cs_hsi_iface *hi, int old_state)
{
	if (old_state == CS_STATE_CONFIGURED) {
		dev_dbg(&hi->cl->device,
			"closing data channel with slot size 0\n");
		hi->iface_state = CS_STATE_OPENED;
	}
}

static int cs_hsi_buf_config(struct cs_hsi_iface *hi,
					struct cs_buffer_config *buf_cfg)
{
	int r = 0;
	unsigned int old_state = hi->iface_state;

	spin_lock_bh(&hi->lock);
	/* Prevent new transactions during buffer reconfig */
	if (old_state == CS_STATE_CONFIGURED)
		hi->iface_state = CS_STATE_OPENED;
	spin_unlock_bh(&hi->lock);

	/*
	 * make sure that no non-zero data reads are ongoing before
	 * proceeding to change the buffer layout
	 */
	r = cs_hsi_data_sync(hi);
	if (r < 0)
		return r;

	WARN_ON(cs_state_xfer_active(hi->data_state));

	spin_lock_bh(&hi->lock);
	r = check_buf_params(hi, buf_cfg);
	if (r < 0)
		goto error;

	hi->buf_size = buf_cfg->buf_size;
	hi->mmap_cfg->buf_size = hi->buf_size;
	hi->flags = buf_cfg->flags;

	hi->rx_slot = 0;
	hi->tx_slot = 0;
	hi->slot_size = 0;

	if (hi->buf_size)
		cs_hsi_data_enable(hi, buf_cfg);
	else
		cs_hsi_data_disable(hi, old_state);

	spin_unlock_bh(&hi->lock);

	if (old_state != hi->iface_state) {
		if (hi->iface_state == CS_STATE_CONFIGURED) {
			pm_qos_add_request(&hi->pm_qos_req,
				PM_QOS_CPU_DMA_LATENCY,
				CS_QOS_LATENCY_FOR_DATA_USEC);
			local_bh_disable();
			cs_hsi_read_on_data(hi);
			local_bh_enable();
		} else if (old_state == CS_STATE_CONFIGURED) {
			pm_qos_remove_request(&hi->pm_qos_req);
		}
	}
	return r;

error:
	spin_unlock_bh(&hi->lock);
	return r;
}

static int cs_hsi_start(struct cs_hsi_iface **hi, struct hsi_client *cl,
			unsigned long mmap_base, unsigned long mmap_size)
{
	int err = 0;
	struct cs_hsi_iface *hsi_if = kzalloc(sizeof(*hsi_if), GFP_KERNEL);

	dev_dbg(&cl->device, "cs_hsi_start\n");

	if (!hsi_if) {
		err = -ENOMEM;
		goto leave0;
	}
	spin_lock_init(&hsi_if->lock);
	hsi_if->cl = cl;
	hsi_if->iface_state = CS_STATE_CLOSED;
	hsi_if->mmap_cfg = (struct cs_mmap_config_block *)mmap_base;
	hsi_if->mmap_base = mmap_base;
	hsi_if->mmap_size = mmap_size;
	memset(hsi_if->mmap_cfg, 0, sizeof(*hsi_if->mmap_cfg));
	init_waitqueue_head(&hsi_if->datawait);
	err = cs_alloc_cmds(hsi_if);
	if (err < 0) {
		dev_err(&cl->device, "Unable to alloc HSI messages\n");
		goto leave1;
	}
	err = cs_hsi_alloc_data(hsi_if);
	if (err < 0) {
		dev_err(&cl->device, "Unable to alloc HSI messages for data\n");
		goto leave2;
	}
	err = hsi_claim_port(cl, 1);
	if (err < 0) {
		dev_err(&cl->device,
			"Could not open, HSI port already claimed\n");
		goto leave3;
	}
	hsi_if->master = ssip_slave_get_master(cl);
	if (IS_ERR(hsi_if->master)) {
		err = PTR_ERR(hsi_if->master);
		dev_err(&cl->device, "Could not get HSI master client\n");
		goto leave4;
	}
	if (!ssip_slave_running(hsi_if->master)) {
		err = -ENODEV;
		dev_err(&cl->device,
			"HSI port not initialized\n");
		goto leave4;
	}

	hsi_if->iface_state = CS_STATE_OPENED;
	local_bh_disable();
	cs_hsi_read_on_control(hsi_if);
	local_bh_enable();

	dev_dbg(&cl->device, "cs_hsi_start...done\n");

	BUG_ON(!hi);
	*hi = hsi_if;

	return 0;

leave4:
	hsi_release_port(cl);
leave3:
	cs_hsi_free_data(hsi_if);
leave2:
	cs_free_cmds(hsi_if);
leave1:
	kfree(hsi_if);
leave0:
	dev_dbg(&cl->device, "cs_hsi_start...done/error\n\n");

	return err;
}

static void cs_hsi_stop(struct cs_hsi_iface *hi)
{
	dev_dbg(&hi->cl->device, "cs_hsi_stop\n");
	cs_hsi_set_wakeline(hi, 0);
	ssip_slave_put_master(hi->master);

	/* hsi_release_port() needs to be called with CS_STATE_CLOSED */
	hi->iface_state = CS_STATE_CLOSED;
	hsi_release_port(hi->cl);

	/*
	 * hsi_release_port() should flush out all the pending
	 * messages, so cs_state_idle() should be true for both
	 * control and data channels.
	 */
	WARN_ON(!cs_state_idle(hi->control_state));
	WARN_ON(!cs_state_idle(hi->data_state));

	if (pm_qos_request_active(&hi->pm_qos_req))
		pm_qos_remove_request(&hi->pm_qos_req);

	spin_lock_bh(&hi->lock);
	cs_hsi_free_data(hi);
	cs_free_cmds(hi);
	spin_unlock_bh(&hi->lock);
	kfree(hi);
}

static vm_fault_t cs_char_vma_fault(struct vm_fault *vmf)
{
	struct cs_char *csdata = vmf->vma->vm_private_data;
	struct page *page;

	page = virt_to_page(csdata->mmap_base);
	get_page(page);
	vmf->page = page;

	return 0;
}

static const struct vm_operations_struct cs_char_vm_ops = {
	.fault	= cs_char_vma_fault,
};

static int cs_char_fasync(int fd, struct file *file, int on)
{
	struct cs_char *csdata = file->private_data;

	if (fasync_helper(fd, file, on, &csdata->async_queue) < 0)
		return -EIO;

	return 0;
}

static __poll_t cs_char_poll(struct file *file, poll_table *wait)
{
	struct cs_char *csdata = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &cs_char_data.wait, wait);
	spin_lock_bh(&csdata->lock);
	if (!list_empty(&csdata->chardev_queue))
		ret = EPOLLIN | EPOLLRDNORM;
	else if (!list_empty(&csdata->dataind_queue))
		ret = EPOLLIN | EPOLLRDNORM;
	spin_unlock_bh(&csdata->lock);

	return ret;
}

static ssize_t cs_char_read(struct file *file, char __user *buf, size_t count,
								loff_t *unused)
{
	struct cs_char *csdata = file->private_data;
	u32 data;
	ssize_t retval;

	if (count < sizeof(data))
		return -EINVAL;

	for (;;) {
		DEFINE_WAIT(wait);

		spin_lock_bh(&csdata->lock);
		if (!list_empty(&csdata->chardev_queue)) {
			data = cs_pop_entry(&csdata->chardev_queue);
		} else if (!list_empty(&csdata->dataind_queue)) {
			data = cs_pop_entry(&csdata->dataind_queue);
			csdata->dataind_pending--;
		} else {
			data = 0;
		}
		spin_unlock_bh(&csdata->lock);

		if (data)
			break;
		if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		} else if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
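		/* nothing queued yet: sleep until cs_notify() wakes us up */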
		prepare_to_wait_exclusive(&csdata->wait, &wait,
				TASK_INTERRUPTIBLE);
		schedule();
		finish_wait(&csdata->wait, &wait);
	}

	retval = put_user(data, (u32 __user *)buf);
	if (!retval)
		retval = sizeof(data);

out:
	return retval;
}

static ssize_t cs_char_write(struct file *file, const char __user *buf,
						size_t count, loff_t *unused)
{
	struct cs_char *csdata = file->private_data;
	u32 data;
	int err;
	ssize_t retval;

	if (count < sizeof(data))
		return -EINVAL;

	if (get_user(data, (u32 __user *)buf))
		return -EFAULT;

	retval = count;
	err = cs_hsi_command(csdata->hi, data);
	if (err < 0)
		retval = err;

	return retval;
}

static long cs_char_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	struct cs_char *csdata = file->private_data;
	int r = 0;

	switch (cmd) {
	case CS_GET_STATE: {
		unsigned int state;

		state = cs_hsi_get_state(csdata->hi);
		if (copy_to_user((void __user *)arg, &state, sizeof(state)))
			r = -EFAULT;

		break;
	}
	case CS_SET_WAKELINE: {
		unsigned int state;

		if (copy_from_user(&state, (void __user *)arg,
							sizeof(state))) {
			r = -EFAULT;
			break;
		}

		if (state > 1) {
			r = -EINVAL;
			break;
		}

		cs_hsi_set_wakeline(csdata->hi, !!state);

		break;
	}
	case CS_GET_IF_VERSION: {
		unsigned int ifver = CS_IF_VERSION;

		if (copy_to_user((void __user *)arg, &ifver, sizeof(ifver)))
			r = -EFAULT;

		break;
	}
	case CS_CONFIG_BUFS: {
		struct cs_buffer_config buf_cfg;

		if (copy_from_user(&buf_cfg, (void __user *)arg,
							sizeof(buf_cfg)))
			r = -EFAULT;
		else
			r = cs_hsi_buf_config(csdata->hi, &buf_cfg);

		break;
	}
	default:
		r = -ENOTTY;
		break;
	}

	return r;
}

static int cs_char_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;

	if (vma_pages(vma) != 1)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND;
	vma->vm_ops = &cs_char_vm_ops;
	vma->vm_private_data = file->private_data;

	return 0;
}

static int cs_char_open(struct inode *unused, struct file *file)
{
	int ret = 0;
	unsigned long p;

	spin_lock_bh(&cs_char_data.lock);
	if (cs_char_data.opened) {
		ret = -EBUSY;
		spin_unlock_bh(&cs_char_data.lock);
		goto out1;
	}
	cs_char_data.opened = 1;
	cs_char_data.dataind_pending = 0;
	spin_unlock_bh(&cs_char_data.lock);

	p = get_zeroed_page(GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto out2;
	}

	ret = cs_hsi_start(&cs_char_data.hi, cs_char_data.cl, p, CS_MMAP_SIZE);
	if (ret) {
		dev_err(&cs_char_data.cl->device, "Unable to initialize HSI\n");
		goto out3;
	}

	/* these are only used in release so lock not needed */
	cs_char_data.mmap_base = p;
	cs_char_data.mmap_size = CS_MMAP_SIZE;

	file->private_data = &cs_char_data;

	return 0;

out3:
	free_page(p);
out2:
	spin_lock_bh(&cs_char_data.lock);
	cs_char_data.opened = 0;
	spin_unlock_bh(&cs_char_data.lock);
out1:
	return ret;
}

static void cs_free_char_queue(struct list_head *head)
{
	struct char_queue *entry;
	struct list_head *cursor, *next;

	if (!list_empty(head)) {
		list_for_each_safe(cursor, next, head) {
			entry = list_entry(cursor, struct char_queue, list);
			list_del(&entry->list);
			kfree(entry);
		}
	}
}

static int cs_char_release(struct inode *unused, struct file *file)
{
	struct cs_char *csdata = file->private_data;

	cs_hsi_stop(csdata->hi);
	spin_lock_bh(&csdata->lock);
	csdata->hi = NULL;
	free_page(csdata->mmap_base);
	cs_free_char_queue(&csdata->chardev_queue);
	cs_free_char_queue(&csdata->dataind_queue);
	csdata->opened = 0;
	spin_unlock_bh(&csdata->lock);

	return 0;
}

static const struct file_operations cs_char_fops = {
	.owner		= THIS_MODULE,
	.read		= cs_char_read,
	.write		= cs_char_write,
	.poll		= cs_char_poll,
	.unlocked_ioctl	= cs_char_ioctl,
	.mmap		= cs_char_mmap,
	.open		= cs_char_open,
	.release	= cs_char_release,
	.fasync		= cs_char_fasync,
};

static struct miscdevice cs_char_miscdev = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "cmt_speech",
	.fops	= &cs_char_fops
};

static int cs_hsi_client_probe(struct device *dev)
{
	int err = 0;
	struct hsi_client *cl = to_hsi_client(dev);

	dev_dbg(dev, "hsi_client_probe\n");
	init_waitqueue_head(&cs_char_data.wait);
	spin_lock_init(&cs_char_data.lock);
	cs_char_data.opened = 0;
	cs_char_data.cl = cl;
	cs_char_data.hi = NULL;
	INIT_LIST_HEAD(&cs_char_data.chardev_queue);
	INIT_LIST_HEAD(&cs_char_data.dataind_queue);

	cs_char_data.channel_id_cmd = hsi_get_channel_id_by_name(cl,
		"speech-control");
	if (cs_char_data.channel_id_cmd < 0) {
		err = cs_char_data.channel_id_cmd;
		dev_err(dev, "Could not get cmd channel (%d)\n", err);
		return err;
	}

	cs_char_data.channel_id_data = hsi_get_channel_id_by_name(cl,
		"speech-data");
	if (cs_char_data.channel_id_data < 0) {
		err = cs_char_data.channel_id_data;
		dev_err(dev, "Could not get data channel (%d)\n", err);
		return err;
	}

	err = misc_register(&cs_char_miscdev);
	if (err)
		dev_err(dev, "Failed to register: %d\n", err);

	return err;
}

static int cs_hsi_client_remove(struct device *dev)
{
	struct cs_hsi_iface *hi;

	dev_dbg(dev, "hsi_client_remove\n");
	misc_deregister(&cs_char_miscdev);
	spin_lock_bh(&cs_char_data.lock);
	hi = cs_char_data.hi;
	cs_char_data.hi = NULL;
	spin_unlock_bh(&cs_char_data.lock);
	if (hi)
		cs_hsi_stop(hi);

	return 0;
}

static struct hsi_client_driver cs_hsi_driver = {
	.driver = {
		.name	= "cmt-speech",
		.owner	= THIS_MODULE,
		.probe	= cs_hsi_client_probe,
		.remove	= cs_hsi_client_remove,
	},
};

static int __init cs_char_init(void)
{
	pr_info("CMT speech driver added\n");
	return hsi_register_client_driver(&cs_hsi_driver);
}
module_init(cs_char_init);

static void __exit cs_char_exit(void)
{
	hsi_unregister_client_driver(&cs_hsi_driver);
	pr_info("CMT speech driver removed\n");
}
module_exit(cs_char_exit);

MODULE_ALIAS("hsi:cmt-speech");
MODULE_AUTHOR("Kai Vehmanen <kai.vehmanen@nokia.com>");
MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@nokia.com>");
MODULE_DESCRIPTION("CMT speech driver");
MODULE_LICENSE("GPL v2");