// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017-2021 NXP

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/rpmsg.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/dmaengine_pcm.h>
#include <sound/soc.h>

#include "imx-pcm.h"
#include "fsl_rpmsg.h"
#include "imx-pcm-rpmsg.h"

static struct snd_pcm_hardware imx_rpmsg_pcm_hardware = {
	.info = SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_NO_PERIOD_WAKEUP |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_RESUME,
	.buffer_bytes_max = IMX_DEFAULT_DMABUF_SIZE,
	.period_bytes_min = 512,
	.period_bytes_max = 65536,
	.periods_min = 2,
	.periods_max = 6000,
	.fifo_size = 0,
};

static int imx_rpmsg_pcm_send_message(struct rpmsg_msg *msg,
				      struct rpmsg_info *info)
{
	struct rpmsg_device *rpdev = info->rpdev;
	int ret = 0;

	mutex_lock(&info->msg_lock);
	if (!rpdev) {
		dev_err(info->dev, "rpmsg channel not ready\n");
		mutex_unlock(&info->msg_lock);
		return -EINVAL;
	}

	dev_dbg(&rpdev->dev, "send cmd %d\n", msg->s_msg.header.cmd);

	if (msg->s_msg.header.type != MSG_TYPE_C)
		reinit_completion(&info->cmd_complete);

	ret = rpmsg_send(rpdev->ept, (void *)&msg->s_msg,
			 sizeof(struct rpmsg_s_msg));
	if (ret) {
		dev_err(&rpdev->dev, "rpmsg_send failed: %d\n", ret);
		mutex_unlock(&info->msg_lock);
		return ret;
	}

	/* No response message is expected for a TYPE_C command */
	if (msg->s_msg.header.type == MSG_TYPE_C) {
		mutex_unlock(&info->msg_lock);
		return 0;
	}

	/* Wait for the response from the remote side */
	ret = wait_for_completion_timeout(&info->cmd_complete,
					  msecs_to_jiffies(RPMSG_TIMEOUT));
	if (!ret) {
		dev_err(&rpdev->dev, "rpmsg_send cmd %d timeout!\n",
			msg->s_msg.header.cmd);
		mutex_unlock(&info->msg_lock);
		return -ETIMEDOUT;
	}

	memcpy(&msg->r_msg, &info->r_msg, sizeof(struct rpmsg_r_msg));
	memcpy(&info->msg[msg->r_msg.header.cmd].r_msg,
	       &msg->r_msg, sizeof(struct rpmsg_r_msg));

	/*
	 * Reset the buffer pointer to zero. It has already been cleared
	 * in imx_rpmsg_terminate_all(), but a timer task that was still
	 * queued at that point will move the pointer again once it runs,
	 * so reset it once more when handling the TERMINATE command.
	 */
	switch (msg->s_msg.header.cmd) {
	case TX_TERMINATE:
		info->msg[TX_POINTER].r_msg.param.buffer_offset = 0;
		break;
	case RX_TERMINATE:
		info->msg[RX_POINTER].r_msg.param.buffer_offset = 0;
		break;
	default:
		break;
	}

	dev_dbg(&rpdev->dev, "cmd:%d, resp %d\n", msg->s_msg.header.cmd,
		info->r_msg.param.resp);

	mutex_unlock(&info->msg_lock);

	return 0;
}

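/*
 * Push one message into the ring of preallocated work items
 * (work_list[WORK_MAX_NUM]) and kick the ordered workqueue.
 * work_write_index starts at 1 (see imx_rpmsg_pcm_probe()) so that it
 * differs from work_read_index; the ring is treated as full when the
 * write index catches up with the read index again, in which case the
 * message is dropped and counted in msg_drop_count[].
 */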
static int imx_rpmsg_insert_workqueue(struct snd_pcm_substream *substream,
				      struct rpmsg_msg *msg,
				      struct rpmsg_info *info)
{
	unsigned long flags;
	int ret = 0;

	/*
	 * Queue the work to the workqueue.
	 * If the queue is full, drop the message.
	 */
	spin_lock_irqsave(&info->wq_lock, flags);
	if (info->work_write_index != info->work_read_index) {
		int index = info->work_write_index;

		memcpy(&info->work_list[index].msg, msg,
		       sizeof(struct rpmsg_s_msg));

		queue_work(info->rpmsg_wq, &info->work_list[index].work);
		info->work_write_index++;
		info->work_write_index %= WORK_MAX_NUM;
	} else {
		info->msg_drop_count[substream->stream]++;
		ret = -EPIPE;
	}
	spin_unlock_irqrestore(&info->wq_lock, flags);

	return ret;
}

static int imx_rpmsg_pcm_hw_params(struct snd_soc_component *component,
				   struct snd_pcm_substream *substream,
				   struct snd_pcm_hw_params *params)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;
	int ret = 0;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_HW_PARAM];
		msg->s_msg.header.cmd = TX_HW_PARAM;
	} else {
		msg = &info->msg[RX_HW_PARAM];
		msg->s_msg.header.cmd = RX_HW_PARAM;
	}

	msg->s_msg.param.rate = params_rate(params);

	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		msg->s_msg.param.format = RPMSG_S16_LE;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		msg->s_msg.param.format = RPMSG_S24_LE;
		break;
	case SNDRV_PCM_FORMAT_DSD_U16_LE:
		msg->s_msg.param.format = RPMSG_DSD_U16_LE;
		break;
	case SNDRV_PCM_FORMAT_DSD_U32_LE:
		msg->s_msg.param.format = RPMSG_DSD_U32_LE;
		break;
	default:
		msg->s_msg.param.format = RPMSG_S32_LE;
		break;
	}

	switch (params_channels(params)) {
	case 1:
		msg->s_msg.param.channels = RPMSG_CH_LEFT;
		break;
	case 2:
		msg->s_msg.param.channels = RPMSG_CH_STEREO;
		break;
	default:
		msg->s_msg.param.channels = params_channels(params);
		break;
	}

	info->send_message(msg, info);

	return ret;
}

static snd_pcm_uframes_t imx_rpmsg_pcm_pointer(struct snd_soc_component *component,
					       struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;
	unsigned int pos = 0;
	int buffer_tail = 0;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		msg = &info->msg[TX_PERIOD_DONE + MSG_TYPE_A_NUM];
	else
		msg = &info->msg[RX_PERIOD_DONE + MSG_TYPE_A_NUM];

	buffer_tail = msg->r_msg.param.buffer_tail;
	pos = buffer_tail * snd_pcm_lib_period_bytes(substream);

	return bytes_to_frames(substream->runtime, pos);
}

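/*
 * Per-stream timer callback, used on the LPA path: when
 * imx_rpmsg_pcm_ack() decides the M core still has enough buffered
 * data, it arms this timer instead of notifying the M core right
 * away; once the timer expires, queue the (by now latest) PERIOD_DONE
 * message so the M core learns about the updated pointer.
 */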
static void imx_rpmsg_timer_callback(struct timer_list *t)
{
	struct stream_timer *stream_timer =
			from_timer(stream_timer, t, timer);
	struct snd_pcm_substream *substream = stream_timer->substream;
	struct rpmsg_info *info = stream_timer->info;
	struct rpmsg_msg *msg;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_PERIOD_DONE + MSG_TYPE_A_NUM];
		msg->s_msg.header.cmd = TX_PERIOD_DONE;
	} else {
		msg = &info->msg[RX_PERIOD_DONE + MSG_TYPE_A_NUM];
		msg->s_msg.header.cmd = RX_PERIOD_DONE;
	}

	imx_rpmsg_insert_workqueue(substream, msg, info);
}

static int imx_rpmsg_pcm_open(struct snd_soc_component *component,
			      struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;
	int ret = 0;
	int cmd;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_OPEN];
		msg->s_msg.header.cmd = TX_OPEN;

		/* Reinitialize the buffer counters */
		cmd = TX_PERIOD_DONE + MSG_TYPE_A_NUM;
		info->msg[cmd].s_msg.param.buffer_tail = 0;
		info->msg[cmd].r_msg.param.buffer_tail = 0;
		info->msg[TX_POINTER].r_msg.param.buffer_offset = 0;
	} else {
		msg = &info->msg[RX_OPEN];
		msg->s_msg.header.cmd = RX_OPEN;

		/* Reinitialize the buffer counters */
		cmd = RX_PERIOD_DONE + MSG_TYPE_A_NUM;
		info->msg[cmd].s_msg.param.buffer_tail = 0;
		info->msg[cmd].r_msg.param.buffer_tail = 0;
		info->msg[RX_POINTER].r_msg.param.buffer_offset = 0;
	}

	info->send_message(msg, info);

	imx_rpmsg_pcm_hardware.period_bytes_max =
			imx_rpmsg_pcm_hardware.buffer_bytes_max / 2;

	snd_soc_set_runtime_hwparams(substream, &imx_rpmsg_pcm_hardware);

	ret = snd_pcm_hw_constraint_integer(substream->runtime,
					    SNDRV_PCM_HW_PARAM_PERIODS);
	if (ret < 0)
		return ret;

	info->msg_drop_count[substream->stream] = 0;

	/* Create the timer */
	info->stream_timer[substream->stream].info = info;
	info->stream_timer[substream->stream].substream = substream;
	timer_setup(&info->stream_timer[substream->stream].timer,
		    imx_rpmsg_timer_callback, 0);
	return ret;
}

static int imx_rpmsg_pcm_close(struct snd_soc_component *component,
			       struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;
	int ret = 0;

	/* Flush the workqueue so that TX_CLOSE/RX_CLOSE is the last message */
	flush_workqueue(info->rpmsg_wq);

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_CLOSE];
		msg->s_msg.header.cmd = TX_CLOSE;
	} else {
		msg = &info->msg[RX_CLOSE];
		msg->s_msg.header.cmd = RX_CLOSE;
	}

	info->send_message(msg, info);

	del_timer(&info->stream_timer[substream->stream].timer);

	rtd->dai_link->ignore_suspend = 0;

	if (info->msg_drop_count[substream->stream])
		dev_warn(rtd->dev, "Msg dropped, count is %d\n",
			 info->msg_drop_count[substream->stream]);

	return ret;
}

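/*
 * Decide whether this stream should run in low power audio (LPA) mode.
 * With LPA the M core keeps the transfer running on its own, so the
 * link may ignore system suspend; force_lpa is then checked by the
 * trigger and ack callbacks to take the LPA-specific paths.
 */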
static int imx_rpmsg_pcm_prepare(struct snd_soc_component *component,
				 struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	struct fsl_rpmsg *rpmsg = dev_get_drvdata(cpu_dai->dev);

	/*
	 * Four conditions determine whether LPA is enabled:
	 * non-MMAP mode, NONBLOCK, version 2, and LPA enabled in the DTS.
	 */
	if ((runtime->access == SNDRV_PCM_ACCESS_RW_INTERLEAVED ||
	     runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) &&
	    rpmsg->enable_lpa) {
		/*
		 * Ignore the suspend operation in low power mode: the
		 * M core continues playback while the A core is suspended.
		 */
		rtd->dai_link->ignore_suspend = 1;
		rpmsg->force_lpa = 1;
	} else {
		rpmsg->force_lpa = 0;
	}

	return 0;
}

static void imx_rpmsg_pcm_dma_complete(void *arg)
{
	struct snd_pcm_substream *substream = arg;

	snd_pcm_period_elapsed(substream);
}

static int imx_rpmsg_prepare_and_submit(struct snd_soc_component *component,
					struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_BUFFER];
		msg->s_msg.header.cmd = TX_BUFFER;
	} else {
		msg = &info->msg[RX_BUFFER];
		msg->s_msg.header.cmd = RX_BUFFER;
	}

	/* Send the buffer address and buffer size */
	msg->s_msg.param.buffer_addr = substream->runtime->dma_addr;
	msg->s_msg.param.buffer_size = snd_pcm_lib_buffer_bytes(substream);
	msg->s_msg.param.period_size = snd_pcm_lib_period_bytes(substream);
	msg->s_msg.param.buffer_tail = 0;

	info->num_period[substream->stream] = msg->s_msg.param.buffer_size /
					      msg->s_msg.param.period_size;

	info->callback[substream->stream] = imx_rpmsg_pcm_dma_complete;
	info->callback_param[substream->stream] = substream;

	return imx_rpmsg_insert_workqueue(substream, msg, info);
}

static int imx_rpmsg_async_issue_pending(struct snd_soc_component *component,
					 struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_START];
		msg->s_msg.header.cmd = TX_START;
	} else {
		msg = &info->msg[RX_START];
		msg->s_msg.header.cmd = RX_START;
	}

	return imx_rpmsg_insert_workqueue(substream, msg, info);
}

static int imx_rpmsg_restart(struct snd_soc_component *component,
			     struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_RESTART];
		msg->s_msg.header.cmd = TX_RESTART;
	} else {
		msg = &info->msg[RX_RESTART];
		msg->s_msg.header.cmd = RX_RESTART;
	}

	return imx_rpmsg_insert_workqueue(substream, msg, info);
}

static int imx_rpmsg_pause(struct snd_soc_component *component,
			   struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_PAUSE];
		msg->s_msg.header.cmd = TX_PAUSE;
	} else {
		msg = &info->msg[RX_PAUSE];
		msg->s_msg.header.cmd = RX_PAUSE;
	}

	return imx_rpmsg_insert_workqueue(substream, msg, info);
}

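/*
 * Stop the stream on the M core side: clear the local period counters
 * and pointer offset, cancel a possibly pending LPA timer, and queue
 * the TERMINATE command. See imx_rpmsg_pcm_send_message() for why the
 * pointer is reset again when the TERMINATE response comes back.
 */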
static int imx_rpmsg_terminate_all(struct snd_soc_component *component,
				   struct snd_pcm_substream *substream)
{
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	struct rpmsg_msg *msg;
	int cmd;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_TERMINATE];
		msg->s_msg.header.cmd = TX_TERMINATE;
		/* Clear the buffer counters */
		cmd = TX_PERIOD_DONE + MSG_TYPE_A_NUM;
		info->msg[cmd].s_msg.param.buffer_tail = 0;
		info->msg[cmd].r_msg.param.buffer_tail = 0;
		info->msg[TX_POINTER].r_msg.param.buffer_offset = 0;
	} else {
		msg = &info->msg[RX_TERMINATE];
		msg->s_msg.header.cmd = RX_TERMINATE;
		/* Clear the buffer counters */
		cmd = RX_PERIOD_DONE + MSG_TYPE_A_NUM;
		info->msg[cmd].s_msg.param.buffer_tail = 0;
		info->msg[cmd].r_msg.param.buffer_tail = 0;
		info->msg[RX_POINTER].r_msg.param.buffer_offset = 0;
	}

	del_timer(&info->stream_timer[substream->stream].timer);

	return imx_rpmsg_insert_workqueue(substream, msg, info);
}

static int imx_rpmsg_pcm_trigger(struct snd_soc_component *component,
				 struct snd_pcm_substream *substream, int cmd)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	struct fsl_rpmsg *rpmsg = dev_get_drvdata(cpu_dai->dev);
	int ret = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		ret = imx_rpmsg_prepare_and_submit(component, substream);
		if (ret)
			return ret;
		ret = imx_rpmsg_async_issue_pending(component, substream);
		break;
	case SNDRV_PCM_TRIGGER_RESUME:
		if (rpmsg->force_lpa)
			break;
		fallthrough;
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		ret = imx_rpmsg_restart(component, substream);
		break;
	case SNDRV_PCM_TRIGGER_SUSPEND:
		if (!rpmsg->force_lpa) {
			if (runtime->info & SNDRV_PCM_INFO_PAUSE)
				ret = imx_rpmsg_pause(component, substream);
			else
				ret = imx_rpmsg_terminate_all(component, substream);
		}
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		ret = imx_rpmsg_pause(component, substream);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		ret = imx_rpmsg_terminate_all(component, substream);
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	return 0;
}

/*
 * imx_rpmsg_pcm_ack
 *
 * Send the period index to the M core through rpmsg. Not every period
 * index is sent; unnecessary messages are skipped to reduce the
 * pressure on the rpmsg bandwidth.
 */
static int imx_rpmsg_pcm_ack(struct snd_soc_component *component,
			     struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	struct fsl_rpmsg *rpmsg = dev_get_drvdata(cpu_dai->dev);
	struct rpmsg_info *info = dev_get_drvdata(component->dev);
	snd_pcm_uframes_t period_size = runtime->period_size;
	snd_pcm_sframes_t avail;
	struct timer_list *timer;
	struct rpmsg_msg *msg;
	unsigned long flags;
	int buffer_tail = 0;
	int written_num;

	if (!rpmsg->force_lpa)
		return 0;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		msg = &info->msg[TX_PERIOD_DONE + MSG_TYPE_A_NUM];
		msg->s_msg.header.cmd = TX_PERIOD_DONE;
	} else {
		msg = &info->msg[RX_PERIOD_DONE + MSG_TYPE_A_NUM];
		msg->s_msg.header.cmd = RX_PERIOD_DONE;
	}

	msg->s_msg.header.type = MSG_TYPE_C;

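	/*
	 * Convert the application pointer into a period index. For
	 * example (hypothetical numbers), with a 65536-byte buffer and
	 * 8192-byte periods, an appl_ptr at byte 20480 maps to index 2.
	 */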
	buffer_tail = (frames_to_bytes(runtime, runtime->control->appl_ptr) %
		       snd_pcm_lib_buffer_bytes(substream));
	buffer_tail = buffer_tail / snd_pcm_lib_period_bytes(substream);

	/* The period index has been updated */
	if (buffer_tail != msg->s_msg.param.buffer_tail) {
		written_num = buffer_tail - msg->s_msg.param.buffer_tail;
		if (written_num < 0)
			written_num += runtime->periods;

		msg->s_msg.param.buffer_tail = buffer_tail;

		/* Update the notification message to the latest state */
		spin_lock_irqsave(&info->lock[substream->stream], flags);
		memcpy(&info->notify[substream->stream], msg,
		       sizeof(struct rpmsg_s_msg));
		info->notify_updated[substream->stream] = true;
		spin_unlock_irqrestore(&info->lock[substream->stream], flags);

		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			avail = snd_pcm_playback_hw_avail(runtime);
		else
			avail = snd_pcm_capture_hw_avail(runtime);

		timer = &info->stream_timer[substream->stream].timer;
		/*
		 * If less than one period of data was in the buffer before
		 * this fill, the M core may be running short of data, so
		 * send the message immediately to let it know the pointer
		 * has been updated.
		 * If more than one period was already buffered, the M core
		 * has enough data and the message can be delayed by one
		 * period (using the timer) to reduce the number of messages
		 * in the workqueue; since the pointer may be updated again
		 * by a later ack, the latest pointer is what finally gets
		 * sent to the M core.
		 */
		if ((avail - written_num * period_size) <= period_size) {
			imx_rpmsg_insert_workqueue(substream, msg, info);
		} else if (rpmsg->force_lpa && !timer_pending(timer)) {
			int time_msec;

			time_msec = (int)(runtime->period_size * 1000 / runtime->rate);
			mod_timer(timer, jiffies + msecs_to_jiffies(time_msec));
		}
	}

	return 0;
}

static int imx_rpmsg_pcm_new(struct snd_soc_component *component,
			     struct snd_soc_pcm_runtime *rtd)
{
	struct snd_card *card = rtd->card->snd_card;
	struct snd_pcm *pcm = rtd->pcm;
	struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
	struct fsl_rpmsg *rpmsg = dev_get_drvdata(cpu_dai->dev);
	int ret;

	ret = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	imx_rpmsg_pcm_hardware.buffer_bytes_max = rpmsg->buffer_size;
	return snd_pcm_set_fixed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV_WC,
					    pcm->card->dev, rpmsg->buffer_size);
}

static const struct snd_soc_component_driver imx_rpmsg_soc_component = {
	.name = IMX_PCM_DRV_NAME,
	.pcm_construct = imx_rpmsg_pcm_new,
	.open = imx_rpmsg_pcm_open,
	.close = imx_rpmsg_pcm_close,
	.hw_params = imx_rpmsg_pcm_hw_params,
	.trigger = imx_rpmsg_pcm_trigger,
	.pointer = imx_rpmsg_pcm_pointer,
	.ack = imx_rpmsg_pcm_ack,
	.prepare = imx_rpmsg_pcm_prepare,
};

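/*
 * Work handler for the ordered rpmsg workqueue. It first flushes any
 * pending TX/RX pointer notifications, then sends the queued command
 * itself, unless that command was a PERIOD_DONE notification which has
 * already been covered by the flush above.
 */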
static void imx_rpmsg_pcm_work(struct work_struct *work)
{
	struct work_of_rpmsg *work_of_rpmsg;
	bool is_notification = false;
	struct rpmsg_info *info;
	struct rpmsg_msg msg;
	unsigned long flags;

	work_of_rpmsg = container_of(work, struct work_of_rpmsg, work);
	info = work_of_rpmsg->info;

	/*
	 * For every work item, first check whether a period-done
	 * notification is pending: the M core side may be short of
	 * data, so it needs to know immediately that data has been
	 * filled.
	 */
	spin_lock_irqsave(&info->lock[TX], flags);
	if (info->notify_updated[TX]) {
		memcpy(&msg, &info->notify[TX], sizeof(struct rpmsg_s_msg));
		info->notify_updated[TX] = false;
		spin_unlock_irqrestore(&info->lock[TX], flags);
		info->send_message(&msg, info);
	} else {
		spin_unlock_irqrestore(&info->lock[TX], flags);
	}

	spin_lock_irqsave(&info->lock[RX], flags);
	if (info->notify_updated[RX]) {
		memcpy(&msg, &info->notify[RX], sizeof(struct rpmsg_s_msg));
		info->notify_updated[RX] = false;
		spin_unlock_irqrestore(&info->lock[RX], flags);
		info->send_message(&msg, info);
	} else {
		spin_unlock_irqrestore(&info->lock[RX], flags);
	}

	/* Skip the notification message since it has been handled above */
	if (work_of_rpmsg->msg.s_msg.header.type == MSG_TYPE_C &&
	    (work_of_rpmsg->msg.s_msg.header.cmd == TX_PERIOD_DONE ||
	     work_of_rpmsg->msg.s_msg.header.cmd == RX_PERIOD_DONE))
		is_notification = true;

	if (!is_notification)
		info->send_message(&work_of_rpmsg->msg, info);

	/* Update the read index */
	spin_lock_irqsave(&info->wq_lock, flags);
	info->work_read_index++;
	info->work_read_index %= WORK_MAX_NUM;
	spin_unlock_irqrestore(&info->wq_lock, flags);
}

static int imx_rpmsg_pcm_probe(struct platform_device *pdev)
{
	struct snd_soc_component *component;
	struct rpmsg_info *info;
	int ret, i;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	platform_set_drvdata(pdev, info);

	info->rpdev = container_of(pdev->dev.parent, struct rpmsg_device, dev);
	info->dev = &pdev->dev;
	/* Set up the work queue */
	info->rpmsg_wq = alloc_ordered_workqueue(info->rpdev->id.name,
						 WQ_HIGHPRI |
						 WQ_UNBOUND |
						 WQ_FREEZABLE);
	if (!info->rpmsg_wq) {
		dev_err(&pdev->dev, "workqueue create failed\n");
		return -ENOMEM;
	}

	/* Initialize the write index to 1 so that it differs from the read index */
	info->work_write_index = 1;
	info->send_message = imx_rpmsg_pcm_send_message;

	for (i = 0; i < WORK_MAX_NUM; i++) {
		INIT_WORK(&info->work_list[i].work, imx_rpmsg_pcm_work);
		info->work_list[i].info = info;
	}

	/* Initialize the message headers */
	for (i = 0; i < MSG_MAX_NUM; i++) {
		info->msg[i].s_msg.header.cate = IMX_RPMSG_AUDIO;
		info->msg[i].s_msg.header.major = IMX_RMPSG_MAJOR;
		info->msg[i].s_msg.header.minor = IMX_RMPSG_MINOR;
		info->msg[i].s_msg.header.type = MSG_TYPE_A;
		info->msg[i].s_msg.param.audioindex = 0;
	}

	init_completion(&info->cmd_complete);
	mutex_init(&info->msg_lock);
	spin_lock_init(&info->lock[TX]);
	spin_lock_init(&info->lock[RX]);
	spin_lock_init(&info->wq_lock);

	ret = devm_snd_soc_register_component(&pdev->dev,
					      &imx_rpmsg_soc_component,
					      NULL, 0);
	if (ret)
		goto fail;

	component = snd_soc_lookup_component(&pdev->dev, NULL);
	if (!component) {
		ret = -EINVAL;
		goto fail;
	}

	/* The platform component name is used by the machine driver to link with */
	component->name = info->rpdev->id.name;

#ifdef CONFIG_DEBUG_FS
	component->debugfs_prefix = "rpmsg";
#endif

	return 0;

fail:
	if (info->rpmsg_wq)
		destroy_workqueue(info->rpmsg_wq);

	return ret;
}

static int imx_rpmsg_pcm_remove(struct platform_device *pdev)
{
	struct rpmsg_info *info = platform_get_drvdata(pdev);

	if (info->rpmsg_wq)
		destroy_workqueue(info->rpmsg_wq);

	return 0;
}

#ifdef CONFIG_PM
static int imx_rpmsg_pcm_runtime_resume(struct device *dev)
{
	struct rpmsg_info *info = dev_get_drvdata(dev);

	cpu_latency_qos_add_request(&info->pm_qos_req, 0);

	return 0;
}

static int imx_rpmsg_pcm_runtime_suspend(struct device *dev)
{
	struct rpmsg_info *info = dev_get_drvdata(dev);

	cpu_latency_qos_remove_request(&info->pm_qos_req);

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int imx_rpmsg_pcm_suspend(struct device *dev)
{
	struct rpmsg_info *info = dev_get_drvdata(dev);
	struct rpmsg_msg *rpmsg_tx;
	struct rpmsg_msg *rpmsg_rx;

	rpmsg_tx = &info->msg[TX_SUSPEND];
	rpmsg_rx = &info->msg[RX_SUSPEND];

	rpmsg_tx->s_msg.header.cmd = TX_SUSPEND;
	info->send_message(rpmsg_tx, info);

	rpmsg_rx->s_msg.header.cmd = RX_SUSPEND;
	info->send_message(rpmsg_rx, info);

	return 0;
}

static int imx_rpmsg_pcm_resume(struct device *dev)
{
	struct rpmsg_info *info = dev_get_drvdata(dev);
	struct rpmsg_msg *rpmsg_tx;
	struct rpmsg_msg *rpmsg_rx;

	rpmsg_tx = &info->msg[TX_RESUME];
	rpmsg_rx = &info->msg[RX_RESUME];

	rpmsg_tx->s_msg.header.cmd = TX_RESUME;
	info->send_message(rpmsg_tx, info);

	rpmsg_rx->s_msg.header.cmd = RX_RESUME;
	info->send_message(rpmsg_rx, info);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops imx_rpmsg_pcm_pm_ops = {
	SET_RUNTIME_PM_OPS(imx_rpmsg_pcm_runtime_suspend,
			   imx_rpmsg_pcm_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(imx_rpmsg_pcm_suspend,
				imx_rpmsg_pcm_resume)
};

static struct platform_driver imx_pcm_rpmsg_driver = {
	.probe = imx_rpmsg_pcm_probe,
	.remove = imx_rpmsg_pcm_remove,
	.driver = {
		.name = IMX_PCM_DRV_NAME,
		.pm = &imx_rpmsg_pcm_pm_ops,
	},
};
module_platform_driver(imx_pcm_rpmsg_driver);

MODULE_DESCRIPTION("Freescale SoC Audio RPMSG PCM interface");
MODULE_AUTHOR("Shengjiu Wang <shengjiu.wang@nxp.com>");
MODULE_ALIAS("platform:" IMX_PCM_DRV_NAME);
MODULE_LICENSE("GPL v2");