/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * Copyright(c) 2018 Intel Corporation. All rights reserved.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 */

#ifndef __SOUND_SOC_SOF_IO_H
#define __SOUND_SOC_SOF_IO_H

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <sound/pcm.h>
#include "sof-priv.h"

#define sof_ops(sdev) \
	((sdev)->pdata->desc->ops)

static inline int sof_ops_init(struct snd_sof_dev *sdev)
{
	if (sdev->pdata->desc->ops_init)
		return sdev->pdata->desc->ops_init(sdev);

	return 0;
}

/* Mandatory operations are verified during probing */

/* init */
static inline int snd_sof_probe(struct snd_sof_dev *sdev)
{
	return sof_ops(sdev)->probe(sdev);
}

static inline int snd_sof_remove(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->remove)
		return sof_ops(sdev)->remove(sdev);

	return 0;
}

static inline int snd_sof_shutdown(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->shutdown)
		return sof_ops(sdev)->shutdown(sdev);

	return 0;
}

/* control */

/*
 * snd_sof_dsp_run returns the core mask of the cores that are available
 * after successful fw boot
 */
static inline int snd_sof_dsp_run(struct snd_sof_dev *sdev)
{
	return sof_ops(sdev)->run(sdev);
}

static inline int snd_sof_dsp_stall(struct snd_sof_dev *sdev, unsigned int core_mask)
{
	if (sof_ops(sdev)->stall)
		return sof_ops(sdev)->stall(sdev, core_mask);

	return 0;
}

static inline int snd_sof_dsp_reset(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->reset)
		return sof_ops(sdev)->reset(sdev);

	return 0;
}

/* dsp core get/put */
static inline int snd_sof_dsp_core_get(struct snd_sof_dev *sdev, int core)
{
	if (core > sdev->num_cores - 1) {
		dev_err(sdev->dev, "invalid core id: %d for num_cores: %d\n", core,
			sdev->num_cores);
		return -EINVAL;
	}

	if (sof_ops(sdev)->core_get) {
		int ret;

		/* if current ref_count is > 0, increment it and return */
		if (sdev->dsp_core_ref_count[core] > 0) {
			sdev->dsp_core_ref_count[core]++;
			return 0;
		}

		/* power up the core */
		ret = sof_ops(sdev)->core_get(sdev, core);
		if (ret < 0)
			return ret;

		/* increment ref_count */
		sdev->dsp_core_ref_count[core]++;

		/* and update enabled_cores_mask */
		sdev->enabled_cores_mask |= BIT(core);

		dev_dbg(sdev->dev, "Core %d powered up\n", core);
	}

	return 0;
}

static inline int snd_sof_dsp_core_put(struct snd_sof_dev *sdev, int core)
{
	if (core > sdev->num_cores - 1) {
		dev_err(sdev->dev, "invalid core id: %d for num_cores: %d\n", core,
			sdev->num_cores);
		return -EINVAL;
	}

	if (sof_ops(sdev)->core_put) {
		int ret;

		/* decrement ref_count and return if it is > 0 */
		if (--(sdev->dsp_core_ref_count[core]) > 0)
			return 0;

		/* power down the core */
		ret = sof_ops(sdev)->core_put(sdev, core);
		if (ret < 0)
			return ret;

		/* and update enabled_cores_mask */
		sdev->enabled_cores_mask &= ~BIT(core);

		dev_dbg(sdev->dev, "Core %d powered down\n", core);
	}

	return 0;
}
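
/*
 * Illustrative usage sketch (not part of this header): a caller that needs
 * a secondary DSP core powered for the duration of some work would bracket
 * that work with the ref-counted helpers above. The core index 1 below is
 * a made-up example value.
 *
 *	int ret;
 *
 *	ret = snd_sof_dsp_core_get(sdev, 1);
 *	if (ret < 0)
 *		return ret;
 *
 *	// ... use core 1 ...
 *
 *	snd_sof_dsp_core_put(sdev, 1);
 */
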
/* pre/post fw load */
static inline int snd_sof_dsp_pre_fw_run(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->pre_fw_run)
		return sof_ops(sdev)->pre_fw_run(sdev);

	return 0;
}

static inline int snd_sof_dsp_post_fw_run(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->post_fw_run)
		return sof_ops(sdev)->post_fw_run(sdev);

	return 0;
}

/* parse platform-specific extended manifest */
static inline int snd_sof_dsp_parse_platform_ext_manifest(struct snd_sof_dev *sdev,
							   const struct sof_ext_man_elem_header *hdr)
{
	if (sof_ops(sdev)->parse_platform_ext_manifest)
		return sof_ops(sdev)->parse_platform_ext_manifest(sdev, hdr);

	return 0;
}

/* misc */

/**
 * snd_sof_dsp_get_bar_index - Maps a section type to a BAR index
 *
 * @sdev: sof device
 * @type: section type as described by snd_sof_fw_blk_type
 *
 * Returns the corresponding BAR index (a positive integer) or -EINVAL
 * in case there is no mapping
 */
static inline int snd_sof_dsp_get_bar_index(struct snd_sof_dev *sdev, u32 type)
{
	if (sof_ops(sdev)->get_bar_index)
		return sof_ops(sdev)->get_bar_index(sdev, type);

	return sdev->mmio_bar;
}

static inline int snd_sof_dsp_get_mailbox_offset(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->get_mailbox_offset)
		return sof_ops(sdev)->get_mailbox_offset(sdev);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}

static inline int snd_sof_dsp_get_window_offset(struct snd_sof_dev *sdev,
						u32 id)
{
	if (sof_ops(sdev)->get_window_offset)
		return sof_ops(sdev)->get_window_offset(sdev, id);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}

/* power management */
static inline int snd_sof_dsp_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->resume)
		return sof_ops(sdev)->resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_suspend(struct snd_sof_dev *sdev,
				      u32 target_state)
{
	if (sof_ops(sdev)->suspend)
		return sof_ops(sdev)->suspend(sdev, target_state);

	return 0;
}

static inline int snd_sof_dsp_runtime_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_resume)
		return sof_ops(sdev)->runtime_resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_runtime_suspend(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_suspend)
		return sof_ops(sdev)->runtime_suspend(sdev);

	return 0;
}

static inline int snd_sof_dsp_runtime_idle(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->runtime_idle)
		return sof_ops(sdev)->runtime_idle(sdev);

	return 0;
}

static inline int snd_sof_dsp_hw_params_upon_resume(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev)->set_hw_params_upon_resume)
		return sof_ops(sdev)->set_hw_params_upon_resume(sdev);

	return 0;
}

static inline int snd_sof_dsp_set_clk(struct snd_sof_dev *sdev, u32 freq)
{
	if (sof_ops(sdev)->set_clk)
		return sof_ops(sdev)->set_clk(sdev, freq);

	return 0;
}

static inline int
snd_sof_dsp_set_power_state(struct snd_sof_dev *sdev,
			    const struct sof_dsp_power_state *target_state)
{
	int ret = 0;

	mutex_lock(&sdev->power_state_access);

	if (sof_ops(sdev)->set_power_state)
		ret = sof_ops(sdev)->set_power_state(sdev, target_state);

	mutex_unlock(&sdev->power_state_access);

	return ret;
}
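
/*
 * Illustrative usage sketch: PM code would typically request a target DSP
 * power state through the wrapper above. This assumes the SOF_DSP_PM_*
 * states and struct sof_dsp_power_state layout declared in sof-priv.h;
 * treat the exact fields as an assumption, not a reference.
 *
 *	const struct sof_dsp_power_state target_dsp_state = {
 *		.state = SOF_DSP_PM_D3,
 *	};
 *	int ret;
 *
 *	ret = snd_sof_dsp_set_power_state(sdev, &target_dsp_state);
 *	if (ret < 0)
 *		dev_err(sdev->dev, "failed to enter target power state\n");
 */
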
/* debug */
void snd_sof_dsp_dbg_dump(struct snd_sof_dev *sdev, const char *msg, u32 flags);

static inline int snd_sof_debugfs_add_region_item(struct snd_sof_dev *sdev,
		enum snd_sof_fw_blk_type blk_type, u32 offset, size_t size,
		const char *name, enum sof_debugfs_access_type access_type)
{
	if (sof_ops(sdev) && sof_ops(sdev)->debugfs_add_region_item)
		return sof_ops(sdev)->debugfs_add_region_item(sdev, blk_type, offset,
							      size, name, access_type);

	return 0;
}

/* register IO */
static inline void snd_sof_dsp_write(struct snd_sof_dev *sdev, u32 bar,
				     u32 offset, u32 value)
{
	if (sof_ops(sdev)->write) {
		sof_ops(sdev)->write(sdev, sdev->bar[bar] + offset, value);
		return;
	}

	dev_err_ratelimited(sdev->dev, "error: %s not defined\n", __func__);
}

static inline void snd_sof_dsp_write64(struct snd_sof_dev *sdev, u32 bar,
				       u32 offset, u64 value)
{
	if (sof_ops(sdev)->write64) {
		sof_ops(sdev)->write64(sdev, sdev->bar[bar] + offset, value);
		return;
	}

	dev_err_ratelimited(sdev->dev, "error: %s not defined\n", __func__);
}

static inline u32 snd_sof_dsp_read(struct snd_sof_dev *sdev, u32 bar,
				   u32 offset)
{
	if (sof_ops(sdev)->read)
		return sof_ops(sdev)->read(sdev, sdev->bar[bar] + offset);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}

static inline u64 snd_sof_dsp_read64(struct snd_sof_dev *sdev, u32 bar,
				     u32 offset)
{
	if (sof_ops(sdev)->read64)
		return sof_ops(sdev)->read64(sdev, sdev->bar[bar] + offset);

	dev_err(sdev->dev, "error: %s not defined\n", __func__);
	return -ENOTSUPP;
}
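
/*
 * Illustrative usage sketch: the register IO wrappers above take a BAR
 * index plus an offset within that BAR. The BAR index 0 and offset 0x20
 * below are made-up example values. Read-modify-write of individual bits
 * is usually better served by the snd_sof_dsp_update_bits() helpers
 * declared at the end of this file.
 *
 *	u32 reg;
 *
 *	reg = snd_sof_dsp_read(sdev, 0, 0x20);
 *	reg |= BIT(0);
 *	snd_sof_dsp_write(sdev, 0, 0x20, reg);
 */
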
/* block IO */
static inline int snd_sof_dsp_block_read(struct snd_sof_dev *sdev,
					 enum snd_sof_fw_blk_type blk_type,
					 u32 offset, void *dest, size_t bytes)
{
	return sof_ops(sdev)->block_read(sdev, blk_type, offset, dest, bytes);
}

static inline int snd_sof_dsp_block_write(struct snd_sof_dev *sdev,
					  enum snd_sof_fw_blk_type blk_type,
					  u32 offset, void *src, size_t bytes)
{
	return sof_ops(sdev)->block_write(sdev, blk_type, offset, src, bytes);
}

/* mailbox IO */
static inline void snd_sof_dsp_mailbox_read(struct snd_sof_dev *sdev,
					    u32 offset, void *dest, size_t bytes)
{
	if (sof_ops(sdev)->mailbox_read)
		sof_ops(sdev)->mailbox_read(sdev, offset, dest, bytes);
}

static inline void snd_sof_dsp_mailbox_write(struct snd_sof_dev *sdev,
					     u32 offset, void *src, size_t bytes)
{
	if (sof_ops(sdev)->mailbox_write)
		sof_ops(sdev)->mailbox_write(sdev, offset, src, bytes);
}

/* ipc */
static inline int snd_sof_dsp_send_msg(struct snd_sof_dev *sdev,
				       struct snd_sof_ipc_msg *msg)
{
	return sof_ops(sdev)->send_msg(sdev, msg);
}

/* host PCM ops */
static inline int
snd_sof_pcm_platform_open(struct snd_sof_dev *sdev,
			  struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_open)
		return sof_ops(sdev)->pcm_open(sdev, substream);

	return 0;
}

/* disconnect pcm substream from a host stream */
static inline int
snd_sof_pcm_platform_close(struct snd_sof_dev *sdev,
			   struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_close)
		return sof_ops(sdev)->pcm_close(sdev, substream);

	return 0;
}

/* host stream hw params */
static inline int
snd_sof_pcm_platform_hw_params(struct snd_sof_dev *sdev,
			       struct snd_pcm_substream *substream,
			       struct snd_pcm_hw_params *params,
			       struct snd_sof_platform_stream_params *platform_params)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_params)
		return sof_ops(sdev)->pcm_hw_params(sdev, substream, params,
						    platform_params);

	return 0;
}

/* host stream hw free */
static inline int
snd_sof_pcm_platform_hw_free(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_hw_free)
		return sof_ops(sdev)->pcm_hw_free(sdev, substream);

	return 0;
}

/* host stream trigger */
static inline int
snd_sof_pcm_platform_trigger(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream, int cmd)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_trigger)
		return sof_ops(sdev)->pcm_trigger(sdev, substream, cmd);

	return 0;
}

/* Firmware loading */
static inline int snd_sof_load_firmware(struct snd_sof_dev *sdev)
{
	dev_dbg(sdev->dev, "loading firmware\n");

	return sof_ops(sdev)->load_firmware(sdev);
}

/* host DSP message data */
static inline int snd_sof_ipc_msg_data(struct snd_sof_dev *sdev,
				       struct snd_pcm_substream *substream,
				       void *p, size_t sz)
{
	return sof_ops(sdev)->ipc_msg_data(sdev, substream, p, sz);
}

/* host side configuration of the stream's data offset in stream mailbox area */
static inline int
snd_sof_set_stream_data_offset(struct snd_sof_dev *sdev,
			       struct snd_pcm_substream *substream,
			       size_t posn_offset)
{
	if (sof_ops(sdev) && sof_ops(sdev)->set_stream_data_offset)
		return sof_ops(sdev)->set_stream_data_offset(sdev, substream,
							     posn_offset);

	return 0;
}

/* host stream pointer */
static inline snd_pcm_uframes_t
snd_sof_pcm_platform_pointer(struct snd_sof_dev *sdev,
			     struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_pointer)
		return sof_ops(sdev)->pcm_pointer(sdev, substream);

	return 0;
}

/* pcm ack */
static inline int snd_sof_pcm_platform_ack(struct snd_sof_dev *sdev,
					   struct snd_pcm_substream *substream)
{
	if (sof_ops(sdev) && sof_ops(sdev)->pcm_ack)
		return sof_ops(sdev)->pcm_ack(sdev, substream);

	return 0;
}

/* machine driver */
static inline int
snd_sof_machine_register(struct snd_sof_dev *sdev, void *pdata)
{
	if (sof_ops(sdev) && sof_ops(sdev)->machine_register)
		return sof_ops(sdev)->machine_register(sdev, pdata);

	return 0;
}

static inline void
snd_sof_machine_unregister(struct snd_sof_dev *sdev, void *pdata)
{
	if (sof_ops(sdev) && sof_ops(sdev)->machine_unregister)
		sof_ops(sdev)->machine_unregister(sdev, pdata);
}

static inline struct snd_soc_acpi_mach *
snd_sof_machine_select(struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev) && sof_ops(sdev)->machine_select)
		return sof_ops(sdev)->machine_select(sdev);

	return NULL;
}

static inline void
snd_sof_set_mach_params(struct snd_soc_acpi_mach *mach,
			struct snd_sof_dev *sdev)
{
	if (sof_ops(sdev) && sof_ops(sdev)->set_mach_params)
		sof_ops(sdev)->set_mach_params(mach, sdev);
}

/**
 * snd_sof_dsp_read_poll_timeout - Periodically poll a register
 * until a condition is met or a timeout occurs
 * @sdev: sof device
 * @bar: BAR index to read from
 * @offset: Register offset within @bar
 * @val: Variable to read the value into
 * @cond: Break condition (usually involving @val)
 * @sleep_us: Maximum time to sleep between reads in us (0
 *            tight-loops). Should be less than ~20ms since usleep_range
 *            is used (see Documentation/timers/timers-howto.rst).
 * @timeout_us: Timeout in us, 0 means never timeout
 *
 * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
 * case, the last read value of the register is stored in @val. Must not
 * be called from atomic context if sleep_us or timeout_us are used.
 *
 * This is modelled after the readx_poll_timeout macros in linux/iopoll.h.
 */
#define snd_sof_dsp_read_poll_timeout(sdev, bar, offset, val, cond, sleep_us, timeout_us) \
({ \
	u64 __timeout_us = (timeout_us); \
	unsigned long __sleep_us = (sleep_us); \
	ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
	might_sleep_if((__sleep_us) != 0); \
	for (;;) { \
		(val) = snd_sof_dsp_read(sdev, bar, offset); \
		if (cond) { \
			dev_dbg(sdev->dev, \
				"FW Poll Status: reg[%#x]=%#x successful\n", \
				(offset), (val)); \
			break; \
		} \
		if (__timeout_us && \
		    ktime_compare(ktime_get(), __timeout) > 0) { \
			(val) = snd_sof_dsp_read(sdev, bar, offset); \
			dev_dbg(sdev->dev, \
				"FW Poll Status: reg[%#x]=%#x timedout\n", \
				(offset), (val)); \
			break; \
		} \
		if (__sleep_us) \
			usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
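
/*
 * Illustrative usage sketch: poll a hypothetical status register until the
 * firmware sets a "ready" bit, sleeping roughly 500 us between reads and
 * giving up after 100 ms. The BAR index, register offset and ready bit
 * below are made-up example values.
 *
 *	u32 status;
 *	int ret;
 *
 *	ret = snd_sof_dsp_read_poll_timeout(sdev, 0, 0x24, status,
 *					    status & BIT(0),
 *					    500, 100 * USEC_PER_MSEC);
 *	if (ret < 0)
 *		dev_err(sdev->dev, "timed out waiting for firmware ready\n");
 */
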
/* This is for register bits with attribute RWC */
bool snd_sof_pci_update_bits(struct snd_sof_dev *sdev, u32 offset,
			     u32 mask, u32 value);

bool snd_sof_dsp_update_bits_unlocked(struct snd_sof_dev *sdev, u32 bar,
				      u32 offset, u32 mask, u32 value);

bool snd_sof_dsp_update_bits64_unlocked(struct snd_sof_dev *sdev, u32 bar,
					u32 offset, u64 mask, u64 value);

bool snd_sof_dsp_update_bits(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			     u32 mask, u32 value);

bool snd_sof_dsp_update_bits64(struct snd_sof_dev *sdev, u32 bar,
			       u32 offset, u64 mask, u64 value);

void snd_sof_dsp_update_bits_forced(struct snd_sof_dev *sdev, u32 bar,
				    u32 offset, u32 mask, u32 value);

int snd_sof_dsp_register_poll(struct snd_sof_dev *sdev, u32 bar, u32 offset,
			      u32 mask, u32 target, u32 timeout_ms,
			      u32 interval_us);

void snd_sof_dsp_panic(struct snd_sof_dev *sdev, u32 offset, bool non_recoverable);
#endif