// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
// Copyright(c) 2015-17 Intel Corporation.

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/mod_devicetable.h>
#include <linux/pm_runtime.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>
#include "bus.h"
#include "irq.h"
#include "sysfs_local.h"

static DEFINE_IDA(sdw_bus_ida);

static int sdw_get_id(struct sdw_bus *bus)
{
	int rc = ida_alloc(&sdw_bus_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;

	bus->id = rc;
	return 0;
}

/**
 * sdw_bus_master_add() - add a bus Master instance
 * @bus: bus instance
 * @parent: parent device
 * @fwnode: firmware node handle
 *
 * Initializes the bus instance, reads properties and creates child
 * devices.
 */
int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
		       struct fwnode_handle *fwnode)
{
	struct sdw_master_prop *prop = NULL;
	int ret;

	if (!parent) {
		pr_err("SoundWire parent device is not set\n");
		return -ENODEV;
	}

	ret = sdw_get_id(bus);
	if (ret < 0) {
		dev_err(parent, "Failed to get bus id\n");
		return ret;
	}

	ret = sdw_master_device_add(bus, parent, fwnode);
	if (ret < 0) {
		dev_err(parent, "Failed to add master device at link %d\n",
			bus->link_id);
		return ret;
	}

	if (!bus->ops) {
		dev_err(bus->dev, "SoundWire Bus ops are not set\n");
		return -EINVAL;
	}

	if (!bus->compute_params) {
		dev_err(bus->dev,
			"Bandwidth allocation not configured, compute_params not set\n");
		return -EINVAL;
	}

	/*
	 * Give each bus_lock and msg_lock a unique key so that lockdep won't
	 * trigger a deadlock warning when the locks of several buses are
	 * grabbed during configuration of a multi-bus stream.
	 */
	lockdep_register_key(&bus->msg_lock_key);
	__mutex_init(&bus->msg_lock, "msg_lock", &bus->msg_lock_key);

	lockdep_register_key(&bus->bus_lock_key);
	__mutex_init(&bus->bus_lock, "bus_lock", &bus->bus_lock_key);

	INIT_LIST_HEAD(&bus->slaves);
	INIT_LIST_HEAD(&bus->m_rt_list);

	/*
	 * Initialize multi_link flag
	 */
	bus->multi_link = false;
	if (bus->ops->read_prop) {
		ret = bus->ops->read_prop(bus);
		if (ret < 0) {
			dev_err(bus->dev,
				"Bus read properties failed:%d\n", ret);
			return ret;
		}
	}

	sdw_bus_debugfs_init(bus);

	/*
	 * Device numbers in SoundWire are 0 through 15. Enumeration device
	 * number (0), Broadcast device number (15), Group numbers (12 and
	 * 13) and Master device number (14) are not used for assignment so
	 * mask these and other higher bits.
	 */

	/* Set higher order bits */
	*bus->assigned = ~GENMASK(SDW_BROADCAST_DEV_NUM, SDW_ENUM_DEV_NUM);

	/* Set enumeration device number and broadcast device number */
	set_bit(SDW_ENUM_DEV_NUM, bus->assigned);
	set_bit(SDW_BROADCAST_DEV_NUM, bus->assigned);

	/* Set group device numbers and master device number */
	set_bit(SDW_GROUP12_DEV_NUM, bus->assigned);
	set_bit(SDW_GROUP13_DEV_NUM, bus->assigned);
	set_bit(SDW_MASTER_DEV_NUM, bus->assigned);

	/*
	 * SDW is an enumerable bus, but devices can be powered off. So,
	 * they won't be able to report as present.
	 *
	 * Create Slave devices based on Slaves described in
	 * the respective firmware (ACPI/DT)
	 */
	if (IS_ENABLED(CONFIG_ACPI) && ACPI_HANDLE(bus->dev))
		ret = sdw_acpi_find_slaves(bus);
	else if (IS_ENABLED(CONFIG_OF) && bus->dev->of_node)
		ret = sdw_of_find_slaves(bus);
	else
		ret = -ENOTSUPP; /* No ACPI/DT so error out */

	if (ret < 0) {
		dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
		return ret;
	}

	/*
	 * Initialize clock values based on Master properties. The max
	 * frequency is read from max_clk_freq property. Current assumption
	 * is that the bus will start at highest clock frequency when
	 * powered on.
	 *
	 * Default active bank will be 0 as out of reset the Slaves have
	 * to start with bank 0 (Table 40 of Spec)
	 */
	prop = &bus->prop;
	bus->params.max_dr_freq = prop->max_clk_freq * SDW_DOUBLE_RATE_FACTOR;
	bus->params.curr_dr_freq = bus->params.max_dr_freq;
	bus->params.curr_bank = SDW_BANK0;
	bus->params.next_bank = SDW_BANK1;

	ret = sdw_irq_create(bus, fwnode);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(sdw_bus_master_add);

static int sdw_delete_slave(struct device *dev, void *data)
{
	struct sdw_slave *slave = dev_to_sdw_dev(dev);
	struct sdw_bus *bus = slave->bus;

	pm_runtime_disable(dev);

	sdw_slave_debugfs_exit(slave);

	mutex_lock(&bus->bus_lock);

	if (slave->dev_num) { /* clear dev_num if assigned */
		clear_bit(slave->dev_num, bus->assigned);
		if (bus->ops && bus->ops->put_device_num)
			bus->ops->put_device_num(bus, slave);
	}
	list_del_init(&slave->node);
	mutex_unlock(&bus->bus_lock);

	device_unregister(dev);
	return 0;
}

/**
 * sdw_bus_master_delete() - delete the bus master instance
 * @bus: bus to be deleted
 *
 * Remove the instance, delete the child devices.
 */
void sdw_bus_master_delete(struct sdw_bus *bus)
{
	device_for_each_child(bus->dev, NULL, sdw_delete_slave);

	sdw_irq_delete(bus);

	sdw_master_device_del(bus);

	sdw_bus_debugfs_exit(bus);
	lockdep_unregister_key(&bus->bus_lock_key);
	lockdep_unregister_key(&bus->msg_lock_key);
	ida_free(&sdw_bus_ida, bus->id);
}
EXPORT_SYMBOL(sdw_bus_master_delete);

/*
 * SDW IO Calls
 */

static inline int find_response_code(enum sdw_command_response resp)
{
	switch (resp) {
	case SDW_CMD_OK:
		return 0;

	case SDW_CMD_IGNORED:
		return -ENODATA;

	case SDW_CMD_TIMEOUT:
		return -ETIMEDOUT;

	default:
		return -EIO;
	}
}

static inline int do_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int retry = bus->prop.err_threshold;
	enum sdw_command_response resp;
	int ret = 0, i;

	for (i = 0; i <= retry; i++) {
		resp = bus->ops->xfer_msg(bus, msg);
		ret = find_response_code(resp);

		/* if cmd is ok or ignored return */
		if (ret == 0 || ret == -ENODATA)
			return ret;
	}

	return ret;
}

static inline int do_transfer_defer(struct sdw_bus *bus,
				    struct sdw_msg *msg)
{
	struct sdw_defer *defer = &bus->defer_msg;
	int retry = bus->prop.err_threshold;
	enum sdw_command_response resp;
	int ret = 0, i;

	defer->msg = msg;
	defer->length = msg->len;
	init_completion(&defer->complete);

	for (i = 0; i <= retry; i++) {
		resp = bus->ops->xfer_msg_defer(bus);
		ret = find_response_code(resp);
		/* if cmd is ok or ignored return */
		if (ret == 0 || ret == -ENODATA)
			return ret;
	}

	return ret;
}

static int sdw_transfer_unlocked(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int ret;

	ret = do_transfer(bus, msg);
	if (ret != 0 && ret != -ENODATA)
		dev_err(bus->dev, "trf on Slave %d failed:%d %s addr %x count %d\n",
			msg->dev_num, ret,
			(msg->flags & SDW_MSG_FLAG_WRITE) ? "write" : "read",
			msg->addr, msg->len);

	return ret;
}

/**
 * sdw_transfer() - Synchronous transfer message to a SDW Slave device
 * @bus: SDW bus
 * @msg: SDW message to be xfered
 */
int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int ret;

	mutex_lock(&bus->msg_lock);

	ret = sdw_transfer_unlocked(bus, msg);

	mutex_unlock(&bus->msg_lock);

	return ret;
}

/**
 * sdw_show_ping_status() - Direct report of PING status, to be used by Peripheral drivers
 * @bus: SDW bus
 * @sync_delay: Delay before reading status
 */
void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay)
{
	u32 status;

	if (!bus->ops->read_ping_status)
		return;

	/*
	 * wait for peripheral to sync if desired. 10-15ms should be more than
	 * enough in most cases.
	 */
	if (sync_delay)
		usleep_range(10000, 15000);

	mutex_lock(&bus->msg_lock);

	status = bus->ops->read_ping_status(bus);

	mutex_unlock(&bus->msg_lock);

	if (!status)
		dev_warn(bus->dev, "%s: no peripherals attached\n", __func__);
	else
		dev_dbg(bus->dev, "PING status: %#x\n", status);
}
EXPORT_SYMBOL(sdw_show_ping_status);

/**
 * sdw_transfer_defer() - Asynchronously transfer message to a SDW Slave device
 * @bus: SDW bus
 * @msg: SDW message to be xfered
 *
 * Caller needs to hold the msg_lock lock while calling this
 */
int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int ret;

	if (!bus->ops->xfer_msg_defer)
		return -ENOTSUPP;

	ret = do_transfer_defer(bus, msg);
	if (ret != 0 && ret != -ENODATA)
		dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n",
			msg->dev_num, ret);

	return ret;
}

int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
		 u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf)
{
	memset(msg, 0, sizeof(*msg));
	msg->addr = addr; /* addr is 16 bit and truncated here */
	msg->len = count;
	msg->dev_num = dev_num;
	msg->flags = flags;
	msg->buf = buf;

	if (addr < SDW_REG_NO_PAGE) /* no paging area */
		return 0;

	if (addr >= SDW_REG_MAX) { /* illegal addr */
		pr_err("SDW: Invalid address %x passed\n", addr);
		return -EINVAL;
	}

	if (addr < SDW_REG_OPTIONAL_PAGE) { /* 32k but no page */
		if (slave && !slave->prop.paging_support)
			return 0;
		/* no need for else as that will fall-through to paging */
	}

	/* paging mandatory */
	if (dev_num == SDW_ENUM_DEV_NUM || dev_num == SDW_BROADCAST_DEV_NUM) {
		pr_err("SDW: Invalid device for paging :%d\n", dev_num);
		return -EINVAL;
	}

	if (!slave) {
		pr_err("SDW: No slave for paging addr\n");
		return -EINVAL;
	}

	if (!slave->prop.paging_support) {
		dev_err(&slave->dev,
			"address %x needs paging but no support\n", addr);
		return -EINVAL;
	}

	msg->addr_page1 = FIELD_GET(SDW_SCP_ADDRPAGE1_MASK, addr);
	msg->addr_page2 = FIELD_GET(SDW_SCP_ADDRPAGE2_MASK, addr);
	msg->addr |= BIT(15);
	msg->page = true;

	return 0;
}

/*
 * Read/Write IO functions.
 */

static int sdw_ntransfer_no_pm(struct sdw_slave *slave, u32 addr, u8 flags,
			       size_t count, u8 *val)
{
	struct sdw_msg msg;
	size_t size;
	int ret;

	while (count) {
		// Only handle bytes up to next page boundary
		size = min_t(size_t, count, (SDW_REGADDR + 1) - (addr & SDW_REGADDR));

		ret = sdw_fill_msg(&msg, slave, addr, size, slave->dev_num, flags, val);
		if (ret < 0)
			return ret;

		ret = sdw_transfer(slave->bus, &msg);
		if (ret < 0 && !slave->is_mockup_device)
			return ret;

		addr += size;
		val += size;
		count -= size;
	}

	return 0;
}

/**
 * sdw_nread_no_pm() - Read "n" contiguous SDW Slave registers with no PM
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be read
 *
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
	return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_READ, count, val);
}
EXPORT_SYMBOL(sdw_nread_no_pm);

/**
 * sdw_nwrite_no_pm() - Write "n" contiguous SDW Slave registers with no PM
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be written
 *
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
	return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_WRITE, count, (u8 *)val);
}
EXPORT_SYMBOL(sdw_nwrite_no_pm);

/**
 * sdw_write_no_pm() - Write a SDW Slave register with no PM
 * @slave: SDW Slave
 * @addr: Register address
 * @value: Register value
 */
int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
{
	return sdw_nwrite_no_pm(slave, addr, 1, &value);
}
EXPORT_SYMBOL(sdw_write_no_pm);

static int
sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
{
	struct sdw_msg msg;
	u8 buf;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_READ, &buf);
	if (ret < 0)
		return ret;

	ret = sdw_transfer(bus, &msg);
	if (ret < 0)
		return ret;

	return buf;
}

static int
sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
{
	struct sdw_msg msg;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_WRITE, &value);
	if (ret < 0)
		return ret;

	return sdw_transfer(bus, &msg);
}

int sdw_bread_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr)
{
	struct sdw_msg msg;
	u8 buf;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_READ, &buf);
	if (ret < 0)
		return ret;

	ret = sdw_transfer_unlocked(bus, &msg);
	if (ret < 0)
		return ret;

	return buf;
}
EXPORT_SYMBOL(sdw_bread_no_pm_unlocked);

int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
{
	struct sdw_msg msg;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_WRITE, &value);
	if (ret < 0)
		return ret;

	return sdw_transfer_unlocked(bus, &msg);
}
EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked);

/**
 * sdw_read_no_pm() - Read a SDW Slave register with no PM
 * @slave: SDW Slave
 * @addr: Register address
 */
int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
{
	u8 buf;
	int ret;

	ret = sdw_nread_no_pm(slave, addr, 1, &buf);
	if (ret < 0)
		return ret;
	else
		return buf;
}
EXPORT_SYMBOL(sdw_read_no_pm);

int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
{
	int tmp;

	tmp = sdw_read_no_pm(slave, addr);
	if (tmp < 0)
		return tmp;

	tmp = (tmp & ~mask) | val;
	return sdw_write_no_pm(slave, addr, tmp);
}
EXPORT_SYMBOL(sdw_update_no_pm);

/* Read-Modify-Write Slave register */
int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
{
	int tmp;

	tmp = sdw_read(slave, addr);
	if (tmp < 0)
		return tmp;

	tmp = (tmp & ~mask) | val;
	return sdw_write(slave, addr, tmp);
}
EXPORT_SYMBOL(sdw_update);

/**
 * sdw_nread() - Read "n" contiguous SDW Slave registers
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be read
 *
 * This version of the function will take a PM reference to the slave
 * device.
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
	int ret;

	ret = pm_runtime_get_sync(&slave->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_noidle(&slave->dev);
		return ret;
	}

	ret = sdw_nread_no_pm(slave, addr, count, val);

	pm_runtime_mark_last_busy(&slave->dev);
	pm_runtime_put(&slave->dev);

	return ret;
}
EXPORT_SYMBOL(sdw_nread);

/**
 * sdw_nwrite() - Write "n" contiguous SDW Slave registers
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be written
 *
 * This version of the function will take a PM reference to the slave
 * device.
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
	int ret;

	ret = pm_runtime_get_sync(&slave->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_noidle(&slave->dev);
		return ret;
	}

	ret = sdw_nwrite_no_pm(slave, addr, count, val);

	pm_runtime_mark_last_busy(&slave->dev);
	pm_runtime_put(&slave->dev);

	return ret;
}
EXPORT_SYMBOL(sdw_nwrite);

/**
 * sdw_read() - Read a SDW Slave register
 * @slave: SDW Slave
 * @addr: Register address
 *
 * This version of the function will take a PM reference to the slave
 * device.
 */
int sdw_read(struct sdw_slave *slave, u32 addr)
{
	u8 buf;
	int ret;

	ret = sdw_nread(slave, addr, 1, &buf);
	if (ret < 0)
		return ret;

	return buf;
}
EXPORT_SYMBOL(sdw_read);

/**
 * sdw_write() - Write a SDW Slave register
 * @slave: SDW Slave
 * @addr: Register address
 * @value: Register value
 *
 * This version of the function will take a PM reference to the slave
 * device.
 */
int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
{
	return sdw_nwrite(slave, addr, 1, &value);
}
EXPORT_SYMBOL(sdw_write);

/*
 * SDW alert handling
 */

/* called with bus_lock held */
static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
{
	struct sdw_slave *slave;

	list_for_each_entry(slave, &bus->slaves, node) {
		if (slave->dev_num == i)
			return slave;
	}

	return NULL;
}

int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
{
	if (slave->id.mfg_id != id.mfg_id ||
	    slave->id.part_id != id.part_id ||
	    slave->id.class_id != id.class_id ||
	    (slave->id.unique_id != SDW_IGNORED_UNIQUE_ID &&
	     slave->id.unique_id != id.unique_id))
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL(sdw_compare_devid);

/* called with bus_lock held */
static int sdw_get_device_num(struct sdw_slave *slave)
{
	struct sdw_bus *bus = slave->bus;
	int bit;

	if (bus->ops && bus->ops->get_device_num) {
		bit = bus->ops->get_device_num(bus, slave);
		if (bit < 0)
			goto err;
	} else {
		bit = find_first_zero_bit(bus->assigned, SDW_MAX_DEVICES);
		if (bit == SDW_MAX_DEVICES) {
			bit = -ENODEV;
			goto err;
		}
	}

	/*
	 * Do not update dev_num in Slave data structure here,
	 * Update once program dev_num is successful
	 */
	set_bit(bit, bus->assigned);

err:
	return bit;
}

static int sdw_assign_device_num(struct sdw_slave *slave)
{
	struct sdw_bus *bus = slave->bus;
	int ret, dev_num;
	bool new_device = false;

	/* check first if device number is assigned, if so reuse that */
	if (!slave->dev_num) {
		if (!slave->dev_num_sticky) {
			mutex_lock(&slave->bus->bus_lock);
			dev_num = sdw_get_device_num(slave);
			mutex_unlock(&slave->bus->bus_lock);
			if (dev_num < 0) {
				dev_err(bus->dev, "Get dev_num failed: %d\n",
					dev_num);
				return dev_num;
			}
			slave->dev_num = dev_num;
			slave->dev_num_sticky = dev_num;
			new_device = true;
		} else {
			slave->dev_num = slave->dev_num_sticky;
		}
	}

	if (!new_device)
		dev_dbg(bus->dev,
			"Slave already registered, reusing dev_num:%d\n",
			slave->dev_num);

	/* Clear the slave->dev_num to transfer message on device 0 */
	dev_num = slave->dev_num;
	slave->dev_num = 0;

	ret = sdw_write_no_pm(slave, SDW_SCP_DEVNUMBER, dev_num);
	if (ret < 0) {
		dev_err(bus->dev, "Program device_num %d failed: %d\n",
			dev_num, ret);
		return ret;
	}

	/* After xfer of msg, restore dev_num */
	slave->dev_num = slave->dev_num_sticky;

	if (bus->ops && bus->ops->new_peripheral_assigned)
		bus->ops->new_peripheral_assigned(bus, slave, dev_num);

	return 0;
}

void sdw_extract_slave_id(struct sdw_bus *bus,
			  u64 addr, struct sdw_slave_id *id)
{
	dev_dbg(bus->dev, "SDW Slave Addr: %llx\n", addr);

	id->sdw_version = SDW_VERSION(addr);
	id->unique_id = SDW_UNIQUE_ID(addr);
	id->mfg_id = SDW_MFG_ID(addr);
	id->part_id = SDW_PART_ID(addr);
	id->class_id = SDW_CLASS_ID(addr);

	dev_dbg(bus->dev,
		"SDW Slave class_id 0x%02x, mfg_id 0x%04x, part_id 0x%04x, unique_id 0x%x, version 0x%x\n",
		id->class_id, id->mfg_id, id->part_id, id->unique_id, id->sdw_version);
}
EXPORT_SYMBOL(sdw_extract_slave_id);

static int sdw_program_device_num(struct sdw_bus *bus, bool *programmed)
{
	u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
	struct sdw_slave *slave, *_s;
	struct sdw_slave_id id;
	struct sdw_msg msg;
	bool found;
	int count = 0, ret;
	u64 addr;

	*programmed = false;

	/* No Slave, so use raw xfer api */
	ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
			   SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
	if (ret < 0)
		return ret;

	do {
		ret = sdw_transfer(bus, &msg);
		if (ret == -ENODATA) { /* end of device id reads */
			dev_dbg(bus->dev, "No more devices to enumerate\n");
			ret = 0;
			break;
		}
		if (ret < 0) {
			dev_err(bus->dev, "DEVID read fail:%d\n", ret);
			break;
		}

		/*
		 * Construct the addr and extract. Cast the higher shift
		 * bits to avoid truncation due to size limit.
		 */
		addr = buf[5] | (buf[4] << 8) | (buf[3] << 16) |
			((u64)buf[2] << 24) | ((u64)buf[1] << 32) |
			((u64)buf[0] << 40);

		sdw_extract_slave_id(bus, addr, &id);

		found = false;
		/* Now compare with entries */
		list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
			if (sdw_compare_devid(slave, id) == 0) {
				found = true;

				/*
				 * To prevent skipping state-machine stages don't
				 * program a device until we've seen it UNATTACH.
				 * Must return here because no other device on #0
				 * can be detected until this one has been
				 * assigned a device ID.
				 */
				if (slave->status != SDW_SLAVE_UNATTACHED)
					return 0;

				/*
				 * Assign a new dev_num to this Slave and
				 * not mark it present. It will be marked
				 * present after it reports ATTACHED on new
				 * dev_num
				 */
				ret = sdw_assign_device_num(slave);
				if (ret < 0) {
					dev_err(bus->dev,
						"Assign dev_num failed:%d\n",
						ret);
					return ret;
				}

				*programmed = true;

				break;
			}
		}

		if (!found) {
			/* TODO: Park this device in Group 13 */

			/*
			 * add Slave device even if there is no platform
			 * firmware description. There will be no driver probe
			 * but the user/integration will be able to see the
			 * device, enumeration status and device number in sysfs
			 */
			sdw_slave_add(bus, &id, NULL);

			dev_err(bus->dev, "Slave Entry not found\n");
		}

		count++;

		/*
		 * Check till error out or retry (count) exhausts.
		 * Device can drop off and rejoin during enumeration
		 * so count till twice the bound.
		 */

	} while (ret == 0 && count < (SDW_MAX_DEVICES * 2));

	return ret;
}

static void sdw_modify_slave_status(struct sdw_slave *slave,
				    enum sdw_slave_status status)
{
	struct sdw_bus *bus = slave->bus;

	mutex_lock(&bus->bus_lock);

	dev_vdbg(bus->dev,
		 "changing status slave %d status %d new status %d\n",
		 slave->dev_num, slave->status, status);

	if (status == SDW_SLAVE_UNATTACHED) {
		dev_dbg(&slave->dev,
			"initializing enumeration and init completion for Slave %d\n",
			slave->dev_num);

		reinit_completion(&slave->enumeration_complete);
		reinit_completion(&slave->initialization_complete);

	} else if ((status == SDW_SLAVE_ATTACHED) &&
		   (slave->status == SDW_SLAVE_UNATTACHED)) {
		dev_dbg(&slave->dev,
			"signaling enumeration completion for Slave %d\n",
			slave->dev_num);

		complete_all(&slave->enumeration_complete);
	}
	slave->status = status;
	mutex_unlock(&bus->bus_lock);
}

static int sdw_slave_clk_stop_callback(struct sdw_slave *slave,
				       enum sdw_clk_stop_mode mode,
				       enum sdw_clk_stop_type type)
{
	int ret = 0;

	mutex_lock(&slave->sdw_dev_lock);

	if (slave->probed) {
		struct device *dev = &slave->dev;
		struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);

		if (drv->ops && drv->ops->clk_stop)
			ret = drv->ops->clk_stop(slave, mode, type);
	}

	mutex_unlock(&slave->sdw_dev_lock);

	return ret;
}

static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
				      enum sdw_clk_stop_mode mode,
				      bool prepare)
{
	bool wake_en;
	u32 val = 0;
	int ret;

	wake_en = slave->prop.wake_capable;

	if (prepare) {
		val = SDW_SCP_SYSTEMCTRL_CLK_STP_PREP;

		if (mode == SDW_CLK_STOP_MODE1)
			val |= SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1;

		if (wake_en)
			val |= SDW_SCP_SYSTEMCTRL_WAKE_UP_EN;
	} else {
		ret = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
		if (ret < 0) {
			if (ret != -ENODATA)
				dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL read failed:%d\n", ret);
			return ret;
		}
		val = ret;
		val &= ~(SDW_SCP_SYSTEMCTRL_CLK_STP_PREP);
	}

	ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val);

	if (ret < 0 && ret != -ENODATA)
		dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL write failed:%d\n", ret);

	return ret;
}

static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num)
{
	int retry = bus->clk_stop_timeout;
	int val;

	do {
		val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT);
		if (val < 0) {
			if (val != -ENODATA)
				dev_err(bus->dev, "SDW_SCP_STAT bread failed:%d\n", val);
			return val;
		}
		val &= SDW_SCP_STAT_CLK_STP_NF;
		if (!val) {
			dev_dbg(bus->dev, "clock stop prep/de-prep done slave:%d\n",
				dev_num);
			return 0;
		}

		usleep_range(1000, 1500);
		retry--;
	} while (retry);

	dev_err(bus->dev, "clock stop prep/de-prep failed slave:%d\n",
		dev_num);

	return -ETIMEDOUT;
}

/**
 * sdw_bus_prep_clk_stop: prepare Slave(s) for clock stop
 *
 * @bus: SDW bus instance
 *
 * Query Slave for clock stop mode and prepare for that mode.
 */
int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
{
	bool simple_clk_stop = true;
	struct sdw_slave *slave;
	bool is_slave = false;
	int ret = 0;

	/*
	 * In order to save on transition time, prepare
	 * each Slave and then wait for all Slave(s) to be
	 * prepared for clock stop.
	 * If one of the Slave devices has lost sync and
	 * replies with Command Ignored/-ENODATA, we continue
	 * the loop
	 */
	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		/* Identify if Slave(s) are available on Bus */
		is_slave = true;

		ret = sdw_slave_clk_stop_callback(slave,
						  SDW_CLK_STOP_MODE0,
						  SDW_CLK_PRE_PREPARE);
		if (ret < 0 && ret != -ENODATA) {
			dev_err(&slave->dev, "clock stop pre-prepare cb failed:%d\n", ret);
			return ret;
		}

		/* Only prepare a Slave device if needed */
		if (!slave->prop.simple_clk_stop_capable) {
			simple_clk_stop = false;

			ret = sdw_slave_clk_stop_prepare(slave,
							 SDW_CLK_STOP_MODE0,
							 true);
			if (ret < 0 && ret != -ENODATA) {
				dev_err(&slave->dev, "clock stop prepare failed:%d\n", ret);
				return ret;
			}
		}
	}

	/* Skip remaining clock stop preparation if no Slave is attached */
	if (!is_slave)
		return 0;

	/*
	 * Don't wait for all Slaves to be ready if they follow the simple
	 * state machine
	 */
	if (!simple_clk_stop) {
		ret = sdw_bus_wait_for_clk_prep_deprep(bus,
						       SDW_BROADCAST_DEV_NUM);
		/*
		 * if there are no Slave devices present and the reply is
		 * Command_Ignored/-ENODATA, we don't need to continue with the
		 * flow and can just return here. The error code is not modified
		 * and its handling left as an exercise for the caller.
		 */
		if (ret < 0)
			return ret;
	}

	/* Inform slaves that prep is done */
	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		ret = sdw_slave_clk_stop_callback(slave,
						  SDW_CLK_STOP_MODE0,
						  SDW_CLK_POST_PREPARE);

		if (ret < 0 && ret != -ENODATA) {
			dev_err(&slave->dev, "clock stop post-prepare cb failed:%d\n", ret);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sdw_bus_prep_clk_stop);

/**
 * sdw_bus_clk_stop: stop bus clock
 *
 * @bus: SDW bus instance
 *
 * After preparing the Slaves for clock stop, stop the clock by broadcasting
 * write to SCP_CTRL register.
 */
int sdw_bus_clk_stop(struct sdw_bus *bus)
{
	int ret;

	/*
	 * broadcast clock stop now, attached Slaves will ACK this,
	 * unattached will ignore
	 */
	ret = sdw_bwrite_no_pm(bus, SDW_BROADCAST_DEV_NUM,
			       SDW_SCP_CTRL, SDW_SCP_CTRL_CLK_STP_NOW);
	if (ret < 0) {
		if (ret != -ENODATA)
			dev_err(bus->dev, "ClockStopNow Broadcast msg failed %d\n", ret);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(sdw_bus_clk_stop);

/**
 * sdw_bus_exit_clk_stop: Exit clock stop mode
 *
 * @bus: SDW bus instance
 *
 * This De-prepares the Slaves by exiting Clock Stop Mode 0. For the Slaves
 * exiting Clock Stop Mode 1, they will be de-prepared after they enumerate
 * back.
 */
int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
{
	bool simple_clk_stop = true;
	struct sdw_slave *slave;
	bool is_slave = false;
	int ret;

	/*
	 * In order to save on transition time, de-prepare
	 * each Slave and then wait for all Slave(s) to be
	 * de-prepared after clock resume.
	 */
	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		/* Identify if Slave(s) are available on Bus */
		is_slave = true;

		ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
						  SDW_CLK_PRE_DEPREPARE);
		if (ret < 0)
			dev_warn(&slave->dev, "clock stop pre-deprepare cb failed:%d\n", ret);

		/* Only de-prepare a Slave device if needed */
		if (!slave->prop.simple_clk_stop_capable) {
			simple_clk_stop = false;

			ret = sdw_slave_clk_stop_prepare(slave, SDW_CLK_STOP_MODE0,
							 false);

			if (ret < 0)
				dev_warn(&slave->dev, "clock stop deprepare failed:%d\n", ret);
		}
	}

	/* Skip remaining clock stop de-preparation if no Slave is attached */
	if (!is_slave)
		return 0;

	/*
	 * Don't wait for all Slaves to be ready if they follow the simple
	 * state machine
	 */
	if (!simple_clk_stop) {
		ret = sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM);
		if (ret < 0)
			dev_warn(bus->dev, "clock stop deprepare wait failed:%d\n", ret);
	}

	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
						  SDW_CLK_POST_DEPREPARE);
		if (ret < 0)
			dev_warn(&slave->dev, "clock stop post-deprepare cb failed:%d\n", ret);
	}

	return 0;
}
EXPORT_SYMBOL(sdw_bus_exit_clk_stop);

int sdw_configure_dpn_intr(struct sdw_slave *slave,
			   int port, bool enable, int mask)
{
	u32 addr;
	int ret;
	u8 val = 0;

	if (slave->bus->params.s_data_mode != SDW_PORT_DATA_MODE_NORMAL) {
		dev_dbg(&slave->dev, "TEST FAIL interrupt %s\n",
			enable ? "on" : "off");
		mask |= SDW_DPN_INT_TEST_FAIL;
	}

	addr = SDW_DPN_INTMASK(port);

	/* Set/Clear port ready interrupt mask */
	if (enable) {
		val |= mask;
		val |= SDW_DPN_INT_PORT_READY;
	} else {
		val &= ~(mask);
		val &= ~SDW_DPN_INT_PORT_READY;
	}

	ret = sdw_update_no_pm(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val);
	if (ret < 0)
		dev_err(&slave->dev,
			"SDW_DPN_INTMASK write failed:%d\n", ret);

	return ret;
}

static int sdw_slave_set_frequency(struct sdw_slave *slave)
{
	u32 mclk_freq = slave->bus->prop.mclk_freq;
	u32 curr_freq = slave->bus->params.curr_dr_freq >> 1;
	unsigned int scale;
	u8 scale_index;
	u8 base;
	int ret;

	/*
	 * frequency base and scale registers are required for SDCA
	 * devices. They may also be used for 1.2+/non-SDCA devices.
	 * Driver can set the property, we will need a DisCo property
	 * to discover this case from platform firmware.
	 */
	if (!slave->id.class_id && !slave->prop.clock_reg_supported)
		return 0;

	if (!mclk_freq) {
		dev_err(&slave->dev,
			"no bus MCLK, cannot set SDW_SCP_BUS_CLOCK_BASE\n");
		return -EINVAL;
	}

	/*
	 * map base frequency using Table 89 of SoundWire 1.2 spec.
	 * The order of the tests just follows the specification, this
	 * is not a selection between possible values or a search for
	 * the best value but just a mapping. Only one case per platform
	 * is relevant.
	 * Some BIOS have inconsistent values for mclk_freq but a
	 * correct root so we force the mclk_freq to avoid variations.
	 */
	if (!(19200000 % mclk_freq)) {
		mclk_freq = 19200000;
		base = SDW_SCP_BASE_CLOCK_19200000_HZ;
	} else if (!(24000000 % mclk_freq)) {
		mclk_freq = 24000000;
		base = SDW_SCP_BASE_CLOCK_24000000_HZ;
	} else if (!(24576000 % mclk_freq)) {
		mclk_freq = 24576000;
		base = SDW_SCP_BASE_CLOCK_24576000_HZ;
	} else if (!(22579200 % mclk_freq)) {
		mclk_freq = 22579200;
		base = SDW_SCP_BASE_CLOCK_22579200_HZ;
	} else if (!(32000000 % mclk_freq)) {
		mclk_freq = 32000000;
		base = SDW_SCP_BASE_CLOCK_32000000_HZ;
	} else {
		dev_err(&slave->dev,
			"Unsupported clock base, mclk %d\n",
			mclk_freq);
		return -EINVAL;
	}

	if (mclk_freq % curr_freq) {
		dev_err(&slave->dev,
			"mclk %d is not multiple of bus curr_freq %d\n",
			mclk_freq, curr_freq);
		return -EINVAL;
	}

	scale = mclk_freq / curr_freq;

	/*
	 * map scale to Table 90 of SoundWire 1.2 spec - and check
	 * that the scale is a power of two and maximum 64
	 */
	scale_index = ilog2(scale);

	if (BIT(scale_index) != scale || scale_index > 6) {
		dev_err(&slave->dev,
			"No match found for scale %d, bus mclk %d curr_freq %d\n",
			scale, mclk_freq, curr_freq);
		return -EINVAL;
	}
	scale_index++;

	ret = sdw_write_no_pm(slave, SDW_SCP_BUS_CLOCK_BASE, base);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret);
		return ret;
	}

	/* initialize scale for both banks */
	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret);
		return ret;
	}
	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
	if (ret < 0)
		dev_err(&slave->dev,
			"SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);

	dev_dbg(&slave->dev,
		"Configured bus base %d, scale %d, mclk %d, curr_freq %d\n",
		base, scale_index, mclk_freq, curr_freq);

	return ret;
}

static int sdw_initialize_slave(struct sdw_slave *slave)
{
	struct sdw_slave_prop *prop = &slave->prop;
	int status;
	int ret;
	u8 val;

	ret = sdw_slave_set_frequency(slave);
	if (ret < 0)
		return ret;

	if (slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH) {
		/* Clear bus clash interrupt before enabling interrupt mask */
		status = sdw_read_no_pm(slave, SDW_SCP_INT1);
		if (status < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 (BUS_CLASH) read failed:%d\n", status);
			return status;
		}
		if (status & SDW_SCP_INT1_BUS_CLASH) {
			dev_warn(&slave->dev, "Bus clash detected before INT mask is enabled\n");
			ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_BUS_CLASH);
			if (ret < 0) {
				dev_err(&slave->dev,
					"SDW_SCP_INT1 (BUS_CLASH) write failed:%d\n", ret);
				return ret;
			}
		}
	}
	if ((slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY) &&
	    !(slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY)) {
		/* Clear parity interrupt before enabling interrupt mask */
		status = sdw_read_no_pm(slave, SDW_SCP_INT1);
		if (status < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 (PARITY) read failed:%d\n", status);
			return status;
		}
		if (status & SDW_SCP_INT1_PARITY) {
			dev_warn(&slave->dev, "PARITY error detected before INT mask is enabled\n");
			ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_PARITY);
			if (ret < 0) {
				dev_err(&slave->dev,
					"SDW_SCP_INT1 (PARITY) write failed:%d\n", ret);
				return ret;
			}
		}
	}

	/*
	 * Set SCP_INT1_MASK register, typically bus clash and
	 * implementation-defined interrupt mask. The Parity detection
	 * may not always be correct on startup so its use is
	 * device-dependent, it might e.g. only be enabled in
	 * steady-state after a couple of frames.
	 */
	val = slave->prop.scp_int1_mask;

	/* Enable SCP interrupts */
	ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1, val, val);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_INTMASK1 write failed:%d\n", ret);
		return ret;
	}

	/* No need to continue if DP0 is not present */
	if (!slave->prop.dp0_prop)
		return 0;

	/* Enable DP0 interrupts */
	val = prop->dp0_prop->imp_def_interrupts;
	val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;

	ret = sdw_update_no_pm(slave, SDW_DP0_INTMASK, val, val);
	if (ret < 0)
		dev_err(&slave->dev,
			"SDW_DP0_INTMASK write failed:%d\n", ret);
	return ret;
}

static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
{
	u8 clear, impl_int_mask;
	int status, status2, ret, count = 0;

	status = sdw_read_no_pm(slave, SDW_DP0_INT);
	if (status < 0) {
		dev_err(&slave->dev,
			"SDW_DP0_INT read failed:%d\n", status);
		return status;
	}

	do {
		clear = status & ~SDW_DP0_INTERRUPTS;

		if (status & SDW_DP0_INT_TEST_FAIL) {
			dev_err(&slave->dev, "Test fail for port 0\n");
			clear |= SDW_DP0_INT_TEST_FAIL;
		}

		/*
		 * Assumption: PORT_READY interrupt will be received only for
		 * ports implementing Channel Prepare state machine (CP_SM)
		 */

		if (status & SDW_DP0_INT_PORT_READY) {
			complete(&slave->port_ready[0]);
			clear |= SDW_DP0_INT_PORT_READY;
		}

		if (status & SDW_DP0_INT_BRA_FAILURE) {
			dev_err(&slave->dev, "BRA failed\n");
			clear |= SDW_DP0_INT_BRA_FAILURE;
		}

		impl_int_mask = SDW_DP0_INT_IMPDEF1 |
			SDW_DP0_INT_IMPDEF2 | SDW_DP0_INT_IMPDEF3;

		if (status & impl_int_mask) {
			clear |= impl_int_mask;
			*slave_status = clear;
		}

		/* clear the interrupts but don't touch reserved and SDCA_CASCADE fields */
		ret = sdw_write_no_pm(slave, SDW_DP0_INT, clear);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_DP0_INT write failed:%d\n", ret);
			return ret;
		}

		/* Read DP0 interrupt again */
		status2 = sdw_read_no_pm(slave, SDW_DP0_INT);
		if (status2 < 0) {
			dev_err(&slave->dev,
				"SDW_DP0_INT read failed:%d\n", status2);
			return status2;
		}
		/* filter to limit loop to interrupts identified in the first status read */
		status &= status2;

		count++;

		/* we can get alerts while processing so keep retrying */
	} while ((status & SDW_DP0_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));

	if (count == SDW_READ_INTR_CLEAR_RETRY)
		dev_warn(&slave->dev, "Reached MAX_RETRY on DP0 read\n");

	return ret;
}

static int sdw_handle_port_interrupt(struct sdw_slave *slave,
				     int port, u8 *slave_status)
{
	u8 clear, impl_int_mask;
	int status, status2, ret, count = 0;
	u32 addr;

	if (port == 0)
		return sdw_handle_dp0_interrupt(slave, slave_status);

	addr = SDW_DPN_INT(port);
	status = sdw_read_no_pm(slave, addr);
	if (status < 0) {
		dev_err(&slave->dev,
			"SDW_DPN_INT read failed:%d\n", status);

		return status;
	}

	do {
		clear = status & ~SDW_DPN_INTERRUPTS;

		if (status & SDW_DPN_INT_TEST_FAIL) {
			dev_err(&slave->dev, "Test fail for port:%d\n", port);
			clear |= SDW_DPN_INT_TEST_FAIL;
		}

		/*
		 * Assumption: PORT_READY interrupt will be received only
		 * for ports implementing CP_SM.
		 */
		if (status & SDW_DPN_INT_PORT_READY) {
			complete(&slave->port_ready[port]);
			clear |= SDW_DPN_INT_PORT_READY;
		}

		impl_int_mask = SDW_DPN_INT_IMPDEF1 |
			SDW_DPN_INT_IMPDEF2 | SDW_DPN_INT_IMPDEF3;

		if (status & impl_int_mask) {
			clear |= impl_int_mask;
			*slave_status = clear;
		}

		/* clear the interrupt but don't touch reserved fields */
		ret = sdw_write_no_pm(slave, addr, clear);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_DPN_INT write failed:%d\n", ret);
			return ret;
		}

		/* Read DPN interrupt again */
		status2 = sdw_read_no_pm(slave, addr);
		if (status2 < 0) {
			dev_err(&slave->dev,
				"SDW_DPN_INT read failed:%d\n", status2);
			return status2;
		}
		/* filter to limit loop to interrupts identified in the first status read */
		status &= status2;

		count++;

		/* we can get alerts while processing so keep retrying */
	} while ((status & SDW_DPN_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));

	if (count == SDW_READ_INTR_CLEAR_RETRY)
		dev_warn(&slave->dev, "Reached MAX_RETRY on port read\n");

	return ret;
}

static int sdw_handle_slave_alerts(struct sdw_slave *slave)
{
	struct sdw_slave_intr_status slave_intr;
	u8 clear = 0, bit, port_status[15] = {0};
	int port_num, stat, ret, count = 0;
	unsigned long port;
	bool slave_notify;
	u8 sdca_cascade = 0;
	u8 buf, buf2[2];
	bool parity_check;
	bool parity_quirk;

	sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);

	ret = pm_runtime_get_sync(&slave->dev);
	if (ret < 0 && ret != -EACCES) {
		dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
		pm_runtime_put_noidle(&slave->dev);
		return ret;
	}

	/* Read Intstat 1, Intstat 2 and Intstat 3 registers */
	ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_INT1 read failed:%d\n", ret);
		goto io_err;
	}
	buf = ret;

	ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_INT2/3 read failed:%d\n", ret);
		goto io_err;
	}

	if (slave->id.class_id) {
		ret = sdw_read_no_pm(slave, SDW_DP0_INT);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_DP0_INT read failed:%d\n", ret);
			goto io_err;
		}
		sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
	}

	do {
		slave_notify = false;

		/*
		 * Check parity, bus clash and Slave (impl defined)
		 * interrupt
		 */
		if (buf & SDW_SCP_INT1_PARITY) {
			parity_check = slave->prop.scp_int1_mask & SDW_SCP_INT1_PARITY;
			parity_quirk = !slave->first_interrupt_done &&
				       (slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY);

			if (parity_check && !parity_quirk)
				dev_err(&slave->dev, "Parity error detected\n");
			clear |= SDW_SCP_INT1_PARITY;
		}

		if (buf & SDW_SCP_INT1_BUS_CLASH) {
			if (slave->prop.scp_int1_mask & SDW_SCP_INT1_BUS_CLASH)
				dev_err(&slave->dev, "Bus clash detected\n");
			clear |= SDW_SCP_INT1_BUS_CLASH;
		}

		/*
		 * When bus clash or parity errors are detected, such errors
		 * are unlikely to be recoverable errors.
		 * TODO: In such scenario, reset bus. Make this configurable
		 * via sysfs property with bus reset being the default.
		 */

		if (buf & SDW_SCP_INT1_IMPL_DEF) {
			if (slave->prop.scp_int1_mask & SDW_SCP_INT1_IMPL_DEF) {
				dev_dbg(&slave->dev, "Slave impl defined interrupt\n");
				slave_notify = true;
			}
			clear |= SDW_SCP_INT1_IMPL_DEF;
		}

		/* the SDCA interrupts are cleared in the codec driver .interrupt_callback() */
		if (sdca_cascade)
			slave_notify = true;

		/* Check port 0 - 3 interrupts */
		port = buf & SDW_SCP_INT1_PORT0_3;

		/* To get port number corresponding to bits, shift it */
		port = FIELD_GET(SDW_SCP_INT1_PORT0_3, port);
		for_each_set_bit(bit, &port, 8) {
			sdw_handle_port_interrupt(slave, bit,
						  &port_status[bit]);
		}

		/* Check if cascade 2 interrupt is present */
		if (buf & SDW_SCP_INT1_SCP2_CASCADE) {
			port = buf2[0] & SDW_SCP_INTSTAT2_PORT4_10;
			for_each_set_bit(bit, &port, 8) {
				/* scp2 ports start from 4 */
				port_num = bit + 4;
				sdw_handle_port_interrupt(slave,
							  port_num,
							  &port_status[port_num]);
			}
		}

		/* now check last cascade */
		if (buf2[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE) {
			port = buf2[1] & SDW_SCP_INTSTAT3_PORT11_14;
			for_each_set_bit(bit, &port, 8) {
				/* scp3 ports start from 11 */
				port_num = bit + 11;
				sdw_handle_port_interrupt(slave,
							  port_num,
							  &port_status[port_num]);
			}
		}

		/* Update the Slave driver */
		if (slave_notify) {
			mutex_lock(&slave->sdw_dev_lock);

			if (slave->probed) {
				struct device *dev = &slave->dev;
				struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);

				if (slave->prop.use_domain_irq && slave->irq)
					handle_nested_irq(slave->irq);

				if (drv->ops && drv->ops->interrupt_callback) {
					slave_intr.sdca_cascade = sdca_cascade;
					slave_intr.control_port = clear;
					memcpy(slave_intr.port, &port_status,
					       sizeof(slave_intr.port));

					drv->ops->interrupt_callback(slave, &slave_intr);
				}
			}

			mutex_unlock(&slave->sdw_dev_lock);
		}

		/* Ack interrupt */
		ret = sdw_write_no_pm(slave, SDW_SCP_INT1, clear);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 write failed:%d\n", ret);
			goto io_err;
		}

		/* at this point all initial interrupt sources were handled */
		slave->first_interrupt_done = true;

		/*
		 * Read status again to ensure no new interrupts arrived
		 * while servicing interrupts.
		 */
		ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 recheck read failed:%d\n", ret);
			goto io_err;
		}
		buf = ret;

		ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT2/3 recheck read failed:%d\n", ret);
			goto io_err;
		}

		if (slave->id.class_id) {
			ret = sdw_read_no_pm(slave, SDW_DP0_INT);
			if (ret < 0) {
				dev_err(&slave->dev,
					"SDW_DP0_INT recheck read failed:%d\n", ret);
				goto io_err;
			}
			sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
		}

		/*
		 * Make sure no interrupts are pending
		 */
		stat = buf || buf2[0] || buf2[1] || sdca_cascade;

		/*
		 * Exit loop if Slave is continuously in ALERT state even
		 * after servicing the interrupt multiple times.
		 */
		count++;

		/* we can get alerts while processing so keep retrying */
	} while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY);

	if (count == SDW_READ_INTR_CLEAR_RETRY)
		dev_warn(&slave->dev, "Reached MAX_RETRY on alert read\n");

io_err:
	pm_runtime_mark_last_busy(&slave->dev);
	pm_runtime_put_autosuspend(&slave->dev);

	return ret;
}

static int sdw_update_slave_status(struct sdw_slave *slave,
				   enum sdw_slave_status status)
{
	int ret = 0;

	mutex_lock(&slave->sdw_dev_lock);

	if (slave->probed) {
		struct device *dev = &slave->dev;
		struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);

		if (drv->ops && drv->ops->update_status)
			ret = drv->ops->update_status(slave, status);
	}

	mutex_unlock(&slave->sdw_dev_lock);

	return ret;
}

/**
 * sdw_handle_slave_status() - Handle Slave status
 * @bus: SDW bus instance
 * @status: Status for all Slave(s)
 */
int sdw_handle_slave_status(struct sdw_bus *bus,
			    enum sdw_slave_status status[])
{
	enum sdw_slave_status prev_status;
	struct sdw_slave *slave;
	bool attached_initializing, id_programmed;
	int i, ret = 0;

	/* first check if any Slaves fell off the bus */
	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
		mutex_lock(&bus->bus_lock);
		if (test_bit(i, bus->assigned) == false) {
			mutex_unlock(&bus->bus_lock);
			continue;
		}
		mutex_unlock(&bus->bus_lock);

		slave = sdw_get_slave(bus, i);
		if (!slave)
			continue;

		if (status[i] == SDW_SLAVE_UNATTACHED &&
		    slave->status != SDW_SLAVE_UNATTACHED) {
			dev_warn(&slave->dev, "Slave %d state check1: UNATTACHED, status was %d\n",
				 i, slave->status);
			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);

			/* Ensure driver knows that peripheral unattached */
			ret = sdw_update_slave_status(slave, status[i]);
			if (ret < 0)
				dev_warn(&slave->dev, "Update Slave status failed:%d\n", ret);
		}
	}

	if (status[0] == SDW_SLAVE_ATTACHED) {
		dev_dbg(bus->dev, "Slave attached, programming device number\n");

		/*
		 * Programming a device number will have side effects,
		 * so we deal with other devices at a later time.
		 * This relies on those devices reporting ATTACHED, which will
		 * trigger another call to this function. This will only
		 * happen if at least one device ID was programmed.
		 * Error returns from sdw_program_device_num() are currently
		 * ignored because there's no useful recovery that can be done.
		 * Returning the error here could result in the current status
		 * of other devices not being handled, because if no device IDs
		 * were programmed there's nothing to guarantee a status change
		 * to trigger another call to this function.
		 */
		sdw_program_device_num(bus, &id_programmed);
		if (id_programmed)
			return 0;
	}

	/* Continue to check other slave statuses */
	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
		mutex_lock(&bus->bus_lock);
		if (test_bit(i, bus->assigned) == false) {
			mutex_unlock(&bus->bus_lock);
			continue;
		}
		mutex_unlock(&bus->bus_lock);

		slave = sdw_get_slave(bus, i);
		if (!slave)
			continue;

		attached_initializing = false;

		switch (status[i]) {
		case SDW_SLAVE_UNATTACHED:
			if (slave->status == SDW_SLAVE_UNATTACHED)
				break;

			dev_warn(&slave->dev, "Slave %d state check2: UNATTACHED, status was %d\n",
				 i, slave->status);

			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
			break;

		case SDW_SLAVE_ALERT:
			ret = sdw_handle_slave_alerts(slave);
			if (ret < 0)
				dev_err(&slave->dev,
					"Slave %d alert handling failed: %d\n",
					i, ret);
			break;

		case SDW_SLAVE_ATTACHED:
			if (slave->status == SDW_SLAVE_ATTACHED)
				break;

			prev_status = slave->status;
			sdw_modify_slave_status(slave, SDW_SLAVE_ATTACHED);

			if (prev_status == SDW_SLAVE_ALERT)
				break;

			attached_initializing = true;

			ret = sdw_initialize_slave(slave);
			if (ret < 0)
				dev_err(&slave->dev,
					"Slave %d initialization failed: %d\n",
					i, ret);

			break;

		default:
			dev_err(&slave->dev, "Invalid slave %d status:%d\n",
				i, status[i]);
			break;
		}

		ret = sdw_update_slave_status(slave, status[i]);
		if (ret < 0)
			dev_err(&slave->dev,
				"Update Slave status failed:%d\n", ret);
		if (attached_initializing) {
			dev_dbg(&slave->dev,
				"signaling initialization completion for Slave %d\n",
				slave->dev_num);

			complete_all(&slave->initialization_complete);

			/*
			 * If the manager became pm_runtime active, the peripherals will be
			 * restarted and attach, but their pm_runtime status may remain
			 * suspended. If the 'update_slave_status' callback initiates
			 * any sort of deferred processing, this processing would not be
			 * cancelled on pm_runtime suspend.
			 * To avoid such zombie states, we queue a request to resume.
			 * This would be a no-op in case the peripheral was being resumed
			 * by e.g. the ALSA/ASoC framework.
			 */
			pm_request_resume(&slave->dev);
		}
	}

	return ret;
}
EXPORT_SYMBOL(sdw_handle_slave_status);

void sdw_clear_slave_status(struct sdw_bus *bus, u32 request)
{
	struct sdw_slave *slave;
	int i;

	/* Check all non-zero devices */
	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
		mutex_lock(&bus->bus_lock);
		if (test_bit(i, bus->assigned) == false) {
			mutex_unlock(&bus->bus_lock);
			continue;
		}
		mutex_unlock(&bus->bus_lock);

		slave = sdw_get_slave(bus, i);
		if (!slave)
			continue;

		if (slave->status != SDW_SLAVE_UNATTACHED) {
			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
			slave->first_interrupt_done = false;
			sdw_update_slave_status(slave, SDW_SLAVE_UNATTACHED);
		}

		/* keep track of request, used in pm_runtime resume */
		slave->unattach_request = request;
	}
}
EXPORT_SYMBOL(sdw_clear_slave_status);