// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
// Copyright(c) 2015-17 Intel Corporation.

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/mod_devicetable.h>
#include <linux/pm_runtime.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>
#include "bus.h"
#include "irq.h"
#include "sysfs_local.h"

static DEFINE_IDA(sdw_bus_ida);

static int sdw_get_id(struct sdw_bus *bus)
{
	int rc = ida_alloc(&sdw_bus_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;

	bus->id = rc;

	if (bus->controller_id == -1)
		bus->controller_id = rc;

	return 0;
}

/**
 * sdw_bus_master_add() - add a bus Master instance
 * @bus: bus instance
 * @parent: parent device
 * @fwnode: firmware node handle
 *
 * Initializes the bus instance, reads properties and creates child
 * devices.
 */
int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
		       struct fwnode_handle *fwnode)
{
	struct sdw_master_prop *prop = NULL;
	int ret;

	if (!parent) {
		pr_err("SoundWire parent device is not set\n");
		return -ENODEV;
	}

	ret = sdw_get_id(bus);
	if (ret < 0) {
		dev_err(parent, "Failed to get bus id\n");
		return ret;
	}

	ret = sdw_master_device_add(bus, parent, fwnode);
	if (ret < 0) {
		dev_err(parent, "Failed to add master device at link %d\n",
			bus->link_id);
		return ret;
	}

	if (!bus->ops) {
		dev_err(bus->dev, "SoundWire Bus ops are not set\n");
		return -EINVAL;
	}

	if (!bus->compute_params) {
		dev_err(bus->dev,
			"Bandwidth allocation not configured, compute_params not set\n");
		return -EINVAL;
	}

	/*
	 * Give each bus_lock and msg_lock a unique key so that lockdep won't
	 * trigger a deadlock warning when the locks of several buses are
	 * grabbed during configuration of a multi-bus stream.
	 */
	lockdep_register_key(&bus->msg_lock_key);
	__mutex_init(&bus->msg_lock, "msg_lock", &bus->msg_lock_key);

	lockdep_register_key(&bus->bus_lock_key);
	__mutex_init(&bus->bus_lock, "bus_lock", &bus->bus_lock_key);

	INIT_LIST_HEAD(&bus->slaves);
	INIT_LIST_HEAD(&bus->m_rt_list);

	/*
	 * Initialize multi_link flag
	 */
	bus->multi_link = false;
	if (bus->ops->read_prop) {
		ret = bus->ops->read_prop(bus);
		if (ret < 0) {
			dev_err(bus->dev,
				"Bus read properties failed:%d\n", ret);
			return ret;
		}
	}

	sdw_bus_debugfs_init(bus);

	/*
	 * Device numbers in SoundWire are 0 through 15. Enumeration device
	 * number (0), Broadcast device number (15), Group numbers (12 and
	 * 13) and Master device number (14) are not used for assignment so
	 * mask these and other higher bits.
	 */

	/* Set higher order bits */
	*bus->assigned = ~GENMASK(SDW_BROADCAST_DEV_NUM, SDW_ENUM_DEV_NUM);

	/* Set enumeration device number and broadcast device number */
	set_bit(SDW_ENUM_DEV_NUM, bus->assigned);
	set_bit(SDW_BROADCAST_DEV_NUM, bus->assigned);

	/* Set group device numbers and master device number */
	set_bit(SDW_GROUP12_DEV_NUM, bus->assigned);
	set_bit(SDW_GROUP13_DEV_NUM, bus->assigned);
	set_bit(SDW_MASTER_DEV_NUM, bus->assigned);

	/*
	 * SDW is an enumerable bus, but devices can be powered off. So,
	 * they won't be able to report as present.
	 *
	 * Create Slave devices based on Slaves described in
	 * the respective firmware (ACPI/DT)
	 */
	if (IS_ENABLED(CONFIG_ACPI) && ACPI_HANDLE(bus->dev))
		ret = sdw_acpi_find_slaves(bus);
	else if (IS_ENABLED(CONFIG_OF) && bus->dev->of_node)
		ret = sdw_of_find_slaves(bus);
	else
		ret = -ENOTSUPP; /* No ACPI/DT so error out */

	if (ret < 0) {
		dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
		return ret;
	}

	/*
	 * Initialize clock values based on Master properties. The max
	 * frequency is read from max_clk_freq property. Current assumption
	 * is that the bus will start at highest clock frequency when
	 * powered on.
	 *
	 * Default active bank will be 0 as out of reset the Slaves have
	 * to start with bank 0 (Table 40 of Spec)
	 */
	prop = &bus->prop;
	bus->params.max_dr_freq = prop->max_clk_freq * SDW_DOUBLE_RATE_FACTOR;
	bus->params.curr_dr_freq = bus->params.max_dr_freq;
	bus->params.curr_bank = SDW_BANK0;
	bus->params.next_bank = SDW_BANK1;

	ret = sdw_irq_create(bus, fwnode);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(sdw_bus_master_add);
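
/*
 * Illustrative sketch (not part of this file's API): a controller driver
 * typically embeds a struct sdw_bus in its own context, fills in the fields
 * checked above (ops, compute_params, link_id, controller_id) and registers
 * the bus from its probe routine. The context and callback names below are
 * hypothetical.
 *
 *	ctx->bus.link_id = link_id;
 *	ctx->bus.controller_id = -1;
 *	ctx->bus.ops = &my_master_ops;
 *	ctx->bus.compute_params = my_compute_params;
 *
 *	ret = sdw_bus_master_add(&ctx->bus, parent_dev, fwnode);
 *	if (ret < 0)
 *		return ret;
 *
 * A controller_id of -1 requests reuse of the IDA-allocated bus id (see
 * sdw_get_id() above). The matching cleanup is sdw_bus_master_delete() in
 * the remove path, which deletes the child Slave devices and frees the id.
 */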

static int sdw_delete_slave(struct device *dev, void *data)
{
	struct sdw_slave *slave = dev_to_sdw_dev(dev);
	struct sdw_bus *bus = slave->bus;

	pm_runtime_disable(dev);

	sdw_slave_debugfs_exit(slave);

	mutex_lock(&bus->bus_lock);

	if (slave->dev_num) { /* clear dev_num if assigned */
		clear_bit(slave->dev_num, bus->assigned);
		if (bus->ops && bus->ops->put_device_num)
			bus->ops->put_device_num(bus, slave);
	}
	list_del_init(&slave->node);
	mutex_unlock(&bus->bus_lock);

	device_unregister(dev);
	return 0;
}

/**
 * sdw_bus_master_delete() - delete the bus master instance
 * @bus: bus to be deleted
 *
 * Remove the instance, delete the child devices.
 */
void sdw_bus_master_delete(struct sdw_bus *bus)
{
	device_for_each_child(bus->dev, NULL, sdw_delete_slave);

	sdw_irq_delete(bus);

	sdw_master_device_del(bus);

	sdw_bus_debugfs_exit(bus);
	lockdep_unregister_key(&bus->bus_lock_key);
	lockdep_unregister_key(&bus->msg_lock_key);
	ida_free(&sdw_bus_ida, bus->id);
}
EXPORT_SYMBOL(sdw_bus_master_delete);

/*
 * SDW IO Calls
 */

static inline int find_response_code(enum sdw_command_response resp)
{
	switch (resp) {
	case SDW_CMD_OK:
		return 0;

	case SDW_CMD_IGNORED:
		return -ENODATA;

	case SDW_CMD_TIMEOUT:
		return -ETIMEDOUT;

	default:
		return -EIO;
	}
}

static inline int do_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int retry = bus->prop.err_threshold;
	enum sdw_command_response resp;
	int ret = 0, i;

	for (i = 0; i <= retry; i++) {
		resp = bus->ops->xfer_msg(bus, msg);
		ret = find_response_code(resp);

		/* if cmd is ok or ignored return */
		if (ret == 0 || ret == -ENODATA)
			return ret;
	}

	return ret;
}

static inline int do_transfer_defer(struct sdw_bus *bus,
				    struct sdw_msg *msg)
{
	struct sdw_defer *defer = &bus->defer_msg;
	int retry = bus->prop.err_threshold;
	enum sdw_command_response resp;
	int ret = 0, i;

	defer->msg = msg;
	defer->length = msg->len;
	init_completion(&defer->complete);

	for (i = 0; i <= retry; i++) {
		resp = bus->ops->xfer_msg_defer(bus);
		ret = find_response_code(resp);
		/* if cmd is ok or ignored return */
		if (ret == 0 || ret == -ENODATA)
			return ret;
	}

	return ret;
}

static int sdw_transfer_unlocked(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int ret;

	ret = do_transfer(bus, msg);
	if (ret != 0 && ret != -ENODATA)
		dev_err(bus->dev, "trf on Slave %d failed:%d %s addr %x count %d\n",
			msg->dev_num, ret,
			(msg->flags & SDW_MSG_FLAG_WRITE) ? "write" : "read",
			msg->addr, msg->len);

	return ret;
}

/**
 * sdw_transfer() - Synchronous transfer message to a SDW Slave device
 * @bus: SDW bus
 * @msg: SDW message to be xfered
 */
int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int ret;

	mutex_lock(&bus->msg_lock);

	ret = sdw_transfer_unlocked(bus, msg);

	mutex_unlock(&bus->msg_lock);

	return ret;
}

/**
 * sdw_show_ping_status() - Direct report of PING status, to be used by Peripheral drivers
 * @bus: SDW bus
 * @sync_delay: Delay before reading status
 */
void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay)
{
	u32 status;

	if (!bus->ops->read_ping_status)
		return;

	/*
	 * wait for peripheral to sync if desired. 10-15ms should be more than
	 * enough in most cases.
	 */
	if (sync_delay)
		usleep_range(10000, 15000);

	mutex_lock(&bus->msg_lock);

	status = bus->ops->read_ping_status(bus);

	mutex_unlock(&bus->msg_lock);

	if (!status)
		dev_warn(bus->dev, "%s: no peripherals attached\n", __func__);
	else
		dev_dbg(bus->dev, "PING status: %#x\n", status);
}
EXPORT_SYMBOL(sdw_show_ping_status);

/**
 * sdw_transfer_defer() - Asynchronously transfer message to a SDW Slave device
 * @bus: SDW bus
 * @msg: SDW message to be xfered
 *
 * Caller needs to hold the msg_lock lock while calling this
 */
int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int ret;

	if (!bus->ops->xfer_msg_defer)
		return -ENOTSUPP;

	ret = do_transfer_defer(bus, msg);
	if (ret != 0 && ret != -ENODATA)
		dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n",
			msg->dev_num, ret);

	return ret;
}

int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
		 u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf)
{
	memset(msg, 0, sizeof(*msg));
	msg->addr = addr; /* addr is 16 bit and truncated here */
	msg->len = count;
	msg->dev_num = dev_num;
	msg->flags = flags;
	msg->buf = buf;

	if (addr < SDW_REG_NO_PAGE) /* no paging area */
		return 0;

	if (addr >= SDW_REG_MAX) { /* illegal addr */
		pr_err("SDW: Invalid address %x passed\n", addr);
		return -EINVAL;
	}

	if (addr < SDW_REG_OPTIONAL_PAGE) { /* 32k but no page */
		if (slave && !slave->prop.paging_support)
			return 0;
		/* no need for else as that will fall through to paging */
	}

	/* paging mandatory */
	if (dev_num == SDW_ENUM_DEV_NUM || dev_num == SDW_BROADCAST_DEV_NUM) {
		pr_err("SDW: Invalid device for paging :%d\n", dev_num);
		return -EINVAL;
	}

	if (!slave) {
		pr_err("SDW: No slave for paging addr\n");
		return -EINVAL;
	}

	if (!slave->prop.paging_support) {
		dev_err(&slave->dev,
			"address %x needs paging but no support\n", addr);
		return -EINVAL;
	}

	msg->addr_page1 = FIELD_GET(SDW_SCP_ADDRPAGE1_MASK, addr);
	msg->addr_page2 = FIELD_GET(SDW_SCP_ADDRPAGE2_MASK, addr);
	msg->addr |= BIT(15);
	msg->page = true;

	return 0;
}

/*
 * Read/Write IO functions.
 */

static int sdw_ntransfer_no_pm(struct sdw_slave *slave, u32 addr, u8 flags,
			       size_t count, u8 *val)
{
	struct sdw_msg msg;
	size_t size;
	int ret;

	while (count) {
		// Only handle bytes up to next page boundary
		size = min_t(size_t, count, (SDW_REGADDR + 1) - (addr & SDW_REGADDR));

		ret = sdw_fill_msg(&msg, slave, addr, size, slave->dev_num, flags, val);
		if (ret < 0)
			return ret;

		ret = sdw_transfer(slave->bus, &msg);
		if (ret < 0 && !slave->is_mockup_device)
			return ret;

		addr += size;
		val += size;
		count -= size;
	}

	return 0;
}

/**
 * sdw_nread_no_pm() - Read "n" contiguous SDW Slave registers with no PM
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be read
 *
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
	return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_READ, count, val);
}
EXPORT_SYMBOL(sdw_nread_no_pm);

/**
 * sdw_nwrite_no_pm() - Write "n" contiguous SDW Slave registers with no PM
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be written
 *
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
	return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_WRITE, count, (u8 *)val);
}
EXPORT_SYMBOL(sdw_nwrite_no_pm);

/**
 * sdw_write_no_pm() - Write a SDW Slave register with no PM
 * @slave: SDW Slave
 * @addr: Register address
 * @value: Register value
 */
int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
{
	return sdw_nwrite_no_pm(slave, addr, 1, &value);
}
EXPORT_SYMBOL(sdw_write_no_pm);

static int
sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
{
	struct sdw_msg msg;
	u8 buf;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_READ, &buf);
	if (ret < 0)
		return ret;

	ret = sdw_transfer(bus, &msg);
	if (ret < 0)
		return ret;

	return buf;
}

static int
sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
{
	struct sdw_msg msg;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_WRITE, &value);
	if (ret < 0)
		return ret;

	return sdw_transfer(bus, &msg);
}

int sdw_bread_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr)
{
	struct sdw_msg msg;
	u8 buf;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_READ, &buf);
	if (ret < 0)
		return ret;

	ret = sdw_transfer_unlocked(bus, &msg);
	if (ret < 0)
		return ret;

	return buf;
}
EXPORT_SYMBOL(sdw_bread_no_pm_unlocked);

int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
{
	struct sdw_msg msg;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_WRITE, &value);
	if (ret < 0)
		return ret;

	return sdw_transfer_unlocked(bus, &msg);
}
EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked);

/**
 * sdw_read_no_pm() - Read a SDW Slave register with no PM
 * @slave: SDW Slave
 * @addr: Register address
 */
int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
{
	u8 buf;
	int ret;

	ret = sdw_nread_no_pm(slave, addr, 1, &buf);
	if (ret < 0)
		return ret;
	else
		return buf;
}
EXPORT_SYMBOL(sdw_read_no_pm);

int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
{
	int tmp;

	tmp = sdw_read_no_pm(slave, addr);
	if (tmp < 0)
		return tmp;

	tmp = (tmp & ~mask) | val;
	return sdw_write_no_pm(slave, addr, tmp);
}
EXPORT_SYMBOL(sdw_update_no_pm);

/* Read-Modify-Write Slave register */
int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
{
	int tmp;

	tmp = sdw_read(slave, addr);
	if (tmp < 0)
		return tmp;

	tmp = (tmp & ~mask) | val;
	return sdw_write(slave, addr, tmp);
}
EXPORT_SYMBOL(sdw_update);
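
/*
 * Illustrative sketch (not part of the bus API): a peripheral driver would
 * typically use the read-modify-write helpers above from its own code, for
 * example to flip a single bit in an implementation-defined register. The
 * register offset and mask below are hypothetical.
 *
 *	#define HYP_VENDOR_CTRL		0x3000
 *	#define HYP_VENDOR_CTRL_EN	BIT(0)
 *
 *	ret = sdw_update(slave, HYP_VENDOR_CTRL, HYP_VENDOR_CTRL_EN,
 *			 HYP_VENDOR_CTRL_EN);
 *	if (ret < 0)
 *		return ret;
 *
 * sdw_update() takes a PM runtime reference through sdw_read()/sdw_write(),
 * while sdw_update_no_pm() relies on the caller keeping the device resumed.
 */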

/**
 * sdw_nread() - Read "n" contiguous SDW Slave registers
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be read
 *
 * This version of the function will take a PM reference to the slave
 * device.
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
	int ret;

	ret = pm_runtime_get_sync(&slave->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_noidle(&slave->dev);
		return ret;
	}

	ret = sdw_nread_no_pm(slave, addr, count, val);

	pm_runtime_mark_last_busy(&slave->dev);
	pm_runtime_put(&slave->dev);

	return ret;
}
EXPORT_SYMBOL(sdw_nread);

/**
 * sdw_nwrite() - Write "n" contiguous SDW Slave registers
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be written
 *
 * This version of the function will take a PM reference to the slave
 * device.
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
	int ret;

	ret = pm_runtime_get_sync(&slave->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_noidle(&slave->dev);
		return ret;
	}

	ret = sdw_nwrite_no_pm(slave, addr, count, val);

	pm_runtime_mark_last_busy(&slave->dev);
	pm_runtime_put(&slave->dev);

	return ret;
}
EXPORT_SYMBOL(sdw_nwrite);

/**
 * sdw_read() - Read a SDW Slave register
 * @slave: SDW Slave
 * @addr: Register address
 *
 * This version of the function will take a PM reference to the slave
 * device.
 */
int sdw_read(struct sdw_slave *slave, u32 addr)
{
	u8 buf;
	int ret;

	ret = sdw_nread(slave, addr, 1, &buf);
	if (ret < 0)
		return ret;

	return buf;
}
EXPORT_SYMBOL(sdw_read);

/**
 * sdw_write() - Write a SDW Slave register
 * @slave: SDW Slave
 * @addr: Register address
 * @value: Register value
 *
 * This version of the function will take a PM reference to the slave
 * device.
 */
int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
{
	return sdw_nwrite(slave, addr, 1, &value);
}
EXPORT_SYMBOL(sdw_write);

/*
 * SDW alert handling
 */

/* called with bus_lock held */
static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
{
	struct sdw_slave *slave;

	list_for_each_entry(slave, &bus->slaves, node) {
		if (slave->dev_num == i)
			return slave;
	}

	return NULL;
}

int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
{
	if (slave->id.mfg_id != id.mfg_id ||
	    slave->id.part_id != id.part_id ||
	    slave->id.class_id != id.class_id ||
	    (slave->id.unique_id != SDW_IGNORED_UNIQUE_ID &&
	     slave->id.unique_id != id.unique_id))
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL(sdw_compare_devid);

/* called with bus_lock held */
static int sdw_get_device_num(struct sdw_slave *slave)
{
	struct sdw_bus *bus = slave->bus;
	int bit;

	if (bus->ops && bus->ops->get_device_num) {
		bit = bus->ops->get_device_num(bus, slave);
		if (bit < 0)
			goto err;
	} else {
		bit = find_first_zero_bit(bus->assigned, SDW_MAX_DEVICES);
		if (bit == SDW_MAX_DEVICES) {
			bit = -ENODEV;
			goto err;
		}
	}

	/*
	 * Do not update dev_num in the Slave data structure here;
	 * update it once programming dev_num is successful.
	 */
	set_bit(bit, bus->assigned);

err:
	return bit;
}

static int sdw_assign_device_num(struct sdw_slave *slave)
{
	struct sdw_bus *bus = slave->bus;
	int ret, dev_num;
	bool new_device = false;

	/* check first if device number is assigned, if so reuse that */
	if (!slave->dev_num) {
		if (!slave->dev_num_sticky) {
			mutex_lock(&slave->bus->bus_lock);
			dev_num = sdw_get_device_num(slave);
			mutex_unlock(&slave->bus->bus_lock);
			if (dev_num < 0) {
				dev_err(bus->dev, "Get dev_num failed: %d\n",
					dev_num);
				return dev_num;
			}
			slave->dev_num = dev_num;
			slave->dev_num_sticky = dev_num;
			new_device = true;
		} else {
			slave->dev_num = slave->dev_num_sticky;
		}
	}

	if (!new_device)
		dev_dbg(bus->dev,
			"Slave already registered, reusing dev_num:%d\n",
			slave->dev_num);

	/* Clear the slave->dev_num to transfer message on device 0 */
	dev_num = slave->dev_num;
	slave->dev_num = 0;

	ret = sdw_write_no_pm(slave, SDW_SCP_DEVNUMBER, dev_num);
	if (ret < 0) {
		dev_err(bus->dev, "Program device_num %d failed: %d\n",
			dev_num, ret);
		return ret;
	}

	/* After xfer of msg, restore dev_num */
	slave->dev_num = slave->dev_num_sticky;

	if (bus->ops && bus->ops->new_peripheral_assigned)
		bus->ops->new_peripheral_assigned(bus, slave, dev_num);

	return 0;
}

void sdw_extract_slave_id(struct sdw_bus *bus,
			  u64 addr, struct sdw_slave_id *id)
{
	dev_dbg(bus->dev, "SDW Slave Addr: %llx\n", addr);

	id->sdw_version = SDW_VERSION(addr);
	id->unique_id = SDW_UNIQUE_ID(addr);
	id->mfg_id = SDW_MFG_ID(addr);
	id->part_id = SDW_PART_ID(addr);
	id->class_id = SDW_CLASS_ID(addr);

	dev_dbg(bus->dev,
		"SDW Slave class_id 0x%02x, mfg_id 0x%04x, part_id 0x%04x, unique_id 0x%x, version 0x%x\n",
		id->class_id, id->mfg_id, id->part_id, id->unique_id, id->sdw_version);
}
EXPORT_SYMBOL(sdw_extract_slave_id);

static int sdw_program_device_num(struct sdw_bus *bus, bool *programmed)
{
	u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
	struct sdw_slave *slave, *_s;
	struct sdw_slave_id id;
	struct sdw_msg msg;
	bool found;
	int count = 0, ret;
	u64 addr;

	*programmed = false;

	/* No Slave, so use raw xfer api */
	ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
			   SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
	if (ret < 0)
		return ret;

	do {
		ret = sdw_transfer(bus, &msg);
		if (ret == -ENODATA) { /* end of device id reads */
			dev_dbg(bus->dev, "No more devices to enumerate\n");
			ret = 0;
			break;
		}
		if (ret < 0) {
			dev_err(bus->dev, "DEVID read fail:%d\n", ret);
			break;
		}

		/*
		 * Construct the addr and extract. Cast the higher shift
		 * bits to avoid truncation due to size limit.
		 */
		addr = buf[5] | (buf[4] << 8) | (buf[3] << 16) |
			((u64)buf[2] << 24) | ((u64)buf[1] << 32) |
			((u64)buf[0] << 40);

		sdw_extract_slave_id(bus, addr, &id);

		found = false;
		/* Now compare with entries */
		list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
			if (sdw_compare_devid(slave, id) == 0) {
				found = true;

				/*
				 * To prevent skipping state-machine stages don't
				 * program a device until we've seen it UNATTACH.
				 * Must return here because no other device on #0
				 * can be detected until this one has been
				 * assigned a device ID.
				 */
				if (slave->status != SDW_SLAVE_UNATTACHED)
					return 0;

				/*
				 * Assign a new dev_num to this Slave and
				 * do not mark it present. It will be marked
				 * present after it reports ATTACHED on the
				 * new dev_num.
				 */
				ret = sdw_assign_device_num(slave);
				if (ret < 0) {
					dev_err(bus->dev,
						"Assign dev_num failed:%d\n",
						ret);
					return ret;
				}

				*programmed = true;

				break;
			}
		}

		if (!found) {
			/* TODO: Park this device in Group 13 */

			/*
			 * add Slave device even if there is no platform
			 * firmware description. There will be no driver probe
			 * but the user/integration will be able to see the
			 * device, enumeration status and device number in sysfs
			 */
			sdw_slave_add(bus, &id, NULL);

			dev_err(bus->dev, "Slave Entry not found\n");
		}

		count++;

		/*
		 * Loop until an error occurs or the retry count is exhausted.
		 * A device can drop off and rejoin during enumeration,
		 * so count up to twice the device limit.
		 */

	} while (ret == 0 && count < (SDW_MAX_DEVICES * 2));

	return ret;
}

static void sdw_modify_slave_status(struct sdw_slave *slave,
				    enum sdw_slave_status status)
{
	struct sdw_bus *bus = slave->bus;

	mutex_lock(&bus->bus_lock);

	dev_vdbg(bus->dev,
		 "changing status slave %d status %d new status %d\n",
		 slave->dev_num, slave->status, status);

	if (status == SDW_SLAVE_UNATTACHED) {
		dev_dbg(&slave->dev,
			"initializing enumeration and init completion for Slave %d\n",
			slave->dev_num);

		reinit_completion(&slave->enumeration_complete);
		reinit_completion(&slave->initialization_complete);

	} else if ((status == SDW_SLAVE_ATTACHED) &&
		   (slave->status == SDW_SLAVE_UNATTACHED)) {
		dev_dbg(&slave->dev,
			"signaling enumeration completion for Slave %d\n",
			slave->dev_num);

		complete_all(&slave->enumeration_complete);
	}
	slave->status = status;
	mutex_unlock(&bus->bus_lock);
}

static int sdw_slave_clk_stop_callback(struct sdw_slave *slave,
				       enum sdw_clk_stop_mode mode,
				       enum sdw_clk_stop_type type)
{
	int ret = 0;

	mutex_lock(&slave->sdw_dev_lock);

	if (slave->probed) {
		struct device *dev = &slave->dev;
		struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);

		if (drv->ops && drv->ops->clk_stop)
			ret = drv->ops->clk_stop(slave, mode, type);
	}

	mutex_unlock(&slave->sdw_dev_lock);

	return ret;
}

static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
				      enum sdw_clk_stop_mode mode,
				      bool prepare)
{
	bool wake_en;
	u32 val = 0;
	int ret;

	wake_en = slave->prop.wake_capable;

	if (prepare) {
		val = SDW_SCP_SYSTEMCTRL_CLK_STP_PREP;

		if (mode == SDW_CLK_STOP_MODE1)
			val |= SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1;

		if (wake_en)
			val |= SDW_SCP_SYSTEMCTRL_WAKE_UP_EN;
	} else {
		ret = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
		if (ret < 0) {
			if (ret != -ENODATA)
				dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL read failed:%d\n", ret);
			return ret;
		}
		val = ret;
		val &= ~(SDW_SCP_SYSTEMCTRL_CLK_STP_PREP);
	}

	ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val);

	if (ret < 0 && ret != -ENODATA)
		dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL write failed:%d\n", ret);

	return ret;
}

static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num)
{
	int retry = bus->clk_stop_timeout;
	int val;

	do {
		val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT);
		if (val < 0) {
			if (val != -ENODATA)
				dev_err(bus->dev, "SDW_SCP_STAT bread failed:%d\n", val);
			return val;
		}
		val &= SDW_SCP_STAT_CLK_STP_NF;
		if (!val) {
			dev_dbg(bus->dev, "clock stop prep/de-prep done slave:%d\n",
				dev_num);
			return 0;
		}

		usleep_range(1000, 1500);
		retry--;
	} while (retry);

	dev_err(bus->dev, "clock stop prep/de-prep failed slave:%d\n",
		dev_num);

	return -ETIMEDOUT;
}
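
/*
 * Clock stop helpers. The sequence below is an illustrative sketch of how a
 * controller/manager driver is expected to combine them around a link power
 * transition (the suspend/resume hooks shown are hypothetical):
 *
 *	suspend:
 *		ret = sdw_bus_prep_clk_stop(bus);
 *		if (!ret)
 *			ret = sdw_bus_clk_stop(bus);
 *
 *	resume, after the clock is restarted:
 *		ret = sdw_bus_exit_clk_stop(bus);
 *
 * Peripherals entering Clock Stop Mode 1 are de-prepared only after they
 * re-enumerate, as noted in sdw_bus_exit_clk_stop().
 */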

/**
 * sdw_bus_prep_clk_stop: prepare Slave(s) for clock stop
 *
 * @bus: SDW bus instance
 *
 * Query Slave for clock stop mode and prepare for that mode.
 */
int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
{
	bool simple_clk_stop = true;
	struct sdw_slave *slave;
	bool is_slave = false;
	int ret = 0;

	/*
	 * In order to save on transition time, prepare
	 * each Slave and then wait for all Slave(s) to be
	 * prepared for clock stop.
	 * If one of the Slave devices has lost sync and
	 * replies with Command Ignored/-ENODATA, we continue
	 * the loop
	 */
	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		/* Identify if Slave(s) are available on Bus */
		is_slave = true;

		ret = sdw_slave_clk_stop_callback(slave,
						  SDW_CLK_STOP_MODE0,
						  SDW_CLK_PRE_PREPARE);
		if (ret < 0 && ret != -ENODATA) {
			dev_err(&slave->dev, "clock stop pre-prepare cb failed:%d\n", ret);
			return ret;
		}

		/* Only prepare a Slave device if needed */
		if (!slave->prop.simple_clk_stop_capable) {
			simple_clk_stop = false;

			ret = sdw_slave_clk_stop_prepare(slave,
							 SDW_CLK_STOP_MODE0,
							 true);
			if (ret < 0 && ret != -ENODATA) {
				dev_err(&slave->dev, "clock stop prepare failed:%d\n", ret);
				return ret;
			}
		}
	}

	/* Skip remaining clock stop preparation if no Slave is attached */
	if (!is_slave)
		return 0;

	/*
	 * Don't wait for all Slaves to be ready if they follow the simple
	 * state machine
	 */
	if (!simple_clk_stop) {
		ret = sdw_bus_wait_for_clk_prep_deprep(bus,
						       SDW_BROADCAST_DEV_NUM);
		/*
		 * if there are no Slave devices present and the reply is
		 * Command_Ignored/-ENODATA, we don't need to continue with the
		 * flow and can just return here. The error code is not modified
		 * and its handling left as an exercise for the caller.
		 */
		if (ret < 0)
			return ret;
	}

	/* Inform slaves that prep is done */
	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		ret = sdw_slave_clk_stop_callback(slave,
						  SDW_CLK_STOP_MODE0,
						  SDW_CLK_POST_PREPARE);

		if (ret < 0 && ret != -ENODATA) {
			dev_err(&slave->dev, "clock stop post-prepare cb failed:%d\n", ret);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sdw_bus_prep_clk_stop);

/**
 * sdw_bus_clk_stop: stop bus clock
 *
 * @bus: SDW bus instance
 *
 * After preparing the Slaves for clock stop, stop the clock by broadcasting
 * write to SCP_CTRL register.
 */
int sdw_bus_clk_stop(struct sdw_bus *bus)
{
	int ret;

	/*
	 * broadcast clock stop now, attached Slaves will ACK this,
	 * unattached will ignore
	 */
	ret = sdw_bwrite_no_pm(bus, SDW_BROADCAST_DEV_NUM,
			       SDW_SCP_CTRL, SDW_SCP_CTRL_CLK_STP_NOW);
	if (ret < 0) {
		if (ret != -ENODATA)
			dev_err(bus->dev, "ClockStopNow Broadcast msg failed %d\n", ret);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(sdw_bus_clk_stop);

/**
 * sdw_bus_exit_clk_stop: Exit clock stop mode
 *
 * @bus: SDW bus instance
 *
 * This De-prepares the Slaves by exiting Clock Stop Mode 0. For the Slaves
 * exiting Clock Stop Mode 1, they will be de-prepared after they enumerate
 * back.
 */
int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
{
	bool simple_clk_stop = true;
	struct sdw_slave *slave;
	bool is_slave = false;
	int ret;

	/*
	 * In order to save on transition time, de-prepare
	 * each Slave and then wait for all Slave(s) to be
	 * de-prepared after clock resume.
	 */
	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		/* Identify if Slave(s) are available on Bus */
		is_slave = true;

		ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
						  SDW_CLK_PRE_DEPREPARE);
		if (ret < 0)
			dev_warn(&slave->dev, "clock stop pre-deprepare cb failed:%d\n", ret);

		/* Only de-prepare a Slave device if needed */
		if (!slave->prop.simple_clk_stop_capable) {
			simple_clk_stop = false;

			ret = sdw_slave_clk_stop_prepare(slave, SDW_CLK_STOP_MODE0,
							 false);

			if (ret < 0)
				dev_warn(&slave->dev, "clock stop deprepare failed:%d\n", ret);
		}
	}

	/* Skip remaining clock stop de-preparation if no Slave is attached */
	if (!is_slave)
		return 0;

	/*
	 * Don't wait for all Slaves to be ready if they follow the simple
	 * state machine
	 */
	if (!simple_clk_stop) {
		ret = sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM);
		if (ret < 0)
			dev_warn(bus->dev, "clock stop deprepare wait failed:%d\n", ret);
	}

	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
						  SDW_CLK_POST_DEPREPARE);
		if (ret < 0)
			dev_warn(&slave->dev, "clock stop post-deprepare cb failed:%d\n", ret);
	}

	return 0;
}
EXPORT_SYMBOL(sdw_bus_exit_clk_stop);

int sdw_configure_dpn_intr(struct sdw_slave *slave,
			   int port, bool enable, int mask)
{
	u32 addr;
	int ret;
	u8 val = 0;

	if (slave->bus->params.s_data_mode != SDW_PORT_DATA_MODE_NORMAL) {
		dev_dbg(&slave->dev, "TEST FAIL interrupt %s\n",
			enable ? "on" : "off");
		mask |= SDW_DPN_INT_TEST_FAIL;
	}

	addr = SDW_DPN_INTMASK(port);

	/* Set/Clear port ready interrupt mask */
	if (enable) {
		val |= mask;
		val |= SDW_DPN_INT_PORT_READY;
	} else {
		val &= ~(mask);
		val &= ~SDW_DPN_INT_PORT_READY;
	}

	ret = sdw_update_no_pm(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val);
	if (ret < 0)
		dev_err(&slave->dev,
			"SDW_DPN_INTMASK write failed:%d\n", ret);

	return ret;
}
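
/*
 * Worked example for the base/scale programming done in
 * sdw_slave_set_frequency() below (values are illustrative): with a bus
 * mclk_freq of 19.2 MHz and curr_dr_freq of 9.6 MHz, curr_freq is 4.8 MHz,
 * so scale = 19200000 / 4800000 = 4 and ilog2(4) = 2. The value written to
 * SDW_SCP_BUSCLOCK_SCALE_B0/B1 is therefore 2 + 1 = 3, while
 * SDW_SCP_BUS_CLOCK_BASE is set to SDW_SCP_BASE_CLOCK_19200000_HZ.
 */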

static int sdw_slave_set_frequency(struct sdw_slave *slave)
{
	u32 mclk_freq = slave->bus->prop.mclk_freq;
	u32 curr_freq = slave->bus->params.curr_dr_freq >> 1;
	unsigned int scale;
	u8 scale_index;
	u8 base;
	int ret;

	/*
	 * frequency base and scale registers are required for SDCA
	 * devices. They may also be used for 1.2+/non-SDCA devices.
	 * Driver can set the property, we will need a DisCo property
	 * to discover this case from platform firmware.
	 */
	if (!slave->id.class_id && !slave->prop.clock_reg_supported)
		return 0;

	if (!mclk_freq) {
		dev_err(&slave->dev,
			"no bus MCLK, cannot set SDW_SCP_BUS_CLOCK_BASE\n");
		return -EINVAL;
	}

	/*
	 * map base frequency using Table 89 of SoundWire 1.2 spec.
	 * The order of the tests just follows the specification, this
	 * is not a selection between possible values or a search for
	 * the best value but just a mapping. Only one case per platform
	 * is relevant.
	 * Some BIOSes have inconsistent values for mclk_freq but a
	 * correct root, so we force mclk_freq to avoid variations.
	 */
	if (!(19200000 % mclk_freq)) {
		mclk_freq = 19200000;
		base = SDW_SCP_BASE_CLOCK_19200000_HZ;
	} else if (!(24000000 % mclk_freq)) {
		mclk_freq = 24000000;
		base = SDW_SCP_BASE_CLOCK_24000000_HZ;
	} else if (!(24576000 % mclk_freq)) {
		mclk_freq = 24576000;
		base = SDW_SCP_BASE_CLOCK_24576000_HZ;
	} else if (!(22579200 % mclk_freq)) {
		mclk_freq = 22579200;
		base = SDW_SCP_BASE_CLOCK_22579200_HZ;
	} else if (!(32000000 % mclk_freq)) {
		mclk_freq = 32000000;
		base = SDW_SCP_BASE_CLOCK_32000000_HZ;
	} else {
		dev_err(&slave->dev,
			"Unsupported clock base, mclk %d\n",
			mclk_freq);
		return -EINVAL;
	}

	if (mclk_freq % curr_freq) {
		dev_err(&slave->dev,
			"mclk %d is not multiple of bus curr_freq %d\n",
			mclk_freq, curr_freq);
		return -EINVAL;
	}

	scale = mclk_freq / curr_freq;

	/*
	 * map scale to Table 90 of SoundWire 1.2 spec - and check
	 * that the scale is a power of two and maximum 64
	 */
	scale_index = ilog2(scale);

	if (BIT(scale_index) != scale || scale_index > 6) {
		dev_err(&slave->dev,
			"No match found for scale %d, bus mclk %d curr_freq %d\n",
			scale, mclk_freq, curr_freq);
		return -EINVAL;
	}
	scale_index++;

	ret = sdw_write_no_pm(slave, SDW_SCP_BUS_CLOCK_BASE, base);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret);
		return ret;
	}

	/* initialize scale for both banks */
	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret);
		return ret;
	}
	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
	if (ret < 0)
		dev_err(&slave->dev,
			"SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);

	dev_dbg(&slave->dev,
		"Configured bus base %d, scale %d, mclk %d, curr_freq %d\n",
		base, scale_index, mclk_freq, curr_freq);

	return ret;
}

static int sdw_initialize_slave(struct sdw_slave *slave)
{
	struct sdw_slave_prop *prop = &slave->prop;
	int status;
	int ret;
	u8 val;

	ret = sdw_slave_set_frequency(slave);
	if (ret < 0)
		return ret;

	if (slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH) {
		/* Clear bus clash interrupt before enabling interrupt mask */
		status = sdw_read_no_pm(slave, SDW_SCP_INT1);
		if (status < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 (BUS_CLASH) read failed:%d\n", status);
			return status;
		}
		if (status & SDW_SCP_INT1_BUS_CLASH) {
			dev_warn(&slave->dev, "Bus clash detected before INT mask is enabled\n");
			ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_BUS_CLASH);
			if (ret < 0) {
				dev_err(&slave->dev,
					"SDW_SCP_INT1 (BUS_CLASH) write failed:%d\n", ret);
				return ret;
			}
		}
	}
	if ((slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY) &&
	    !(slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY)) {
		/* Clear parity interrupt before enabling interrupt mask */
		status = sdw_read_no_pm(slave, SDW_SCP_INT1);
		if (status < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 (PARITY) read failed:%d\n", status);
			return status;
		}
		if (status & SDW_SCP_INT1_PARITY) {
			dev_warn(&slave->dev, "PARITY error detected before INT mask is enabled\n");
			ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_PARITY);
			if (ret < 0) {
				dev_err(&slave->dev,
					"SDW_SCP_INT1 (PARITY) write failed:%d\n", ret);
				return ret;
			}
		}
	}

	/*
	 * Set SCP_INT1_MASK register, typically bus clash and
	 * implementation-defined interrupt mask. The Parity detection
	 * may not always be correct on startup so its use is
	 * device-dependent, it might e.g. only be enabled in
	 * steady-state after a couple of frames.
	 */
	val = slave->prop.scp_int1_mask;

	/* Enable SCP interrupts */
	ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1, val, val);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_INTMASK1 write failed:%d\n", ret);
		return ret;
	}

	/* No need to continue if DP0 is not present */
	if (!slave->prop.dp0_prop)
		return 0;

	/* Enable DP0 interrupts */
	val = prop->dp0_prop->imp_def_interrupts;
	val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;

	ret = sdw_update_no_pm(slave, SDW_DP0_INTMASK, val, val);
	if (ret < 0)
		dev_err(&slave->dev,
			"SDW_DP0_INTMASK write failed:%d\n", ret);
	return ret;
}

static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
{
	u8 clear, impl_int_mask;
	int status, status2, ret, count = 0;

	status = sdw_read_no_pm(slave, SDW_DP0_INT);
	if (status < 0) {
		dev_err(&slave->dev,
			"SDW_DP0_INT read failed:%d\n", status);
		return status;
	}

	do {
		clear = status & ~SDW_DP0_INTERRUPTS;

		if (status & SDW_DP0_INT_TEST_FAIL) {
			dev_err(&slave->dev, "Test fail for port 0\n");
			clear |= SDW_DP0_INT_TEST_FAIL;
		}

		/*
		 * Assumption: PORT_READY interrupt will be received only for
		 * ports implementing Channel Prepare state machine (CP_SM)
		 */

		if (status & SDW_DP0_INT_PORT_READY) {
			complete(&slave->port_ready[0]);
			clear |= SDW_DP0_INT_PORT_READY;
		}

		if (status & SDW_DP0_INT_BRA_FAILURE) {
			dev_err(&slave->dev, "BRA failed\n");
			clear |= SDW_DP0_INT_BRA_FAILURE;
		}

		impl_int_mask = SDW_DP0_INT_IMPDEF1 |
			SDW_DP0_INT_IMPDEF2 | SDW_DP0_INT_IMPDEF3;

		if (status & impl_int_mask) {
			clear |= impl_int_mask;
			*slave_status = clear;
		}

		/* clear the interrupts but don't touch reserved and SDCA_CASCADE fields */
		ret = sdw_write_no_pm(slave, SDW_DP0_INT, clear);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_DP0_INT write failed:%d\n", ret);
			return ret;
		}

		/* Read DP0 interrupt again */
		status2 = sdw_read_no_pm(slave, SDW_DP0_INT);
		if (status2 < 0) {
			dev_err(&slave->dev,
				"SDW_DP0_INT read failed:%d\n", status2);
			return status2;
		}
		/* filter to limit loop to interrupts identified in the first status read */
		status &= status2;

		count++;

		/* we can get alerts while processing so keep retrying */
	} while ((status & SDW_DP0_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));

	if (count == SDW_READ_INTR_CLEAR_RETRY)
		dev_warn(&slave->dev, "Reached MAX_RETRY on DP0 read\n");

	return ret;
}

static int sdw_handle_port_interrupt(struct sdw_slave *slave,
				     int port, u8 *slave_status)
{
	u8 clear, impl_int_mask;
	int status, status2, ret, count = 0;
	u32 addr;

	if (port == 0)
		return sdw_handle_dp0_interrupt(slave, slave_status);

	addr = SDW_DPN_INT(port);
	status = sdw_read_no_pm(slave, addr);
	if (status < 0) {
		dev_err(&slave->dev,
			"SDW_DPN_INT read failed:%d\n", status);

		return status;
	}

	do {
		clear = status & ~SDW_DPN_INTERRUPTS;

		if (status & SDW_DPN_INT_TEST_FAIL) {
			dev_err(&slave->dev, "Test fail for port:%d\n", port);
			clear |= SDW_DPN_INT_TEST_FAIL;
		}

		/*
		 * Assumption: PORT_READY interrupt will be received only
		 * for ports implementing CP_SM.
		 */
		if (status & SDW_DPN_INT_PORT_READY) {
			complete(&slave->port_ready[port]);
			clear |= SDW_DPN_INT_PORT_READY;
		}

		impl_int_mask = SDW_DPN_INT_IMPDEF1 |
			SDW_DPN_INT_IMPDEF2 | SDW_DPN_INT_IMPDEF3;

		if (status & impl_int_mask) {
			clear |= impl_int_mask;
			*slave_status = clear;
		}

		/* clear the interrupt but don't touch reserved fields */
		ret = sdw_write_no_pm(slave, addr, clear);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_DPN_INT write failed:%d\n", ret);
			return ret;
		}

		/* Read DPN interrupt again */
		status2 = sdw_read_no_pm(slave, addr);
		if (status2 < 0) {
			dev_err(&slave->dev,
				"SDW_DPN_INT read failed:%d\n", status2);
			return status2;
		}
		/* filter to limit loop to interrupts identified in the first status read */
		status &= status2;

		count++;

		/* we can get alerts while processing so keep retrying */
	} while ((status & SDW_DPN_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));

	if (count == SDW_READ_INTR_CLEAR_RETRY)
		dev_warn(&slave->dev, "Reached MAX_RETRY on port read\n");

	return ret;
}

static int sdw_handle_slave_alerts(struct sdw_slave *slave)
{
	struct sdw_slave_intr_status slave_intr;
	u8 clear = 0, bit, port_status[15] = {0};
	int port_num, stat, ret, count = 0;
	unsigned long port;
	bool slave_notify;
	u8 sdca_cascade = 0;
	u8 buf, buf2[2];
	bool parity_check;
	bool parity_quirk;

	sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);

	ret = pm_runtime_get_sync(&slave->dev);
	if (ret < 0 && ret != -EACCES) {
		dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
		pm_runtime_put_noidle(&slave->dev);
		return ret;
	}

	/* Read Intstat 1, Intstat 2 and Intstat 3 registers */
	ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_INT1 read failed:%d\n", ret);
		goto io_err;
	}
	buf = ret;

	ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_INT2/3 read failed:%d\n", ret);
		goto io_err;
	}

	if (slave->id.class_id) {
		ret = sdw_read_no_pm(slave, SDW_DP0_INT);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_DP0_INT read failed:%d\n", ret);
			goto io_err;
		}
		sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
	}

	do {
		slave_notify = false;

		/*
		 * Check parity, bus clash and Slave (impl defined)
		 * interrupt
		 */
		if (buf & SDW_SCP_INT1_PARITY) {
			parity_check = slave->prop.scp_int1_mask & SDW_SCP_INT1_PARITY;
			parity_quirk = !slave->first_interrupt_done &&
				       (slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY);

			if (parity_check && !parity_quirk)
				dev_err(&slave->dev, "Parity error detected\n");
			clear |= SDW_SCP_INT1_PARITY;
		}

		if (buf & SDW_SCP_INT1_BUS_CLASH) {
			if (slave->prop.scp_int1_mask & SDW_SCP_INT1_BUS_CLASH)
				dev_err(&slave->dev, "Bus clash detected\n");
			clear |= SDW_SCP_INT1_BUS_CLASH;
		}

		/*
		 * When bus clash or parity errors are detected, such errors
		 * are unlikely to be recoverable errors.
		 * TODO: In such scenario, reset bus. Make this configurable
		 * via sysfs property with bus reset being the default.
		 */

		if (buf & SDW_SCP_INT1_IMPL_DEF) {
			if (slave->prop.scp_int1_mask & SDW_SCP_INT1_IMPL_DEF) {
				dev_dbg(&slave->dev, "Slave impl defined interrupt\n");
				slave_notify = true;
			}
			clear |= SDW_SCP_INT1_IMPL_DEF;
		}

		/* the SDCA interrupts are cleared in the codec driver .interrupt_callback() */
		if (sdca_cascade)
			slave_notify = true;

		/* Check port 0 - 3 interrupts */
		port = buf & SDW_SCP_INT1_PORT0_3;

		/* To get port number corresponding to bits, shift it */
		port = FIELD_GET(SDW_SCP_INT1_PORT0_3, port);
		for_each_set_bit(bit, &port, 8) {
			sdw_handle_port_interrupt(slave, bit,
						  &port_status[bit]);
		}

		/* Check if cascade 2 interrupt is present */
		if (buf & SDW_SCP_INT1_SCP2_CASCADE) {
			port = buf2[0] & SDW_SCP_INTSTAT2_PORT4_10;
			for_each_set_bit(bit, &port, 8) {
				/* scp2 ports start from 4 */
				port_num = bit + 4;
				sdw_handle_port_interrupt(slave,
							  port_num,
							  &port_status[port_num]);
			}
		}

		/* now check last cascade */
		if (buf2[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE) {
			port = buf2[1] & SDW_SCP_INTSTAT3_PORT11_14;
			for_each_set_bit(bit, &port, 8) {
				/* scp3 ports start from 11 */
				port_num = bit + 11;
				sdw_handle_port_interrupt(slave,
							  port_num,
							  &port_status[port_num]);
			}
		}

		/* Update the Slave driver */
		if (slave_notify) {
			mutex_lock(&slave->sdw_dev_lock);

			if (slave->probed) {
				struct device *dev = &slave->dev;
				struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);

				if (slave->prop.use_domain_irq && slave->irq)
					handle_nested_irq(slave->irq);

				if (drv->ops && drv->ops->interrupt_callback) {
					slave_intr.sdca_cascade = sdca_cascade;
					slave_intr.control_port = clear;
					memcpy(slave_intr.port, &port_status,
					       sizeof(slave_intr.port));

					drv->ops->interrupt_callback(slave, &slave_intr);
				}
			}

			mutex_unlock(&slave->sdw_dev_lock);
		}

		/* Ack interrupt */
		ret = sdw_write_no_pm(slave, SDW_SCP_INT1, clear);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 write failed:%d\n", ret);
			goto io_err;
		}

		/* at this point all initial interrupt sources were handled */
		slave->first_interrupt_done = true;

		/*
		 * Read status again to ensure no new interrupts arrived
		 * while servicing interrupts.
		 */
		ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 recheck read failed:%d\n", ret);
			goto io_err;
		}
		buf = ret;

		ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT2/3 recheck read failed:%d\n", ret);
			goto io_err;
		}

		if (slave->id.class_id) {
			ret = sdw_read_no_pm(slave, SDW_DP0_INT);
			if (ret < 0) {
				dev_err(&slave->dev,
					"SDW_DP0_INT recheck read failed:%d\n", ret);
				goto io_err;
			}
			sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
		}

		/*
		 * Make sure no interrupts are pending
		 */
		stat = buf || buf2[0] || buf2[1] || sdca_cascade;

		/*
		 * Exit loop if Slave is continuously in ALERT state even
		 * after servicing the interrupt multiple times.
		 */
		count++;

		/* we can get alerts while processing so keep retrying */
	} while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY);

	if (count == SDW_READ_INTR_CLEAR_RETRY)
		dev_warn(&slave->dev, "Reached MAX_RETRY on alert read\n");

io_err:
	pm_runtime_mark_last_busy(&slave->dev);
	pm_runtime_put_autosuspend(&slave->dev);

	return ret;
}

static int sdw_update_slave_status(struct sdw_slave *slave,
				   enum sdw_slave_status status)
{
	int ret = 0;

	mutex_lock(&slave->sdw_dev_lock);

	if (slave->probed) {
		struct device *dev = &slave->dev;
		struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);

		if (drv->ops && drv->ops->update_status)
			ret = drv->ops->update_status(slave, status);
	}

	mutex_unlock(&slave->sdw_dev_lock);

	return ret;
}

/**
 * sdw_handle_slave_status() - Handle Slave status
 * @bus: SDW bus instance
 * @status: Status for all Slave(s)
 */
int sdw_handle_slave_status(struct sdw_bus *bus,
			    enum sdw_slave_status status[])
{
	enum sdw_slave_status prev_status;
	struct sdw_slave *slave;
	bool attached_initializing, id_programmed;
	int i, ret = 0;

	/* first check if any Slaves fell off the bus */
	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
		mutex_lock(&bus->bus_lock);
		if (test_bit(i, bus->assigned) == false) {
			mutex_unlock(&bus->bus_lock);
			continue;
		}
		mutex_unlock(&bus->bus_lock);

		slave = sdw_get_slave(bus, i);
		if (!slave)
			continue;

		if (status[i] == SDW_SLAVE_UNATTACHED &&
		    slave->status != SDW_SLAVE_UNATTACHED) {
			dev_warn(&slave->dev, "Slave %d state check1: UNATTACHED, status was %d\n",
				 i, slave->status);
			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);

			/* Ensure driver knows that peripheral unattached */
			ret = sdw_update_slave_status(slave, status[i]);
			if (ret < 0)
				dev_warn(&slave->dev, "Update Slave status failed:%d\n", ret);
		}
	}

	if (status[0] == SDW_SLAVE_ATTACHED) {
		dev_dbg(bus->dev, "Slave attached, programming device number\n");

		/*
		 * Programming a device number will have side effects,
		 * so we deal with other devices at a later time.
		 * This relies on those devices reporting ATTACHED, which will
		 * trigger another call to this function. This will only
		 * happen if at least one device ID was programmed.
		 * Error returns from sdw_program_device_num() are currently
		 * ignored because there's no useful recovery that can be done.
		 * Returning the error here could result in the current status
		 * of other devices not being handled, because if no device IDs
		 * were programmed there's nothing to guarantee a status change
		 * to trigger another call to this function.
		 */
		sdw_program_device_num(bus, &id_programmed);
		if (id_programmed)
			return 0;
	}

	/* Continue to check other slave statuses */
	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
		mutex_lock(&bus->bus_lock);
		if (test_bit(i, bus->assigned) == false) {
			mutex_unlock(&bus->bus_lock);
			continue;
		}
		mutex_unlock(&bus->bus_lock);

		slave = sdw_get_slave(bus, i);
		if (!slave)
			continue;

		attached_initializing = false;

		switch (status[i]) {
		case SDW_SLAVE_UNATTACHED:
			if (slave->status == SDW_SLAVE_UNATTACHED)
				break;

			dev_warn(&slave->dev, "Slave %d state check2: UNATTACHED, status was %d\n",
				 i, slave->status);

			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
			break;

		case SDW_SLAVE_ALERT:
			ret = sdw_handle_slave_alerts(slave);
			if (ret < 0)
				dev_err(&slave->dev,
					"Slave %d alert handling failed: %d\n",
					i, ret);
			break;

		case SDW_SLAVE_ATTACHED:
			if (slave->status == SDW_SLAVE_ATTACHED)
				break;

			prev_status = slave->status;
			sdw_modify_slave_status(slave, SDW_SLAVE_ATTACHED);

			if (prev_status == SDW_SLAVE_ALERT)
				break;

			attached_initializing = true;

			ret = sdw_initialize_slave(slave);
			if (ret < 0)
				dev_err(&slave->dev,
					"Slave %d initialization failed: %d\n",
					i, ret);

			break;

		default:
			dev_err(&slave->dev, "Invalid slave %d status:%d\n",
				i, status[i]);
			break;
		}

		ret = sdw_update_slave_status(slave, status[i]);
		if (ret < 0)
			dev_err(&slave->dev,
				"Update Slave status failed:%d\n", ret);
		if (attached_initializing) {
			dev_dbg(&slave->dev,
				"signaling initialization completion for Slave %d\n",
				slave->dev_num);

			complete_all(&slave->initialization_complete);

			/*
			 * If the manager became pm_runtime active, the peripherals will be
			 * restarted and attach, but their pm_runtime status may remain
			 * suspended. If the 'update_slave_status' callback initiates
			 * any sort of deferred processing, this processing would not be
			 * cancelled on pm_runtime suspend.
			 * To avoid such zombie states, we queue a request to resume.
			 * This would be a no-op in case the peripheral was being resumed
			 * by e.g. the ALSA/ASoC framework.
			 */
			pm_request_resume(&slave->dev);
		}
	}

	return ret;
}
EXPORT_SYMBOL(sdw_handle_slave_status);

void sdw_clear_slave_status(struct sdw_bus *bus, u32 request)
{
	struct sdw_slave *slave;
	int i;

	/* Check all non-zero devices */
	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
		mutex_lock(&bus->bus_lock);
		if (test_bit(i, bus->assigned) == false) {
			mutex_unlock(&bus->bus_lock);
			continue;
		}
		mutex_unlock(&bus->bus_lock);

		slave = sdw_get_slave(bus, i);
		if (!slave)
			continue;

		if (slave->status != SDW_SLAVE_UNATTACHED) {
			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
			slave->first_interrupt_done = false;
			sdw_update_slave_status(slave, SDW_SLAVE_UNATTACHED);
		}

		/* keep track of request, used in pm_runtime resume */
		slave->unattach_request = request;
	}
}
EXPORT_SYMBOL(sdw_clear_slave_status);