// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
// Copyright(c) 2015-17 Intel Corporation.

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/mod_devicetable.h>
#include <linux/pm_runtime.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_type.h>
#include "bus.h"
#include "sysfs_local.h"

static DEFINE_IDA(sdw_bus_ida);
static DEFINE_IDA(sdw_peripheral_ida);

static int sdw_get_id(struct sdw_bus *bus)
{
	int rc = ida_alloc(&sdw_bus_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;

	bus->id = rc;
	return 0;
}

static int sdw_irq_map(struct irq_domain *h, unsigned int virq,
		       irq_hw_number_t hw)
{
	struct sdw_bus *bus = h->host_data;

	irq_set_chip_data(virq, bus);
	irq_set_chip(virq, &bus->irq_chip);
	irq_set_nested_thread(virq, 1);
	irq_set_noprobe(virq);

	return 0;
}

static const struct irq_domain_ops sdw_domain_ops = {
	.map	= sdw_irq_map,
};

/**
 * sdw_bus_master_add() - add a bus Master instance
 * @bus: bus instance
 * @parent: parent device
 * @fwnode: firmware node handle
 *
 * Initializes the bus instance, reads properties and creates child
 * devices.
 */
int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
		       struct fwnode_handle *fwnode)
{
	struct sdw_master_prop *prop = NULL;
	int ret;

	if (!parent) {
		pr_err("SoundWire parent device is not set\n");
		return -ENODEV;
	}

	ret = sdw_get_id(bus);
	if (ret < 0) {
		dev_err(parent, "Failed to get bus id\n");
		return ret;
	}

	ret = sdw_master_device_add(bus, parent, fwnode);
	if (ret < 0) {
		dev_err(parent, "Failed to add master device at link %d\n",
			bus->link_id);
		return ret;
	}

	if (!bus->ops) {
		dev_err(bus->dev, "SoundWire Bus ops are not set\n");
		return -EINVAL;
	}

	if (!bus->compute_params) {
		dev_err(bus->dev,
			"Bandwidth allocation not configured, compute_params not set\n");
		return -EINVAL;
	}

	/*
	 * Give each bus_lock and msg_lock a unique key so that lockdep won't
	 * trigger a deadlock warning when the locks of several buses are
	 * grabbed during configuration of a multi-bus stream.
	 */
	lockdep_register_key(&bus->msg_lock_key);
	__mutex_init(&bus->msg_lock, "msg_lock", &bus->msg_lock_key);

	lockdep_register_key(&bus->bus_lock_key);
	__mutex_init(&bus->bus_lock, "bus_lock", &bus->bus_lock_key);

	INIT_LIST_HEAD(&bus->slaves);
	INIT_LIST_HEAD(&bus->m_rt_list);

	/*
	 * Initialize multi_link flag
	 */
	bus->multi_link = false;
	if (bus->ops->read_prop) {
		ret = bus->ops->read_prop(bus);
		if (ret < 0) {
			dev_err(bus->dev,
				"Bus read properties failed:%d\n", ret);
			return ret;
		}
	}

	sdw_bus_debugfs_init(bus);

	/*
	 * Device numbers in SoundWire are 0 through 15. The enumeration
	 * device number (0), broadcast device number (15), group numbers
	 * (12 and 13) and master device number (14) are not used for
	 * assignment, so mask these and all higher bits.
	 */

	/* Set higher order bits */
	*bus->assigned = ~GENMASK(SDW_BROADCAST_DEV_NUM, SDW_ENUM_DEV_NUM);

	/* Set enumeration device number and broadcast device number */
	set_bit(SDW_ENUM_DEV_NUM, bus->assigned);
	set_bit(SDW_BROADCAST_DEV_NUM, bus->assigned);

	/* Set group device numbers and master device number */
	set_bit(SDW_GROUP12_DEV_NUM, bus->assigned);
	set_bit(SDW_GROUP13_DEV_NUM, bus->assigned);
	set_bit(SDW_MASTER_DEV_NUM, bus->assigned);
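	/*
	 * Worked example (illustrative note, not normative): assuming
	 * SDW_ENUM_DEV_NUM == 0 and SDW_BROADCAST_DEV_NUM == 15, the first
	 * word of the 'assigned' bitmap ends up as:
	 *
	 *	~GENMASK(15, 0)                        -> 0x...ffff0000
	 *	set_bit(0), set_bit(15)                -> 0x...ffff8001
	 *	set_bit(12), set_bit(13), set_bit(14)  -> 0x...fffff001
	 *
	 * leaving device numbers 1 through 11 free for allocation.
	 */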
	/*
	 * SDW is an enumerable bus, but devices can be powered off. So,
	 * they won't be able to report as present.
	 *
	 * Create Slave devices based on Slaves described in
	 * the respective firmware (ACPI/DT)
	 */
	if (IS_ENABLED(CONFIG_ACPI) && ACPI_HANDLE(bus->dev))
		ret = sdw_acpi_find_slaves(bus);
	else if (IS_ENABLED(CONFIG_OF) && bus->dev->of_node)
		ret = sdw_of_find_slaves(bus);
	else
		ret = -ENOTSUPP; /* No ACPI/DT so error out */

	if (ret < 0) {
		dev_err(bus->dev, "Finding slaves failed:%d\n", ret);
		return ret;
	}

	/*
	 * Initialize clock values based on Master properties. The max
	 * frequency is read from max_clk_freq property. Current assumption
	 * is that the bus will start at highest clock frequency when
	 * powered on.
	 *
	 * Default active bank will be 0 as out of reset the Slaves have
	 * to start with bank 0 (Table 40 of Spec)
	 */
	prop = &bus->prop;
	bus->params.max_dr_freq = prop->max_clk_freq * SDW_DOUBLE_RATE_FACTOR;
	bus->params.curr_dr_freq = bus->params.max_dr_freq;
	bus->params.curr_bank = SDW_BANK0;
	bus->params.next_bank = SDW_BANK1;

	bus->irq_chip.name = dev_name(bus->dev);
	bus->domain = irq_domain_create_linear(fwnode, SDW_MAX_DEVICES,
					       &sdw_domain_ops, bus);
	if (!bus->domain) {
		dev_err(bus->dev, "Failed to add IRQ domain\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(sdw_bus_master_add);
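/*
 * Usage sketch (illustrative only; 'my_ctrl', 'my_master_ops' and
 * 'my_compute_params' are hypothetical): a Master/controller driver embeds
 * a struct sdw_bus, fills in the mandatory fields and registers the bus
 * from its probe routine:
 *
 *	static int my_controller_probe(struct platform_device *pdev)
 *	{
 *		struct my_ctrl *ctrl;
 *
 *		ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
 *		if (!ctrl)
 *			return -ENOMEM;
 *
 *		ctrl->bus.ops = &my_master_ops;		// incl. xfer_msg()
 *		ctrl->bus.compute_params = my_compute_params;
 *		ctrl->bus.link_id = 0;
 *
 *		return sdw_bus_master_add(&ctrl->bus, &pdev->dev,
 *					  dev_fwnode(&pdev->dev));
 *	}
 *
 * The matching remove path calls sdw_bus_master_delete(&ctrl->bus).
 */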
static int sdw_delete_slave(struct device *dev, void *data)
{
	struct sdw_slave *slave = dev_to_sdw_dev(dev);
	struct sdw_bus *bus = slave->bus;

	pm_runtime_disable(dev);

	sdw_slave_debugfs_exit(slave);

	mutex_lock(&bus->bus_lock);

	if (slave->dev_num) { /* clear dev_num if assigned */
		clear_bit(slave->dev_num, bus->assigned);
		if (bus->dev_num_ida_min)
			ida_free(&sdw_peripheral_ida, slave->dev_num);
	}
	list_del_init(&slave->node);
	mutex_unlock(&bus->bus_lock);

	device_unregister(dev);
	return 0;
}

/**
 * sdw_bus_master_delete() - delete the bus master instance
 * @bus: bus to be deleted
 *
 * Remove the instance, delete the child devices.
 */
void sdw_bus_master_delete(struct sdw_bus *bus)
{
	device_for_each_child(bus->dev, NULL, sdw_delete_slave);

	irq_domain_remove(bus->domain);

	sdw_master_device_del(bus);

	sdw_bus_debugfs_exit(bus);
	lockdep_unregister_key(&bus->bus_lock_key);
	lockdep_unregister_key(&bus->msg_lock_key);
	ida_free(&sdw_bus_ida, bus->id);
}
EXPORT_SYMBOL(sdw_bus_master_delete);

/*
 * SDW IO Calls
 */

static inline int find_response_code(enum sdw_command_response resp)
{
	switch (resp) {
	case SDW_CMD_OK:
		return 0;

	case SDW_CMD_IGNORED:
		return -ENODATA;

	case SDW_CMD_TIMEOUT:
		return -ETIMEDOUT;

	default:
		return -EIO;
	}
}

static inline int do_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int retry = bus->prop.err_threshold;
	enum sdw_command_response resp;
	int ret = 0, i;

	for (i = 0; i <= retry; i++) {
		resp = bus->ops->xfer_msg(bus, msg);
		ret = find_response_code(resp);

		/* if cmd is ok or ignored return */
		if (ret == 0 || ret == -ENODATA)
			return ret;
	}

	return ret;
}

static inline int do_transfer_defer(struct sdw_bus *bus,
				    struct sdw_msg *msg)
{
	struct sdw_defer *defer = &bus->defer_msg;
	int retry = bus->prop.err_threshold;
	enum sdw_command_response resp;
	int ret = 0, i;

	defer->msg = msg;
	defer->length = msg->len;
	init_completion(&defer->complete);

	for (i = 0; i <= retry; i++) {
		resp = bus->ops->xfer_msg_defer(bus);
		ret = find_response_code(resp);
		/* if cmd is ok or ignored return */
		if (ret == 0 || ret == -ENODATA)
			return ret;
	}

	return ret;
}

static int sdw_transfer_unlocked(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int ret;

	ret = do_transfer(bus, msg);
	if (ret != 0 && ret != -ENODATA)
		dev_err(bus->dev, "trf on Slave %d failed:%d %s addr %x count %d\n",
			msg->dev_num, ret,
			(msg->flags & SDW_MSG_FLAG_WRITE) ? "write" : "read",
			msg->addr, msg->len);

	return ret;
}

/**
 * sdw_transfer() - Synchronous transfer message to a SDW Slave device
 * @bus: SDW bus
 * @msg: SDW message to be xfered
 */
int sdw_transfer(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int ret;

	mutex_lock(&bus->msg_lock);

	ret = sdw_transfer_unlocked(bus, msg);

	mutex_unlock(&bus->msg_lock);

	return ret;
}

/**
 * sdw_show_ping_status() - Direct report of PING status, to be used by Peripheral drivers
 * @bus: SDW bus
 * @sync_delay: Delay before reading status
 */
void sdw_show_ping_status(struct sdw_bus *bus, bool sync_delay)
{
	u32 status;

	if (!bus->ops->read_ping_status)
		return;

	/*
	 * wait for peripheral to sync if desired. 10-15ms should be more than
	 * enough in most cases.
	 */
	if (sync_delay)
		usleep_range(10000, 15000);

	mutex_lock(&bus->msg_lock);

	status = bus->ops->read_ping_status(bus);

	mutex_unlock(&bus->msg_lock);

	if (!status)
		dev_warn(bus->dev, "%s: no peripherals attached\n", __func__);
	else
		dev_dbg(bus->dev, "PING status: %#x\n", status);
}
EXPORT_SYMBOL(sdw_show_ping_status);
/**
 * sdw_transfer_defer() - Asynchronously transfer message to a SDW Slave device
 * @bus: SDW bus
 * @msg: SDW message to be xfered
 *
 * Caller needs to hold the msg_lock lock while calling this
 */
int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg)
{
	int ret;

	if (!bus->ops->xfer_msg_defer)
		return -ENOTSUPP;

	ret = do_transfer_defer(bus, msg);
	if (ret != 0 && ret != -ENODATA)
		dev_err(bus->dev, "Defer trf on Slave %d failed:%d\n",
			msg->dev_num, ret);

	return ret;
}

int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
		 u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf)
{
	memset(msg, 0, sizeof(*msg));
	msg->addr = addr; /* addr is 16 bit and truncated here */
	msg->len = count;
	msg->dev_num = dev_num;
	msg->flags = flags;
	msg->buf = buf;

	if (addr < SDW_REG_NO_PAGE) /* no paging area */
		return 0;

	if (addr >= SDW_REG_MAX) { /* illegal addr */
		pr_err("SDW: Invalid address %x passed\n", addr);
		return -EINVAL;
	}

	if (addr < SDW_REG_OPTIONAL_PAGE) { /* 32k but no page */
		if (slave && !slave->prop.paging_support)
			return 0;
		/* no need for else as that will fall-through to paging */
	}

	/* paging mandatory */
	if (dev_num == SDW_ENUM_DEV_NUM || dev_num == SDW_BROADCAST_DEV_NUM) {
		pr_err("SDW: Invalid device for paging :%d\n", dev_num);
		return -EINVAL;
	}

	if (!slave) {
		pr_err("SDW: No slave for paging addr\n");
		return -EINVAL;
	}

	if (!slave->prop.paging_support) {
		dev_err(&slave->dev,
			"address %x needs paging but no support\n", addr);
		return -EINVAL;
	}

	msg->addr_page1 = FIELD_GET(SDW_SCP_ADDRPAGE1_MASK, addr);
	msg->addr_page2 = FIELD_GET(SDW_SCP_ADDRPAGE2_MASK, addr);
	msg->addr |= BIT(15);
	msg->page = true;

	return 0;
}
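/*
 * Worked example (illustrative, not normative): for a register address in
 * the paged range, sdw_fill_msg() keeps the low 16 bits in the command
 * (with BIT(15) set to flag paged access) and derives the two SCP_AddrPage
 * values with FIELD_GET() from the masks in sdw_registers.h:
 *
 *	struct sdw_msg msg;
 *	u8 val;
 *	int ret;
 *
 *	// 'slave' must have slave->prop.paging_support set for this to work
 *	ret = sdw_fill_msg(&msg, slave, 0x40001000, 1, slave->dev_num,
 *			   SDW_MSG_FLAG_READ, &val);
 *	// on success:
 *	//	msg.addr       == (u16)0x40001000 | BIT(15)
 *	//	msg.addr_page1 == FIELD_GET(SDW_SCP_ADDRPAGE1_MASK, 0x40001000)
 *	//	msg.addr_page2 == FIELD_GET(SDW_SCP_ADDRPAGE2_MASK, 0x40001000)
 *	//	msg.page       == true
 */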
/*
 * Read/Write IO functions.
 */

static int sdw_ntransfer_no_pm(struct sdw_slave *slave, u32 addr, u8 flags,
			       size_t count, u8 *val)
{
	struct sdw_msg msg;
	size_t size;
	int ret;

	while (count) {
		// Only handle bytes up to next page boundary
		size = min_t(size_t, count, (SDW_REGADDR + 1) - (addr & SDW_REGADDR));

		ret = sdw_fill_msg(&msg, slave, addr, size, slave->dev_num, flags, val);
		if (ret < 0)
			return ret;

		ret = sdw_transfer(slave->bus, &msg);
		if (ret < 0 && !slave->is_mockup_device)
			return ret;

		addr += size;
		val += size;
		count -= size;
	}

	return 0;
}

/**
 * sdw_nread_no_pm() - Read "n" contiguous SDW Slave registers with no PM
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be read
 *
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
	return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_READ, count, val);
}
EXPORT_SYMBOL(sdw_nread_no_pm);

/**
 * sdw_nwrite_no_pm() - Write "n" contiguous SDW Slave registers with no PM
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be written
 *
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
	return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_WRITE, count, (u8 *)val);
}
EXPORT_SYMBOL(sdw_nwrite_no_pm);

/**
 * sdw_write_no_pm() - Write a SDW Slave register with no PM
 * @slave: SDW Slave
 * @addr: Register address
 * @value: Register value
 */
int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
{
	return sdw_nwrite_no_pm(slave, addr, 1, &value);
}
EXPORT_SYMBOL(sdw_write_no_pm);

static int
sdw_bread_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr)
{
	struct sdw_msg msg;
	u8 buf;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_READ, &buf);
	if (ret < 0)
		return ret;

	ret = sdw_transfer(bus, &msg);
	if (ret < 0)
		return ret;

	return buf;
}

static int
sdw_bwrite_no_pm(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
{
	struct sdw_msg msg;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_WRITE, &value);
	if (ret < 0)
		return ret;

	return sdw_transfer(bus, &msg);
}

int sdw_bread_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr)
{
	struct sdw_msg msg;
	u8 buf;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_READ, &buf);
	if (ret < 0)
		return ret;

	ret = sdw_transfer_unlocked(bus, &msg);
	if (ret < 0)
		return ret;

	return buf;
}
EXPORT_SYMBOL(sdw_bread_no_pm_unlocked);

int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 value)
{
	struct sdw_msg msg;
	int ret;

	ret = sdw_fill_msg(&msg, NULL, addr, 1, dev_num,
			   SDW_MSG_FLAG_WRITE, &value);
	if (ret < 0)
		return ret;

	return sdw_transfer_unlocked(bus, &msg);
}
EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked);

/**
 * sdw_read_no_pm() - Read a SDW Slave register with no PM
 * @slave: SDW Slave
 * @addr: Register address
 */
int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
{
	u8 buf;
	int ret;

	ret = sdw_nread_no_pm(slave, addr, 1, &buf);
	if (ret < 0)
		return ret;
	else
		return buf;
}
EXPORT_SYMBOL(sdw_read_no_pm);

int sdw_update_no_pm(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
{
	int tmp;

	tmp = sdw_read_no_pm(slave, addr);
	if (tmp < 0)
		return tmp;

	tmp = (tmp & ~mask) | val;
	return sdw_write_no_pm(slave, addr, tmp);
}
EXPORT_SYMBOL(sdw_update_no_pm);

/* Read-Modify-Write Slave register */
int sdw_update(struct sdw_slave *slave, u32 addr, u8 mask, u8 val)
{
	int tmp;

	tmp = sdw_read(slave, addr);
	if (tmp < 0)
		return tmp;

	tmp = (tmp & ~mask) | val;
	return sdw_write(slave, addr, tmp);
}
EXPORT_SYMBOL(sdw_update);
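/*
 * Usage sketch (illustrative; the register/mask choice is only an example):
 * the *_no_pm helpers assume the caller has already resumed the device,
 * typically from a runtime-PM or system-PM callback. sdw_update_no_pm()
 * performs the read-modify-write spelled out below:
 *
 *	ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1,
 *			       SDW_SCP_INT1_IMPL_DEF, SDW_SCP_INT1_IMPL_DEF);
 *
 *	// equivalent to:
 *	//	tmp = sdw_read_no_pm(slave, SDW_SCP_INTMASK1);
 *	//	tmp = (tmp & ~SDW_SCP_INT1_IMPL_DEF) | SDW_SCP_INT1_IMPL_DEF;
 *	//	sdw_write_no_pm(slave, SDW_SCP_INTMASK1, tmp);
 */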
/**
 * sdw_nread() - Read "n" contiguous SDW Slave registers
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be read
 *
 * This version of the function will take a PM reference to the slave
 * device.
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
	int ret;

	ret = pm_runtime_get_sync(&slave->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_noidle(&slave->dev);
		return ret;
	}

	ret = sdw_nread_no_pm(slave, addr, count, val);

	pm_runtime_mark_last_busy(&slave->dev);
	pm_runtime_put(&slave->dev);

	return ret;
}
EXPORT_SYMBOL(sdw_nread);

/**
 * sdw_nwrite() - Write "n" contiguous SDW Slave registers
 * @slave: SDW Slave
 * @addr: Register address
 * @count: length
 * @val: Buffer for values to be written
 *
 * This version of the function will take a PM reference to the slave
 * device.
 * Note that if the message crosses a page boundary each page will be
 * transferred under a separate invocation of the msg_lock.
 */
int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
	int ret;

	ret = pm_runtime_get_sync(&slave->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_noidle(&slave->dev);
		return ret;
	}

	ret = sdw_nwrite_no_pm(slave, addr, count, val);

	pm_runtime_mark_last_busy(&slave->dev);
	pm_runtime_put(&slave->dev);

	return ret;
}
EXPORT_SYMBOL(sdw_nwrite);

/**
 * sdw_read() - Read a SDW Slave register
 * @slave: SDW Slave
 * @addr: Register address
 *
 * This version of the function will take a PM reference to the slave
 * device.
 */
int sdw_read(struct sdw_slave *slave, u32 addr)
{
	u8 buf;
	int ret;

	ret = sdw_nread(slave, addr, 1, &buf);
	if (ret < 0)
		return ret;

	return buf;
}
EXPORT_SYMBOL(sdw_read);
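/*
 * Usage sketch (illustrative; the MY_CODEC_* registers are hypothetical): a
 * peripheral (codec) driver can call the PM-aware helpers from process
 * context and let the bus core take the runtime-PM reference for it:
 *
 *	u8 fw_version[4];
 *	int ret;
 *
 *	ret = sdw_nread(slave, MY_CODEC_FW_VERSION_REG, sizeof(fw_version),
 *			fw_version);
 *	if (ret < 0)
 *		return ret;
 *
 *	ret = sdw_write(slave, MY_CODEC_RESET_REG, 0x01);
 */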
/**
 * sdw_write() - Write a SDW Slave register
 * @slave: SDW Slave
 * @addr: Register address
 * @value: Register value
 *
 * This version of the function will take a PM reference to the slave
 * device.
 */
int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
{
	return sdw_nwrite(slave, addr, 1, &value);
}
EXPORT_SYMBOL(sdw_write);

/*
 * SDW alert handling
 */

/* called with bus_lock held */
static struct sdw_slave *sdw_get_slave(struct sdw_bus *bus, int i)
{
	struct sdw_slave *slave;

	list_for_each_entry(slave, &bus->slaves, node) {
		if (slave->dev_num == i)
			return slave;
	}

	return NULL;
}

int sdw_compare_devid(struct sdw_slave *slave, struct sdw_slave_id id)
{
	if (slave->id.mfg_id != id.mfg_id ||
	    slave->id.part_id != id.part_id ||
	    slave->id.class_id != id.class_id ||
	    (slave->id.unique_id != SDW_IGNORED_UNIQUE_ID &&
	     slave->id.unique_id != id.unique_id))
		return -ENODEV;

	return 0;
}
EXPORT_SYMBOL(sdw_compare_devid);

/* called with bus_lock held */
static int sdw_get_device_num(struct sdw_slave *slave)
{
	int bit;

	if (slave->bus->dev_num_ida_min) {
		bit = ida_alloc_range(&sdw_peripheral_ida,
				      slave->bus->dev_num_ida_min, SDW_MAX_DEVICES,
				      GFP_KERNEL);
		if (bit < 0)
			goto err;
	} else {
		bit = find_first_zero_bit(slave->bus->assigned, SDW_MAX_DEVICES);
		if (bit == SDW_MAX_DEVICES) {
			bit = -ENODEV;
			goto err;
		}
	}

	/*
	 * Do not update dev_num in the Slave data structure here;
	 * update it only once programming the dev_num has succeeded.
	 */
	set_bit(bit, slave->bus->assigned);

err:
	return bit;
}

static int sdw_assign_device_num(struct sdw_slave *slave)
{
	struct sdw_bus *bus = slave->bus;
	int ret, dev_num;
	bool new_device = false;

	/* check first if device number is assigned, if so reuse that */
	if (!slave->dev_num) {
		if (!slave->dev_num_sticky) {
			mutex_lock(&slave->bus->bus_lock);
			dev_num = sdw_get_device_num(slave);
			mutex_unlock(&slave->bus->bus_lock);
			if (dev_num < 0) {
				dev_err(bus->dev, "Get dev_num failed: %d\n",
					dev_num);
				return dev_num;
			}
			slave->dev_num = dev_num;
			slave->dev_num_sticky = dev_num;
			new_device = true;
		} else {
			slave->dev_num = slave->dev_num_sticky;
		}
	}

	if (!new_device)
		dev_dbg(bus->dev,
			"Slave already registered, reusing dev_num:%d\n",
			slave->dev_num);

	/* Clear the slave->dev_num to transfer message on device 0 */
	dev_num = slave->dev_num;
	slave->dev_num = 0;

	ret = sdw_write_no_pm(slave, SDW_SCP_DEVNUMBER, dev_num);
	if (ret < 0) {
		dev_err(bus->dev, "Program device_num %d failed: %d\n",
			dev_num, ret);
		return ret;
	}

	/* After xfer of msg, restore dev_num */
	slave->dev_num = slave->dev_num_sticky;

	if (bus->ops && bus->ops->new_peripheral_assigned)
		bus->ops->new_peripheral_assigned(bus, dev_num);

	return 0;
}

void sdw_extract_slave_id(struct sdw_bus *bus,
			  u64 addr, struct sdw_slave_id *id)
{
	dev_dbg(bus->dev, "SDW Slave Addr: %llx\n", addr);

	id->sdw_version = SDW_VERSION(addr);
	id->unique_id = SDW_UNIQUE_ID(addr);
	id->mfg_id = SDW_MFG_ID(addr);
	id->part_id = SDW_PART_ID(addr);
	id->class_id = SDW_CLASS_ID(addr);

	dev_dbg(bus->dev,
		"SDW Slave class_id 0x%02x, mfg_id 0x%04x, part_id 0x%04x, unique_id 0x%x, version 0x%x\n",
		id->class_id, id->mfg_id, id->part_id, id->unique_id, id->sdw_version);
}
EXPORT_SYMBOL(sdw_extract_slave_id);
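/*
 * Worked example (illustrative): the 48-bit value passed to
 * sdw_extract_slave_id() is assembled from the six SCP_DevId registers with
 * SCP_DevId_0 in the most significant byte. Per the SDW_*_MASK field
 * definitions, a raw DevId of 0x31025d071100 decodes as:
 *
 *	sdw_version = 0x3	(bits 47:44)
 *	unique_id   = 0x1	(bits 43:40)
 *	mfg_id      = 0x025d	(bits 39:24)
 *	part_id     = 0x0711	(bits 23:8)
 *	class_id    = 0x00	(bits 7:0)
 *
 * which is the same layout peripheral drivers use in their sdw_device_id
 * match tables.
 */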
static int sdw_program_device_num(struct sdw_bus *bus, bool *programmed)
{
	u8 buf[SDW_NUM_DEV_ID_REGISTERS] = {0};
	struct sdw_slave *slave, *_s;
	struct sdw_slave_id id;
	struct sdw_msg msg;
	bool found;
	int count = 0, ret;
	u64 addr;

	*programmed = false;

	/* No Slave, so use raw xfer api */
	ret = sdw_fill_msg(&msg, NULL, SDW_SCP_DEVID_0,
			   SDW_NUM_DEV_ID_REGISTERS, 0, SDW_MSG_FLAG_READ, buf);
	if (ret < 0)
		return ret;

	do {
		ret = sdw_transfer(bus, &msg);
		if (ret == -ENODATA) { /* end of device id reads */
			dev_dbg(bus->dev, "No more devices to enumerate\n");
			ret = 0;
			break;
		}
		if (ret < 0) {
			dev_err(bus->dev, "DEVID read fail:%d\n", ret);
			break;
		}

		/*
		 * Construct the addr and extract. Cast the higher shift
		 * bits to avoid truncation due to size limit.
		 */
		addr = buf[5] | (buf[4] << 8) | (buf[3] << 16) |
			((u64)buf[2] << 24) | ((u64)buf[1] << 32) |
			((u64)buf[0] << 40);

		sdw_extract_slave_id(bus, addr, &id);

		found = false;
		/* Now compare with entries */
		list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
			if (sdw_compare_devid(slave, id) == 0) {
				found = true;

				/*
				 * To prevent skipping state-machine stages don't
				 * program a device until we've seen it UNATTACH.
				 * Must return here because no other device on #0
				 * can be detected until this one has been
				 * assigned a device ID.
				 */
				if (slave->status != SDW_SLAVE_UNATTACHED)
					return 0;

				/*
				 * Assign a new dev_num to this Slave but do
				 * not mark it present. It will be marked
				 * present after it reports ATTACHED on the
				 * new dev_num.
				 */
				ret = sdw_assign_device_num(slave);
				if (ret < 0) {
					dev_err(bus->dev,
						"Assign dev_num failed:%d\n",
						ret);
					return ret;
				}

				*programmed = true;

				break;
			}
		}

		if (!found) {
			/* TODO: Park this device in Group 13 */

			/*
			 * add Slave device even if there is no platform
			 * firmware description. There will be no driver probe
			 * but the user/integration will be able to see the
			 * device, enumeration status and device number in sysfs
			 */
			sdw_slave_add(bus, &id, NULL);

			dev_err(bus->dev, "Slave Entry not found\n");
		}

		count++;

		/*
		 * Loop until an error occurs or the retry count is
		 * exhausted. A device can drop off and rejoin during
		 * enumeration, so allow up to twice the device bound.
		 */

	} while (ret == 0 && count < (SDW_MAX_DEVICES * 2));

	return ret;
}
static void sdw_modify_slave_status(struct sdw_slave *slave,
				    enum sdw_slave_status status)
{
	struct sdw_bus *bus = slave->bus;

	mutex_lock(&bus->bus_lock);

	dev_vdbg(bus->dev,
		 "changing status slave %d status %d new status %d\n",
		 slave->dev_num, slave->status, status);

	if (status == SDW_SLAVE_UNATTACHED) {
		dev_dbg(&slave->dev,
			"initializing enumeration and init completion for Slave %d\n",
			slave->dev_num);

		reinit_completion(&slave->enumeration_complete);
		reinit_completion(&slave->initialization_complete);

	} else if ((status == SDW_SLAVE_ATTACHED) &&
		   (slave->status == SDW_SLAVE_UNATTACHED)) {
		dev_dbg(&slave->dev,
			"signaling enumeration completion for Slave %d\n",
			slave->dev_num);

		complete_all(&slave->enumeration_complete);
	}
	slave->status = status;
	mutex_unlock(&bus->bus_lock);
}

static int sdw_slave_clk_stop_callback(struct sdw_slave *slave,
				       enum sdw_clk_stop_mode mode,
				       enum sdw_clk_stop_type type)
{
	int ret = 0;

	mutex_lock(&slave->sdw_dev_lock);

	if (slave->probed) {
		struct device *dev = &slave->dev;
		struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);

		if (drv->ops && drv->ops->clk_stop)
			ret = drv->ops->clk_stop(slave, mode, type);
	}

	mutex_unlock(&slave->sdw_dev_lock);

	return ret;
}

static int sdw_slave_clk_stop_prepare(struct sdw_slave *slave,
				      enum sdw_clk_stop_mode mode,
				      bool prepare)
{
	bool wake_en;
	u32 val = 0;
	int ret;

	wake_en = slave->prop.wake_capable;

	if (prepare) {
		val = SDW_SCP_SYSTEMCTRL_CLK_STP_PREP;

		if (mode == SDW_CLK_STOP_MODE1)
			val |= SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1;

		if (wake_en)
			val |= SDW_SCP_SYSTEMCTRL_WAKE_UP_EN;
	} else {
		ret = sdw_read_no_pm(slave, SDW_SCP_SYSTEMCTRL);
		if (ret < 0) {
			if (ret != -ENODATA)
				dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL read failed:%d\n", ret);
			return ret;
		}
		val = ret;
		val &= ~(SDW_SCP_SYSTEMCTRL_CLK_STP_PREP);
	}

	ret = sdw_write_no_pm(slave, SDW_SCP_SYSTEMCTRL, val);

	if (ret < 0 && ret != -ENODATA)
		dev_err(&slave->dev, "SDW_SCP_SYSTEMCTRL write failed:%d\n", ret);

	return ret;
}

static int sdw_bus_wait_for_clk_prep_deprep(struct sdw_bus *bus, u16 dev_num)
{
	int retry = bus->clk_stop_timeout;
	int val;

	do {
		val = sdw_bread_no_pm(bus, dev_num, SDW_SCP_STAT);
		if (val < 0) {
			if (val != -ENODATA)
				dev_err(bus->dev, "SDW_SCP_STAT bread failed:%d\n", val);
			return val;
		}
		val &= SDW_SCP_STAT_CLK_STP_NF;
		if (!val) {
			dev_dbg(bus->dev, "clock stop prep/de-prep done slave:%d\n",
				dev_num);
			return 0;
		}

		usleep_range(1000, 1500);
		retry--;
	} while (retry);

	dev_err(bus->dev, "clock stop prep/de-prep failed slave:%d\n",
		dev_num);

	return -ETIMEDOUT;
}
/**
 * sdw_bus_prep_clk_stop: prepare Slave(s) for clock stop
 *
 * @bus: SDW bus instance
 *
 * Query Slave for clock stop mode and prepare for that mode.
 */
int sdw_bus_prep_clk_stop(struct sdw_bus *bus)
{
	bool simple_clk_stop = true;
	struct sdw_slave *slave;
	bool is_slave = false;
	int ret = 0;

	/*
	 * In order to save on transition time, prepare
	 * each Slave and then wait for all Slave(s) to be
	 * prepared for clock stop.
	 * If one of the Slave devices has lost sync and
	 * replies with Command Ignored/-ENODATA, we continue
	 * the loop
	 */
	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		/* Identify if Slave(s) are available on Bus */
		is_slave = true;

		ret = sdw_slave_clk_stop_callback(slave,
						  SDW_CLK_STOP_MODE0,
						  SDW_CLK_PRE_PREPARE);
		if (ret < 0 && ret != -ENODATA) {
			dev_err(&slave->dev, "clock stop pre-prepare cb failed:%d\n", ret);
			return ret;
		}

		/* Only prepare a Slave device if needed */
		if (!slave->prop.simple_clk_stop_capable) {
			simple_clk_stop = false;

			ret = sdw_slave_clk_stop_prepare(slave,
							 SDW_CLK_STOP_MODE0,
							 true);
			if (ret < 0 && ret != -ENODATA) {
				dev_err(&slave->dev, "clock stop prepare failed:%d\n", ret);
				return ret;
			}
		}
	}

	/* Skip remaining clock stop preparation if no Slave is attached */
	if (!is_slave)
		return 0;

	/*
	 * Don't wait for all Slaves to be ready if they follow the simple
	 * state machine
	 */
	if (!simple_clk_stop) {
		ret = sdw_bus_wait_for_clk_prep_deprep(bus,
						       SDW_BROADCAST_DEV_NUM);
		/*
		 * if there are no Slave devices present and the reply is
		 * Command_Ignored/-ENODATA, we don't need to continue with
		 * the flow and can just return here. The error code is not
		 * modified and its handling is left to the caller.
		 */
		if (ret < 0)
			return ret;
	}

	/* Inform slaves that prep is done */
	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		ret = sdw_slave_clk_stop_callback(slave,
						  SDW_CLK_STOP_MODE0,
						  SDW_CLK_POST_PREPARE);

		if (ret < 0 && ret != -ENODATA) {
			dev_err(&slave->dev, "clock stop post-prepare cb failed:%d\n", ret);
			return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(sdw_bus_prep_clk_stop);

/**
 * sdw_bus_clk_stop: stop bus clock
 *
 * @bus: SDW bus instance
 *
 * After preparing the Slaves for clock stop, stop the clock by broadcasting
 * write to SCP_CTRL register.
 */
int sdw_bus_clk_stop(struct sdw_bus *bus)
{
	int ret;

	/*
	 * broadcast clock stop now, attached Slaves will ACK this,
	 * unattached will ignore
	 */
	ret = sdw_bwrite_no_pm(bus, SDW_BROADCAST_DEV_NUM,
			       SDW_SCP_CTRL, SDW_SCP_CTRL_CLK_STP_NOW);
	if (ret < 0) {
		if (ret != -ENODATA)
			dev_err(bus->dev, "ClockStopNow Broadcast msg failed %d\n", ret);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(sdw_bus_clk_stop);
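/*
 * Usage sketch (illustrative, assuming a hypothetical controller suspend
 * path): a Master driver entering clock stop typically chains the helpers
 * above and below in this order, and reverses it on resume:
 *
 *	ret = sdw_bus_prep_clk_stop(bus);	// prepare attached peripherals
 *	if (ret < 0 && ret != -ENODATA)
 *		return ret;
 *
 *	ret = sdw_bus_clk_stop(bus);		// broadcast ClockStopNow
 *	...
 *	// on resume, once the clock is running again:
 *	ret = sdw_bus_exit_clk_stop(bus);	// de-prepare peripherals
 */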
/**
 * sdw_bus_exit_clk_stop: Exit clock stop mode
 *
 * @bus: SDW bus instance
 *
 * This De-prepares the Slaves by exiting Clock Stop Mode 0. For the Slaves
 * exiting Clock Stop Mode 1, they will be de-prepared after they enumerate
 * back.
 */
int sdw_bus_exit_clk_stop(struct sdw_bus *bus)
{
	bool simple_clk_stop = true;
	struct sdw_slave *slave;
	bool is_slave = false;
	int ret;

	/*
	 * In order to save on transition time, de-prepare
	 * each Slave and then wait for all Slave(s) to be
	 * de-prepared after clock resume.
	 */
	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		/* Identify if Slave(s) are available on Bus */
		is_slave = true;

		ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
						  SDW_CLK_PRE_DEPREPARE);
		if (ret < 0)
			dev_warn(&slave->dev, "clock stop pre-deprepare cb failed:%d\n", ret);

		/* Only de-prepare a Slave device if needed */
		if (!slave->prop.simple_clk_stop_capable) {
			simple_clk_stop = false;

			ret = sdw_slave_clk_stop_prepare(slave, SDW_CLK_STOP_MODE0,
							 false);

			if (ret < 0)
				dev_warn(&slave->dev, "clock stop deprepare failed:%d\n", ret);
		}
	}

	/* Skip remaining clock stop de-preparation if no Slave is attached */
	if (!is_slave)
		return 0;

	/*
	 * Don't wait for all Slaves to be ready if they follow the simple
	 * state machine
	 */
	if (!simple_clk_stop) {
		ret = sdw_bus_wait_for_clk_prep_deprep(bus, SDW_BROADCAST_DEV_NUM);
		if (ret < 0)
			dev_warn(bus->dev, "clock stop deprepare wait failed:%d\n", ret);
	}

	list_for_each_entry(slave, &bus->slaves, node) {
		if (!slave->dev_num)
			continue;

		if (slave->status != SDW_SLAVE_ATTACHED &&
		    slave->status != SDW_SLAVE_ALERT)
			continue;

		ret = sdw_slave_clk_stop_callback(slave, SDW_CLK_STOP_MODE0,
						  SDW_CLK_POST_DEPREPARE);
		if (ret < 0)
			dev_warn(&slave->dev, "clock stop post-deprepare cb failed:%d\n", ret);
	}

	return 0;
}
EXPORT_SYMBOL(sdw_bus_exit_clk_stop);

int sdw_configure_dpn_intr(struct sdw_slave *slave,
			   int port, bool enable, int mask)
{
	u32 addr;
	int ret;
	u8 val = 0;

	if (slave->bus->params.s_data_mode != SDW_PORT_DATA_MODE_NORMAL) {
		dev_dbg(&slave->dev, "TEST FAIL interrupt %s\n",
			enable ? "on" : "off");
		mask |= SDW_DPN_INT_TEST_FAIL;
	}

	addr = SDW_DPN_INTMASK(port);

	/* Set/Clear port ready interrupt mask */
	if (enable) {
		val |= mask;
		val |= SDW_DPN_INT_PORT_READY;
	} else {
		val &= ~(mask);
		val &= ~SDW_DPN_INT_PORT_READY;
	}

	ret = sdw_update_no_pm(slave, addr, (mask | SDW_DPN_INT_PORT_READY), val);
	if (ret < 0)
		dev_err(&slave->dev,
			"SDW_DPN_INTMASK write failed:%d\n", ret);

	return ret;
}
static int sdw_slave_set_frequency(struct sdw_slave *slave)
{
	u32 mclk_freq = slave->bus->prop.mclk_freq;
	u32 curr_freq = slave->bus->params.curr_dr_freq >> 1;
	unsigned int scale;
	u8 scale_index;
	u8 base;
	int ret;

	/*
	 * frequency base and scale registers are required for SDCA
	 * devices. They may also be used for 1.2+/non-SDCA devices.
	 * Driver can set the property, we will need a DisCo property
	 * to discover this case from platform firmware.
	 */
	if (!slave->id.class_id && !slave->prop.clock_reg_supported)
		return 0;

	if (!mclk_freq) {
		dev_err(&slave->dev,
			"no bus MCLK, cannot set SDW_SCP_BUS_CLOCK_BASE\n");
		return -EINVAL;
	}

	/*
	 * map base frequency using Table 89 of SoundWire 1.2 spec.
	 * The order of the tests just follows the specification, this
	 * is not a selection between possible values or a search for
	 * the best value but just a mapping. Only one case per platform
	 * is relevant.
	 * Some BIOSes report inconsistent values for mclk_freq but a
	 * correct clock root, so force mclk_freq to the canonical value
	 * to avoid variations.
	 */
	if (!(19200000 % mclk_freq)) {
		mclk_freq = 19200000;
		base = SDW_SCP_BASE_CLOCK_19200000_HZ;
	} else if (!(24000000 % mclk_freq)) {
		mclk_freq = 24000000;
		base = SDW_SCP_BASE_CLOCK_24000000_HZ;
	} else if (!(24576000 % mclk_freq)) {
		mclk_freq = 24576000;
		base = SDW_SCP_BASE_CLOCK_24576000_HZ;
	} else if (!(22579200 % mclk_freq)) {
		mclk_freq = 22579200;
		base = SDW_SCP_BASE_CLOCK_22579200_HZ;
	} else if (!(32000000 % mclk_freq)) {
		mclk_freq = 32000000;
		base = SDW_SCP_BASE_CLOCK_32000000_HZ;
	} else {
		dev_err(&slave->dev,
			"Unsupported clock base, mclk %d\n",
			mclk_freq);
		return -EINVAL;
	}

	if (mclk_freq % curr_freq) {
		dev_err(&slave->dev,
			"mclk %d is not multiple of bus curr_freq %d\n",
			mclk_freq, curr_freq);
		return -EINVAL;
	}

	scale = mclk_freq / curr_freq;

	/*
	 * map scale to Table 90 of SoundWire 1.2 spec - and check
	 * that the scale is a power of two and maximum 64
	 */
	scale_index = ilog2(scale);

	if (BIT(scale_index) != scale || scale_index > 6) {
		dev_err(&slave->dev,
			"No match found for scale %d, bus mclk %d curr_freq %d\n",
			scale, mclk_freq, curr_freq);
		return -EINVAL;
	}
	scale_index++;

	ret = sdw_write_no_pm(slave, SDW_SCP_BUS_CLOCK_BASE, base);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_BUS_CLOCK_BASE write failed:%d\n", ret);
		return ret;
	}

	/* initialize scale for both banks */
	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B0, scale_index);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_BUSCLOCK_SCALE_B0 write failed:%d\n", ret);
		return ret;
	}
	ret = sdw_write_no_pm(slave, SDW_SCP_BUSCLOCK_SCALE_B1, scale_index);
	if (ret < 0)
		dev_err(&slave->dev,
			"SDW_SCP_BUSCLOCK_SCALE_B1 write failed:%d\n", ret);

	dev_dbg(&slave->dev,
		"Configured bus base %d, scale %d, mclk %d, curr_freq %d\n",
		base, scale_index, mclk_freq, curr_freq);

	return ret;
}
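/*
 * Worked example (illustrative): with mclk_freq = 24576000 and the bus
 * running at curr_dr_freq = 12288000 (double rate), curr_freq is 6144000,
 * so sdw_slave_set_frequency() computes:
 *
 *	base        = SDW_SCP_BASE_CLOCK_24576000_HZ	(24576000 % mclk == 0)
 *	scale       = 24576000 / 6144000 = 4
 *	scale_index = ilog2(4) + 1 = 3
 *
 * i.e. the value written to the scale registers encodes the divider as a
 * power of two offset by one: 24576000 / (1 << (3 - 1)) = 6144000.
 */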
static int sdw_initialize_slave(struct sdw_slave *slave)
{
	struct sdw_slave_prop *prop = &slave->prop;
	int status;
	int ret;
	u8 val;

	ret = sdw_slave_set_frequency(slave);
	if (ret < 0)
		return ret;

	if (slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_CLASH) {
		/* Clear bus clash interrupt before enabling interrupt mask */
		status = sdw_read_no_pm(slave, SDW_SCP_INT1);
		if (status < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 (BUS_CLASH) read failed:%d\n", status);
			return status;
		}
		if (status & SDW_SCP_INT1_BUS_CLASH) {
			dev_warn(&slave->dev, "Bus clash detected before INT mask is enabled\n");
			ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_BUS_CLASH);
			if (ret < 0) {
				dev_err(&slave->dev,
					"SDW_SCP_INT1 (BUS_CLASH) write failed:%d\n", ret);
				return ret;
			}
		}
	}
	if ((slave->bus->prop.quirks & SDW_MASTER_QUIRKS_CLEAR_INITIAL_PARITY) &&
	    !(slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY)) {
		/* Clear parity interrupt before enabling interrupt mask */
		status = sdw_read_no_pm(slave, SDW_SCP_INT1);
		if (status < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 (PARITY) read failed:%d\n", status);
			return status;
		}
		if (status & SDW_SCP_INT1_PARITY) {
			dev_warn(&slave->dev, "PARITY error detected before INT mask is enabled\n");
			ret = sdw_write_no_pm(slave, SDW_SCP_INT1, SDW_SCP_INT1_PARITY);
			if (ret < 0) {
				dev_err(&slave->dev,
					"SDW_SCP_INT1 (PARITY) write failed:%d\n", ret);
				return ret;
			}
		}
	}

	/*
	 * Set SCP_INT1_MASK register, typically bus clash and
	 * implementation-defined interrupt mask. The Parity detection
	 * may not always be correct on startup so its use is
	 * device-dependent, it might e.g. only be enabled in
	 * steady-state after a couple of frames.
	 */
	val = slave->prop.scp_int1_mask;

	/* Enable SCP interrupts */
	ret = sdw_update_no_pm(slave, SDW_SCP_INTMASK1, val, val);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_INTMASK1 write failed:%d\n", ret);
		return ret;
	}

	/* No need to continue if DP0 is not present */
	if (!slave->prop.dp0_prop)
		return 0;

	/* Enable DP0 interrupts */
	val = prop->dp0_prop->imp_def_interrupts;
	val |= SDW_DP0_INT_PORT_READY | SDW_DP0_INT_BRA_FAILURE;

	ret = sdw_update_no_pm(slave, SDW_DP0_INTMASK, val, val);
	if (ret < 0)
		dev_err(&slave->dev,
			"SDW_DP0_INTMASK write failed:%d\n", ret);
	return ret;
}
static int sdw_handle_dp0_interrupt(struct sdw_slave *slave, u8 *slave_status)
{
	u8 clear, impl_int_mask;
	int status, status2, ret, count = 0;

	status = sdw_read_no_pm(slave, SDW_DP0_INT);
	if (status < 0) {
		dev_err(&slave->dev,
			"SDW_DP0_INT read failed:%d\n", status);
		return status;
	}

	do {
		clear = status & ~SDW_DP0_INTERRUPTS;

		if (status & SDW_DP0_INT_TEST_FAIL) {
			dev_err(&slave->dev, "Test fail for port 0\n");
			clear |= SDW_DP0_INT_TEST_FAIL;
		}

		/*
		 * Assumption: PORT_READY interrupt will be received only for
		 * ports implementing Channel Prepare state machine (CP_SM)
		 */

		if (status & SDW_DP0_INT_PORT_READY) {
			complete(&slave->port_ready[0]);
			clear |= SDW_DP0_INT_PORT_READY;
		}

		if (status & SDW_DP0_INT_BRA_FAILURE) {
			dev_err(&slave->dev, "BRA failed\n");
			clear |= SDW_DP0_INT_BRA_FAILURE;
		}

		impl_int_mask = SDW_DP0_INT_IMPDEF1 |
			SDW_DP0_INT_IMPDEF2 | SDW_DP0_INT_IMPDEF3;

		if (status & impl_int_mask) {
			clear |= impl_int_mask;
			*slave_status = clear;
		}

		/* clear the interrupts but don't touch reserved and SDCA_CASCADE fields */
		ret = sdw_write_no_pm(slave, SDW_DP0_INT, clear);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_DP0_INT write failed:%d\n", ret);
			return ret;
		}

		/* Read DP0 interrupt again */
		status2 = sdw_read_no_pm(slave, SDW_DP0_INT);
		if (status2 < 0) {
			dev_err(&slave->dev,
				"SDW_DP0_INT read failed:%d\n", status2);
			return status2;
		}
		/* filter to limit loop to interrupts identified in the first status read */
		status &= status2;

		count++;

		/* we can get alerts while processing so keep retrying */
	} while ((status & SDW_DP0_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));

	if (count == SDW_READ_INTR_CLEAR_RETRY)
		dev_warn(&slave->dev, "Reached MAX_RETRY on DP0 read\n");

	return ret;
}
static int sdw_handle_port_interrupt(struct sdw_slave *slave,
				     int port, u8 *slave_status)
{
	u8 clear, impl_int_mask;
	int status, status2, ret, count = 0;
	u32 addr;

	if (port == 0)
		return sdw_handle_dp0_interrupt(slave, slave_status);

	addr = SDW_DPN_INT(port);
	status = sdw_read_no_pm(slave, addr);
	if (status < 0) {
		dev_err(&slave->dev,
			"SDW_DPN_INT read failed:%d\n", status);

		return status;
	}

	do {
		clear = status & ~SDW_DPN_INTERRUPTS;

		if (status & SDW_DPN_INT_TEST_FAIL) {
			dev_err(&slave->dev, "Test fail for port:%d\n", port);
			clear |= SDW_DPN_INT_TEST_FAIL;
		}

		/*
		 * Assumption: PORT_READY interrupt will be received only
		 * for ports implementing CP_SM.
		 */
		if (status & SDW_DPN_INT_PORT_READY) {
			complete(&slave->port_ready[port]);
			clear |= SDW_DPN_INT_PORT_READY;
		}

		impl_int_mask = SDW_DPN_INT_IMPDEF1 |
			SDW_DPN_INT_IMPDEF2 | SDW_DPN_INT_IMPDEF3;

		if (status & impl_int_mask) {
			clear |= impl_int_mask;
			*slave_status = clear;
		}

		/* clear the interrupt but don't touch reserved fields */
		ret = sdw_write_no_pm(slave, addr, clear);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_DPN_INT write failed:%d\n", ret);
			return ret;
		}

		/* Read DPN interrupt again */
		status2 = sdw_read_no_pm(slave, addr);
		if (status2 < 0) {
			dev_err(&slave->dev,
				"SDW_DPN_INT read failed:%d\n", status2);
			return status2;
		}
		/* filter to limit loop to interrupts identified in the first status read */
		status &= status2;

		count++;

		/* we can get alerts while processing so keep retrying */
	} while ((status & SDW_DPN_INTERRUPTS) && (count < SDW_READ_INTR_CLEAR_RETRY));

	if (count == SDW_READ_INTR_CLEAR_RETRY)
		dev_warn(&slave->dev, "Reached MAX_RETRY on port read");

	return ret;
}
static int sdw_handle_slave_alerts(struct sdw_slave *slave)
{
	struct sdw_slave_intr_status slave_intr;
	u8 clear = 0, bit, port_status[15] = {0};
	int port_num, stat, ret, count = 0;
	unsigned long port;
	bool slave_notify;
	u8 sdca_cascade = 0;
	u8 buf, buf2[2];
	bool parity_check;
	bool parity_quirk;

	sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);

	ret = pm_runtime_get_sync(&slave->dev);
	if (ret < 0 && ret != -EACCES) {
		dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
		pm_runtime_put_noidle(&slave->dev);
		return ret;
	}

	/* Read Intstat 1, Intstat 2 and Intstat 3 registers */
	ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_INT1 read failed:%d\n", ret);
		goto io_err;
	}
	buf = ret;

	ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
	if (ret < 0) {
		dev_err(&slave->dev,
			"SDW_SCP_INT2/3 read failed:%d\n", ret);
		goto io_err;
	}

	if (slave->id.class_id) {
		ret = sdw_read_no_pm(slave, SDW_DP0_INT);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_DP0_INT read failed:%d\n", ret);
			goto io_err;
		}
		sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
	}

	do {
		slave_notify = false;

		/*
		 * Check parity, bus clash and Slave (impl defined)
		 * interrupt
		 */
		if (buf & SDW_SCP_INT1_PARITY) {
			parity_check = slave->prop.scp_int1_mask & SDW_SCP_INT1_PARITY;
			parity_quirk = !slave->first_interrupt_done &&
				(slave->prop.quirks & SDW_SLAVE_QUIRKS_INVALID_INITIAL_PARITY);

			if (parity_check && !parity_quirk)
				dev_err(&slave->dev, "Parity error detected\n");
			clear |= SDW_SCP_INT1_PARITY;
		}

		if (buf & SDW_SCP_INT1_BUS_CLASH) {
			if (slave->prop.scp_int1_mask & SDW_SCP_INT1_BUS_CLASH)
				dev_err(&slave->dev, "Bus clash detected\n");
			clear |= SDW_SCP_INT1_BUS_CLASH;
		}

		/*
		 * When bus clash or parity errors are detected, such errors
		 * are unlikely to be recoverable errors.
		 * TODO: In such scenario, reset bus. Make this configurable
		 * via sysfs property with bus reset being the default.
		 */

		if (buf & SDW_SCP_INT1_IMPL_DEF) {
			if (slave->prop.scp_int1_mask & SDW_SCP_INT1_IMPL_DEF) {
				dev_dbg(&slave->dev, "Slave impl defined interrupt\n");
				slave_notify = true;
			}
			clear |= SDW_SCP_INT1_IMPL_DEF;
		}

		/* the SDCA interrupts are cleared in the codec driver .interrupt_callback() */
		if (sdca_cascade)
			slave_notify = true;

		/* Check port 0 - 3 interrupts */
		port = buf & SDW_SCP_INT1_PORT0_3;

		/* To get port number corresponding to bits, shift it */
		port = FIELD_GET(SDW_SCP_INT1_PORT0_3, port);
		for_each_set_bit(bit, &port, 8) {
			sdw_handle_port_interrupt(slave, bit,
						  &port_status[bit]);
		}

		/* Check if cascade 2 interrupt is present */
		if (buf & SDW_SCP_INT1_SCP2_CASCADE) {
			port = buf2[0] & SDW_SCP_INTSTAT2_PORT4_10;
			for_each_set_bit(bit, &port, 8) {
				/* scp2 ports start from 4 */
				port_num = bit + 4;
				sdw_handle_port_interrupt(slave,
							  port_num,
							  &port_status[port_num]);
			}
		}

		/* now check last cascade */
		if (buf2[0] & SDW_SCP_INTSTAT2_SCP3_CASCADE) {
			port = buf2[1] & SDW_SCP_INTSTAT3_PORT11_14;
			for_each_set_bit(bit, &port, 8) {
				/* scp3 ports start from 11 */
				port_num = bit + 11;
				sdw_handle_port_interrupt(slave,
							  port_num,
							  &port_status[port_num]);
			}
		}

		/* Update the Slave driver */
		if (slave_notify) {
			mutex_lock(&slave->sdw_dev_lock);

			if (slave->probed) {
				struct device *dev = &slave->dev;
				struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);

				if (slave->prop.use_domain_irq && slave->irq)
					handle_nested_irq(slave->irq);

				if (drv->ops && drv->ops->interrupt_callback) {
					slave_intr.sdca_cascade = sdca_cascade;
					slave_intr.control_port = clear;
					memcpy(slave_intr.port, &port_status,
					       sizeof(slave_intr.port));

					drv->ops->interrupt_callback(slave, &slave_intr);
				}
			}

			mutex_unlock(&slave->sdw_dev_lock);
		}
		/* Ack interrupt */
		ret = sdw_write_no_pm(slave, SDW_SCP_INT1, clear);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 write failed:%d\n", ret);
			goto io_err;
		}

		/* at this point all initial interrupt sources were handled */
		slave->first_interrupt_done = true;

		/*
		 * Read status again to ensure no new interrupts arrived
		 * while servicing interrupts.
		 */
		ret = sdw_read_no_pm(slave, SDW_SCP_INT1);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT1 recheck read failed:%d\n", ret);
			goto io_err;
		}
		buf = ret;

		ret = sdw_nread_no_pm(slave, SDW_SCP_INTSTAT2, 2, buf2);
		if (ret < 0) {
			dev_err(&slave->dev,
				"SDW_SCP_INT2/3 recheck read failed:%d\n", ret);
			goto io_err;
		}

		if (slave->id.class_id) {
			ret = sdw_read_no_pm(slave, SDW_DP0_INT);
			if (ret < 0) {
				dev_err(&slave->dev,
					"SDW_DP0_INT recheck read failed:%d\n", ret);
				goto io_err;
			}
			sdca_cascade = ret & SDW_DP0_SDCA_CASCADE;
		}

		/*
		 * Make sure no interrupts are pending
		 */
		stat = buf || buf2[0] || buf2[1] || sdca_cascade;

		/*
		 * Exit loop if Slave is continuously in ALERT state even
		 * after servicing the interrupt multiple times.
		 */
		count++;

		/* we can get alerts while processing so keep retrying */
	} while (stat != 0 && count < SDW_READ_INTR_CLEAR_RETRY);

	if (count == SDW_READ_INTR_CLEAR_RETRY)
		dev_warn(&slave->dev, "Reached MAX_RETRY on alert read\n");

io_err:
	pm_runtime_mark_last_busy(&slave->dev);
	pm_runtime_put_autosuspend(&slave->dev);

	return ret;
}

static int sdw_update_slave_status(struct sdw_slave *slave,
				   enum sdw_slave_status status)
{
	int ret = 0;

	mutex_lock(&slave->sdw_dev_lock);

	if (slave->probed) {
		struct device *dev = &slave->dev;
		struct sdw_driver *drv = drv_to_sdw_driver(dev->driver);

		if (drv->ops && drv->ops->update_status)
			ret = drv->ops->update_status(slave, status);
	}

	mutex_unlock(&slave->sdw_dev_lock);

	return ret;
}
/**
 * sdw_handle_slave_status() - Handle Slave status
 * @bus: SDW bus instance
 * @status: Status for all Slave(s)
 */
int sdw_handle_slave_status(struct sdw_bus *bus,
			    enum sdw_slave_status status[])
{
	enum sdw_slave_status prev_status;
	struct sdw_slave *slave;
	bool attached_initializing, id_programmed;
	int i, ret = 0;

	/* first check if any Slaves fell off the bus */
	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
		mutex_lock(&bus->bus_lock);
		if (test_bit(i, bus->assigned) == false) {
			mutex_unlock(&bus->bus_lock);
			continue;
		}
		mutex_unlock(&bus->bus_lock);

		slave = sdw_get_slave(bus, i);
		if (!slave)
			continue;

		if (status[i] == SDW_SLAVE_UNATTACHED &&
		    slave->status != SDW_SLAVE_UNATTACHED) {
			dev_warn(&slave->dev, "Slave %d state check1: UNATTACHED, status was %d\n",
				 i, slave->status);
			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);

			/* Ensure driver knows that peripheral unattached */
			ret = sdw_update_slave_status(slave, status[i]);
			if (ret < 0)
				dev_warn(&slave->dev, "Update Slave status failed:%d\n", ret);
		}
	}

	if (status[0] == SDW_SLAVE_ATTACHED) {
		dev_dbg(bus->dev, "Slave attached, programming device number\n");

		/*
		 * Programming a device number will have side effects,
		 * so we deal with other devices at a later time.
		 * This relies on those devices reporting ATTACHED, which will
		 * trigger another call to this function. This will only
		 * happen if at least one device ID was programmed.
		 * Error returns from sdw_program_device_num() are currently
		 * ignored because there's no useful recovery that can be done.
		 * Returning the error here could result in the current status
		 * of other devices not being handled, because if no device IDs
		 * were programmed there's nothing to guarantee a status change
		 * to trigger another call to this function.
		 */
		sdw_program_device_num(bus, &id_programmed);
		if (id_programmed)
			return 0;
	}

	/* Continue to check other slave statuses */
	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
		mutex_lock(&bus->bus_lock);
		if (test_bit(i, bus->assigned) == false) {
			mutex_unlock(&bus->bus_lock);
			continue;
		}
		mutex_unlock(&bus->bus_lock);

		slave = sdw_get_slave(bus, i);
		if (!slave)
			continue;

		attached_initializing = false;

		switch (status[i]) {
		case SDW_SLAVE_UNATTACHED:
			if (slave->status == SDW_SLAVE_UNATTACHED)
				break;

			dev_warn(&slave->dev, "Slave %d state check2: UNATTACHED, status was %d\n",
				 i, slave->status);

			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
			break;

		case SDW_SLAVE_ALERT:
			ret = sdw_handle_slave_alerts(slave);
			if (ret < 0)
				dev_err(&slave->dev,
					"Slave %d alert handling failed: %d\n",
					i, ret);
			break;

		case SDW_SLAVE_ATTACHED:
			if (slave->status == SDW_SLAVE_ATTACHED)
				break;

			prev_status = slave->status;
			sdw_modify_slave_status(slave, SDW_SLAVE_ATTACHED);

			if (prev_status == SDW_SLAVE_ALERT)
				break;

			attached_initializing = true;

			ret = sdw_initialize_slave(slave);
			if (ret < 0)
				dev_err(&slave->dev,
					"Slave %d initialization failed: %d\n",
					i, ret);

			break;

		default:
			dev_err(&slave->dev, "Invalid slave %d status:%d\n",
				i, status[i]);
			break;
		}

		ret = sdw_update_slave_status(slave, status[i]);
		if (ret < 0)
			dev_err(&slave->dev,
				"Update Slave status failed:%d\n", ret);
		if (attached_initializing) {
			dev_dbg(&slave->dev,
				"signaling initialization completion for Slave %d\n",
				slave->dev_num);

			complete_all(&slave->initialization_complete);

			/*
			 * If the manager became pm_runtime active, the peripherals will be
			 * restarted and attach, but their pm_runtime status may remain
			 * suspended. If the 'update_slave_status' callback initiates
			 * any sort of deferred processing, this processing would not be
			 * cancelled on pm_runtime suspend.
			 * To avoid such zombie states, we queue a request to resume.
			 * This would be a no-op in case the peripheral was being resumed
			 * by e.g. the ALSA/ASoC framework.
			 */
			pm_request_resume(&slave->dev);
		}
	}

	return ret;
}
EXPORT_SYMBOL(sdw_handle_slave_status);

void sdw_clear_slave_status(struct sdw_bus *bus, u32 request)
{
	struct sdw_slave *slave;
	int i;

	/* Check all non-zero devices */
	for (i = 1; i <= SDW_MAX_DEVICES; i++) {
		mutex_lock(&bus->bus_lock);
		if (test_bit(i, bus->assigned) == false) {
			mutex_unlock(&bus->bus_lock);
			continue;
		}
		mutex_unlock(&bus->bus_lock);

		slave = sdw_get_slave(bus, i);
		if (!slave)
			continue;

		if (slave->status != SDW_SLAVE_UNATTACHED) {
			sdw_modify_slave_status(slave, SDW_SLAVE_UNATTACHED);
			slave->first_interrupt_done = false;
			sdw_update_slave_status(slave, SDW_SLAVE_UNATTACHED);
		}

		/* keep track of request, used in pm_runtime resume */
		slave->unattach_request = request;
	}
}
EXPORT_SYMBOL(sdw_clear_slave_status);
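/*
 * Usage sketch (illustrative; 'my_hw_dev0_attached' and the way the status
 * array is filled are hypothetical): a Master driver translates its
 * hardware view (e.g. a PING frame snapshot) into the per-device-number
 * array expected by sdw_handle_slave_status():
 *
 *	enum sdw_slave_status status[SDW_MAX_DEVICES + 1] = {};
 *
 *	status[0] = my_hw_dev0_attached(ctrl) ? SDW_SLAVE_ATTACHED
 *					      : SDW_SLAVE_UNATTACHED;
 *	// ... fill status[1..SDW_MAX_DEVICES] from controller registers ...
 *	sdw_handle_slave_status(&ctrl->bus, status);
 *
 * sdw_clear_slave_status() is called when the link is reset, so that all
 * peripherals go back to UNATTACHED bookkeeping before re-enumeration.
 */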