// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This driver provides the core support for a single RMI4-based device.
 *
 * The RMI4 specification can be found here (URL split for line length):
 *
 * http://www.synaptics.com/sites/default/files/
 * 511-000136-01-Rev-E-RMI4-Interfacing-Guide.pdf
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <uapi/linux/input.h>
#include <linux/rmi.h>
#include "rmi_bus.h"
#include "rmi_driver.h"

#define HAS_NONSTANDARD_PDT_MASK 0x40
#define RMI4_MAX_PAGE 0xff
#define RMI4_PAGE_SIZE 0x100
#define RMI4_PAGE_MASK 0xFF00

#define RMI_DEVICE_RESET_CMD	0x01
#define DEFAULT_RESET_DELAY_MS	100

void rmi_free_function_list(struct rmi_device *rmi_dev)
{
	struct rmi_function *fn, *tmp;
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);

	rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n");

	/* Doing it in the reverse order so F01 will be removed last */
	list_for_each_entry_safe_reverse(fn, tmp,
					 &data->function_list, node) {
		list_del(&fn->node);
		rmi_unregister_function(fn);
	}

	devm_kfree(&rmi_dev->dev, data->irq_memory);
	data->irq_memory = NULL;
	data->irq_status = NULL;
	data->fn_irq_bits = NULL;
	data->current_irq_mask = NULL;
	data->new_irq_mask = NULL;

	data->f01_container = NULL;
	data->f34_container = NULL;
}

static int reset_one_function(struct rmi_function *fn)
{
	struct rmi_function_handler *fh;
	int retval = 0;

	if (!fn || !fn->dev.driver)
		return 0;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->reset) {
		retval = fh->reset(fn);
		if (retval < 0)
			dev_err(&fn->dev, "Reset failed with code %d.\n",
				retval);
	}

	return retval;
}

static int configure_one_function(struct rmi_function *fn)
{
	struct rmi_function_handler *fh;
	int retval = 0;

	if (!fn || !fn->dev.driver)
		return 0;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->config) {
		retval = fh->config(fn);
		if (retval < 0)
			dev_err(&fn->dev, "Config failed with code %d.\n",
				retval);
	}

	return retval;
}

static int rmi_driver_process_reset_requests(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = reset_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}

static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = configure_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}

static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;
	int i;
	int error;

	if (!data)
		return 0;

	if (!data->attn_data.data) {
		error = rmi_read_block(rmi_dev,
				data->f01_container->fd.data_base_addr + 1,
				data->irq_status, data->num_of_irq_regs);
		if (error < 0) {
			dev_err(dev, "Failed to read irqs, code=%d\n", error);
			return error;
		}
	}

	mutex_lock(&data->irq_mutex);
	bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
		   data->irq_count);
	/*
	 * At this point, irq_status has all bits that are set in the
	 * interrupt status register and are enabled.
	 */
	mutex_unlock(&data->irq_mutex);

	for_each_set_bit(i, data->irq_status, data->irq_count)
		handle_nested_irq(irq_find_mapping(data->irqdomain, i));

	if (data->input)
		input_sync(data->input);

	return 0;
}

void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status,
		       void *data, size_t size)
{
	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
	struct rmi4_attn_data attn_data;
	void *fifo_data;

	if (!drvdata->enabled)
		return;

	fifo_data = kmemdup(data, size, GFP_ATOMIC);
	if (!fifo_data)
		return;

	attn_data.irq_status = irq_status;
	attn_data.size = size;
	attn_data.data = fifo_data;

	kfifo_put(&drvdata->attn_fifo, attn_data);
}
EXPORT_SYMBOL_GPL(rmi_set_attn_data);

static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
{
	struct rmi_device *rmi_dev = dev_id;
	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
	struct rmi4_attn_data attn_data = {0};
	int ret, count;

	count = kfifo_get(&drvdata->attn_fifo, &attn_data);
	if (count) {
		*(drvdata->irq_status) = attn_data.irq_status;
		drvdata->attn_data = attn_data;
	}

	ret = rmi_process_interrupt_requests(rmi_dev);
	if (ret)
		rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
			"Failed to process interrupt request: %d\n", ret);

	if (count) {
		kfree(attn_data.data);
		attn_data.data = NULL;
	}

	if (!kfifo_is_empty(&drvdata->attn_fifo))
		return rmi_irq_fn(irq, dev_id);

	return IRQ_HANDLED;
}

static int rmi_irq_init(struct rmi_device *rmi_dev)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int irq_flags = irq_get_trigger_type(pdata->irq);
	int ret;

	if (!irq_flags)
		irq_flags = IRQF_TRIGGER_LOW;

	ret = devm_request_threaded_irq(&rmi_dev->dev, pdata->irq, NULL,
					rmi_irq_fn, irq_flags | IRQF_ONESHOT,
					dev_driver_string(rmi_dev->xport->dev),
					rmi_dev);
	if (ret < 0) {
		dev_err(&rmi_dev->dev, "Failed to register interrupt %d\n",
			pdata->irq);

		return ret;
	}

	data->enabled = true;

	return 0;
}

struct rmi_function *rmi_find_function(struct rmi_device *rmi_dev, u8 number)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;

	list_for_each_entry(entry, &data->function_list, node) {
		if (entry->fd.function_number == number)
			return entry;
	}

	return NULL;
}

static int suspend_one_function(struct rmi_function *fn)
{
	struct rmi_function_handler *fh;
	int retval = 0;

	if (!fn || !fn->dev.driver)
		return 0;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->suspend) {
		retval = fh->suspend(fn);
		if (retval < 0)
			dev_err(&fn->dev, "Suspend failed with code %d.\n",
				retval);
	}

	return retval;
}

static int rmi_suspend_functions(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = suspend_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}

static int resume_one_function(struct rmi_function *fn)
{
	struct rmi_function_handler *fh;
	int retval = 0;

	if (!fn || !fn->dev.driver)
		return 0;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->resume) {
		retval = fh->resume(fn);
		if (retval < 0)
			dev_err(&fn->dev, "Resume failed with code %d.\n",
				retval);
	}

	return retval;
}

static int rmi_resume_functions(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = resume_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}

int rmi_enable_sensor(struct rmi_device *rmi_dev)
{
	int retval = 0;

	retval = rmi_driver_process_config_requests(rmi_dev);
	if (retval < 0)
		return retval;

	return rmi_process_interrupt_requests(rmi_dev);
}

/**
 * rmi_driver_set_input_params - set input device id and other data.
 *
 * @rmi_dev: Pointer to an RMI device
 * @input: Pointer to input device
 *
 */
static int rmi_driver_set_input_params(struct rmi_device *rmi_dev,
				       struct input_dev *input)
{
	input->name = SYNAPTICS_INPUT_DEVICE_NAME;
	input->id.vendor = SYNAPTICS_VENDOR_ID;
	input->id.bustype = BUS_RMI;
	return 0;
}

static void rmi_driver_set_input_name(struct rmi_device *rmi_dev,
				      struct input_dev *input)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	const char *device_name = rmi_f01_get_product_ID(data->f01_container);
	char *name;

	name = devm_kasprintf(&rmi_dev->dev, GFP_KERNEL,
			      "Synaptics %s", device_name);
	if (!name)
		return;

	input->name = name;
}

static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
				   unsigned long *mask)
{
	int error = 0;
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;

	mutex_lock(&data->irq_mutex);
	bitmap_or(data->new_irq_mask,
		  data->current_irq_mask, mask, data->irq_count);

	error = rmi_write_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->new_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(dev, "%s: Failed to change enabled interrupts!",
			__func__);
		goto error_unlock;
	}
	bitmap_copy(data->current_irq_mask, data->new_irq_mask,
		    data->num_of_irq_regs);

error_unlock:
	mutex_unlock(&data->irq_mutex);
	return error;
}

static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
				     unsigned long *mask)
{
	int error = 0;
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;

	mutex_lock(&data->irq_mutex);
	bitmap_andnot(data->new_irq_mask,
		      data->current_irq_mask, mask, data->irq_count);

	error = rmi_write_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->new_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(dev, "%s: Failed to change enabled interrupts!",
			__func__);
		goto error_unlock;
	}
	bitmap_copy(data->current_irq_mask, data->new_irq_mask,
		    data->num_of_irq_regs);

error_unlock:
	mutex_unlock(&data->irq_mutex);
	return error;
}

static int rmi_driver_reset_handler(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int error;

	/*
	 * Can get called before the driver is fully ready to deal with
	 * this situation.
	 */
	if (!data || !data->f01_container) {
		dev_warn(&rmi_dev->dev,
			 "Not ready to handle reset yet!\n");
		return 0;
	}

	error = rmi_read_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->current_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(&rmi_dev->dev, "%s: Failed to read current IRQ mask.\n",
			__func__);
		return error;
	}

	error = rmi_driver_process_reset_requests(rmi_dev);
	if (error < 0)
		return error;

	error = rmi_driver_process_config_requests(rmi_dev);
	if (error < 0)
		return error;

	return 0;
}

static int rmi_read_pdt_entry(struct rmi_device *rmi_dev,
			      struct pdt_entry *entry, u16 pdt_address)
{
	u8 buf[RMI_PDT_ENTRY_SIZE];
	int error;

	error = rmi_read_block(rmi_dev, pdt_address, buf, RMI_PDT_ENTRY_SIZE);
	if (error) {
		dev_err(&rmi_dev->dev, "Read PDT entry at %#06x failed, code: %d.\n",
			pdt_address, error);
		return error;
	}

	entry->page_start = pdt_address & RMI4_PAGE_MASK;
	entry->query_base_addr = buf[0];
	entry->command_base_addr = buf[1];
	entry->control_base_addr = buf[2];
	entry->data_base_addr = buf[3];
	entry->interrupt_source_count = buf[4] & RMI_PDT_INT_SOURCE_COUNT_MASK;
	entry->function_version = (buf[4] & RMI_PDT_FUNCTION_VERSION_MASK) >> 5;
	entry->function_number = buf[5];

	return 0;
}

static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt,
				      struct rmi_function_descriptor *fd)
{
	fd->query_base_addr = pdt->query_base_addr + pdt->page_start;
	fd->command_base_addr = pdt->command_base_addr + pdt->page_start;
	fd->control_base_addr = pdt->control_base_addr + pdt->page_start;
	fd->data_base_addr = pdt->data_base_addr + pdt->page_start;
	fd->function_number = pdt->function_number;
	fd->interrupt_source_count = pdt->interrupt_source_count;
	fd->function_version = pdt->function_version;
}

#define RMI_SCAN_CONTINUE	0
#define RMI_SCAN_DONE		1

static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
			     int page,
			     int *empty_pages,
			     void *ctx,
			     int (*callback)(struct rmi_device *rmi_dev,
					     void *ctx,
					     const struct pdt_entry *entry))
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct pdt_entry pdt_entry;
	u16 page_start = RMI4_PAGE_SIZE * page;
	u16 pdt_start = page_start + PDT_START_SCAN_LOCATION;
	u16 pdt_end = page_start + PDT_END_SCAN_LOCATION;
	u16 addr;
	int error;
	int retval;

	for (addr = pdt_start; addr >= pdt_end; addr -= RMI_PDT_ENTRY_SIZE) {
		error = rmi_read_pdt_entry(rmi_dev, &pdt_entry, addr);
		if (error)
			return error;

		if (RMI4_END_OF_PDT(pdt_entry.function_number))
			break;

		retval = callback(rmi_dev, ctx, &pdt_entry);
		if (retval != RMI_SCAN_CONTINUE)
			return retval;
	}

	/*
	 * Count number of empty PDT pages. If a gap of two pages
	 * or more is found, stop scanning.
	 */
	if (addr == pdt_start)
		++*empty_pages;
	else
		*empty_pages = 0;

	return (data->bootloader_mode || *empty_pages >= 2) ?
					RMI_SCAN_DONE : RMI_SCAN_CONTINUE;
}

int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
		 int (*callback)(struct rmi_device *rmi_dev,
				 void *ctx, const struct pdt_entry *entry))
{
	int page;
	int empty_pages = 0;
	int retval = RMI_SCAN_DONE;

	for (page = 0; page <= RMI4_MAX_PAGE; page++) {
		retval = rmi_scan_pdt_page(rmi_dev, page, &empty_pages,
					   ctx, callback);
		if (retval != RMI_SCAN_CONTINUE)
			break;
	}

	return retval < 0 ? retval : 0;
}

int rmi_read_register_desc(struct rmi_device *d, u16 addr,
			   struct rmi_register_descriptor *rdesc)
{
	int ret;
	u8 size_presence_reg;
	u8 buf[35];
	int presense_offset = 1;
	u8 *struct_buf;
	int reg;
	int offset = 0;
	int map_offset = 0;
	int i;
	int b;

	/*
	 * The first register of the register descriptor is the size of
	 * the register descriptor's presence register.
	 */
	ret = rmi_read(d, addr, &size_presence_reg);
	if (ret)
		return ret;
	++addr;

	if (size_presence_reg < 0 || size_presence_reg > 35)
		return -EIO;

	memset(buf, 0, sizeof(buf));

	/*
	 * The presence register contains the size of the register structure
	 * and a bitmap which identifies which packet registers are present
	 * for this particular register type (i.e. query, control, or data).
	 */
	ret = rmi_read_block(d, addr, buf, size_presence_reg);
	if (ret)
		return ret;
	++addr;

	if (buf[0] == 0) {
		presense_offset = 3;
		rdesc->struct_size = buf[1] | (buf[2] << 8);
	} else {
		rdesc->struct_size = buf[0];
	}

	for (i = presense_offset; i < size_presence_reg; i++) {
		for (b = 0; b < 8; b++) {
			if (buf[i] & (0x1 << b))
				bitmap_set(rdesc->presense_map, map_offset, 1);
			++map_offset;
		}
	}

	rdesc->num_registers = bitmap_weight(rdesc->presense_map,
						RMI_REG_DESC_PRESENSE_BITS);

	rdesc->registers = devm_kcalloc(&d->dev,
					rdesc->num_registers,
					sizeof(struct rmi_register_desc_item),
					GFP_KERNEL);
	if (!rdesc->registers)
		return -ENOMEM;

	/*
	 * Allocate a temporary buffer to hold the register structure.
	 * devm_kzalloc is not used here since the buffer does not need to
	 * be retained after this function returns.
	 */
	struct_buf = kzalloc(rdesc->struct_size, GFP_KERNEL);
	if (!struct_buf)
		return -ENOMEM;

	/*
	 * The register structure contains information about every packet
	 * register of this type. This includes the size of the packet
	 * register and a bitmap of all subpackets contained in the packet
	 * register.
	 */
	ret = rmi_read_block(d, addr, struct_buf, rdesc->struct_size);
	if (ret)
		goto free_struct_buff;

	reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS);
	for (i = 0; i < rdesc->num_registers; i++) {
		struct rmi_register_desc_item *item = &rdesc->registers[i];
		int reg_size = struct_buf[offset];

		/*
		 * A register size of 0 escapes to a 2-byte size field; if
		 * that is also 0, a 4-byte size field follows.
		 */
		++offset;
		if (reg_size == 0) {
			reg_size = struct_buf[offset] |
					(struct_buf[offset + 1] << 8);
			offset += 2;
		}

		if (reg_size == 0) {
			reg_size = struct_buf[offset] |
				(struct_buf[offset + 1] << 8) |
				(struct_buf[offset + 2] << 16) |
				(struct_buf[offset + 3] << 24);
			offset += 4;
		}

		item->reg = reg;
		item->reg_size = reg_size;

		map_offset = 0;

		/*
		 * Subpacket presence bits come in groups of 7; bit 7 of
		 * each byte indicates that another presence byte follows.
		 */
		do {
			for (b = 0; b < 7; b++) {
				if (struct_buf[offset] & (0x1 << b))
					bitmap_set(item->subpacket_map,
						map_offset, 1);
				++map_offset;
			}
		} while (struct_buf[offset++] & 0x80);

		item->num_subpackets = bitmap_weight(item->subpacket_map,
						RMI_REG_DESC_SUBPACKET_BITS);

		rmi_dbg(RMI_DEBUG_CORE, &d->dev,
			"%s: reg: %d reg size: %ld subpackets: %d\n", __func__,
			item->reg, item->reg_size, item->num_subpackets);

		reg = find_next_bit(rdesc->presense_map,
				RMI_REG_DESC_PRESENSE_BITS, reg + 1);
	}

free_struct_buff:
	kfree(struct_buf);
	return ret;
}

const struct rmi_register_desc_item *rmi_get_register_desc_item(
				struct rmi_register_descriptor *rdesc, u16 reg)
{
	const struct rmi_register_desc_item *item;
	int i;

	for (i = 0; i < rdesc->num_registers; i++) {
		item = &rdesc->registers[i];
		if (item->reg == reg)
			return item;
	}

	return NULL;
}

size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc)
{
	const struct rmi_register_desc_item *item;
	int i;
	size_t size = 0;

	for (i = 0; i < rdesc->num_registers; i++) {
		item = &rdesc->registers[i];
		size += item->reg_size;
	}
	return size;
}

/* Compute the register offset relative to the base address */
int rmi_register_desc_calc_reg_offset(
		struct rmi_register_descriptor *rdesc, u16 reg)
{
	const struct rmi_register_desc_item *item;
	int offset = 0;
	int i;

	for (i = 0; i < rdesc->num_registers; i++) {
		item = &rdesc->registers[i];
		if (item->reg == reg)
			return offset;
		/* Accumulate the sizes of the registers preceding reg. */
		offset += item->reg_size;
	}
	return -1;
}

bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
				     u8 subpacket)
{
	return find_next_bit(item->subpacket_map, RMI_REG_DESC_PRESENSE_BITS,
			     subpacket) == subpacket;
}

static int rmi_check_bootloader_mode(struct rmi_device *rmi_dev,
				     const struct pdt_entry *pdt)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int ret;
	u8 status;

	if (pdt->function_number == 0x34 && pdt->function_version > 1) {
		ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
		if (ret) {
			dev_err(&rmi_dev->dev,
				"Failed to read F34 status: %d.\n", ret);
			return ret;
		}

		if (status & BIT(7))
			data->bootloader_mode = true;
	} else if (pdt->function_number == 0x01) {
		ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
		if (ret) {
			dev_err(&rmi_dev->dev,
				"Failed to read F01 status: %d.\n", ret);
			return ret;
		}

		if (status & BIT(6))
			data->bootloader_mode = true;
	}

	return 0;
}

static int rmi_count_irqs(struct rmi_device *rmi_dev,
			  void *ctx, const struct pdt_entry *pdt)
{
	int *irq_count = ctx;
	int ret;

	*irq_count += pdt->interrupt_source_count;

	ret = rmi_check_bootloader_mode(rmi_dev, pdt);
	if (ret < 0)
		return ret;

	return RMI_SCAN_CONTINUE;
}

int rmi_initial_reset(struct rmi_device *rmi_dev, void *ctx,
		      const struct pdt_entry *pdt)
{
	int error;

	if (pdt->function_number == 0x01) {
		u16 cmd_addr = pdt->page_start + pdt->command_base_addr;
		u8 cmd_buf = RMI_DEVICE_RESET_CMD;
		const struct rmi_device_platform_data *pdata =
				rmi_get_platform_data(rmi_dev);

		if (rmi_dev->xport->ops->reset) {
			error = rmi_dev->xport->ops->reset(rmi_dev->xport,
								cmd_addr);
			if (error)
				return error;

			return RMI_SCAN_DONE;
		}

		rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Sending reset\n");
		error = rmi_write_block(rmi_dev, cmd_addr, &cmd_buf, 1);
		if (error) {
			dev_err(&rmi_dev->dev,
				"Initial reset failed. Code = %d.\n", error);
			return error;
		}

		mdelay(pdata->reset_delay_ms ?: DEFAULT_RESET_DELAY_MS);

		return RMI_SCAN_DONE;
	}

	/* F01 should always be on page 0. If we don't find it there, fail. */
	return pdt->page_start == 0 ? RMI_SCAN_CONTINUE : -ENODEV;
}

static int rmi_create_function(struct rmi_device *rmi_dev,
			       void *ctx, const struct pdt_entry *pdt)
{
	struct device *dev = &rmi_dev->dev;
	struct rmi_driver_data *data = dev_get_drvdata(dev);
	int *current_irq_count = ctx;
	struct rmi_function *fn;
	int i;
	int error;

	rmi_dbg(RMI_DEBUG_CORE, dev, "Initializing F%02X.\n",
		pdt->function_number);

	fn = kzalloc(sizeof(struct rmi_function) +
			BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long),
		GFP_KERNEL);
	if (!fn) {
		dev_err(dev, "Failed to allocate memory for F%02X\n",
			pdt->function_number);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&fn->node);
	rmi_driver_copy_pdt_to_fd(pdt, &fn->fd);

	fn->rmi_dev = rmi_dev;

	fn->num_of_irqs = pdt->interrupt_source_count;
	fn->irq_pos = *current_irq_count;
	*current_irq_count += fn->num_of_irqs;

	for (i = 0; i < fn->num_of_irqs; i++)
		set_bit(fn->irq_pos + i, fn->irq_mask);

	error = rmi_register_function(fn);
	if (error)
		return error;

	if (pdt->function_number == 0x01)
		data->f01_container = fn;
	else if (pdt->function_number == 0x34)
		data->f34_container = fn;

	list_add_tail(&fn->node, &data->function_list);

	return RMI_SCAN_CONTINUE;
}

void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int irq = pdata->irq;
	int irq_flags;
	int retval;

	mutex_lock(&data->enabled_mutex);

	if (data->enabled)
		goto out;

	enable_irq(irq);
	data->enabled = true;
	if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
		retval = disable_irq_wake(irq);
		if (retval)
			dev_warn(&rmi_dev->dev,
				 "Failed to disable irq for wake: %d\n",
				 retval);
	}

	/*
	 * Call rmi_process_interrupt_requests() after enabling irq,
	 * otherwise we may lose interrupt on edge-triggered systems.
	 */
	irq_flags = irq_get_trigger_type(pdata->irq);
	if (irq_flags & IRQ_TYPE_EDGE_BOTH)
		rmi_process_interrupt_requests(rmi_dev);

out:
	mutex_unlock(&data->enabled_mutex);
}

void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi4_attn_data attn_data = {0};
	int irq = pdata->irq;
	int retval, count;

	mutex_lock(&data->enabled_mutex);

	if (!data->enabled)
		goto out;

	data->enabled = false;
	disable_irq(irq);
	if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
		retval = enable_irq_wake(irq);
		if (retval)
			dev_warn(&rmi_dev->dev,
				 "Failed to enable irq for wake: %d\n",
				 retval);
	}

	/* make sure the fifo is clean */
	while (!kfifo_is_empty(&data->attn_fifo)) {
		count = kfifo_get(&data->attn_fifo, &attn_data);
		if (count)
			kfree(attn_data.data);
	}

out:
	mutex_unlock(&data->enabled_mutex);
}

int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake)
{
	int retval;

	retval = rmi_suspend_functions(rmi_dev);
	if (retval)
		dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
			 retval);

	rmi_disable_irq(rmi_dev, enable_wake);
	return retval;
}
EXPORT_SYMBOL_GPL(rmi_driver_suspend);

int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake)
{
	int retval;

	rmi_enable_irq(rmi_dev, clear_wake);

	retval = rmi_resume_functions(rmi_dev);
	if (retval)
		dev_warn(&rmi_dev->dev, "Failed to resume functions: %d\n",
			 retval);

	return retval;
}
EXPORT_SYMBOL_GPL(rmi_driver_resume);

static int rmi_driver_remove(struct device *dev)
{
	struct rmi_device *rmi_dev = to_rmi_device(dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);

	rmi_disable_irq(rmi_dev, false);

	irq_domain_remove(data->irqdomain);
	data->irqdomain = NULL;

	rmi_f34_remove_sysfs(rmi_dev);
	rmi_free_function_list(rmi_dev);

	return 0;
}

#ifdef CONFIG_OF
static int rmi_driver_of_probe(struct device *dev,
			       struct rmi_device_platform_data *pdata)
{
	int retval;

	retval = rmi_of_property_read_u32(dev, &pdata->reset_delay_ms,
					  "syna,reset-delay-ms", 1);
	if (retval)
		return retval;

	return 0;
}
#else
static inline int rmi_driver_of_probe(struct device *dev,
				      struct rmi_device_platform_data *pdata)
{
	return -ENODEV;
}
#endif

int rmi_probe_interrupts(struct rmi_driver_data *data)
{
	struct rmi_device *rmi_dev = data->rmi_dev;
	struct device *dev = &rmi_dev->dev;
	struct fwnode_handle *fwnode = rmi_dev->xport->dev->fwnode;
	int irq_count = 0;
	size_t size;
	int retval;

	/*
	 * We need to count the IRQs and allocate their storage before scanning
	 * the PDT and creating the function entries, because adding a new
	 * function can trigger events that result in the IRQ related storage
	 * being accessed.
	 */
	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
	data->bootloader_mode = false;

	retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
	if (retval < 0) {
		dev_err(dev, "IRQ counting failed with code %d.\n", retval);
		return retval;
	}

	if (data->bootloader_mode)
		dev_warn(dev, "Device in bootloader mode.\n");

	/* Allocate and register a linear revmap irq_domain */
	data->irqdomain = irq_domain_create_linear(fwnode, irq_count,
						   &irq_domain_simple_ops,
						   data);
	if (!data->irqdomain) {
		dev_err(&rmi_dev->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	data->irq_count = irq_count;
	data->num_of_irq_regs = (data->irq_count + 7) / 8;

	size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);
	data->irq_memory = devm_kcalloc(dev, size, 4, GFP_KERNEL);
	if (!data->irq_memory) {
		dev_err(dev, "Failed to allocate memory for irq masks.\n");
		return -ENOMEM;
	}

	data->irq_status	= data->irq_memory + size * 0;
	data->fn_irq_bits	= data->irq_memory + size * 1;
	data->current_irq_mask	= data->irq_memory + size * 2;
	data->new_irq_mask	= data->irq_memory + size * 3;

	return retval;
}

int rmi_init_functions(struct rmi_driver_data *data)
{
	struct rmi_device *rmi_dev = data->rmi_dev;
	struct device *dev = &rmi_dev->dev;
	int irq_count = 0;
	int retval;

	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
	retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
	if (retval < 0) {
		dev_err(dev, "Function creation failed with code %d.\n",
			retval);
		goto err_destroy_functions;
	}

	if (!data->f01_container) {
		dev_err(dev, "Missing F01 container!\n");
		retval = -EINVAL;
		goto err_destroy_functions;
	}

	retval = rmi_read_block(rmi_dev,
				data->f01_container->fd.control_base_addr + 1,
				data->current_irq_mask, data->num_of_irq_regs);
	if (retval < 0) {
		dev_err(dev, "%s: Failed to read current IRQ mask.\n",
			__func__);
		goto err_destroy_functions;
	}

	return 0;

err_destroy_functions:
	rmi_free_function_list(rmi_dev);
	return retval;
}

static int rmi_driver_probe(struct device *dev)
{
	struct rmi_driver *rmi_driver;
	struct rmi_driver_data *data;
	struct rmi_device_platform_data *pdata;
	struct rmi_device *rmi_dev;
	int retval;

	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Starting probe.\n",
		__func__);

	if (!rmi_is_physical_device(dev)) {
		rmi_dbg(RMI_DEBUG_CORE, dev, "Not a physical device.\n");
		return -ENODEV;
	}

	rmi_dev = to_rmi_device(dev);
	rmi_driver = to_rmi_driver(dev->driver);
	rmi_dev->driver = rmi_driver;

	pdata = rmi_get_platform_data(rmi_dev);

	if (rmi_dev->xport->dev->of_node) {
		retval = rmi_driver_of_probe(rmi_dev->xport->dev, pdata);
		if (retval)
			return retval;
	}

	data = devm_kzalloc(dev, sizeof(struct rmi_driver_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->function_list);
	data->rmi_dev = rmi_dev;
	dev_set_drvdata(&rmi_dev->dev, data);

	/*
	 * Right before a warm boot, the sensor might be in some unusual state,
	 * such as F54 diagnostics, or F34 bootloader mode after a firmware
	 * or configuration update. In order to clear the sensor to a known
	 * state and/or apply any updates, we issue an initial reset to clear
	 * any previous settings and force it into normal operation.
	 *
	 * We have to do this before actually building the PDT because
	 * the reflash updates (if any) might cause various registers to move
	 * around.
	 *
	 * For a number of reasons, this initial reset may fail to return
	 * within the specified time, but we'll still be able to bring up the
	 * driver normally after that failure. This occurs most commonly in
	 * a cold boot situation (where the firmware takes longer to come up
	 * than from a warm boot) and the reset_delay_ms in the platform data
	 * has been set too short to accommodate that. Since the sensor will
	 * eventually come up and be usable, we don't want to just fail here
	 * and leave the customer's device unusable. So we warn them, and
	 * continue processing.
	 */
	retval = rmi_scan_pdt(rmi_dev, NULL, rmi_initial_reset);
	if (retval < 0)
		dev_warn(dev, "RMI initial reset failed! Continuing in spite of this.\n");

	retval = rmi_read(rmi_dev, PDT_PROPERTIES_LOCATION, &data->pdt_props);
	if (retval < 0) {
		/*
		 * We'll print out a warning and continue, since failure to
		 * read the PDT properties is not a reason to fail the probe.
		 */
		dev_warn(dev, "Could not read PDT properties from %#06x (code %d). Assuming 0x00.\n",
			 PDT_PROPERTIES_LOCATION, retval);
	}

	mutex_init(&data->irq_mutex);
	mutex_init(&data->enabled_mutex);

	retval = rmi_probe_interrupts(data);
	if (retval)
		goto err;

	if (rmi_dev->xport->input) {
		/*
		 * The transport driver already has an input device.
		 * In some cases it is preferable to reuse the transport
		 * device's input device instead of creating a new one here.
		 * One example is HID touchpads that report "pass-through"
		 * button events which are not reported via RMI registers.
		 */
		data->input = rmi_dev->xport->input;
	} else {
		data->input = devm_input_allocate_device(dev);
		if (!data->input) {
			dev_err(dev, "%s: Failed to allocate input device.\n",
				__func__);
			retval = -ENOMEM;
			goto err;
		}
		rmi_driver_set_input_params(rmi_dev, data->input);
		data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
						   "%s/input0", dev_name(dev));
	}

	retval = rmi_init_functions(data);
	if (retval)
		goto err;

	retval = rmi_f34_create_sysfs(rmi_dev);
	if (retval)
		goto err;

	if (data->input) {
		rmi_driver_set_input_name(rmi_dev, data->input);
		if (!rmi_dev->xport->input) {
			if (input_register_device(data->input)) {
				dev_err(dev, "%s: Failed to register input device.\n",
					__func__);
				goto err_destroy_functions;
			}
		}
	}

	retval = rmi_irq_init(rmi_dev);
	if (retval < 0)
		goto err_destroy_functions;

	if (data->f01_container->dev.driver) {
		/* Driver already bound, so enable ATTN now. */
		retval = rmi_enable_sensor(rmi_dev);
		if (retval)
			goto err_disable_irq;
	}

	return 0;

err_disable_irq:
	rmi_disable_irq(rmi_dev, false);
err_destroy_functions:
	rmi_free_function_list(rmi_dev);
err:
	return retval;
}

static struct rmi_driver rmi_physical_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "rmi4_physical",
		.bus	= &rmi_bus_type,
		.probe	= rmi_driver_probe,
		.remove	= rmi_driver_remove,
	},
	.reset_handler = rmi_driver_reset_handler,
	.clear_irq_bits = rmi_driver_clear_irq_bits,
	.set_irq_bits = rmi_driver_set_irq_bits,
	.set_input_params = rmi_driver_set_input_params,
};

bool rmi_is_physical_driver(struct device_driver *drv)
{
	return drv == &rmi_physical_driver.driver;
}

int __init rmi_register_physical_driver(void)
{
	int error;

	error = driver_register(&rmi_physical_driver.driver);
	if (error) {
		pr_err("%s: driver register failed, code=%d.\n", __func__,
		       error);
		return error;
	}

	return 0;
}

void __exit rmi_unregister_physical_driver(void)
{
	driver_unregister(&rmi_physical_driver.driver);
}