1 /* 2 * Copyright (c) 2011-2016 Synaptics Incorporated 3 * Copyright (c) 2011 Unixphere 4 * 5 * This driver provides the core support for a single RMI4-based device. 6 * 7 * The RMI4 specification can be found here (URL split for line length): 8 * 9 * http://www.synaptics.com/sites/default/files/ 10 * 511-000136-01-Rev-E-RMI4-Interfacing-Guide.pdf 11 * 12 * This program is free software; you can redistribute it and/or modify it 13 * under the terms of the GNU General Public License version 2 as published by 14 * the Free Software Foundation. 15 */ 16 17 #include <linux/bitmap.h> 18 #include <linux/delay.h> 19 #include <linux/fs.h> 20 #include <linux/irq.h> 21 #include <linux/pm.h> 22 #include <linux/slab.h> 23 #include <linux/of.h> 24 #include <uapi/linux/input.h> 25 #include <linux/rmi.h> 26 #include "rmi_bus.h" 27 #include "rmi_driver.h" 28 29 #define HAS_NONSTANDARD_PDT_MASK 0x40 30 #define RMI4_MAX_PAGE 0xff 31 #define RMI4_PAGE_SIZE 0x100 32 #define RMI4_PAGE_MASK 0xFF00 33 34 #define RMI_DEVICE_RESET_CMD 0x01 35 #define DEFAULT_RESET_DELAY_MS 100 36 37 void rmi_free_function_list(struct rmi_device *rmi_dev) 38 { 39 struct rmi_function *fn, *tmp; 40 struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); 41 42 rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n"); 43 44 devm_kfree(&rmi_dev->dev, data->irq_memory); 45 data->irq_memory = NULL; 46 data->irq_status = NULL; 47 data->fn_irq_bits = NULL; 48 data->current_irq_mask = NULL; 49 data->new_irq_mask = NULL; 50 51 data->f01_container = NULL; 52 data->f34_container = NULL; 53 54 /* Doing it in the reverse order so F01 will be removed last */ 55 list_for_each_entry_safe_reverse(fn, tmp, 56 &data->function_list, node) { 57 list_del(&fn->node); 58 rmi_unregister_function(fn); 59 } 60 } 61 62 static int reset_one_function(struct rmi_function *fn) 63 { 64 struct rmi_function_handler *fh; 65 int retval = 0; 66 67 if (!fn || !fn->dev.driver) 68 return 0; 69 70 fh = to_rmi_function_handler(fn->dev.driver); 71 if (fh->reset) { 72 retval = fh->reset(fn); 73 if (retval < 0) 74 dev_err(&fn->dev, "Reset failed with code %d.\n", 75 retval); 76 } 77 78 return retval; 79 } 80 81 static int configure_one_function(struct rmi_function *fn) 82 { 83 struct rmi_function_handler *fh; 84 int retval = 0; 85 86 if (!fn || !fn->dev.driver) 87 return 0; 88 89 fh = to_rmi_function_handler(fn->dev.driver); 90 if (fh->config) { 91 retval = fh->config(fn); 92 if (retval < 0) 93 dev_err(&fn->dev, "Config failed with code %d.\n", 94 retval); 95 } 96 97 return retval; 98 } 99 100 static int rmi_driver_process_reset_requests(struct rmi_device *rmi_dev) 101 { 102 struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); 103 struct rmi_function *entry; 104 int retval; 105 106 list_for_each_entry(entry, &data->function_list, node) { 107 retval = reset_one_function(entry); 108 if (retval < 0) 109 return retval; 110 } 111 112 return 0; 113 } 114 115 static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev) 116 { 117 struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); 118 struct rmi_function *entry; 119 int retval; 120 121 list_for_each_entry(entry, &data->function_list, node) { 122 retval = configure_one_function(entry); 123 if (retval < 0) 124 return retval; 125 } 126 127 return 0; 128 } 129 130 static void process_one_interrupt(struct rmi_driver_data *data, 131 struct rmi_function *fn) 132 { 133 struct rmi_function_handler *fh; 134 135 if (!fn || !fn->dev.driver) 136 return; 137 138 fh = to_rmi_function_handler(fn->dev.driver); 139 if (fh->attention) { 140 bitmap_and(data->fn_irq_bits, data->irq_status, fn->irq_mask, 141 data->irq_count); 142 if (!bitmap_empty(data->fn_irq_bits, data->irq_count)) 143 fh->attention(fn, data->fn_irq_bits); 144 } 145 } 146 147 static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev) 148 { 149 struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); 150 struct device *dev = &rmi_dev->dev; 151 struct rmi_function *entry; 152 int error; 153 154 if (!data) 155 return 0; 156 157 if (!data->attn_data.data) { 158 error = rmi_read_block(rmi_dev, 159 data->f01_container->fd.data_base_addr + 1, 160 data->irq_status, data->num_of_irq_regs); 161 if (error < 0) { 162 dev_err(dev, "Failed to read irqs, code=%d\n", error); 163 return error; 164 } 165 } 166 167 mutex_lock(&data->irq_mutex); 168 bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask, 169 data->irq_count); 170 /* 171 * At this point, irq_status has all bits that are set in the 172 * interrupt status register and are enabled. 173 */ 174 mutex_unlock(&data->irq_mutex); 175 176 /* 177 * It would be nice to be able to use irq_chip to handle these 178 * nested IRQs. Unfortunately, most of the current customers for 179 * this driver are using older kernels (3.0.x) that don't support 180 * the features required for that. Once they've shifted to more 181 * recent kernels (say, 3.3 and higher), this should be switched to 182 * use irq_chip. 183 */ 184 list_for_each_entry(entry, &data->function_list, node) 185 process_one_interrupt(data, entry); 186 187 if (data->input) 188 input_sync(data->input); 189 190 return 0; 191 } 192 193 void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status, 194 void *data, size_t size) 195 { 196 struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev); 197 struct rmi4_attn_data attn_data; 198 void *fifo_data; 199 200 if (!drvdata->enabled) 201 return; 202 203 fifo_data = kmemdup(data, size, GFP_ATOMIC); 204 if (!fifo_data) 205 return; 206 207 attn_data.irq_status = irq_status; 208 attn_data.size = size; 209 attn_data.data = fifo_data; 210 211 kfifo_put(&drvdata->attn_fifo, attn_data); 212 } 213 EXPORT_SYMBOL_GPL(rmi_set_attn_data); 214 215 static irqreturn_t rmi_irq_fn(int irq, void *dev_id) 216 { 217 struct rmi_device *rmi_dev = dev_id; 218 struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev); 219 struct rmi4_attn_data attn_data = {0}; 220 int ret, count; 221 222 count = kfifo_get(&drvdata->attn_fifo, &attn_data); 223 if (count) { 224 *(drvdata->irq_status) = attn_data.irq_status; 225 drvdata->attn_data = attn_data; 226 } 227 228 ret = rmi_process_interrupt_requests(rmi_dev); 229 if (ret) 230 rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, 231 "Failed to process interrupt request: %d\n", ret); 232 233 if (count) 234 kfree(attn_data.data); 235 236 if (!kfifo_is_empty(&drvdata->attn_fifo)) 237 return rmi_irq_fn(irq, dev_id); 238 239 return IRQ_HANDLED; 240 } 241 242 static int rmi_irq_init(struct rmi_device *rmi_dev) 243 { 244 struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev); 245 struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); 246 int irq_flags = irq_get_trigger_type(pdata->irq); 247 int ret; 248 249 if (!irq_flags) 250 irq_flags = IRQF_TRIGGER_LOW; 251 252 ret = devm_request_threaded_irq(&rmi_dev->dev, pdata->irq, NULL, 253 rmi_irq_fn, irq_flags | IRQF_ONESHOT, 254 dev_driver_string(rmi_dev->xport->dev), 255 rmi_dev); 256 if (ret < 0) { 257 dev_err(&rmi_dev->dev, "Failed to register interrupt %d\n", 258 pdata->irq); 259 260 return ret; 261 } 262 263 data->enabled = true; 264 265 return 0; 266 } 267 268 struct rmi_function *rmi_find_function(struct rmi_device *rmi_dev, u8 number) 269 { 270 struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); 271 struct rmi_function *entry; 272 273 list_for_each_entry(entry, &data->function_list, node) { 274 if (entry->fd.function_number == number) 275 return entry; 276 } 277 278 return NULL; 279 } 280 281 static int suspend_one_function(struct rmi_function *fn) 282 { 283 struct rmi_function_handler *fh; 284 int retval = 0; 285 286 if (!fn || !fn->dev.driver) 287 return 0; 288 289 fh = to_rmi_function_handler(fn->dev.driver); 290 if (fh->suspend) { 291 retval = fh->suspend(fn); 292 if (retval < 0) 293 dev_err(&fn->dev, "Suspend failed with code %d.\n", 294 retval); 295 } 296 297 return retval; 298 } 299 300 static int rmi_suspend_functions(struct rmi_device *rmi_dev) 301 { 302 struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); 303 struct rmi_function *entry; 304 int retval; 305 306 list_for_each_entry(entry, &data->function_list, node) { 307 retval = suspend_one_function(entry); 308 if (retval < 0) 309 return retval; 310 } 311 312 return 0; 313 } 314 315 static int resume_one_function(struct rmi_function *fn) 316 { 317 struct rmi_function_handler *fh; 318 int retval = 0; 319 320 if (!fn || !fn->dev.driver) 321 return 0; 322 323 fh = to_rmi_function_handler(fn->dev.driver); 324 if (fh->resume) { 325 retval = fh->resume(fn); 326 if (retval < 0) 327 dev_err(&fn->dev, "Resume failed with code %d.\n", 328 retval); 329 } 330 331 return retval; 332 } 333 334 static int rmi_resume_functions(struct rmi_device *rmi_dev) 335 { 336 struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); 337 struct rmi_function *entry; 338 int retval; 339 340 list_for_each_entry(entry, &data->function_list, node) { 341 retval = resume_one_function(entry); 342 if (retval < 0) 343 return retval; 344 } 345 346 return 0; 347 } 348 349 int rmi_enable_sensor(struct rmi_device *rmi_dev) 350 { 351 int retval = 0; 352 353 retval = rmi_driver_process_config_requests(rmi_dev); 354 if (retval < 0) 355 return retval; 356 357 return rmi_process_interrupt_requests(rmi_dev); 358 } 359 360 /** 361 * rmi_driver_set_input_params - set input device id and other data. 362 * 363 * @rmi_dev: Pointer to an RMI device 364 * @input: Pointer to input device 365 * 366 */ 367 static int rmi_driver_set_input_params(struct rmi_device *rmi_dev, 368 struct input_dev *input) 369 { 370 input->name = SYNAPTICS_INPUT_DEVICE_NAME; 371 input->id.vendor = SYNAPTICS_VENDOR_ID; 372 input->id.bustype = BUS_RMI; 373 return 0; 374 } 375 376 static void rmi_driver_set_input_name(struct rmi_device *rmi_dev, 377 struct input_dev *input) 378 { 379 struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); 380 const char *device_name = rmi_f01_get_product_ID(data->f01_container); 381 char *name; 382 383 name = devm_kasprintf(&rmi_dev->dev, GFP_KERNEL, 384 "Synaptics %s", device_name); 385 if (!name) 386 return; 387 388 input->name = name; 389 } 390 391 static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev, 392 unsigned long *mask) 393 { 394 int error = 0; 395 struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); 396 struct device *dev = &rmi_dev->dev; 397 398 mutex_lock(&data->irq_mutex); 399 bitmap_or(data->new_irq_mask, 400 data->current_irq_mask, mask, data->irq_count); 401 402 error = rmi_write_block(rmi_dev, 403 data->f01_container->fd.control_base_addr + 1, 404 data->new_irq_mask, data->num_of_irq_regs); 405 if (error < 0) { 406 dev_err(dev, "%s: Failed to change enabled interrupts!", 407 __func__); 408 goto error_unlock; 409 } 410 bitmap_copy(data->current_irq_mask, data->new_irq_mask, 411 data->num_of_irq_regs); 412 413 error_unlock: 414 mutex_unlock(&data->irq_mutex); 415 return error; 416 } 417 418 static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev, 419 unsigned long *mask) 420 { 421 int error = 0; 422 struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); 423 struct device *dev = &rmi_dev->dev; 424 425 mutex_lock(&data->irq_mutex); 426 bitmap_andnot(data->new_irq_mask, 427 data->current_irq_mask, mask, data->irq_count); 428 429 error = rmi_write_block(rmi_dev, 430 data->f01_container->fd.control_base_addr + 1, 431 data->new_irq_mask, data->num_of_irq_regs); 432 if (error < 0) { 433 dev_err(dev, "%s: Failed to change enabled interrupts!", 434 __func__); 435 goto error_unlock; 436 } 437 bitmap_copy(data->current_irq_mask, data->new_irq_mask, 438 data->num_of_irq_regs); 439 440 error_unlock: 441 mutex_unlock(&data->irq_mutex); 442 return error; 443 } 444 445 static int rmi_driver_reset_handler(struct rmi_device *rmi_dev) 446 { 447 struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); 448 int error; 449 450 /* 451 * Can get called before the driver is fully ready to deal with 452 * this situation. 453 */ 454 if (!data || !data->f01_container) { 455 dev_warn(&rmi_dev->dev, 456 "Not ready to handle reset yet!\n"); 457 return 0; 458 } 459 460 error = rmi_read_block(rmi_dev, 461 data->f01_container->fd.control_base_addr + 1, 462 data->current_irq_mask, data->num_of_irq_regs); 463 if (error < 0) { 464 dev_err(&rmi_dev->dev, "%s: Failed to read current IRQ mask.\n", 465 __func__); 466 return error; 467 } 468 469 error = rmi_driver_process_reset_requests(rmi_dev); 470 if (error < 0) 471 return error; 472 473 error = rmi_driver_process_config_requests(rmi_dev); 474 if (error < 0) 475 return error; 476 477 return 0; 478 } 479 480 static int rmi_read_pdt_entry(struct rmi_device *rmi_dev, 481 struct pdt_entry *entry, u16 pdt_address) 482 { 483 u8 buf[RMI_PDT_ENTRY_SIZE]; 484 int error; 485 486 error = rmi_read_block(rmi_dev, pdt_address, buf, RMI_PDT_ENTRY_SIZE); 487 if (error) { 488 dev_err(&rmi_dev->dev, "Read PDT entry at %#06x failed, code: %d.\n", 489 pdt_address, error); 490 return error; 491 } 492 493 entry->page_start = pdt_address & RMI4_PAGE_MASK; 494 entry->query_base_addr = buf[0]; 495 entry->command_base_addr = buf[1]; 496 entry->control_base_addr = buf[2]; 497 entry->data_base_addr = buf[3]; 498 entry->interrupt_source_count = buf[4] & RMI_PDT_INT_SOURCE_COUNT_MASK; 499 entry->function_version = (buf[4] & RMI_PDT_FUNCTION_VERSION_MASK) >> 5; 500 entry->function_number = buf[5]; 501 502 return 0; 503 } 504 505 static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt, 506 struct rmi_function_descriptor *fd) 507 { 508 fd->query_base_addr = pdt->query_base_addr + pdt->page_start; 509 fd->command_base_addr = pdt->command_base_addr + pdt->page_start; 510 fd->control_base_addr = pdt->control_base_addr + pdt->page_start; 511 fd->data_base_addr = pdt->data_base_addr + pdt->page_start; 512 fd->function_number = pdt->function_number; 513 fd->interrupt_source_count = pdt->interrupt_source_count; 514 fd->function_version = pdt->function_version; 515 } 516 517 #define RMI_SCAN_CONTINUE 0 518 #define RMI_SCAN_DONE 1 519 520 static int rmi_scan_pdt_page(struct rmi_device *rmi_dev, 521 int page, 522 int *empty_pages, 523 void *ctx, 524 int (*callback)(struct rmi_device *rmi_dev, 525 void *ctx, 526 const struct pdt_entry *entry)) 527 { 528 struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); 529 struct pdt_entry pdt_entry; 530 u16 page_start = RMI4_PAGE_SIZE * page; 531 u16 pdt_start = page_start + PDT_START_SCAN_LOCATION; 532 u16 pdt_end = page_start + PDT_END_SCAN_LOCATION; 533 u16 addr; 534 int error; 535 int retval; 536 537 for (addr = pdt_start; addr >= pdt_end; addr -= RMI_PDT_ENTRY_SIZE) { 538 error = rmi_read_pdt_entry(rmi_dev, &pdt_entry, addr); 539 if (error) 540 return error; 541 542 if (RMI4_END_OF_PDT(pdt_entry.function_number)) 543 break; 544 545 retval = callback(rmi_dev, ctx, &pdt_entry); 546 if (retval != RMI_SCAN_CONTINUE) 547 return retval; 548 } 549 550 /* 551 * Count number of empty PDT pages. If a gap of two pages 552 * or more is found, stop scanning. 553 */ 554 if (addr == pdt_start) 555 ++*empty_pages; 556 else 557 *empty_pages = 0; 558 559 return (data->bootloader_mode || *empty_pages >= 2) ? 560 RMI_SCAN_DONE : RMI_SCAN_CONTINUE; 561 } 562 563 int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx, 564 int (*callback)(struct rmi_device *rmi_dev, 565 void *ctx, const struct pdt_entry *entry)) 566 { 567 int page; 568 int empty_pages = 0; 569 int retval = RMI_SCAN_DONE; 570 571 for (page = 0; page <= RMI4_MAX_PAGE; page++) { 572 retval = rmi_scan_pdt_page(rmi_dev, page, &empty_pages, 573 ctx, callback); 574 if (retval != RMI_SCAN_CONTINUE) 575 break; 576 } 577 578 return retval < 0 ? retval : 0; 579 } 580 581 int rmi_read_register_desc(struct rmi_device *d, u16 addr, 582 struct rmi_register_descriptor *rdesc) 583 { 584 int ret; 585 u8 size_presence_reg; 586 u8 buf[35]; 587 int presense_offset = 1; 588 u8 *struct_buf; 589 int reg; 590 int offset = 0; 591 int map_offset = 0; 592 int i; 593 int b; 594 595 /* 596 * The first register of the register descriptor is the size of 597 * the register descriptor's presense register. 598 */ 599 ret = rmi_read(d, addr, &size_presence_reg); 600 if (ret) 601 return ret; 602 ++addr; 603 604 if (size_presence_reg < 0 || size_presence_reg > 35) 605 return -EIO; 606 607 memset(buf, 0, sizeof(buf)); 608 609 /* 610 * The presence register contains the size of the register structure 611 * and a bitmap which identified which packet registers are present 612 * for this particular register type (ie query, control, or data). 613 */ 614 ret = rmi_read_block(d, addr, buf, size_presence_reg); 615 if (ret) 616 return ret; 617 ++addr; 618 619 if (buf[0] == 0) { 620 presense_offset = 3; 621 rdesc->struct_size = buf[1] | (buf[2] << 8); 622 } else { 623 rdesc->struct_size = buf[0]; 624 } 625 626 for (i = presense_offset; i < size_presence_reg; i++) { 627 for (b = 0; b < 8; b++) { 628 if (buf[i] & (0x1 << b)) 629 bitmap_set(rdesc->presense_map, map_offset, 1); 630 ++map_offset; 631 } 632 } 633 634 rdesc->num_registers = bitmap_weight(rdesc->presense_map, 635 RMI_REG_DESC_PRESENSE_BITS); 636 637 rdesc->registers = devm_kzalloc(&d->dev, rdesc->num_registers * 638 sizeof(struct rmi_register_desc_item), 639 GFP_KERNEL); 640 if (!rdesc->registers) 641 return -ENOMEM; 642 643 /* 644 * Allocate a temporary buffer to hold the register structure. 645 * I'm not using devm_kzalloc here since it will not be retained 646 * after exiting this function 647 */ 648 struct_buf = kzalloc(rdesc->struct_size, GFP_KERNEL); 649 if (!struct_buf) 650 return -ENOMEM; 651 652 /* 653 * The register structure contains information about every packet 654 * register of this type. This includes the size of the packet 655 * register and a bitmap of all subpackets contained in the packet 656 * register. 657 */ 658 ret = rmi_read_block(d, addr, struct_buf, rdesc->struct_size); 659 if (ret) 660 goto free_struct_buff; 661 662 reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS); 663 for (i = 0; i < rdesc->num_registers; i++) { 664 struct rmi_register_desc_item *item = &rdesc->registers[i]; 665 int reg_size = struct_buf[offset]; 666 667 ++offset; 668 if (reg_size == 0) { 669 reg_size = struct_buf[offset] | 670 (struct_buf[offset + 1] << 8); 671 offset += 2; 672 } 673 674 if (reg_size == 0) { 675 reg_size = struct_buf[offset] | 676 (struct_buf[offset + 1] << 8) | 677 (struct_buf[offset + 2] << 16) | 678 (struct_buf[offset + 3] << 24); 679 offset += 4; 680 } 681 682 item->reg = reg; 683 item->reg_size = reg_size; 684 685 map_offset = 0; 686 687 do { 688 for (b = 0; b < 7; b++) { 689 if (struct_buf[offset] & (0x1 << b)) 690 bitmap_set(item->subpacket_map, 691 map_offset, 1); 692 ++map_offset; 693 } 694 } while (struct_buf[offset++] & 0x80); 695 696 item->num_subpackets = bitmap_weight(item->subpacket_map, 697 RMI_REG_DESC_SUBPACKET_BITS); 698 699 rmi_dbg(RMI_DEBUG_CORE, &d->dev, 700 "%s: reg: %d reg size: %ld subpackets: %d\n", __func__, 701 item->reg, item->reg_size, item->num_subpackets); 702 703 reg = find_next_bit(rdesc->presense_map, 704 RMI_REG_DESC_PRESENSE_BITS, reg + 1); 705 } 706 707 free_struct_buff: 708 kfree(struct_buf); 709 return ret; 710 } 711 712 const struct rmi_register_desc_item *rmi_get_register_desc_item( 713 struct rmi_register_descriptor *rdesc, u16 reg) 714 { 715 const struct rmi_register_desc_item *item; 716 int i; 717 718 for (i = 0; i < rdesc->num_registers; i++) { 719 item = &rdesc->registers[i]; 720 if (item->reg == reg) 721 return item; 722 } 723 724 return NULL; 725 } 726 727 size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc) 728 { 729 const struct rmi_register_desc_item *item; 730 int i; 731 size_t size = 0; 732 733 for (i = 0; i < rdesc->num_registers; i++) { 734 item = &rdesc->registers[i]; 735 size += item->reg_size; 736 } 737 return size; 738 } 739 740 /* Compute the register offset relative to the base address */ 741 int rmi_register_desc_calc_reg_offset( 742 struct rmi_register_descriptor *rdesc, u16 reg) 743 { 744 const struct rmi_register_desc_item *item; 745 int offset = 0; 746 int i; 747 748 for (i = 0; i < rdesc->num_registers; i++) { 749 item = &rdesc->registers[i]; 750 if (item->reg == reg) 751 return offset; 752 ++offset; 753 } 754 return -1; 755 } 756 757 bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item, 758 u8 subpacket) 759 { 760 return find_next_bit(item->subpacket_map, RMI_REG_DESC_PRESENSE_BITS, 761 subpacket) == subpacket; 762 } 763 764 static int rmi_check_bootloader_mode(struct rmi_device *rmi_dev, 765 const struct pdt_entry *pdt) 766 { 767 struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); 768 int ret; 769 u8 status; 770 771 if (pdt->function_number == 0x34 && pdt->function_version > 1) { 772 ret = rmi_read(rmi_dev, pdt->data_base_addr, &status); 773 if (ret) { 774 dev_err(&rmi_dev->dev, 775 "Failed to read F34 status: %d.\n", ret); 776 return ret; 777 } 778 779 if (status & BIT(7)) 780 data->bootloader_mode = true; 781 } else if (pdt->function_number == 0x01) { 782 ret = rmi_read(rmi_dev, pdt->data_base_addr, &status); 783 if (ret) { 784 dev_err(&rmi_dev->dev, 785 "Failed to read F01 status: %d.\n", ret); 786 return ret; 787 } 788 789 if (status & BIT(6)) 790 data->bootloader_mode = true; 791 } 792 793 return 0; 794 } 795 796 static int rmi_count_irqs(struct rmi_device *rmi_dev, 797 void *ctx, const struct pdt_entry *pdt) 798 { 799 int *irq_count = ctx; 800 int ret; 801 802 *irq_count += pdt->interrupt_source_count; 803 804 ret = rmi_check_bootloader_mode(rmi_dev, pdt); 805 if (ret < 0) 806 return ret; 807 808 return RMI_SCAN_CONTINUE; 809 } 810 811 int rmi_initial_reset(struct rmi_device *rmi_dev, void *ctx, 812 const struct pdt_entry *pdt) 813 { 814 int error; 815 816 if (pdt->function_number == 0x01) { 817 u16 cmd_addr = pdt->page_start + pdt->command_base_addr; 818 u8 cmd_buf = RMI_DEVICE_RESET_CMD; 819 const struct rmi_device_platform_data *pdata = 820 rmi_get_platform_data(rmi_dev); 821 822 if (rmi_dev->xport->ops->reset) { 823 error = rmi_dev->xport->ops->reset(rmi_dev->xport, 824 cmd_addr); 825 if (error) 826 return error; 827 828 return RMI_SCAN_DONE; 829 } 830 831 rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Sending reset\n"); 832 error = rmi_write_block(rmi_dev, cmd_addr, &cmd_buf, 1); 833 if (error) { 834 dev_err(&rmi_dev->dev, 835 "Initial reset failed. Code = %d.\n", error); 836 return error; 837 } 838 839 mdelay(pdata->reset_delay_ms ?: DEFAULT_RESET_DELAY_MS); 840 841 return RMI_SCAN_DONE; 842 } 843 844 /* F01 should always be on page 0. If we don't find it there, fail. */ 845 return pdt->page_start == 0 ? RMI_SCAN_CONTINUE : -ENODEV; 846 } 847 848 static int rmi_create_function(struct rmi_device *rmi_dev, 849 void *ctx, const struct pdt_entry *pdt) 850 { 851 struct device *dev = &rmi_dev->dev; 852 struct rmi_driver_data *data = dev_get_drvdata(dev); 853 int *current_irq_count = ctx; 854 struct rmi_function *fn; 855 int i; 856 int error; 857 858 rmi_dbg(RMI_DEBUG_CORE, dev, "Initializing F%02X.\n", 859 pdt->function_number); 860 861 fn = kzalloc(sizeof(struct rmi_function) + 862 BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long), 863 GFP_KERNEL); 864 if (!fn) { 865 dev_err(dev, "Failed to allocate memory for F%02X\n", 866 pdt->function_number); 867 return -ENOMEM; 868 } 869 870 INIT_LIST_HEAD(&fn->node); 871 rmi_driver_copy_pdt_to_fd(pdt, &fn->fd); 872 873 fn->rmi_dev = rmi_dev; 874 875 fn->num_of_irqs = pdt->interrupt_source_count; 876 fn->irq_pos = *current_irq_count; 877 *current_irq_count += fn->num_of_irqs; 878 879 for (i = 0; i < fn->num_of_irqs; i++) 880 set_bit(fn->irq_pos + i, fn->irq_mask); 881 882 error = rmi_register_function(fn); 883 if (error) 884 goto err_put_fn; 885 886 if (pdt->function_number == 0x01) 887 data->f01_container = fn; 888 else if (pdt->function_number == 0x34) 889 data->f34_container = fn; 890 891 list_add_tail(&fn->node, &data->function_list); 892 893 return RMI_SCAN_CONTINUE; 894 895 err_put_fn: 896 put_device(&fn->dev); 897 return error; 898 } 899 900 void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake) 901 { 902 struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev); 903 struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); 904 int irq = pdata->irq; 905 int irq_flags; 906 int retval; 907 908 mutex_lock(&data->enabled_mutex); 909 910 if (data->enabled) 911 goto out; 912 913 enable_irq(irq); 914 data->enabled = true; 915 if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) { 916 retval = disable_irq_wake(irq); 917 if (retval) 918 dev_warn(&rmi_dev->dev, 919 "Failed to disable irq for wake: %d\n", 920 retval); 921 } 922 923 /* 924 * Call rmi_process_interrupt_requests() after enabling irq, 925 * otherwise we may lose interrupt on edge-triggered systems. 926 */ 927 irq_flags = irq_get_trigger_type(pdata->irq); 928 if (irq_flags & IRQ_TYPE_EDGE_BOTH) 929 rmi_process_interrupt_requests(rmi_dev); 930 931 out: 932 mutex_unlock(&data->enabled_mutex); 933 } 934 935 void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake) 936 { 937 struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev); 938 struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev); 939 struct rmi4_attn_data attn_data = {0}; 940 int irq = pdata->irq; 941 int retval, count; 942 943 mutex_lock(&data->enabled_mutex); 944 945 if (!data->enabled) 946 goto out; 947 948 data->enabled = false; 949 disable_irq(irq); 950 if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) { 951 retval = enable_irq_wake(irq); 952 if (retval) 953 dev_warn(&rmi_dev->dev, 954 "Failed to enable irq for wake: %d\n", 955 retval); 956 } 957 958 /* make sure the fifo is clean */ 959 while (!kfifo_is_empty(&data->attn_fifo)) { 960 count = kfifo_get(&data->attn_fifo, &attn_data); 961 if (count) 962 kfree(attn_data.data); 963 } 964 965 out: 966 mutex_unlock(&data->enabled_mutex); 967 } 968 969 int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake) 970 { 971 int retval; 972 973 retval = rmi_suspend_functions(rmi_dev); 974 if (retval) 975 dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n", 976 retval); 977 978 rmi_disable_irq(rmi_dev, enable_wake); 979 return retval; 980 } 981 EXPORT_SYMBOL_GPL(rmi_driver_suspend); 982 983 int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake) 984 { 985 int retval; 986 987 rmi_enable_irq(rmi_dev, clear_wake); 988 989 retval = rmi_resume_functions(rmi_dev); 990 if (retval) 991 dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n", 992 retval); 993 994 return retval; 995 } 996 EXPORT_SYMBOL_GPL(rmi_driver_resume); 997 998 static int rmi_driver_remove(struct device *dev) 999 { 1000 struct rmi_device *rmi_dev = to_rmi_device(dev); 1001 1002 rmi_disable_irq(rmi_dev, false); 1003 1004 rmi_f34_remove_sysfs(rmi_dev); 1005 rmi_free_function_list(rmi_dev); 1006 1007 return 0; 1008 } 1009 1010 #ifdef CONFIG_OF 1011 static int rmi_driver_of_probe(struct device *dev, 1012 struct rmi_device_platform_data *pdata) 1013 { 1014 int retval; 1015 1016 retval = rmi_of_property_read_u32(dev, &pdata->reset_delay_ms, 1017 "syna,reset-delay-ms", 1); 1018 if (retval) 1019 return retval; 1020 1021 return 0; 1022 } 1023 #else 1024 static inline int rmi_driver_of_probe(struct device *dev, 1025 struct rmi_device_platform_data *pdata) 1026 { 1027 return -ENODEV; 1028 } 1029 #endif 1030 1031 int rmi_probe_interrupts(struct rmi_driver_data *data) 1032 { 1033 struct rmi_device *rmi_dev = data->rmi_dev; 1034 struct device *dev = &rmi_dev->dev; 1035 int irq_count; 1036 size_t size; 1037 int retval; 1038 1039 /* 1040 * We need to count the IRQs and allocate their storage before scanning 1041 * the PDT and creating the function entries, because adding a new 1042 * function can trigger events that result in the IRQ related storage 1043 * being accessed. 1044 */ 1045 rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__); 1046 irq_count = 0; 1047 data->bootloader_mode = false; 1048 1049 retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs); 1050 if (retval < 0) { 1051 dev_err(dev, "IRQ counting failed with code %d.\n", retval); 1052 return retval; 1053 } 1054 1055 if (data->bootloader_mode) 1056 dev_warn(dev, "Device in bootloader mode.\n"); 1057 1058 data->irq_count = irq_count; 1059 data->num_of_irq_regs = (data->irq_count + 7) / 8; 1060 1061 size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long); 1062 data->irq_memory = devm_kzalloc(dev, size * 4, GFP_KERNEL); 1063 if (!data->irq_memory) { 1064 dev_err(dev, "Failed to allocate memory for irq masks.\n"); 1065 return -ENOMEM; 1066 } 1067 1068 data->irq_status = data->irq_memory + size * 0; 1069 data->fn_irq_bits = data->irq_memory + size * 1; 1070 data->current_irq_mask = data->irq_memory + size * 2; 1071 data->new_irq_mask = data->irq_memory + size * 3; 1072 1073 return retval; 1074 } 1075 1076 int rmi_init_functions(struct rmi_driver_data *data) 1077 { 1078 struct rmi_device *rmi_dev = data->rmi_dev; 1079 struct device *dev = &rmi_dev->dev; 1080 int irq_count; 1081 int retval; 1082 1083 irq_count = 0; 1084 rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__); 1085 retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function); 1086 if (retval < 0) { 1087 dev_err(dev, "Function creation failed with code %d.\n", 1088 retval); 1089 goto err_destroy_functions; 1090 } 1091 1092 if (!data->f01_container) { 1093 dev_err(dev, "Missing F01 container!\n"); 1094 retval = -EINVAL; 1095 goto err_destroy_functions; 1096 } 1097 1098 retval = rmi_read_block(rmi_dev, 1099 data->f01_container->fd.control_base_addr + 1, 1100 data->current_irq_mask, data->num_of_irq_regs); 1101 if (retval < 0) { 1102 dev_err(dev, "%s: Failed to read current IRQ mask.\n", 1103 __func__); 1104 goto err_destroy_functions; 1105 } 1106 1107 return 0; 1108 1109 err_destroy_functions: 1110 rmi_free_function_list(rmi_dev); 1111 return retval; 1112 } 1113 1114 static int rmi_driver_probe(struct device *dev) 1115 { 1116 struct rmi_driver *rmi_driver; 1117 struct rmi_driver_data *data; 1118 struct rmi_device_platform_data *pdata; 1119 struct rmi_device *rmi_dev; 1120 int retval; 1121 1122 rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Starting probe.\n", 1123 __func__); 1124 1125 if (!rmi_is_physical_device(dev)) { 1126 rmi_dbg(RMI_DEBUG_CORE, dev, "Not a physical device.\n"); 1127 return -ENODEV; 1128 } 1129 1130 rmi_dev = to_rmi_device(dev); 1131 rmi_driver = to_rmi_driver(dev->driver); 1132 rmi_dev->driver = rmi_driver; 1133 1134 pdata = rmi_get_platform_data(rmi_dev); 1135 1136 if (rmi_dev->xport->dev->of_node) { 1137 retval = rmi_driver_of_probe(rmi_dev->xport->dev, pdata); 1138 if (retval) 1139 return retval; 1140 } 1141 1142 data = devm_kzalloc(dev, sizeof(struct rmi_driver_data), GFP_KERNEL); 1143 if (!data) 1144 return -ENOMEM; 1145 1146 INIT_LIST_HEAD(&data->function_list); 1147 data->rmi_dev = rmi_dev; 1148 dev_set_drvdata(&rmi_dev->dev, data); 1149 1150 /* 1151 * Right before a warm boot, the sensor might be in some unusual state, 1152 * such as F54 diagnostics, or F34 bootloader mode after a firmware 1153 * or configuration update. In order to clear the sensor to a known 1154 * state and/or apply any updates, we issue a initial reset to clear any 1155 * previous settings and force it into normal operation. 1156 * 1157 * We have to do this before actually building the PDT because 1158 * the reflash updates (if any) might cause various registers to move 1159 * around. 1160 * 1161 * For a number of reasons, this initial reset may fail to return 1162 * within the specified time, but we'll still be able to bring up the 1163 * driver normally after that failure. This occurs most commonly in 1164 * a cold boot situation (where then firmware takes longer to come up 1165 * than from a warm boot) and the reset_delay_ms in the platform data 1166 * has been set too short to accommodate that. Since the sensor will 1167 * eventually come up and be usable, we don't want to just fail here 1168 * and leave the customer's device unusable. So we warn them, and 1169 * continue processing. 1170 */ 1171 retval = rmi_scan_pdt(rmi_dev, NULL, rmi_initial_reset); 1172 if (retval < 0) 1173 dev_warn(dev, "RMI initial reset failed! Continuing in spite of this.\n"); 1174 1175 retval = rmi_read(rmi_dev, PDT_PROPERTIES_LOCATION, &data->pdt_props); 1176 if (retval < 0) { 1177 /* 1178 * we'll print out a warning and continue since 1179 * failure to get the PDT properties is not a cause to fail 1180 */ 1181 dev_warn(dev, "Could not read PDT properties from %#06x (code %d). Assuming 0x00.\n", 1182 PDT_PROPERTIES_LOCATION, retval); 1183 } 1184 1185 mutex_init(&data->irq_mutex); 1186 mutex_init(&data->enabled_mutex); 1187 1188 retval = rmi_probe_interrupts(data); 1189 if (retval) 1190 goto err; 1191 1192 if (rmi_dev->xport->input) { 1193 /* 1194 * The transport driver already has an input device. 1195 * In some cases it is preferable to reuse the transport 1196 * devices input device instead of creating a new one here. 1197 * One example is some HID touchpads report "pass-through" 1198 * button events are not reported by rmi registers. 1199 */ 1200 data->input = rmi_dev->xport->input; 1201 } else { 1202 data->input = devm_input_allocate_device(dev); 1203 if (!data->input) { 1204 dev_err(dev, "%s: Failed to allocate input device.\n", 1205 __func__); 1206 retval = -ENOMEM; 1207 goto err; 1208 } 1209 rmi_driver_set_input_params(rmi_dev, data->input); 1210 data->input->phys = devm_kasprintf(dev, GFP_KERNEL, 1211 "%s/input0", dev_name(dev)); 1212 } 1213 1214 retval = rmi_init_functions(data); 1215 if (retval) 1216 goto err; 1217 1218 retval = rmi_f34_create_sysfs(rmi_dev); 1219 if (retval) 1220 goto err; 1221 1222 if (data->input) { 1223 rmi_driver_set_input_name(rmi_dev, data->input); 1224 if (!rmi_dev->xport->input) { 1225 if (input_register_device(data->input)) { 1226 dev_err(dev, "%s: Failed to register input device.\n", 1227 __func__); 1228 goto err_destroy_functions; 1229 } 1230 } 1231 } 1232 1233 retval = rmi_irq_init(rmi_dev); 1234 if (retval < 0) 1235 goto err_destroy_functions; 1236 1237 if (data->f01_container->dev.driver) { 1238 /* Driver already bound, so enable ATTN now. */ 1239 retval = rmi_enable_sensor(rmi_dev); 1240 if (retval) 1241 goto err_disable_irq; 1242 } 1243 1244 return 0; 1245 1246 err_disable_irq: 1247 rmi_disable_irq(rmi_dev, false); 1248 err_destroy_functions: 1249 rmi_free_function_list(rmi_dev); 1250 err: 1251 return retval; 1252 } 1253 1254 static struct rmi_driver rmi_physical_driver = { 1255 .driver = { 1256 .owner = THIS_MODULE, 1257 .name = "rmi4_physical", 1258 .bus = &rmi_bus_type, 1259 .probe = rmi_driver_probe, 1260 .remove = rmi_driver_remove, 1261 }, 1262 .reset_handler = rmi_driver_reset_handler, 1263 .clear_irq_bits = rmi_driver_clear_irq_bits, 1264 .set_irq_bits = rmi_driver_set_irq_bits, 1265 .set_input_params = rmi_driver_set_input_params, 1266 }; 1267 1268 bool rmi_is_physical_driver(struct device_driver *drv) 1269 { 1270 return drv == &rmi_physical_driver.driver; 1271 } 1272 1273 int __init rmi_register_physical_driver(void) 1274 { 1275 int error; 1276 1277 error = driver_register(&rmi_physical_driver.driver); 1278 if (error) { 1279 pr_err("%s: driver register failed, code=%d.\n", __func__, 1280 error); 1281 return error; 1282 } 1283 1284 return 0; 1285 } 1286 1287 void __exit rmi_unregister_physical_driver(void) 1288 { 1289 driver_unregister(&rmi_physical_driver.driver); 1290 } 1291