/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 *
 * This driver provides the core support for a single RMI4-based device.
 *
 * The RMI4 specification can be found here (URL split for line length):
 *
 * http://www.synaptics.com/sites/default/files/
 * 511-000136-01-Rev-E-RMI4-Interfacing-Guide.pdf
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/irq.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <uapi/linux/input.h>
#include <linux/rmi.h>
#include "rmi_bus.h"
#include "rmi_driver.h"

#define HAS_NONSTANDARD_PDT_MASK	0x40
#define RMI4_MAX_PAGE			0xff
#define RMI4_PAGE_SIZE			0x100
#define RMI4_PAGE_MASK			0xFF00

#define RMI_DEVICE_RESET_CMD		0x01
#define DEFAULT_RESET_DELAY_MS		100

void rmi_free_function_list(struct rmi_device *rmi_dev)
{
	struct rmi_function *fn, *tmp;
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);

	rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n");

	/* Doing it in the reverse order so F01 will be removed last */
	list_for_each_entry_safe_reverse(fn, tmp,
					 &data->function_list, node) {
		list_del(&fn->node);
		rmi_unregister_function(fn);
	}

	devm_kfree(&rmi_dev->dev, data->irq_memory);
	data->irq_memory = NULL;
	data->irq_status = NULL;
	data->fn_irq_bits = NULL;
	data->current_irq_mask = NULL;
	data->new_irq_mask = NULL;

	data->f01_container = NULL;
	data->f34_container = NULL;
}

static int reset_one_function(struct rmi_function *fn)
{
	struct rmi_function_handler *fh;
	int retval = 0;

	if (!fn || !fn->dev.driver)
		return 0;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->reset) {
		retval = fh->reset(fn);
		if (retval < 0)
			dev_err(&fn->dev, "Reset failed with code %d.\n",
				retval);
	}

	return retval;
}

static int configure_one_function(struct rmi_function *fn)
{
	struct rmi_function_handler *fh;
	int retval = 0;

	if (!fn || !fn->dev.driver)
		return 0;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->config) {
		retval = fh->config(fn);
		if (retval < 0)
			dev_err(&fn->dev, "Config failed with code %d.\n",
				retval);
	}

	return retval;
}

static int rmi_driver_process_reset_requests(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = reset_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}

static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = configure_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}
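/*
 * Dispatch one function's share of an interrupt: mask the global
 * irq_status with the function's irq_mask and, if any of its bits are
 * set, call the handler's attention() callback.
 */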
static void process_one_interrupt(struct rmi_driver_data *data,
				  struct rmi_function *fn)
{
	struct rmi_function_handler *fh;

	if (!fn || !fn->dev.driver)
		return;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->attention) {
		bitmap_and(data->fn_irq_bits, data->irq_status, fn->irq_mask,
			   data->irq_count);
		if (!bitmap_empty(data->fn_irq_bits, data->irq_count))
			fh->attention(fn, data->fn_irq_bits);
	}
}

static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;
	struct rmi_function *entry;
	int error;

	if (!data)
		return 0;

	if (!data->attn_data.data) {
		error = rmi_read_block(rmi_dev,
				data->f01_container->fd.data_base_addr + 1,
				data->irq_status, data->num_of_irq_regs);
		if (error < 0) {
			dev_err(dev, "Failed to read irqs, code=%d\n", error);
			return error;
		}
	}

	mutex_lock(&data->irq_mutex);
	bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
		   data->irq_count);
	/*
	 * At this point, irq_status has all bits that are set in the
	 * interrupt status register and are enabled.
	 */
	mutex_unlock(&data->irq_mutex);

	/*
	 * It would be nice to be able to use irq_chip to handle these
	 * nested IRQs. Unfortunately, most of the current customers for
	 * this driver are using older kernels (3.0.x) that don't support
	 * the features required for that. Once they've shifted to more
	 * recent kernels (say, 3.3 and higher), this should be switched to
	 * use irq_chip.
	 */
	list_for_each_entry(entry, &data->function_list, node)
		process_one_interrupt(data, entry);

	if (data->input)
		input_sync(data->input);

	return 0;
}

void rmi_set_attn_data(struct rmi_device *rmi_dev, unsigned long irq_status,
		       void *data, size_t size)
{
	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
	struct rmi4_attn_data attn_data;
	void *fifo_data;

	if (!drvdata->enabled)
		return;

	fifo_data = kmemdup(data, size, GFP_ATOMIC);
	if (!fifo_data)
		return;

	attn_data.irq_status = irq_status;
	attn_data.size = size;
	attn_data.data = fifo_data;

	kfifo_put(&drvdata->attn_fifo, attn_data);
}
EXPORT_SYMBOL_GPL(rmi_set_attn_data);
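/*
 * IRQ thread. If a transport (such as HID) has queued attention data
 * via rmi_set_attn_data(), consume one frame from the fifo and use its
 * interrupt status; otherwise rmi_process_interrupt_requests() reads
 * the status registers directly. Repeats until the fifo is drained.
 */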
static irqreturn_t rmi_irq_fn(int irq, void *dev_id)
{
	struct rmi_device *rmi_dev = dev_id;
	struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
	struct rmi4_attn_data attn_data = {0};
	int ret, count;

	count = kfifo_get(&drvdata->attn_fifo, &attn_data);
	if (count) {
		*(drvdata->irq_status) = attn_data.irq_status;
		drvdata->attn_data = attn_data;
	}

	ret = rmi_process_interrupt_requests(rmi_dev);
	if (ret)
		rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev,
			"Failed to process interrupt request: %d\n", ret);

	if (count) {
		kfree(attn_data.data);
		attn_data.data = NULL;
	}

	if (!kfifo_is_empty(&drvdata->attn_fifo))
		return rmi_irq_fn(irq, dev_id);

	return IRQ_HANDLED;
}

static int rmi_irq_init(struct rmi_device *rmi_dev)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int irq_flags = irq_get_trigger_type(pdata->irq);
	int ret;

	if (!irq_flags)
		irq_flags = IRQF_TRIGGER_LOW;

	ret = devm_request_threaded_irq(&rmi_dev->dev, pdata->irq, NULL,
					rmi_irq_fn, irq_flags | IRQF_ONESHOT,
					dev_driver_string(rmi_dev->xport->dev),
					rmi_dev);
	if (ret < 0) {
		dev_err(&rmi_dev->dev, "Failed to register interrupt %d\n",
			pdata->irq);

		return ret;
	}

	data->enabled = true;

	return 0;
}

struct rmi_function *rmi_find_function(struct rmi_device *rmi_dev, u8 number)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;

	list_for_each_entry(entry, &data->function_list, node) {
		if (entry->fd.function_number == number)
			return entry;
	}

	return NULL;
}

static int suspend_one_function(struct rmi_function *fn)
{
	struct rmi_function_handler *fh;
	int retval = 0;

	if (!fn || !fn->dev.driver)
		return 0;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->suspend) {
		retval = fh->suspend(fn);
		if (retval < 0)
			dev_err(&fn->dev, "Suspend failed with code %d.\n",
				retval);
	}

	return retval;
}

static int rmi_suspend_functions(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = suspend_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}

static int resume_one_function(struct rmi_function *fn)
{
	struct rmi_function_handler *fh;
	int retval = 0;

	if (!fn || !fn->dev.driver)
		return 0;

	fh = to_rmi_function_handler(fn->dev.driver);
	if (fh->resume) {
		retval = fh->resume(fn);
		if (retval < 0)
			dev_err(&fn->dev, "Resume failed with code %d.\n",
				retval);
	}

	return retval;
}

static int rmi_resume_functions(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi_function *entry;
	int retval;

	list_for_each_entry(entry, &data->function_list, node) {
		retval = resume_one_function(entry);
		if (retval < 0)
			return retval;
	}

	return 0;
}

int rmi_enable_sensor(struct rmi_device *rmi_dev)
{
	int retval = 0;

	retval = rmi_driver_process_config_requests(rmi_dev);
	if (retval < 0)
		return retval;

	return rmi_process_interrupt_requests(rmi_dev);
}
/**
 * rmi_driver_set_input_params - set input device id and other data.
 *
 * @rmi_dev: Pointer to an RMI device
 * @input: Pointer to input device
 *
 */
static int rmi_driver_set_input_params(struct rmi_device *rmi_dev,
				       struct input_dev *input)
{
	input->name = SYNAPTICS_INPUT_DEVICE_NAME;
	input->id.vendor = SYNAPTICS_VENDOR_ID;
	input->id.bustype = BUS_RMI;
	return 0;
}

static void rmi_driver_set_input_name(struct rmi_device *rmi_dev,
				      struct input_dev *input)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	const char *device_name = rmi_f01_get_product_ID(data->f01_container);
	char *name;

	name = devm_kasprintf(&rmi_dev->dev, GFP_KERNEL,
			      "Synaptics %s", device_name);
	if (!name)
		return;

	input->name = name;
}

static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
				   unsigned long *mask)
{
	int error = 0;
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;

	mutex_lock(&data->irq_mutex);
	bitmap_or(data->new_irq_mask,
		  data->current_irq_mask, mask, data->irq_count);

	error = rmi_write_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->new_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(dev, "%s: Failed to change enabled interrupts!",
			__func__);
		goto error_unlock;
	}
	bitmap_copy(data->current_irq_mask, data->new_irq_mask,
		    data->num_of_irq_regs);

error_unlock:
	mutex_unlock(&data->irq_mutex);
	return error;
}

static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
				     unsigned long *mask)
{
	int error = 0;
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct device *dev = &rmi_dev->dev;

	mutex_lock(&data->irq_mutex);
	bitmap_andnot(data->new_irq_mask,
		      data->current_irq_mask, mask, data->irq_count);

	error = rmi_write_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->new_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(dev, "%s: Failed to change enabled interrupts!",
			__func__);
		goto error_unlock;
	}
	bitmap_copy(data->current_irq_mask, data->new_irq_mask,
		    data->num_of_irq_regs);

error_unlock:
	mutex_unlock(&data->irq_mutex);
	return error;
}
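/*
 * Called when the device reports an unexpected reset: re-read the
 * interrupt enable mask from F01, then give every function handler a
 * chance to run its reset() and config() callbacks.
 */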
static int rmi_driver_reset_handler(struct rmi_device *rmi_dev)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int error;

	/*
	 * Can get called before the driver is fully ready to deal with
	 * this situation.
	 */
	if (!data || !data->f01_container) {
		dev_warn(&rmi_dev->dev,
			 "Not ready to handle reset yet!\n");
		return 0;
	}

	error = rmi_read_block(rmi_dev,
			data->f01_container->fd.control_base_addr + 1,
			data->current_irq_mask, data->num_of_irq_regs);
	if (error < 0) {
		dev_err(&rmi_dev->dev, "%s: Failed to read current IRQ mask.\n",
			__func__);
		return error;
	}

	error = rmi_driver_process_reset_requests(rmi_dev);
	if (error < 0)
		return error;

	error = rmi_driver_process_config_requests(rmi_dev);
	if (error < 0)
		return error;

	return 0;
}

static int rmi_read_pdt_entry(struct rmi_device *rmi_dev,
			      struct pdt_entry *entry, u16 pdt_address)
{
	u8 buf[RMI_PDT_ENTRY_SIZE];
	int error;

	error = rmi_read_block(rmi_dev, pdt_address, buf, RMI_PDT_ENTRY_SIZE);
	if (error) {
		dev_err(&rmi_dev->dev, "Read PDT entry at %#06x failed, code: %d.\n",
			pdt_address, error);
		return error;
	}

	entry->page_start = pdt_address & RMI4_PAGE_MASK;
	entry->query_base_addr = buf[0];
	entry->command_base_addr = buf[1];
	entry->control_base_addr = buf[2];
	entry->data_base_addr = buf[3];
	entry->interrupt_source_count = buf[4] & RMI_PDT_INT_SOURCE_COUNT_MASK;
	entry->function_version = (buf[4] & RMI_PDT_FUNCTION_VERSION_MASK) >> 5;
	entry->function_number = buf[5];

	return 0;
}

static void rmi_driver_copy_pdt_to_fd(const struct pdt_entry *pdt,
				      struct rmi_function_descriptor *fd)
{
	fd->query_base_addr = pdt->query_base_addr + pdt->page_start;
	fd->command_base_addr = pdt->command_base_addr + pdt->page_start;
	fd->control_base_addr = pdt->control_base_addr + pdt->page_start;
	fd->data_base_addr = pdt->data_base_addr + pdt->page_start;
	fd->function_number = pdt->function_number;
	fd->interrupt_source_count = pdt->interrupt_source_count;
	fd->function_version = pdt->function_version;
}

#define RMI_SCAN_CONTINUE	0
#define RMI_SCAN_DONE		1

static int rmi_scan_pdt_page(struct rmi_device *rmi_dev,
			     int page,
			     int *empty_pages,
			     void *ctx,
			     int (*callback)(struct rmi_device *rmi_dev,
					     void *ctx,
					     const struct pdt_entry *entry))
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct pdt_entry pdt_entry;
	u16 page_start = RMI4_PAGE_SIZE * page;
	u16 pdt_start = page_start + PDT_START_SCAN_LOCATION;
	u16 pdt_end = page_start + PDT_END_SCAN_LOCATION;
	u16 addr;
	int error;
	int retval;

	for (addr = pdt_start; addr >= pdt_end; addr -= RMI_PDT_ENTRY_SIZE) {
		error = rmi_read_pdt_entry(rmi_dev, &pdt_entry, addr);
		if (error)
			return error;

		if (RMI4_END_OF_PDT(pdt_entry.function_number))
			break;

		retval = callback(rmi_dev, ctx, &pdt_entry);
		if (retval != RMI_SCAN_CONTINUE)
			return retval;
	}

	/*
	 * Count number of empty PDT pages. If a gap of two pages
	 * or more is found, stop scanning.
	 */
	if (addr == pdt_start)
		++*empty_pages;
	else
		*empty_pages = 0;

	return (data->bootloader_mode || *empty_pages >= 2) ?
			RMI_SCAN_DONE : RMI_SCAN_CONTINUE;
}
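/*
 * Walk the Page Description Table page by page, invoking the callback
 * for every function descriptor found. Scanning stops when the
 * callback returns anything other than RMI_SCAN_CONTINUE, after two
 * consecutive empty pages, or after the first page if the device is in
 * bootloader mode.
 */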
int rmi_scan_pdt(struct rmi_device *rmi_dev, void *ctx,
		 int (*callback)(struct rmi_device *rmi_dev,
				 void *ctx, const struct pdt_entry *entry))
{
	int page;
	int empty_pages = 0;
	int retval = RMI_SCAN_DONE;

	for (page = 0; page <= RMI4_MAX_PAGE; page++) {
		retval = rmi_scan_pdt_page(rmi_dev, page, &empty_pages,
					   ctx, callback);
		if (retval != RMI_SCAN_CONTINUE)
			break;
	}

	return retval < 0 ? retval : 0;
}

int rmi_read_register_desc(struct rmi_device *d, u16 addr,
			   struct rmi_register_descriptor *rdesc)
{
	int ret;
	u8 size_presence_reg;
	u8 buf[35];
	int presense_offset = 1;
	u8 *struct_buf;
	int reg;
	int offset = 0;
	int map_offset = 0;
	int i;
	int b;

	/*
	 * The first register of the register descriptor is the size of
	 * the register descriptor's presence register.
	 */
	ret = rmi_read(d, addr, &size_presence_reg);
	if (ret)
		return ret;
	++addr;

	if (size_presence_reg < 0 || size_presence_reg > 35)
		return -EIO;

	memset(buf, 0, sizeof(buf));

	/*
	 * The presence register contains the size of the register structure
	 * and a bitmap which identifies which packet registers are present
	 * for this particular register type (i.e. query, control, or data).
	 */
	ret = rmi_read_block(d, addr, buf, size_presence_reg);
	if (ret)
		return ret;
	++addr;

	if (buf[0] == 0) {
		presense_offset = 3;
		rdesc->struct_size = buf[1] | (buf[2] << 8);
	} else {
		rdesc->struct_size = buf[0];
	}

	for (i = presense_offset; i < size_presence_reg; i++) {
		for (b = 0; b < 8; b++) {
			if (buf[i] & (0x1 << b))
				bitmap_set(rdesc->presense_map, map_offset, 1);
			++map_offset;
		}
	}

	rdesc->num_registers = bitmap_weight(rdesc->presense_map,
					     RMI_REG_DESC_PRESENSE_BITS);

	rdesc->registers = devm_kcalloc(&d->dev,
					rdesc->num_registers,
					sizeof(struct rmi_register_desc_item),
					GFP_KERNEL);
	if (!rdesc->registers)
		return -ENOMEM;

	/*
	 * Allocate a temporary buffer to hold the register structure.
	 * devm_kzalloc is not used here since the buffer does not need to
	 * be retained after this function exits.
	 */
	struct_buf = kzalloc(rdesc->struct_size, GFP_KERNEL);
	if (!struct_buf)
		return -ENOMEM;

	/*
	 * The register structure contains information about every packet
	 * register of this type. This includes the size of the packet
	 * register and a bitmap of all subpackets contained in the packet
	 * register.
	 */
	ret = rmi_read_block(d, addr, struct_buf, rdesc->struct_size);
	if (ret)
		goto free_struct_buff;

	reg = find_first_bit(rdesc->presense_map, RMI_REG_DESC_PRESENSE_BITS);
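	/*
	 * Each packet register is encoded as its size followed by its
	 * subpacket presence bits. A size byte of 0 escapes to a 16-bit
	 * size in the next two bytes, and 0 again escapes to a 32-bit size
	 * in the following four. Subpacket presence is packed 7 bits per
	 * byte, with bit 7 acting as a continuation flag.
	 */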
	for (i = 0; i < rdesc->num_registers; i++) {
		struct rmi_register_desc_item *item = &rdesc->registers[i];
		int reg_size = struct_buf[offset];

		++offset;
		if (reg_size == 0) {
			reg_size = struct_buf[offset] |
					(struct_buf[offset + 1] << 8);
			offset += 2;
		}

		if (reg_size == 0) {
			reg_size = struct_buf[offset] |
					(struct_buf[offset + 1] << 8) |
					(struct_buf[offset + 2] << 16) |
					(struct_buf[offset + 3] << 24);
			offset += 4;
		}

		item->reg = reg;
		item->reg_size = reg_size;

		map_offset = 0;

		do {
			for (b = 0; b < 7; b++) {
				if (struct_buf[offset] & (0x1 << b))
					bitmap_set(item->subpacket_map,
						   map_offset, 1);
				++map_offset;
			}
		} while (struct_buf[offset++] & 0x80);

		item->num_subpackets = bitmap_weight(item->subpacket_map,
						RMI_REG_DESC_SUBPACKET_BITS);

		rmi_dbg(RMI_DEBUG_CORE, &d->dev,
			"%s: reg: %d reg size: %ld subpackets: %d\n", __func__,
			item->reg, item->reg_size, item->num_subpackets);

		reg = find_next_bit(rdesc->presense_map,
				    RMI_REG_DESC_PRESENSE_BITS, reg + 1);
	}

free_struct_buff:
	kfree(struct_buf);
	return ret;
}

const struct rmi_register_desc_item *rmi_get_register_desc_item(
		struct rmi_register_descriptor *rdesc, u16 reg)
{
	const struct rmi_register_desc_item *item;
	int i;

	for (i = 0; i < rdesc->num_registers; i++) {
		item = &rdesc->registers[i];
		if (item->reg == reg)
			return item;
	}

	return NULL;
}

size_t rmi_register_desc_calc_size(struct rmi_register_descriptor *rdesc)
{
	const struct rmi_register_desc_item *item;
	int i;
	size_t size = 0;

	for (i = 0; i < rdesc->num_registers; i++) {
		item = &rdesc->registers[i];
		size += item->reg_size;
	}
	return size;
}

/* Compute the register offset relative to the base address */
int rmi_register_desc_calc_reg_offset(
		struct rmi_register_descriptor *rdesc, u16 reg)
{
	const struct rmi_register_desc_item *item;
	int offset = 0;
	int i;

	for (i = 0; i < rdesc->num_registers; i++) {
		item = &rdesc->registers[i];
		if (item->reg == reg)
			return offset;
		++offset;
	}
	return -1;
}

bool rmi_register_desc_has_subpacket(const struct rmi_register_desc_item *item,
				     u8 subpacket)
{
	return find_next_bit(item->subpacket_map, RMI_REG_DESC_PRESENSE_BITS,
			     subpacket) == subpacket;
}
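/*
 * Note whether the device is stuck in its bootloader: on F34 v2 and
 * later, bit 7 of the first F34 data register flags bootloader mode,
 * while older devices report it via bit 6 of the F01 device status
 * register.
 */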
static int rmi_check_bootloader_mode(struct rmi_device *rmi_dev,
				     const struct pdt_entry *pdt)
{
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int ret;
	u8 status;

	if (pdt->function_number == 0x34 && pdt->function_version > 1) {
		ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
		if (ret) {
			dev_err(&rmi_dev->dev,
				"Failed to read F34 status: %d.\n", ret);
			return ret;
		}

		if (status & BIT(7))
			data->bootloader_mode = true;
	} else if (pdt->function_number == 0x01) {
		ret = rmi_read(rmi_dev, pdt->data_base_addr, &status);
		if (ret) {
			dev_err(&rmi_dev->dev,
				"Failed to read F01 status: %d.\n", ret);
			return ret;
		}

		if (status & BIT(6))
			data->bootloader_mode = true;
	}

	return 0;
}

static int rmi_count_irqs(struct rmi_device *rmi_dev,
			  void *ctx, const struct pdt_entry *pdt)
{
	int *irq_count = ctx;
	int ret;

	*irq_count += pdt->interrupt_source_count;

	ret = rmi_check_bootloader_mode(rmi_dev, pdt);
	if (ret < 0)
		return ret;

	return RMI_SCAN_CONTINUE;
}

int rmi_initial_reset(struct rmi_device *rmi_dev, void *ctx,
		      const struct pdt_entry *pdt)
{
	int error;

	if (pdt->function_number == 0x01) {
		u16 cmd_addr = pdt->page_start + pdt->command_base_addr;
		u8 cmd_buf = RMI_DEVICE_RESET_CMD;
		const struct rmi_device_platform_data *pdata =
				rmi_get_platform_data(rmi_dev);

		if (rmi_dev->xport->ops->reset) {
			error = rmi_dev->xport->ops->reset(rmi_dev->xport,
							   cmd_addr);
			if (error)
				return error;

			return RMI_SCAN_DONE;
		}

		rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Sending reset\n");
		error = rmi_write_block(rmi_dev, cmd_addr, &cmd_buf, 1);
		if (error) {
			dev_err(&rmi_dev->dev,
				"Initial reset failed. Code = %d.\n", error);
			return error;
		}

		mdelay(pdata->reset_delay_ms ?: DEFAULT_RESET_DELAY_MS);

		return RMI_SCAN_DONE;
	}

	/* F01 should always be on page 0. If we don't find it there, fail. */
	return pdt->page_start == 0 ? RMI_SCAN_CONTINUE : -ENODEV;
}

static int rmi_create_function(struct rmi_device *rmi_dev,
			       void *ctx, const struct pdt_entry *pdt)
{
	struct device *dev = &rmi_dev->dev;
	struct rmi_driver_data *data = dev_get_drvdata(dev);
	int *current_irq_count = ctx;
	struct rmi_function *fn;
	int i;
	int error;

	rmi_dbg(RMI_DEBUG_CORE, dev, "Initializing F%02X.\n",
		pdt->function_number);

	fn = kzalloc(sizeof(struct rmi_function) +
			BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long),
		     GFP_KERNEL);
	if (!fn) {
		dev_err(dev, "Failed to allocate memory for F%02X\n",
			pdt->function_number);
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&fn->node);
	rmi_driver_copy_pdt_to_fd(pdt, &fn->fd);

	fn->rmi_dev = rmi_dev;

	fn->num_of_irqs = pdt->interrupt_source_count;
	fn->irq_pos = *current_irq_count;
	*current_irq_count += fn->num_of_irqs;

	for (i = 0; i < fn->num_of_irqs; i++)
		set_bit(fn->irq_pos + i, fn->irq_mask);

	error = rmi_register_function(fn);
	if (error)
		goto err_put_fn;

	if (pdt->function_number == 0x01)
		data->f01_container = fn;
	else if (pdt->function_number == 0x34)
		data->f34_container = fn;

	list_add_tail(&fn->node, &data->function_list);

	return RMI_SCAN_CONTINUE;

err_put_fn:
	put_device(&fn->dev);
	return error;
}
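/*
 * rmi_enable_irq()/rmi_disable_irq() bracket suspend and resume: they
 * track state in data->enabled under enabled_mutex, optionally arm or
 * disarm the interrupt as a wake source, and on disable discard any
 * attention frames still queued in the fifo.
 */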
void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	int irq = pdata->irq;
	int irq_flags;
	int retval;

	mutex_lock(&data->enabled_mutex);

	if (data->enabled)
		goto out;

	enable_irq(irq);
	data->enabled = true;
	if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
		retval = disable_irq_wake(irq);
		if (retval)
			dev_warn(&rmi_dev->dev,
				 "Failed to disable irq for wake: %d\n",
				 retval);
	}

	/*
	 * Call rmi_process_interrupt_requests() after enabling irq,
	 * otherwise we may lose an interrupt on edge-triggered systems.
	 */
	irq_flags = irq_get_trigger_type(pdata->irq);
	if (irq_flags & IRQ_TYPE_EDGE_BOTH)
		rmi_process_interrupt_requests(rmi_dev);

out:
	mutex_unlock(&data->enabled_mutex);
}

void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
{
	struct rmi_device_platform_data *pdata = rmi_get_platform_data(rmi_dev);
	struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
	struct rmi4_attn_data attn_data = {0};
	int irq = pdata->irq;
	int retval, count;

	mutex_lock(&data->enabled_mutex);

	if (!data->enabled)
		goto out;

	data->enabled = false;
	disable_irq(irq);
	if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
		retval = enable_irq_wake(irq);
		if (retval)
			dev_warn(&rmi_dev->dev,
				 "Failed to enable irq for wake: %d\n",
				 retval);
	}

	/* make sure the fifo is clean */
	while (!kfifo_is_empty(&data->attn_fifo)) {
		count = kfifo_get(&data->attn_fifo, &attn_data);
		if (count)
			kfree(attn_data.data);
	}

out:
	mutex_unlock(&data->enabled_mutex);
}

int rmi_driver_suspend(struct rmi_device *rmi_dev, bool enable_wake)
{
	int retval;

	retval = rmi_suspend_functions(rmi_dev);
	if (retval)
		dev_warn(&rmi_dev->dev, "Failed to suspend functions: %d\n",
			 retval);

	rmi_disable_irq(rmi_dev, enable_wake);
	return retval;
}
EXPORT_SYMBOL_GPL(rmi_driver_suspend);

int rmi_driver_resume(struct rmi_device *rmi_dev, bool clear_wake)
{
	int retval;

	rmi_enable_irq(rmi_dev, clear_wake);

	retval = rmi_resume_functions(rmi_dev);
	if (retval)
		dev_warn(&rmi_dev->dev, "Failed to resume functions: %d\n",
			 retval);

	return retval;
}
EXPORT_SYMBOL_GPL(rmi_driver_resume);

static int rmi_driver_remove(struct device *dev)
{
	struct rmi_device *rmi_dev = to_rmi_device(dev);

	rmi_disable_irq(rmi_dev, false);

	rmi_f34_remove_sysfs(rmi_dev);
	rmi_free_function_list(rmi_dev);

	return 0;
}

#ifdef CONFIG_OF
static int rmi_driver_of_probe(struct device *dev,
			       struct rmi_device_platform_data *pdata)
{
	int retval;

	retval = rmi_of_property_read_u32(dev, &pdata->reset_delay_ms,
					  "syna,reset-delay-ms", 1);
	if (retval)
		return retval;

	return 0;
}
#else
static inline int rmi_driver_of_probe(struct device *dev,
				      struct rmi_device_platform_data *pdata)
{
	return -ENODEV;
}
#endif

int rmi_probe_interrupts(struct rmi_driver_data *data)
{
	struct rmi_device *rmi_dev = data->rmi_dev;
	struct device *dev = &rmi_dev->dev;
	int irq_count;
	size_t size;
	int retval;

	/*
	 * We need to count the IRQs and allocate their storage before scanning
	 * the PDT and creating the function entries, because adding a new
	 * function can trigger events that result in the IRQ related storage
	 * being accessed.
	 */
	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
	irq_count = 0;
	data->bootloader_mode = false;

	retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
	if (retval < 0) {
		dev_err(dev, "IRQ counting failed with code %d.\n", retval);
		return retval;
	}

	if (data->bootloader_mode)
		dev_warn(dev, "Device in bootloader mode.\n");

	data->irq_count = irq_count;
	data->num_of_irq_regs = (data->irq_count + 7) / 8;
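	/*
	 * A single allocation holds four equally sized bitmaps: the
	 * latched interrupt status, per-function scratch bits, and the
	 * current and pending interrupt enable masks.
	 */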
	size = BITS_TO_LONGS(data->irq_count) * sizeof(unsigned long);
	data->irq_memory = devm_kcalloc(dev, size, 4, GFP_KERNEL);
	if (!data->irq_memory) {
		dev_err(dev, "Failed to allocate memory for irq masks.\n");
		return -ENOMEM;
	}

	data->irq_status = data->irq_memory + size * 0;
	data->fn_irq_bits = data->irq_memory + size * 1;
	data->current_irq_mask = data->irq_memory + size * 2;
	data->new_irq_mask = data->irq_memory + size * 3;

	return retval;
}

int rmi_init_functions(struct rmi_driver_data *data)
{
	struct rmi_device *rmi_dev = data->rmi_dev;
	struct device *dev = &rmi_dev->dev;
	int irq_count;
	int retval;

	irq_count = 0;
	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
	retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
	if (retval < 0) {
		dev_err(dev, "Function creation failed with code %d.\n",
			retval);
		goto err_destroy_functions;
	}

	if (!data->f01_container) {
		dev_err(dev, "Missing F01 container!\n");
		retval = -EINVAL;
		goto err_destroy_functions;
	}

	retval = rmi_read_block(rmi_dev,
				data->f01_container->fd.control_base_addr + 1,
				data->current_irq_mask, data->num_of_irq_regs);
	if (retval < 0) {
		dev_err(dev, "%s: Failed to read current IRQ mask.\n",
			__func__);
		goto err_destroy_functions;
	}

	return 0;

err_destroy_functions:
	rmi_free_function_list(rmi_dev);
	return retval;
}
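/*
 * Probe sequence: issue an initial reset, read the PDT properties,
 * count interrupt sources and allocate the IRQ bitmaps, set up (or
 * reuse the transport's) input device, create the function devices,
 * request the IRQ, and finally enable the sensor once F01 is bound.
 */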
static int rmi_driver_probe(struct device *dev)
{
	struct rmi_driver *rmi_driver;
	struct rmi_driver_data *data;
	struct rmi_device_platform_data *pdata;
	struct rmi_device *rmi_dev;
	int retval;

	rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Starting probe.\n",
		__func__);

	if (!rmi_is_physical_device(dev)) {
		rmi_dbg(RMI_DEBUG_CORE, dev, "Not a physical device.\n");
		return -ENODEV;
	}

	rmi_dev = to_rmi_device(dev);
	rmi_driver = to_rmi_driver(dev->driver);
	rmi_dev->driver = rmi_driver;

	pdata = rmi_get_platform_data(rmi_dev);

	if (rmi_dev->xport->dev->of_node) {
		retval = rmi_driver_of_probe(rmi_dev->xport->dev, pdata);
		if (retval)
			return retval;
	}

	data = devm_kzalloc(dev, sizeof(struct rmi_driver_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	INIT_LIST_HEAD(&data->function_list);
	data->rmi_dev = rmi_dev;
	dev_set_drvdata(&rmi_dev->dev, data);

	/*
	 * Right before a warm boot, the sensor might be in some unusual state,
	 * such as F54 diagnostics, or F34 bootloader mode after a firmware
	 * or configuration update. In order to clear the sensor to a known
	 * state and/or apply any updates, we issue an initial reset to clear
	 * any previous settings and force it into normal operation.
	 *
	 * We have to do this before actually building the PDT because
	 * the reflash updates (if any) might cause various registers to move
	 * around.
	 *
	 * For a number of reasons, this initial reset may fail to return
	 * within the specified time, but we'll still be able to bring up the
	 * driver normally after that failure. This occurs most commonly in
	 * a cold boot situation (where the firmware takes longer to come up
	 * than from a warm boot) and the reset_delay_ms in the platform data
	 * has been set too short to accommodate that. Since the sensor will
	 * eventually come up and be usable, we don't want to just fail here
	 * and leave the customer's device unusable. So we warn them, and
	 * continue processing.
	 */
	retval = rmi_scan_pdt(rmi_dev, NULL, rmi_initial_reset);
	if (retval < 0)
		dev_warn(dev, "RMI initial reset failed! Continuing in spite of this.\n");

	retval = rmi_read(rmi_dev, PDT_PROPERTIES_LOCATION, &data->pdt_props);
	if (retval < 0) {
		/*
		 * We'll print out a warning and continue, since failure to
		 * get the PDT properties is not a cause to fail.
		 */
		dev_warn(dev, "Could not read PDT properties from %#06x (code %d). Assuming 0x00.\n",
			 PDT_PROPERTIES_LOCATION, retval);
	}

	mutex_init(&data->irq_mutex);
	mutex_init(&data->enabled_mutex);

	retval = rmi_probe_interrupts(data);
	if (retval)
		goto err;

	if (rmi_dev->xport->input) {
		/*
		 * The transport driver already has an input device.
		 * In some cases it is preferable to reuse the transport
		 * device's input device instead of creating a new one here.
		 * One example is HID touchpads whose "pass-through" button
		 * events are not reported through RMI registers.
		 */
		data->input = rmi_dev->xport->input;
	} else {
		data->input = devm_input_allocate_device(dev);
		if (!data->input) {
			dev_err(dev, "%s: Failed to allocate input device.\n",
				__func__);
			retval = -ENOMEM;
			goto err;
		}
		rmi_driver_set_input_params(rmi_dev, data->input);
		data->input->phys = devm_kasprintf(dev, GFP_KERNEL,
						   "%s/input0", dev_name(dev));
	}

	retval = rmi_init_functions(data);
	if (retval)
		goto err;

	retval = rmi_f34_create_sysfs(rmi_dev);
	if (retval)
		goto err;

	if (data->input) {
		rmi_driver_set_input_name(rmi_dev, data->input);
		if (!rmi_dev->xport->input) {
			if (input_register_device(data->input)) {
				dev_err(dev, "%s: Failed to register input device.\n",
					__func__);
				goto err_destroy_functions;
			}
		}
	}

	retval = rmi_irq_init(rmi_dev);
	if (retval < 0)
		goto err_destroy_functions;

	if (data->f01_container->dev.driver) {
		/* Driver already bound, so enable ATTN now. */
		retval = rmi_enable_sensor(rmi_dev);
		if (retval)
			goto err_disable_irq;
	}

	return 0;

err_disable_irq:
	rmi_disable_irq(rmi_dev, false);
err_destroy_functions:
	rmi_free_function_list(rmi_dev);
err:
	return retval;
}

static struct rmi_driver rmi_physical_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "rmi4_physical",
		.bus	= &rmi_bus_type,
		.probe	= rmi_driver_probe,
		.remove	= rmi_driver_remove,
	},
	.reset_handler = rmi_driver_reset_handler,
	.clear_irq_bits = rmi_driver_clear_irq_bits,
	.set_irq_bits = rmi_driver_set_irq_bits,
	.set_input_params = rmi_driver_set_input_params,
};

bool rmi_is_physical_driver(struct device_driver *drv)
{
	return drv == &rmi_physical_driver.driver;
}

int __init rmi_register_physical_driver(void)
{
	int error;

	error = driver_register(&rmi_physical_driver.driver);
	if (error) {
		pr_err("%s: driver register failed, code=%d.\n", __func__,
		       error);
		return error;
	}

	return 0;
}

void __exit rmi_unregister_physical_driver(void)
{
	driver_unregister(&rmi_physical_driver.driver);
}