/*
 * Device probing and sysfs code.
 *
 * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bug.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>
#include <asm/system.h>

#include "core.h"

void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p)
{
	ci->p = p + 1;
	ci->end = ci->p + (p[0] >> 16);
}
EXPORT_SYMBOL(fw_csr_iterator_init);

int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
{
	*key = *ci->p >> 24;
	*value = *ci->p & 0xffffff;

	return ci->p++ < ci->end;
}
EXPORT_SYMBOL(fw_csr_iterator_next);

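/*
 * Search a directory for the textual descriptor leaf that immediately
 * follows the entry with the given key.  Returns a pointer to the leaf,
 * or NULL if the directory contains no such descriptor.
 */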
static const u32 *search_leaf(const u32 *directory, int search_key)
{
	struct fw_csr_iterator ci;
	int last_key = 0, key, value;

	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		if (last_key == search_key &&
		    key == (CSR_DESCRIPTOR | CSR_LEAF))
			return ci.p - 1 + value;

		last_key = key;
	}

	return NULL;
}

static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
{
	unsigned int quadlets, i;
	char c;

	if (!size || !buf)
		return -EINVAL;

	quadlets = min(block[0] >> 16, 256U);
	if (quadlets < 2)
		return -ENODATA;

	if (block[1] != 0 || block[2] != 0)
		/* unknown language/character set */
		return -ENODATA;

	block += 3;
	quadlets -= 2;
	for (i = 0; i < quadlets * 4 && i < size - 1; i++) {
		c = block[i / 4] >> (24 - 8 * (i % 4));
		if (c == '\0')
			break;
		buf[i] = c;
	}
	buf[i] = '\0';

	return i;
}

/**
 * fw_csr_string() - reads a string from the configuration ROM
 * @directory:	e.g. root directory or unit directory
 * @key:	the key of the preceding directory entry
 * @buf:	where to put the string
 * @size:	size of @buf, in bytes
 *
 * The string is taken from a minimal ASCII text descriptor leaf after
 * the immediate entry with @key.  The string is zero-terminated.
 * Returns strlen(buf) or a negative error code.
 */
int fw_csr_string(const u32 *directory, int key, char *buf, size_t size)
{
	const u32 *leaf = search_leaf(directory, key);
	if (!leaf)
		return -ENOENT;

	return textual_leaf_to_string(leaf, buf, size);
}
EXPORT_SYMBOL(fw_csr_string);

static void get_ids(const u32 *directory, int *id)
{
	struct fw_csr_iterator ci;
	int key, value;

	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		switch (key) {
		case CSR_VENDOR:	id[0] = value; break;
		case CSR_MODEL:		id[1] = value; break;
		case CSR_SPECIFIER_ID:	id[2] = value; break;
		case CSR_VERSION:	id[3] = value; break;
		}
	}
}

static void get_modalias_ids(struct fw_unit *unit, int *id)
{
	get_ids(&fw_parent_device(unit)->config_rom[5], id);
	get_ids(unit->directory, id);
}

static bool match_ids(const struct ieee1394_device_id *id_table, int *id)
{
	int match = 0;

	if (id[0] == id_table->vendor_id)
		match |= IEEE1394_MATCH_VENDOR_ID;
	if (id[1] == id_table->model_id)
		match |= IEEE1394_MATCH_MODEL_ID;
	if (id[2] == id_table->specifier_id)
		match |= IEEE1394_MATCH_SPECIFIER_ID;
	if (id[3] == id_table->version)
		match |= IEEE1394_MATCH_VERSION;

	return (match & id_table->match_flags) == id_table->match_flags;
}

static bool is_fw_unit(struct device *dev);

static int fw_unit_match(struct device *dev, struct device_driver *drv)
{
	const struct ieee1394_device_id *id_table =
			container_of(drv, struct fw_driver, driver)->id_table;
	int id[] = {0, 0, 0, 0};

	/* We only allow binding to fw_units. */
	if (!is_fw_unit(dev))
		return 0;

	get_modalias_ids(fw_unit(dev), id);

	for (; id_table->match_flags != 0; id_table++)
		if (match_ids(id_table, id))
			return 1;

	return 0;
}

static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
{
	int id[] = {0, 0, 0, 0};

	get_modalias_ids(unit, id);

	return snprintf(buffer, buffer_size,
			"ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
			id[0], id[1], id[2], id[3]);
}

static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct fw_unit *unit = fw_unit(dev);
	char modalias[64];

	get_modalias(unit, modalias, sizeof(modalias));

	if (add_uevent_var(env, "MODALIAS=%s", modalias))
		return -ENOMEM;

	return 0;
}

struct bus_type fw_bus_type = {
	.name = "firewire",
	.match = fw_unit_match,
};
EXPORT_SYMBOL(fw_bus_type);

int fw_device_enable_phys_dma(struct fw_device *device)
{
	int generation = device->generation;

	/* device->node_id, accessed below, must not be older than generation */
	smp_rmb();

	return device->card->driver->enable_phys_dma(device->card,
						     device->node_id,
						     generation);
}
EXPORT_SYMBOL(fw_device_enable_phys_dma);

struct config_rom_attribute {
	struct device_attribute attr;
	u32 key;
};

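/*
 * Show one immediate config ROM entry, identified by attr->key, from either
 * the unit directory (for fw_unit devices) or the root directory (for
 * fw_device devices).
 */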
static ssize_t show_immediate(struct device *dev,
			      struct device_attribute *dattr, char *buf)
{
	struct config_rom_attribute *attr =
		container_of(dattr, struct config_rom_attribute, attr);
	struct fw_csr_iterator ci;
	const u32 *dir;
	int key, value, ret = -ENOENT;

	down_read(&fw_device_rwsem);

	if (is_fw_unit(dev))
		dir = fw_unit(dev)->directory;
	else
		dir = fw_device(dev)->config_rom + 5;

	fw_csr_iterator_init(&ci, dir);
	while (fw_csr_iterator_next(&ci, &key, &value))
		if (attr->key == key) {
			ret = snprintf(buf, buf ? PAGE_SIZE : 0,
				       "0x%06x\n", value);
			break;
		}

	up_read(&fw_device_rwsem);

	return ret;
}

#define IMMEDIATE_ATTR(name, key)		\
	{ __ATTR(name, S_IRUGO, show_immediate, NULL), key }

static ssize_t show_text_leaf(struct device *dev,
			      struct device_attribute *dattr, char *buf)
{
	struct config_rom_attribute *attr =
		container_of(dattr, struct config_rom_attribute, attr);
	const u32 *dir;
	size_t bufsize;
	char dummy_buf[2];
	int ret;

	down_read(&fw_device_rwsem);

	if (is_fw_unit(dev))
		dir = fw_unit(dev)->directory;
	else
		dir = fw_device(dev)->config_rom + 5;

	if (buf) {
		bufsize = PAGE_SIZE - 1;
	} else {
		buf = dummy_buf;
		bufsize = 1;
	}

	ret = fw_csr_string(dir, attr->key, buf, bufsize);

	if (ret >= 0) {
		/* Strip trailing whitespace and add newline. */
		while (ret > 0 && isspace(buf[ret - 1]))
			ret--;
		strcpy(buf + ret, "\n");
		ret++;
	}

	up_read(&fw_device_rwsem);

	return ret;
}

#define TEXT_LEAF_ATTR(name, key)		\
	{ __ATTR(name, S_IRUGO, show_text_leaf, NULL), key }

static struct config_rom_attribute config_rom_attributes[] = {
	IMMEDIATE_ATTR(vendor, CSR_VENDOR),
	IMMEDIATE_ATTR(hardware_version, CSR_HARDWARE_VERSION),
	IMMEDIATE_ATTR(specifier_id, CSR_SPECIFIER_ID),
	IMMEDIATE_ATTR(version, CSR_VERSION),
	IMMEDIATE_ATTR(model, CSR_MODEL),
	TEXT_LEAF_ATTR(vendor_name, CSR_VENDOR),
	TEXT_LEAF_ATTR(model_name, CSR_MODEL),
	TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
};

static void init_fw_attribute_group(struct device *dev,
				    struct device_attribute *attrs,
				    struct fw_attribute_group *group)
{
	struct device_attribute *attr;
	int i, j;

	for (j = 0; attrs[j].attr.name != NULL; j++)
		group->attrs[j] = &attrs[j].attr;

	for (i = 0; i < ARRAY_SIZE(config_rom_attributes); i++) {
		attr = &config_rom_attributes[i].attr;
		if (attr->show(dev, attr, NULL) < 0)
			continue;
		group->attrs[j++] = &attr->attr;
	}

	group->attrs[j] = NULL;
	group->groups[0] = &group->group;
	group->groups[1] = NULL;
	group->group.attrs = group->attrs;
	dev->groups = (const struct attribute_group **) group->groups;
}

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct fw_unit *unit = fw_unit(dev);
	int length;

	length = get_modalias(unit, buf, PAGE_SIZE);
	strcpy(buf + length, "\n");

	return length + 1;
}

static ssize_t rom_index_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct fw_device *device = fw_device(dev->parent);
	struct fw_unit *unit = fw_unit(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			(int)(unit->directory - device->config_rom));
}

static struct device_attribute fw_unit_attributes[] = {
	__ATTR_RO(modalias),
	__ATTR_RO(rom_index),
	__ATTR_NULL,
};

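/*
 * Dump the raw configuration ROM to userspace.  fw_device_rwsem is held so
 * that a concurrent config ROM refresh cannot replace the buffer while we
 * copy it.
 */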
static ssize_t config_rom_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct fw_device *device = fw_device(dev);
	size_t length;

	down_read(&fw_device_rwsem);
	length = device->config_rom_length * 4;
	memcpy(buf, device->config_rom, length);
	up_read(&fw_device_rwsem);

	return length;
}

static ssize_t guid_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct fw_device *device = fw_device(dev);
	int ret;

	down_read(&fw_device_rwsem);
	ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
		       device->config_rom[3], device->config_rom[4]);
	up_read(&fw_device_rwsem);

	return ret;
}

static int units_sprintf(char *buf, const u32 *directory)
{
	struct fw_csr_iterator ci;
	int key, value;
	int specifier_id = 0;
	int version = 0;

	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		switch (key) {
		case CSR_SPECIFIER_ID:
			specifier_id = value;
			break;
		case CSR_VERSION:
			version = value;
			break;
		}
	}

	return sprintf(buf, "0x%06x:0x%06x ", specifier_id, version);
}

static ssize_t units_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct fw_device *device = fw_device(dev);
	struct fw_csr_iterator ci;
	int key, value, i = 0;

	down_read(&fw_device_rwsem);
	fw_csr_iterator_init(&ci, &device->config_rom[5]);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		if (key != (CSR_UNIT | CSR_DIRECTORY))
			continue;
		i += units_sprintf(&buf[i], ci.p + value - 1);
		if (i >= PAGE_SIZE - (8 + 1 + 8 + 1))
			break;
	}
	up_read(&fw_device_rwsem);

	if (i)
		buf[i - 1] = '\n';

	return i;
}

static struct device_attribute fw_device_attributes[] = {
	__ATTR_RO(config_rom),
	__ATTR_RO(guid),
	__ATTR_RO(units),
	__ATTR_NULL,
};

static int read_rom(struct fw_device *device,
		    int generation, int index, u32 *data)
{
	int rcode;

	/* device->node_id, accessed below, must not be older than generation */
	smp_rmb();

	rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
			device->node_id, generation, device->max_speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4,
			data, 4);
	be32_to_cpus(data);

	return rcode;
}

#define MAX_CONFIG_ROM_SIZE 256

/*
 * Read the bus info block, perform a speed probe, and read all of the rest of
 * the config ROM.  We do all this with a cached bus generation.  If the bus
 * generation changes under us, read_config_rom will fail and get retried.
 * It's better to start all over in this case because the node from which we
 * are reading the ROM may have changed the ROM during the reset.
 */
static int read_config_rom(struct fw_device *device, int generation)
{
	const u32 *old_rom, *new_rom;
	u32 *rom, *stack;
	u32 sp, key;
	int i, end, length, ret = -1;

	rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE +
		      sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL);
	if (rom == NULL)
		return -ENOMEM;

	stack = &rom[MAX_CONFIG_ROM_SIZE];
	memset(rom, 0, sizeof(*rom) * MAX_CONFIG_ROM_SIZE);

	device->max_speed = SCODE_100;

	/* First read the bus info block. */
	for (i = 0; i < 5; i++) {
		if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
			goto out;
		/*
		 * As per IEEE1212 7.2, during power-up, devices can
		 * reply with a 0 for the first quadlet of the config
		 * rom to indicate that they are booting (for example,
		 * if the firmware is on the disk of an external
		 * harddisk).  In that case we just fail, and the
		 * retry mechanism will try again later.
		 */
		if (i == 0 && rom[i] == 0)
			goto out;
	}

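	/*
	 * Switch from the conservative S100 used for the bus info block
	 * reads to the node's reported maximum speed.
	 */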
	device->max_speed = device->node->max_speed;

	/*
	 * Determine the speed of
	 *   - devices with link speed less than PHY speed,
	 *   - devices with 1394b PHY (unless only connected to 1394a PHYs),
	 *   - all devices if there are 1394b repeaters.
	 * Note, we cannot use the bus info block's link_spd as starting point
	 * because some buggy firmwares set it lower than necessary and because
	 * 1394-1995 nodes do not have the field.
	 */
	if ((rom[2] & 0x7) < device->max_speed ||
	    device->max_speed == SCODE_BETA ||
	    device->card->beta_repeaters_present) {
		u32 dummy;

		/* for S1600 and S3200 */
		if (device->max_speed == SCODE_BETA)
			device->max_speed = device->card->link_speed;

		while (device->max_speed > SCODE_100) {
			if (read_rom(device, generation, 0, &dummy) ==
			    RCODE_COMPLETE)
				break;
			device->max_speed--;
		}
	}

	/*
	 * Now parse the config rom.  The config rom is a recursive
	 * directory structure so we parse it using a stack of
	 * references to the blocks that make up the structure.  We
	 * push a reference to the root directory on the stack to
	 * start things off.
	 */
	length = i;
	sp = 0;
	stack[sp++] = 0xc0000005;
	while (sp > 0) {
		/*
		 * Pop the next block reference off the stack.  The
		 * lower 24 bits is the offset into the config rom,
		 * the upper 8 bits are the type of the reference to
		 * the block.
		 */
		key = stack[--sp];
		i = key & 0xffffff;
		if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE))
			goto out;

		/* Read header quadlet for the block to get the length. */
		if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
			goto out;
		end = i + (rom[i] >> 16) + 1;
		if (end > MAX_CONFIG_ROM_SIZE) {
			/*
			 * This block extends outside the config ROM which is
			 * a firmware bug.  Ignore this whole block, i.e.
			 * simply set a fake block length of 0.
			 */
			fw_error("skipped invalid ROM block %x at %llx\n",
				 rom[i],
				 i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
			rom[i] = 0;
			end = i;
		}
		i++;

		/*
		 * Now read in the block.  If this is a directory
		 * block, check the entries as we read them to see if
		 * it references another block, and push it in that case.
		 */
		for (; i < end; i++) {
			if (read_rom(device, generation, i, &rom[i]) !=
			    RCODE_COMPLETE)
				goto out;

			if ((key >> 30) != 3 || (rom[i] >> 30) < 2)
				continue;
			/*
			 * Offset points outside the ROM.  May be a firmware
			 * bug or an Extended ROM entry (IEEE 1212-2001 clause
			 * 7.7.18).  Simply overwrite this pointer here by a
			 * fake immediate entry so that later iterators over
			 * the ROM don't have to check offsets all the time.
			 */
			if (i + (rom[i] & 0xffffff) >= MAX_CONFIG_ROM_SIZE) {
				fw_error("skipped unsupported ROM entry %x at %llx\n",
					 rom[i],
					 i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
				rom[i] = 0;
				continue;
			}
			stack[sp++] = i + rom[i];
		}
		if (length < i)
			length = i;
	}

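	/*
	 * Publish the new ROM image.  Readers take fw_device_rwsem, so they
	 * see either the complete old image or the complete new one.
	 */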
	old_rom = device->config_rom;
	new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
	if (new_rom == NULL)
		goto out;

	down_write(&fw_device_rwsem);
	device->config_rom = new_rom;
	device->config_rom_length = length;
	up_write(&fw_device_rwsem);

	kfree(old_rom);
	ret = 0;
	device->max_rec = rom[2] >> 12 & 0xf;
	device->cmc = rom[2] >> 30 & 1;
	device->irmc = rom[2] >> 31 & 1;
 out:
	kfree(rom);

	return ret;
}

static void fw_unit_release(struct device *dev)
{
	struct fw_unit *unit = fw_unit(dev);

	kfree(unit);
}

static struct device_type fw_unit_type = {
	.uevent		= fw_unit_uevent,
	.release	= fw_unit_release,
};

static bool is_fw_unit(struct device *dev)
{
	return dev->type == &fw_unit_type;
}

static void create_units(struct fw_device *device)
{
	struct fw_csr_iterator ci;
	struct fw_unit *unit;
	int key, value, i;

	i = 0;
	fw_csr_iterator_init(&ci, &device->config_rom[5]);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		if (key != (CSR_UNIT | CSR_DIRECTORY))
			continue;

		/*
		 * Get the address of the unit directory and try to
		 * match the drivers id_tables against it.
		 */
		unit = kzalloc(sizeof(*unit), GFP_KERNEL);
		if (unit == NULL) {
			fw_error("failed to allocate memory for unit\n");
			continue;
		}

		unit->directory = ci.p + value - 1;
		unit->device.bus = &fw_bus_type;
		unit->device.type = &fw_unit_type;
		unit->device.parent = &device->device;
		dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++);

		BUILD_BUG_ON(ARRAY_SIZE(unit->attribute_group.attrs) <
			     ARRAY_SIZE(fw_unit_attributes) +
			     ARRAY_SIZE(config_rom_attributes));
		init_fw_attribute_group(&unit->device,
					fw_unit_attributes,
					&unit->attribute_group);

		if (device_register(&unit->device) < 0)
			goto skip_unit;

		continue;

	skip_unit:
		kfree(unit);
	}
}

static int shutdown_unit(struct device *device, void *data)
{
	device_unregister(device);

	return 0;
}

/*
 * fw_device_rwsem acts as dual purpose mutex:
 *  - serializes accesses to fw_device_idr,
 *  - serializes accesses to fw_device.config_rom/.config_rom_length and
 *    fw_unit.directory, unless those accesses happen at safe occasions
 */
DECLARE_RWSEM(fw_device_rwsem);

DEFINE_IDR(fw_device_idr);
int fw_cdev_major;

struct fw_device *fw_device_get_by_devt(dev_t devt)
{
	struct fw_device *device;

	down_read(&fw_device_rwsem);
	device = idr_find(&fw_device_idr, MINOR(devt));
	if (device)
		fw_device_get(device);
	up_read(&fw_device_rwsem);

	return device;
}

/*
 * These defines control the retry behavior for reading the config
 * rom.  It shouldn't be necessary to tweak these; if the device
 * doesn't respond to a config rom read within 10 seconds, it's not
 * going to respond at all.  As for the initial delay, a lot of
 * devices will be able to respond within half a second after bus
 * reset.  On the other hand, it's not really worth being more
 * aggressive than that, since it scales pretty well; if 10 devices
 * are plugged in, they're all getting read within one second.
 */

#define MAX_RETRIES	10
#define RETRY_DELAY	(3 * HZ)
#define INITIAL_DELAY	(HZ / 2)
#define SHUTDOWN_DELAY	(2 * HZ)

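/*
 * The device's node went away (or never came back after a bus reset): remove
 * the character device, unregister all child units and the device itself,
 * and drop the IDR entry.  If the last bus reset is very recent, wait a bit
 * longer first in case the node reappears and the device gets revived.
 */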
static void fw_device_shutdown(struct work_struct *work)
{
	struct fw_device *device =
		container_of(work, struct fw_device, work.work);
	int minor = MINOR(device->device.devt);

	if (time_is_after_jiffies(device->card->reset_jiffies + SHUTDOWN_DELAY)
	    && !list_empty(&device->card->link)) {
		schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
		return;
	}

	if (atomic_cmpxchg(&device->state,
			   FW_DEVICE_GONE,
			   FW_DEVICE_SHUTDOWN) != FW_DEVICE_GONE)
		return;

	fw_device_cdev_remove(device);
	device_for_each_child(&device->device, NULL, shutdown_unit);
	device_unregister(&device->device);

	down_write(&fw_device_rwsem);
	idr_remove(&fw_device_idr, minor);
	up_write(&fw_device_rwsem);

	fw_device_put(device);
}

static void fw_device_release(struct device *dev)
{
	struct fw_device *device = fw_device(dev);
	struct fw_card *card = device->card;
	unsigned long flags;

	/*
	 * Take the card lock so we don't set this to NULL while a
	 * FW_NODE_UPDATED callback is being handled or while the
	 * bus manager work looks at this node.
	 */
	spin_lock_irqsave(&card->lock, flags);
	device->node->data = NULL;
	spin_unlock_irqrestore(&card->lock, flags);

	fw_node_put(device->node);
	kfree(device->config_rom);
	kfree(device);
	fw_card_put(card);
}

static struct device_type fw_device_type = {
	.release = fw_device_release,
};

static bool is_fw_device(struct device *dev)
{
	return dev->type == &fw_device_type;
}

static int update_unit(struct device *dev, void *data)
{
	struct fw_unit *unit = fw_unit(dev);
	struct fw_driver *driver = (struct fw_driver *)dev->driver;

	if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
		device_lock(dev);
		driver->update(unit);
		device_unlock(dev);
	}

	return 0;
}

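/*
 * The device's node ID or bus generation changed: tell the character device
 * layer and every bound unit driver about it.
 */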
static void fw_device_update(struct work_struct *work)
{
	struct fw_device *device =
		container_of(work, struct fw_device, work.work);

	fw_device_cdev_update(device);
	device_for_each_child(&device->device, NULL, update_unit);
}

/*
 * If a device was pending for deletion because its node went away but its
 * bus info block and root directory header match those of a newly discovered
 * device, revive the existing fw_device.
 * The newly allocated fw_device becomes obsolete instead.
 */
static int lookup_existing_device(struct device *dev, void *data)
{
	struct fw_device *old = fw_device(dev);
	struct fw_device *new = data;
	struct fw_card *card = new->card;
	int match = 0;

	if (!is_fw_device(dev))
		return 0;

	down_read(&fw_device_rwsem); /* serialize config_rom access */
	spin_lock_irq(&card->lock);  /* serialize node access */

	if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 &&
	    atomic_cmpxchg(&old->state,
			   FW_DEVICE_GONE,
			   FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
		struct fw_node *current_node = new->node;
		struct fw_node *obsolete_node = old->node;

		new->node = obsolete_node;
		new->node->data = new;
		old->node = current_node;
		old->node->data = old;

		old->max_speed = new->max_speed;
		old->node_id = current_node->node_id;
		smp_wmb();  /* update node_id before generation */
		old->generation = card->generation;
		old->config_rom_retries = 0;
		fw_notify("rediscovered device %s\n", dev_name(dev));

		PREPARE_DELAYED_WORK(&old->work, fw_device_update);
		schedule_delayed_work(&old->work, 0);

		if (current_node == card->root_node)
			fw_schedule_bm_work(card, 0);

		match = 1;
	}

	spin_unlock_irq(&card->lock);
	up_read(&fw_device_rwsem);

	return match;
}

enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };

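/*
 * Tell an IP-over-1394 capable node that the broadcast channel has been
 * allocated, probing first (once per device) whether the 1394a-2000
 * BROADCAST_CHANNEL register is implemented at all.
 */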
static void set_broadcast_channel(struct fw_device *device, int generation)
{
	struct fw_card *card = device->card;
	__be32 data;
	int rcode;

	if (!card->broadcast_channel_allocated)
		return;

	/*
	 * The Broadcast_Channel Valid bit is required by nodes which want to
	 * transmit on this channel.  Such transmissions are practically
	 * exclusive to IP over 1394 (RFC 2734).  IP capable nodes are required
	 * to be IRM capable and have a max_rec of 8 or more.  We use this fact
	 * to narrow down to which nodes we send Broadcast_Channel updates.
	 */
	if (!device->irmc || device->max_rec < 8)
		return;

	/*
	 * Some 1394-1995 nodes crash if this 1394a-2000 register is written.
	 * Perform a read test first.
	 */
	if (device->bc_implemented == BC_UNKNOWN) {
		rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
				device->node_id, generation, device->max_speed,
				CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
				&data, 4);
		switch (rcode) {
		case RCODE_COMPLETE:
			if (data & cpu_to_be32(1 << 31)) {
				device->bc_implemented = BC_IMPLEMENTED;
				break;
			}
			/* else fall through to case address error */
		case RCODE_ADDRESS_ERROR:
			device->bc_implemented = BC_UNIMPLEMENTED;
		}
	}

	if (device->bc_implemented == BC_IMPLEMENTED) {
		data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL |
				   BROADCAST_CHANNEL_VALID);
		fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
				device->node_id, generation, device->max_speed,
				CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
				&data, 4);
	}
}

int fw_device_set_broadcast_channel(struct device *dev, void *gen)
{
	if (is_fw_device(dev))
		set_broadcast_channel(fw_device(dev), (long)gen);

	return 0;
}

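/*
 * Read the config ROM of a newly discovered node, allocate a minor number,
 * create the fw_device and its unit child devices, and register everything
 * with the driver core.  Transient config ROM read failures are retried by
 * rescheduling this work.
 */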
static void fw_device_init(struct work_struct *work)
{
	struct fw_device *device =
		container_of(work, struct fw_device, work.work);
	struct device *revived_dev;
	int minor, ret;

	/*
	 * All failure paths here set node->data to NULL, so that we
	 * don't try to do device_for_each_child() on a kfree()'d
	 * device.
	 */

	if (read_config_rom(device, device->generation) < 0) {
		if (device->config_rom_retries < MAX_RETRIES &&
		    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
			device->config_rom_retries++;
			schedule_delayed_work(&device->work, RETRY_DELAY);
		} else {
			fw_notify("giving up on config rom for node id %x\n",
				  device->node_id);
			if (device->node == device->card->root_node)
				fw_schedule_bm_work(device->card, 0);
			fw_device_release(&device->device);
		}
		return;
	}

	revived_dev = device_find_child(device->card->device,
					device, lookup_existing_device);
	if (revived_dev) {
		put_device(revived_dev);
		fw_device_release(&device->device);

		return;
	}

	device_initialize(&device->device);

	fw_device_get(device);
	down_write(&fw_device_rwsem);
	ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
	      idr_get_new(&fw_device_idr, device, &minor) :
	      -ENOMEM;
	up_write(&fw_device_rwsem);

	if (ret < 0)
		goto error;

	device->device.bus = &fw_bus_type;
	device->device.type = &fw_device_type;
	device->device.parent = device->card->device;
	device->device.devt = MKDEV(fw_cdev_major, minor);
	dev_set_name(&device->device, "fw%d", minor);

	BUILD_BUG_ON(ARRAY_SIZE(device->attribute_group.attrs) <
		     ARRAY_SIZE(fw_device_attributes) +
		     ARRAY_SIZE(config_rom_attributes));
	init_fw_attribute_group(&device->device,
				fw_device_attributes,
				&device->attribute_group);

	if (device_add(&device->device)) {
		fw_error("Failed to add device.\n");
		goto error_with_cdev;
	}

	create_units(device);

	/*
	 * Transition the device to running state.  If it got pulled
	 * out from under us while we did the initialization work, we
	 * have to shut down the device again here.  Normally, though,
	 * fw_node_event will be responsible for shutting it down when
	 * necessary.  We have to use the atomic cmpxchg here to avoid
	 * racing with the FW_NODE_DESTROYED case in
	 * fw_node_event().
	 */
	if (atomic_cmpxchg(&device->state,
			   FW_DEVICE_INITIALIZING,
			   FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
		PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
		schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
	} else {
		if (device->config_rom_retries)
			fw_notify("created device %s: GUID %08x%08x, S%d00, "
				  "%d config ROM retries\n",
				  dev_name(&device->device),
				  device->config_rom[3], device->config_rom[4],
				  1 << device->max_speed,
				  device->config_rom_retries);
		else
			fw_notify("created device %s: GUID %08x%08x, S%d00\n",
				  dev_name(&device->device),
				  device->config_rom[3], device->config_rom[4],
				  1 << device->max_speed);
		device->config_rom_retries = 0;

		set_broadcast_channel(device, device->generation);
	}

	/*
	 * Reschedule the IRM work if we just finished reading the
	 * root node config rom.  If this races with a bus reset we
	 * just end up running the IRM work a couple of extra times -
	 * pretty harmless.
	 */
	if (device->node == device->card->root_node)
		fw_schedule_bm_work(device->card, 0);

	return;

 error_with_cdev:
	down_write(&fw_device_rwsem);
	idr_remove(&fw_device_idr, minor);
	up_write(&fw_device_rwsem);
 error:
	fw_device_put(device);		/* fw_device_idr's reference */

	put_device(&device->device);	/* our reference */
}

enum {
	REREAD_BIB_ERROR,
	REREAD_BIB_GONE,
	REREAD_BIB_UNCHANGED,
	REREAD_BIB_CHANGED,
};

/* Reread and compare bus info block and header of root directory */
static int reread_config_rom(struct fw_device *device, int generation)
{
	u32 q;
	int i;

	for (i = 0; i < 6; i++) {
		if (read_rom(device, generation, i, &q) != RCODE_COMPLETE)
			return REREAD_BIB_ERROR;

		if (i == 0 && q == 0)
			return REREAD_BIB_GONE;

		if (q != device->config_rom[i])
			return REREAD_BIB_CHANGED;
	}

	return REREAD_BIB_UNCHANGED;
}

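/*
 * Check whether the config ROM of a device that was already running changed
 * across a bus reset: if the bus info block and root directory header are
 * unchanged, just propagate the new node ID and generation; otherwise tear
 * the units down and rebuild them from the freshly read config ROM.
 */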
static void fw_device_refresh(struct work_struct *work)
{
	struct fw_device *device =
		container_of(work, struct fw_device, work.work);
	struct fw_card *card = device->card;
	int node_id = device->node_id;

	switch (reread_config_rom(device, device->generation)) {
	case REREAD_BIB_ERROR:
		if (device->config_rom_retries < MAX_RETRIES / 2 &&
		    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
			device->config_rom_retries++;
			schedule_delayed_work(&device->work, RETRY_DELAY / 2);

			return;
		}
		goto give_up;

	case REREAD_BIB_GONE:
		goto gone;

	case REREAD_BIB_UNCHANGED:
		if (atomic_cmpxchg(&device->state,
				   FW_DEVICE_INITIALIZING,
				   FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
			goto gone;

		fw_device_update(work);
		device->config_rom_retries = 0;
		goto out;

	case REREAD_BIB_CHANGED:
		break;
	}

	/*
	 * Something changed.  We keep things simple and don't investigate
	 * further.  We just destroy all previous units and create new ones.
	 */
	device_for_each_child(&device->device, NULL, shutdown_unit);

	if (read_config_rom(device, device->generation) < 0) {
		if (device->config_rom_retries < MAX_RETRIES &&
		    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
			device->config_rom_retries++;
			schedule_delayed_work(&device->work, RETRY_DELAY);

			return;
		}
		goto give_up;
	}

	fw_device_cdev_update(device);
	create_units(device);

	/* Userspace may want to re-read attributes. */
	kobject_uevent(&device->device.kobj, KOBJ_CHANGE);

	if (atomic_cmpxchg(&device->state,
			   FW_DEVICE_INITIALIZING,
			   FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
		goto gone;

	fw_notify("refreshed device %s\n", dev_name(&device->device));
	device->config_rom_retries = 0;
	goto out;

 give_up:
	fw_notify("giving up on refresh of device %s\n", dev_name(&device->device));
 gone:
	atomic_set(&device->state, FW_DEVICE_GONE);
	PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
	schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
 out:
	if (node_id == card->root_node->node_id)
		fw_schedule_bm_work(card, 0);
}

void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
{
	struct fw_device *device;

	switch (event) {
	case FW_NODE_CREATED:
	case FW_NODE_LINK_ON:
		if (!node->link_on)
			break;
 create:
		device = kzalloc(sizeof(*device), GFP_ATOMIC);
		if (device == NULL)
			break;

		/*
		 * Do minimal initialization of the device here, the
		 * rest will happen in fw_device_init().
		 *
		 * Attention:  A lot of things, even fw_device_get(),
		 * cannot be done before fw_device_init() finished!
		 * You can basically just check device->state and
		 * schedule work until then, but only while holding
		 * card->lock.
		 */
		atomic_set(&device->state, FW_DEVICE_INITIALIZING);
		device->card = fw_card_get(card);
		device->node = fw_node_get(node);
		device->node_id = node->node_id;
		device->generation = card->generation;
		device->is_local = node == card->local_node;
		mutex_init(&device->client_list_mutex);
		INIT_LIST_HEAD(&device->client_list);

		/*
		 * Set the node data to point back to this device so
		 * FW_NODE_UPDATED callbacks can update the node_id
		 * and generation for the device.
		 */
		node->data = device;

		/*
		 * Many devices are slow to respond after bus resets,
		 * especially if they are bus powered and go through
		 * power-up after getting plugged in.  We schedule the
		 * first config rom scan half a second after bus reset.
		 */
		INIT_DELAYED_WORK(&device->work, fw_device_init);
		schedule_delayed_work(&device->work, INITIAL_DELAY);
		break;

	case FW_NODE_INITIATED_RESET:
		device = node->data;
		if (device == NULL)
			goto create;

		device->node_id = node->node_id;
		smp_wmb();  /* update node_id before generation */
		device->generation = card->generation;
		if (atomic_cmpxchg(&device->state,
				   FW_DEVICE_RUNNING,
				   FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
			PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
			schedule_delayed_work(&device->work,
				device->is_local ? 0 : INITIAL_DELAY);
		}
		break;

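	/* The node survived the bus reset: adopt its new node ID and generation. */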
	case FW_NODE_UPDATED:
		if (!node->link_on || node->data == NULL)
			break;

		device = node->data;
		device->node_id = node->node_id;
		smp_wmb();  /* update node_id before generation */
		device->generation = card->generation;
		if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
			PREPARE_DELAYED_WORK(&device->work, fw_device_update);
			schedule_delayed_work(&device->work, 0);
		}
		break;

	case FW_NODE_DESTROYED:
	case FW_NODE_LINK_OFF:
		if (!node->data)
			break;

		/*
		 * Destroy the device associated with the node.  There
		 * are two cases here: either the device is fully
		 * initialized (FW_DEVICE_RUNNING) or we're in the
		 * process of reading its config rom
		 * (FW_DEVICE_INITIALIZING).  If it is fully
		 * initialized we can reuse device->work to schedule a
		 * full fw_device_shutdown().  If not, there's work
		 * scheduled to read its config rom, and we just put
		 * the device in shutdown state to have that code fail
		 * to create the device.
		 */
		device = node->data;
		if (atomic_xchg(&device->state,
				FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
			PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
			schedule_delayed_work(&device->work,
				list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
		}
		break;
	}
}