/*
 * Device probing and sysfs code.
 *
 * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>

#include <asm/atomic.h>
#include <asm/byteorder.h>
#include <asm/system.h>

#include "core.h"

void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 *p)
{
	ci->p = p + 1;
	ci->end = ci->p + (p[0] >> 16);
}
EXPORT_SYMBOL(fw_csr_iterator_init);

int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
{
	*key = *ci->p >> 24;
	*value = *ci->p & 0xffffff;

	return ci->p++ < ci->end;
}
EXPORT_SYMBOL(fw_csr_iterator_next);

static bool is_fw_unit(struct device *dev);

static int match_unit_directory(u32 *directory, u32 match_flags,
				const struct ieee1394_device_id *id)
{
	struct fw_csr_iterator ci;
	int key, value, match;

	match = 0;
	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		if (key == CSR_VENDOR && value == id->vendor_id)
			match |= IEEE1394_MATCH_VENDOR_ID;
		if (key == CSR_MODEL && value == id->model_id)
			match |= IEEE1394_MATCH_MODEL_ID;
		if (key == CSR_SPECIFIER_ID && value == id->specifier_id)
			match |= IEEE1394_MATCH_SPECIFIER_ID;
		if (key == CSR_VERSION && value == id->version)
			match |= IEEE1394_MATCH_VERSION;
	}

	return (match & match_flags) == match_flags;
}

static int fw_unit_match(struct device *dev, struct device_driver *drv)
{
	struct fw_unit *unit = fw_unit(dev);
	struct fw_device *device;
	const struct ieee1394_device_id *id;

	/* We only allow binding to fw_units. */
	if (!is_fw_unit(dev))
		return 0;

	device = fw_parent_device(unit);
	id = container_of(drv, struct fw_driver, driver)->id_table;

	for (; id->match_flags != 0; id++) {
		if (match_unit_directory(unit->directory, id->match_flags, id))
			return 1;

		/* Also check vendor ID in the root directory. */
		if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
		    match_unit_directory(&device->config_rom[5],
					 IEEE1394_MATCH_VENDOR_ID, id) &&
		    match_unit_directory(unit->directory, id->match_flags
					 & ~IEEE1394_MATCH_VENDOR_ID, id))
			return 1;
	}

	return 0;
}
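
/*
 * Build the ieee1394 modalias string from the vendor and model entries
 * of the root directory and the specifier_id and version entries of the
 * unit directory.
 */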
static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
{
	struct fw_device *device = fw_parent_device(unit);
	struct fw_csr_iterator ci;

	int key, value;
	int vendor = 0;
	int model = 0;
	int specifier_id = 0;
	int version = 0;

	fw_csr_iterator_init(&ci, &device->config_rom[5]);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		switch (key) {
		case CSR_VENDOR:
			vendor = value;
			break;
		case CSR_MODEL:
			model = value;
			break;
		}
	}

	fw_csr_iterator_init(&ci, unit->directory);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		switch (key) {
		case CSR_SPECIFIER_ID:
			specifier_id = value;
			break;
		case CSR_VERSION:
			version = value;
			break;
		}
	}

	return snprintf(buffer, buffer_size,
			"ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
			vendor, model, specifier_id, version);
}

static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct fw_unit *unit = fw_unit(dev);
	char modalias[64];

	get_modalias(unit, modalias, sizeof(modalias));

	if (add_uevent_var(env, "MODALIAS=%s", modalias))
		return -ENOMEM;

	return 0;
}

struct bus_type fw_bus_type = {
	.name = "firewire",
	.match = fw_unit_match,
};
EXPORT_SYMBOL(fw_bus_type);

int fw_device_enable_phys_dma(struct fw_device *device)
{
	int generation = device->generation;

	/* device->node_id, accessed below, must not be older than generation */
	smp_rmb();

	return device->card->driver->enable_phys_dma(device->card,
						     device->node_id,
						     generation);
}
EXPORT_SYMBOL(fw_device_enable_phys_dma);

struct config_rom_attribute {
	struct device_attribute attr;
	u32 key;
};

static ssize_t show_immediate(struct device *dev,
			      struct device_attribute *dattr, char *buf)
{
	struct config_rom_attribute *attr =
		container_of(dattr, struct config_rom_attribute, attr);
	struct fw_csr_iterator ci;
	u32 *dir;
	int key, value, ret = -ENOENT;

	down_read(&fw_device_rwsem);

	if (is_fw_unit(dev))
		dir = fw_unit(dev)->directory;
	else
		dir = fw_device(dev)->config_rom + 5;

	fw_csr_iterator_init(&ci, dir);
	while (fw_csr_iterator_next(&ci, &key, &value))
		if (attr->key == key) {
			ret = snprintf(buf, buf ? PAGE_SIZE : 0,
				       "0x%06x\n", value);
			break;
		}

	up_read(&fw_device_rwsem);

	return ret;
}

#define IMMEDIATE_ATTR(name, key)				\
	{ __ATTR(name, S_IRUGO, show_immediate, NULL), key }
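
/*
 * Print the contents of the text descriptor leaf that immediately
 * follows the directory entry with attr->key, e.g. a vendor or model
 * name.
 */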
static ssize_t show_text_leaf(struct device *dev,
			      struct device_attribute *dattr, char *buf)
{
	struct config_rom_attribute *attr =
		container_of(dattr, struct config_rom_attribute, attr);
	struct fw_csr_iterator ci;
	u32 *dir, *block = NULL, *p, *end;
	int length, key, value, last_key = 0, ret = -ENOENT;
	char *b;

	down_read(&fw_device_rwsem);

	if (is_fw_unit(dev))
		dir = fw_unit(dev)->directory;
	else
		dir = fw_device(dev)->config_rom + 5;

	fw_csr_iterator_init(&ci, dir);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		if (attr->key == last_key &&
		    key == (CSR_DESCRIPTOR | CSR_LEAF))
			block = ci.p - 1 + value;
		last_key = key;
	}

	if (block == NULL)
		goto out;

	length = min(block[0] >> 16, 256U);
	if (length < 3)
		goto out;

	if (block[1] != 0 || block[2] != 0)
		/* Unknown encoding. */
		goto out;

	if (buf == NULL) {
		ret = length * 4;
		goto out;
	}

	b = buf;
	end = &block[length + 1];
	for (p = &block[3]; p < end; p++, b += 4)
		* (u32 *) b = (__force u32) __cpu_to_be32(*p);

	/* Strip trailing whitespace and add newline. */
	while (b--, (isspace(*b) || *b == '\0') && b > buf);
	strcpy(b + 1, "\n");
	ret = b + 2 - buf;
 out:
	up_read(&fw_device_rwsem);

	return ret;
}

#define TEXT_LEAF_ATTR(name, key)				\
	{ __ATTR(name, S_IRUGO, show_text_leaf, NULL), key }

static struct config_rom_attribute config_rom_attributes[] = {
	IMMEDIATE_ATTR(vendor, CSR_VENDOR),
	IMMEDIATE_ATTR(hardware_version, CSR_HARDWARE_VERSION),
	IMMEDIATE_ATTR(specifier_id, CSR_SPECIFIER_ID),
	IMMEDIATE_ATTR(version, CSR_VERSION),
	IMMEDIATE_ATTR(model, CSR_MODEL),
	TEXT_LEAF_ATTR(vendor_name, CSR_VENDOR),
	TEXT_LEAF_ATTR(model_name, CSR_MODEL),
	TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
};

static void init_fw_attribute_group(struct device *dev,
				    struct device_attribute *attrs,
				    struct fw_attribute_group *group)
{
	struct device_attribute *attr;
	int i, j;

	for (j = 0; attrs[j].attr.name != NULL; j++)
		group->attrs[j] = &attrs[j].attr;

	for (i = 0; i < ARRAY_SIZE(config_rom_attributes); i++) {
		attr = &config_rom_attributes[i].attr;
		if (attr->show(dev, attr, NULL) < 0)
			continue;
		group->attrs[j++] = &attr->attr;
	}

	group->attrs[j] = NULL;
	group->groups[0] = &group->group;
	group->groups[1] = NULL;
	group->group.attrs = group->attrs;
	dev->groups = (const struct attribute_group **) group->groups;
}

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct fw_unit *unit = fw_unit(dev);
	int length;

	length = get_modalias(unit, buf, PAGE_SIZE);
	strcpy(buf + length, "\n");

	return length + 1;
}

static ssize_t rom_index_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct fw_device *device = fw_device(dev->parent);
	struct fw_unit *unit = fw_unit(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			(int)(unit->directory - device->config_rom));
}

static struct device_attribute fw_unit_attributes[] = {
	__ATTR_RO(modalias),
	__ATTR_RO(rom_index),
	__ATTR_NULL,
};
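
/* Raw dump of the cached config ROM; quadlets are in CPU byte order. */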
static ssize_t config_rom_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct fw_device *device = fw_device(dev);
	size_t length;

	down_read(&fw_device_rwsem);
	length = device->config_rom_length * 4;
	memcpy(buf, device->config_rom, length);
	up_read(&fw_device_rwsem);

	return length;
}

static ssize_t guid_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct fw_device *device = fw_device(dev);
	int ret;

	down_read(&fw_device_rwsem);
	ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n",
		       device->config_rom[3], device->config_rom[4]);
	up_read(&fw_device_rwsem);

	return ret;
}

static int units_sprintf(char *buf, u32 *directory)
{
	struct fw_csr_iterator ci;
	int key, value;
	int specifier_id = 0;
	int version = 0;

	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		switch (key) {
		case CSR_SPECIFIER_ID:
			specifier_id = value;
			break;
		case CSR_VERSION:
			version = value;
			break;
		}
	}

	return sprintf(buf, "0x%06x:0x%06x ", specifier_id, version);
}

static ssize_t units_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct fw_device *device = fw_device(dev);
	struct fw_csr_iterator ci;
	int key, value, i = 0;

	down_read(&fw_device_rwsem);
	fw_csr_iterator_init(&ci, &device->config_rom[5]);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		if (key != (CSR_UNIT | CSR_DIRECTORY))
			continue;
		i += units_sprintf(&buf[i], ci.p + value - 1);
		if (i >= PAGE_SIZE - (8 + 1 + 8 + 1))
			break;
	}
	up_read(&fw_device_rwsem);

	if (i)
		buf[i - 1] = '\n';

	return i;
}

static struct device_attribute fw_device_attributes[] = {
	__ATTR_RO(config_rom),
	__ATTR_RO(guid),
	__ATTR_RO(units),
	__ATTR_NULL,
};
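
/*
 * Read a single config ROM quadlet at the given index from the device
 * and convert it to CPU byte order.
 */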
static int read_rom(struct fw_device *device,
		    int generation, int index, u32 *data)
{
	int rcode;

	/* device->node_id, accessed below, must not be older than generation */
	smp_rmb();

	rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
			device->node_id, generation, device->max_speed,
			(CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4,
			data, 4);
	be32_to_cpus(data);

	return rcode;
}

#define READ_BIB_ROM_SIZE	256
#define READ_BIB_STACK_SIZE	16

/*
 * Read the bus info block, perform a speed probe, and read all of the rest of
 * the config ROM.  We do all this with a cached bus generation.  If the bus
 * generation changes under us, read_bus_info_block will fail and get retried.
 * It's better to start all over in this case because the node from which we
 * are reading the ROM may have changed the ROM during the reset.
 */
static int read_bus_info_block(struct fw_device *device, int generation)
{
	u32 *rom, *stack, *old_rom, *new_rom;
	u32 sp, key;
	int i, end, length, ret = -1;

	rom = kmalloc(sizeof(*rom) * READ_BIB_ROM_SIZE +
		      sizeof(*stack) * READ_BIB_STACK_SIZE, GFP_KERNEL);
	if (rom == NULL)
		return -ENOMEM;

	stack = &rom[READ_BIB_ROM_SIZE];

	device->max_speed = SCODE_100;

	/* First read the bus info block. */
	for (i = 0; i < 5; i++) {
		if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
			goto out;
		/*
		 * As per IEEE1212 7.2, during power-up, devices can
		 * reply with a 0 for the first quadlet of the config
		 * rom to indicate that they are booting (for example,
		 * if the firmware is on the disk of an external
		 * hard disk).  In that case we just fail, and the
		 * retry mechanism will try again later.
		 */
		if (i == 0 && rom[i] == 0)
			goto out;
	}

	device->max_speed = device->node->max_speed;

	/*
	 * Determine the speed of
	 *   - devices with link speed less than PHY speed,
	 *   - devices with 1394b PHY (unless only connected to 1394a PHYs),
	 *   - all devices if there are 1394b repeaters.
	 * Note, we cannot use the bus info block's link_spd as starting point
	 * because some buggy firmwares set it lower than necessary and because
	 * 1394-1995 nodes do not have the field.
	 */
	if ((rom[2] & 0x7) < device->max_speed ||
	    device->max_speed == SCODE_BETA ||
	    device->card->beta_repeaters_present) {
		u32 dummy;

		/* for S1600 and S3200 */
		if (device->max_speed == SCODE_BETA)
			device->max_speed = device->card->link_speed;

		while (device->max_speed > SCODE_100) {
			if (read_rom(device, generation, 0, &dummy) ==
			    RCODE_COMPLETE)
				break;
			device->max_speed--;
		}
	}

	/*
	 * Now parse the config rom.  The config rom is a recursive
	 * directory structure so we parse it using a stack of
	 * references to the blocks that make up the structure.  We
	 * push a reference to the root directory on the stack to
	 * start things off.
	 */
	length = i;
	sp = 0;
	stack[sp++] = 0xc0000005;
	while (sp > 0) {
		/*
		 * Pop the next block reference off the stack.  The
		 * lower 24 bits are the offset into the config rom,
		 * the upper 8 bits are the type of the reference to
		 * the block.
		 */
		key = stack[--sp];
		i = key & 0xffffff;
		if (i >= READ_BIB_ROM_SIZE)
			/*
			 * The reference points outside the standard
			 * config rom area, something's fishy.
			 */
			goto out;

		/* Read header quadlet for the block to get the length. */
		if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
			goto out;
		end = i + (rom[i] >> 16) + 1;
		i++;
		if (end > READ_BIB_ROM_SIZE)
			/*
			 * This block extends outside the standard config
			 * area (and the array we're reading it
			 * into).  That's broken, so ignore this
			 * device.
			 */
			goto out;

		/*
		 * Now read in the block.  If this is a directory
		 * block, check the entries as we read them to see if
		 * it references another block, and push it in that case.
		 */
		while (i < end) {
			if (read_rom(device, generation, i, &rom[i]) !=
			    RCODE_COMPLETE)
				goto out;
			if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
			    sp < READ_BIB_STACK_SIZE)
				stack[sp++] = i + rom[i];
			i++;
		}
		if (length < i)
			length = i;
	}

	old_rom = device->config_rom;
	new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
	if (new_rom == NULL)
		goto out;

	down_write(&fw_device_rwsem);
	device->config_rom = new_rom;
	device->config_rom_length = length;
	up_write(&fw_device_rwsem);

	kfree(old_rom);
	ret = 0;
	device->max_rec = rom[2] >> 12 & 0xf;
	device->cmc = rom[2] >> 30 & 1;
	device->irmc = rom[2] >> 31 & 1;
 out:
	kfree(rom);

	return ret;
}

static void fw_unit_release(struct device *dev)
{
	struct fw_unit *unit = fw_unit(dev);

	kfree(unit);
}

static struct device_type fw_unit_type = {
	.uevent = fw_unit_uevent,
	.release = fw_unit_release,
};

static bool is_fw_unit(struct device *dev)
{
	return dev->type == &fw_unit_type;
}
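
/*
 * Scan the root directory for unit directory entries and register a
 * child fw_unit device for each of them.
 */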
static void create_units(struct fw_device *device)
{
	struct fw_csr_iterator ci;
	struct fw_unit *unit;
	int key, value, i;

	i = 0;
	fw_csr_iterator_init(&ci, &device->config_rom[5]);
	while (fw_csr_iterator_next(&ci, &key, &value)) {
		if (key != (CSR_UNIT | CSR_DIRECTORY))
			continue;

		/*
		 * Get the address of the unit directory and try to
		 * match the drivers' id_tables against it.
		 */
		unit = kzalloc(sizeof(*unit), GFP_KERNEL);
		if (unit == NULL) {
			fw_error("failed to allocate memory for unit\n");
			continue;
		}

		unit->directory = ci.p + value - 1;
		unit->device.bus = &fw_bus_type;
		unit->device.type = &fw_unit_type;
		unit->device.parent = &device->device;
		dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++);

		BUILD_BUG_ON(ARRAY_SIZE(unit->attribute_group.attrs) <
			     ARRAY_SIZE(fw_unit_attributes) +
			     ARRAY_SIZE(config_rom_attributes));
		init_fw_attribute_group(&unit->device,
					fw_unit_attributes,
					&unit->attribute_group);

		if (device_register(&unit->device) < 0)
			goto skip_unit;

		continue;

	skip_unit:
		kfree(unit);
	}
}

static int shutdown_unit(struct device *device, void *data)
{
	device_unregister(device);

	return 0;
}

/*
 * fw_device_rwsem acts as a dual-purpose mutex:
 *  - serializes accesses to fw_device_idr,
 *  - serializes accesses to fw_device.config_rom/.config_rom_length and
 *    fw_unit.directory, unless those accesses happen at safe occasions
 */
DECLARE_RWSEM(fw_device_rwsem);

DEFINE_IDR(fw_device_idr);
int fw_cdev_major;

struct fw_device *fw_device_get_by_devt(dev_t devt)
{
	struct fw_device *device;

	down_read(&fw_device_rwsem);
	device = idr_find(&fw_device_idr, MINOR(devt));
	if (device)
		fw_device_get(device);
	up_read(&fw_device_rwsem);

	return device;
}

/*
 * These defines control the retry behavior for reading the config
 * rom.  It shouldn't be necessary to tweak these; if the device
 * doesn't respond to a config rom read within 10 seconds, it's not
 * going to respond at all.  As for the initial delay, a lot of
 * devices will be able to respond within half a second after bus
 * reset.  On the other hand, it's not really worth being more
 * aggressive than that, since it scales pretty well; if 10 devices
 * are plugged in, they're all getting read within one second.
 */

#define MAX_RETRIES	10
#define RETRY_DELAY	(3 * HZ)
#define INITIAL_DELAY	(HZ / 2)
#define SHUTDOWN_DELAY	(2 * HZ)
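
/*
 * Finally shut down and unregister a device whose node went away.  The
 * work is deferred until SHUTDOWN_DELAY after the last bus reset so that
 * a briefly disappearing device can still be revived by
 * lookup_existing_device().
 */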
static void fw_device_shutdown(struct work_struct *work)
{
	struct fw_device *device =
		container_of(work, struct fw_device, work.work);
	int minor = MINOR(device->device.devt);

	if (time_is_after_jiffies(device->card->reset_jiffies + SHUTDOWN_DELAY)
	    && !list_empty(&device->card->link)) {
		schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
		return;
	}

	if (atomic_cmpxchg(&device->state,
			   FW_DEVICE_GONE,
			   FW_DEVICE_SHUTDOWN) != FW_DEVICE_GONE)
		return;

	fw_device_cdev_remove(device);
	device_for_each_child(&device->device, NULL, shutdown_unit);
	device_unregister(&device->device);

	down_write(&fw_device_rwsem);
	idr_remove(&fw_device_idr, minor);
	up_write(&fw_device_rwsem);

	fw_device_put(device);
}

static void fw_device_release(struct device *dev)
{
	struct fw_device *device = fw_device(dev);
	struct fw_card *card = device->card;
	unsigned long flags;

	/*
	 * Take the card lock so we don't set this to NULL while a
	 * FW_NODE_UPDATED callback is being handled or while the
	 * bus manager work looks at this node.
	 */
	spin_lock_irqsave(&card->lock, flags);
	device->node->data = NULL;
	spin_unlock_irqrestore(&card->lock, flags);

	fw_node_put(device->node);
	kfree(device->config_rom);
	kfree(device);
	fw_card_put(card);
}

static struct device_type fw_device_type = {
	.release = fw_device_release,
};

static bool is_fw_device(struct device *dev)
{
	return dev->type == &fw_device_type;
}
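
/*
 * Per-unit helper for fw_device_update(): call the bound driver's
 * update hook, if any.
 */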
static int update_unit(struct device *dev, void *data)
{
	struct fw_unit *unit = fw_unit(dev);
	struct fw_driver *driver = (struct fw_driver *)dev->driver;

	if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
		down(&dev->sem);
		driver->update(unit);
		up(&dev->sem);
	}

	return 0;
}

static void fw_device_update(struct work_struct *work)
{
	struct fw_device *device =
		container_of(work, struct fw_device, work.work);

	fw_device_cdev_update(device);
	device_for_each_child(&device->device, NULL, update_unit);
}

/*
 * If a device was pending for deletion because its node went away but its
 * bus info block and root directory header match those of a newly discovered
 * device, revive the existing fw_device.
 * The newly allocated fw_device becomes obsolete instead.
 */
static int lookup_existing_device(struct device *dev, void *data)
{
	struct fw_device *old = fw_device(dev);
	struct fw_device *new = data;
	struct fw_card *card = new->card;
	int match = 0;

	if (!is_fw_device(dev))
		return 0;

	down_read(&fw_device_rwsem); /* serialize config_rom access */
	spin_lock_irq(&card->lock);  /* serialize node access */

	if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 &&
	    atomic_cmpxchg(&old->state,
			   FW_DEVICE_GONE,
			   FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
		struct fw_node *current_node = new->node;
		struct fw_node *obsolete_node = old->node;

		new->node = obsolete_node;
		new->node->data = new;
		old->node = current_node;
		old->node->data = old;

		old->max_speed = new->max_speed;
		old->node_id = current_node->node_id;
		smp_wmb();  /* update node_id before generation */
		old->generation = card->generation;
		old->config_rom_retries = 0;
		fw_notify("rediscovered device %s\n", dev_name(dev));

		PREPARE_DELAYED_WORK(&old->work, fw_device_update);
		schedule_delayed_work(&old->work, 0);

		if (current_node == card->root_node)
			fw_schedule_bm_work(card, 0);

		match = 1;
	}

	spin_unlock_irq(&card->lock);
	up_read(&fw_device_rwsem);

	return match;
}

enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };

static void set_broadcast_channel(struct fw_device *device, int generation)
{
	struct fw_card *card = device->card;
	__be32 data;
	int rcode;

	if (!card->broadcast_channel_allocated)
		return;

	/*
	 * The Broadcast_Channel Valid bit is required by nodes which want to
	 * transmit on this channel.  Such transmissions are practically
	 * exclusive to IP over 1394 (RFC 2734).  IP capable nodes are required
	 * to be IRM capable and have a max_rec of 8 or more.  We use this fact
	 * to narrow down to which nodes we send Broadcast_Channel updates.
	 */
	if (!device->irmc || device->max_rec < 8)
		return;

	/*
	 * Some 1394-1995 nodes crash if this 1394a-2000 register is written.
	 * Perform a read test first.
	 */
	if (device->bc_implemented == BC_UNKNOWN) {
		rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
				device->node_id, generation, device->max_speed,
				CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
				&data, 4);
		switch (rcode) {
		case RCODE_COMPLETE:
			if (data & cpu_to_be32(1 << 31)) {
				device->bc_implemented = BC_IMPLEMENTED;
				break;
			}
			/* else fall through to case address error */
		case RCODE_ADDRESS_ERROR:
			device->bc_implemented = BC_UNIMPLEMENTED;
		}
	}

	if (device->bc_implemented == BC_IMPLEMENTED) {
		data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL |
				   BROADCAST_CHANNEL_VALID);
		fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
				device->node_id, generation, device->max_speed,
				CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
				&data, 4);
	}
}

int fw_device_set_broadcast_channel(struct device *dev, void *gen)
{
	if (is_fw_device(dev))
		set_broadcast_channel(fw_device(dev), (long)gen);

	return 0;
}
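
/*
 * Initial work for a newly created fw_device: read its config ROM,
 * allocate a character device minor, register it with the driver core
 * and create its unit devices.  While the device does not respond, the
 * work is rescheduled with RETRY_DELAY up to MAX_RETRIES times.
 */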
static void fw_device_init(struct work_struct *work)
{
	struct fw_device *device =
		container_of(work, struct fw_device, work.work);
	struct device *revived_dev;
	int minor, ret;

	/*
	 * All failure paths here set node->data to NULL, so that we
	 * don't try to do device_for_each_child() on a kfree()'d
	 * device.
	 */

	if (read_bus_info_block(device, device->generation) < 0) {
		if (device->config_rom_retries < MAX_RETRIES &&
		    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
			device->config_rom_retries++;
			schedule_delayed_work(&device->work, RETRY_DELAY);
		} else {
			fw_notify("giving up on config rom for node id %x\n",
				  device->node_id);
			if (device->node == device->card->root_node)
				fw_schedule_bm_work(device->card, 0);
			fw_device_release(&device->device);
		}
		return;
	}

	revived_dev = device_find_child(device->card->device,
					device, lookup_existing_device);
	if (revived_dev) {
		put_device(revived_dev);
		fw_device_release(&device->device);

		return;
	}

	device_initialize(&device->device);

	fw_device_get(device);
	down_write(&fw_device_rwsem);
	ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
	      idr_get_new(&fw_device_idr, device, &minor) :
	      -ENOMEM;
	up_write(&fw_device_rwsem);

	if (ret < 0)
		goto error;

	device->device.bus = &fw_bus_type;
	device->device.type = &fw_device_type;
	device->device.parent = device->card->device;
	device->device.devt = MKDEV(fw_cdev_major, minor);
	dev_set_name(&device->device, "fw%d", minor);

	BUILD_BUG_ON(ARRAY_SIZE(device->attribute_group.attrs) <
		     ARRAY_SIZE(fw_device_attributes) +
		     ARRAY_SIZE(config_rom_attributes));
	init_fw_attribute_group(&device->device,
				fw_device_attributes,
				&device->attribute_group);

	if (device_add(&device->device)) {
		fw_error("Failed to add device.\n");
		goto error_with_cdev;
	}

	create_units(device);

	/*
	 * Transition the device to running state.  If it got pulled
	 * out from under us while we did the initialization work, we
	 * have to shut down the device again here.  Normally, though,
	 * fw_node_event will be responsible for shutting it down when
	 * necessary.  We have to use the atomic cmpxchg here to avoid
	 * racing with the FW_NODE_DESTROYED case in
	 * fw_node_event().
	 */
	if (atomic_cmpxchg(&device->state,
			   FW_DEVICE_INITIALIZING,
			   FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
		PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
		schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
	} else {
		if (device->config_rom_retries)
			fw_notify("created device %s: GUID %08x%08x, S%d00, "
				  "%d config ROM retries\n",
				  dev_name(&device->device),
				  device->config_rom[3], device->config_rom[4],
				  1 << device->max_speed,
				  device->config_rom_retries);
		else
			fw_notify("created device %s: GUID %08x%08x, S%d00\n",
				  dev_name(&device->device),
				  device->config_rom[3], device->config_rom[4],
				  1 << device->max_speed);
		device->config_rom_retries = 0;

		set_broadcast_channel(device, device->generation);
	}

	/*
	 * Reschedule the IRM work if we just finished reading the
	 * root node config rom.  If this races with a bus reset we
	 * just end up running the IRM work a couple of extra times -
	 * pretty harmless.
	 */
	if (device->node == device->card->root_node)
		fw_schedule_bm_work(device->card, 0);

	return;

 error_with_cdev:
	down_write(&fw_device_rwsem);
	idr_remove(&fw_device_idr, minor);
	up_write(&fw_device_rwsem);
 error:
	fw_device_put(device);		/* fw_device_idr's reference */

	put_device(&device->device);	/* our reference */
}

enum {
	REREAD_BIB_ERROR,
	REREAD_BIB_GONE,
	REREAD_BIB_UNCHANGED,
	REREAD_BIB_CHANGED,
};

/* Reread and compare bus info block and header of root directory */
static int reread_bus_info_block(struct fw_device *device, int generation)
{
	u32 q;
	int i;

	for (i = 0; i < 6; i++) {
		if (read_rom(device, generation, i, &q) != RCODE_COMPLETE)
			return REREAD_BIB_ERROR;

		if (i == 0 && q == 0)
			return REREAD_BIB_GONE;

		if (q != device->config_rom[i])
			return REREAD_BIB_CHANGED;
	}

	return REREAD_BIB_UNCHANGED;
}
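
/*
 * Re-examine a known device after it initiated a bus reset: if its bus
 * info block is unchanged, just notify the unit drivers; if it changed,
 * destroy the old units and create new ones from the fresh config ROM;
 * if the device stopped responding, shut it down.
 */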
static void fw_device_refresh(struct work_struct *work)
{
	struct fw_device *device =
		container_of(work, struct fw_device, work.work);
	struct fw_card *card = device->card;
	int node_id = device->node_id;

	switch (reread_bus_info_block(device, device->generation)) {
	case REREAD_BIB_ERROR:
		if (device->config_rom_retries < MAX_RETRIES / 2 &&
		    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
			device->config_rom_retries++;
			schedule_delayed_work(&device->work, RETRY_DELAY / 2);

			return;
		}
		goto give_up;

	case REREAD_BIB_GONE:
		goto gone;

	case REREAD_BIB_UNCHANGED:
		if (atomic_cmpxchg(&device->state,
				   FW_DEVICE_INITIALIZING,
				   FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
			goto gone;

		fw_device_update(work);
		device->config_rom_retries = 0;
		goto out;

	case REREAD_BIB_CHANGED:
		break;
	}

	/*
	 * Something changed.  We keep things simple and don't investigate
	 * further.  We just destroy all previous units and create new ones.
	 */
	device_for_each_child(&device->device, NULL, shutdown_unit);

	if (read_bus_info_block(device, device->generation) < 0) {
		if (device->config_rom_retries < MAX_RETRIES &&
		    atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
			device->config_rom_retries++;
			schedule_delayed_work(&device->work, RETRY_DELAY);

			return;
		}
		goto give_up;
	}

	create_units(device);

	/* Userspace may want to re-read attributes. */
	kobject_uevent(&device->device.kobj, KOBJ_CHANGE);

	if (atomic_cmpxchg(&device->state,
			   FW_DEVICE_INITIALIZING,
			   FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
		goto gone;

	fw_notify("refreshed device %s\n", dev_name(&device->device));
	device->config_rom_retries = 0;
	goto out;

 give_up:
	fw_notify("giving up on refresh of device %s\n", dev_name(&device->device));
 gone:
	atomic_set(&device->state, FW_DEVICE_GONE);
	PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
	schedule_delayed_work(&device->work, SHUTDOWN_DELAY);
 out:
	if (node_id == card->root_node->node_id)
		fw_schedule_bm_work(card, 0);
}
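
/*
 * Called for every node event from the bus topology handling.  This may
 * run in atomic context (note the GFP_ATOMIC allocation below), so all
 * heavier work is deferred to delayed work items.
 */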
void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
{
	struct fw_device *device;

	switch (event) {
	case FW_NODE_CREATED:
	case FW_NODE_LINK_ON:
		if (!node->link_on)
			break;
 create:
		device = kzalloc(sizeof(*device), GFP_ATOMIC);
		if (device == NULL)
			break;

		/*
		 * Do minimal initialization of the device here, the
		 * rest will happen in fw_device_init().
		 *
		 * Attention:  A lot of things, even fw_device_get(),
		 * cannot be done before fw_device_init() finished!
		 * You can basically just check device->state and
		 * schedule work until then, but only while holding
		 * card->lock.
		 */
		atomic_set(&device->state, FW_DEVICE_INITIALIZING);
		device->card = fw_card_get(card);
		device->node = fw_node_get(node);
		device->node_id = node->node_id;
		device->generation = card->generation;
		device->is_local = node == card->local_node;
		mutex_init(&device->client_list_mutex);
		INIT_LIST_HEAD(&device->client_list);

		/*
		 * Set the node data to point back to this device so
		 * FW_NODE_UPDATED callbacks can update the node_id
		 * and generation for the device.
		 */
		node->data = device;

		/*
		 * Many devices are slow to respond after bus resets,
		 * especially if they are bus powered and go through
		 * power-up after getting plugged in.  We schedule the
		 * first config rom scan half a second after bus reset.
		 */
		INIT_DELAYED_WORK(&device->work, fw_device_init);
		schedule_delayed_work(&device->work, INITIAL_DELAY);
		break;

	case FW_NODE_INITIATED_RESET:
		device = node->data;
		if (device == NULL)
			goto create;

		device->node_id = node->node_id;
		smp_wmb();  /* update node_id before generation */
		device->generation = card->generation;
		if (atomic_cmpxchg(&device->state,
			    FW_DEVICE_RUNNING,
			    FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
			PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
			schedule_delayed_work(&device->work,
				device->is_local ? 0 : INITIAL_DELAY);
		}
		break;

	case FW_NODE_UPDATED:
		if (!node->link_on || node->data == NULL)
			break;

		device = node->data;
		device->node_id = node->node_id;
		smp_wmb();  /* update node_id before generation */
		device->generation = card->generation;
		if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
			PREPARE_DELAYED_WORK(&device->work, fw_device_update);
			schedule_delayed_work(&device->work, 0);
		}
		break;

	case FW_NODE_DESTROYED:
	case FW_NODE_LINK_OFF:
		if (!node->data)
			break;

		/*
		 * Destroy the device associated with the node.  There
		 * are two cases here: either the device is fully
		 * initialized (FW_DEVICE_RUNNING) or we're in the
		 * process of reading its config rom
		 * (FW_DEVICE_INITIALIZING).  If it is fully
		 * initialized we can reuse device->work to schedule a
		 * full fw_device_shutdown().  If not, there's work
		 * scheduled to read its config rom, and we just put
		 * the device in shutdown state to have that code fail
		 * to create the device.
		 */
		device = node->data;
		if (atomic_xchg(&device->state,
				FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
			PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
			schedule_delayed_work(&device->work,
				list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
		}
		break;
	}
}