// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core IEEE1394 transaction logic
 *
 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>

#include "core.h"

#define HEADER_PRI(pri)			((pri) << 0)
#define HEADER_TCODE(tcode)		((tcode) << 4)
#define HEADER_RETRY(retry)		((retry) << 8)
#define HEADER_TLABEL(tlabel)		((tlabel) << 10)
#define HEADER_DESTINATION(destination)	((destination) << 16)
#define HEADER_SOURCE(source)		((source) << 16)
#define HEADER_RCODE(rcode)		((rcode) << 12)
#define HEADER_OFFSET_HIGH(offset_high)	((offset_high) << 0)
#define HEADER_DATA_LENGTH(length)	((length) << 16)
#define HEADER_EXTENDED_TCODE(tcode)	((tcode) << 0)

#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_TLABEL(q)		(((q) >> 10) & 0x3f)
#define HEADER_GET_RCODE(q)		(((q) >> 12) & 0x0f)
#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_SOURCE(q)		(((q) >> 16) & 0xffff)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)

#define HEADER_DESTINATION_IS_BROADCAST(q) \
	(((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f))
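/*
 * Added note, illustrative only: the HEADER_* helpers above pack and unpack
 * the first quadlets of an asynchronous packet header.  For example, a read
 * quadlet request addressed to node 0xffc0 with transaction label 5 would,
 * under these macros, have a first header quadlet of
 *
 *	q = HEADER_RETRY(RETRY_X) | HEADER_TLABEL(5) |
 *	    HEADER_TCODE(TCODE_READ_QUADLET_REQUEST) | HEADER_DESTINATION(0xffc0);
 *
 * and the receive path can recover the fields again with
 *
 *	HEADER_GET_TCODE(q)       == TCODE_READ_QUADLET_REQUEST
 *	HEADER_GET_TLABEL(q)      == 5
 *	HEADER_GET_DESTINATION(q) == 0xffc0
 */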
#define PHY_PACKET_CONFIG	0x0
#define PHY_PACKET_LINK_ON	0x1
#define PHY_PACKET_SELF_ID	0x2

#define PHY_CONFIG_GAP_COUNT(gap_count)	(((gap_count) << 16) | (1 << 22))
#define PHY_CONFIG_ROOT_ID(node_id)	((((node_id) & 0x3f) << 24) | (1 << 23))
#define PHY_IDENTIFIER(id)		((id) << 30)

/* returns 0 if the split timeout handler is already running */
static int try_cancel_split_timeout(struct fw_transaction *t)
{
	if (t->is_split_transaction)
		return del_timer(&t->split_timeout_timer);
	else
		return 1;
}

static int close_transaction(struct fw_transaction *transaction, struct fw_card *card, int rcode,
			     u32 response_tstamp)
{
	struct fw_transaction *t = NULL, *iter;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iter, &card->transaction_list, link) {
		if (iter == transaction) {
			if (!try_cancel_split_timeout(iter)) {
				spin_unlock_irqrestore(&card->lock, flags);
				goto timed_out;
			}
			list_del_init(&iter->link);
			card->tlabel_mask &= ~(1ULL << iter->tlabel);
			t = iter;
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (t) {
		if (!t->with_tstamp) {
			t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data);
		} else {
			t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp,
						NULL, 0, t->callback_data);
		}
		return 0;
	}

 timed_out:
	return -ENOENT;
}

/*
 * Only valid for transactions that are potentially pending (i.e. have
 * been sent).
 */
int fw_cancel_transaction(struct fw_card *card,
			  struct fw_transaction *transaction)
{
	u32 tstamp;

	/*
	 * Cancel the packet transmission if it's still queued.  That
	 * will call the packet transmission callback which cancels
	 * the transaction.
	 */

	if (card->driver->cancel_packet(card, &transaction->packet) == 0)
		return 0;

	/*
	 * If the request packet has already been sent, we need to see
	 * if the transaction is still pending and remove it in that case.
	 */

	if (transaction->packet.ack == 0) {
		// The timestamp is reused since it was just read now.
		tstamp = transaction->packet.timestamp;
	} else {
		u32 curr_cycle_time = 0;

		(void)fw_card_read_cycle_time(card, &curr_cycle_time);
		tstamp = cycle_time_to_ohci_tstamp(curr_cycle_time);
	}

	return close_transaction(transaction, card, RCODE_CANCELLED, tstamp);
}
EXPORT_SYMBOL(fw_cancel_transaction);
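/*
 * Added note, illustrative only: a minimal sketch of how a caller that
 * submitted a request with fw_send_request() might abort it, for example
 * from its own shutdown path.  "my_transaction" is a placeholder for a
 * transaction the caller still owns.
 *
 *	if (fw_cancel_transaction(card, &my_transaction) < 0)
 *		pr_debug("transaction already completed or timed out\n");
 *
 * A return value of 0 means the transaction is being cancelled and its
 * completion callback will see RCODE_CANCELLED; -ENOENT means the
 * transaction had already been closed.
 */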
static void split_transaction_timeout_callback(struct timer_list *timer)
{
	struct fw_transaction *t = from_timer(t, timer, split_timeout_timer);
	struct fw_card *card = t->card;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	if (list_empty(&t->link)) {
		spin_unlock_irqrestore(&card->lock, flags);
		return;
	}
	list_del(&t->link);
	card->tlabel_mask &= ~(1ULL << t->tlabel);
	spin_unlock_irqrestore(&card->lock, flags);

	if (!t->with_tstamp) {
		t->callback.without_tstamp(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
	} else {
		t->callback.with_tstamp(card, RCODE_CANCELLED, t->packet.timestamp,
					t->split_timeout_cycle, NULL, 0, t->callback_data);
	}
}

static void start_split_transaction_timeout(struct fw_transaction *t,
					    struct fw_card *card)
{
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) {
		spin_unlock_irqrestore(&card->lock, flags);
		return;
	}

	t->is_split_transaction = true;
	mod_timer(&t->split_timeout_timer,
		  jiffies + card->split_timeout_jiffies);

	spin_unlock_irqrestore(&card->lock, flags);
}

static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp);

static void transmit_complete_callback(struct fw_packet *packet,
				       struct fw_card *card, int status)
{
	struct fw_transaction *t =
	    container_of(packet, struct fw_transaction, packet);

	switch (status) {
	case ACK_COMPLETE:
		close_transaction(t, card, RCODE_COMPLETE, packet->timestamp);
		break;
	case ACK_PENDING:
	{
		t->split_timeout_cycle =
			compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
		start_split_transaction_timeout(t, card);
		break;
	}
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:
		close_transaction(t, card, RCODE_BUSY, packet->timestamp);
		break;
	case ACK_DATA_ERROR:
		close_transaction(t, card, RCODE_DATA_ERROR, packet->timestamp);
		break;
	case ACK_TYPE_ERROR:
		close_transaction(t, card, RCODE_TYPE_ERROR, packet->timestamp);
		break;
	default:
		/*
		 * In this case the ack is really a juju specific
		 * rcode, so just forward that to the callback.
		 */
		close_transaction(t, card, status, packet->timestamp);
		break;
	}
}

static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
		int destination_id, int source_id, int generation, int speed,
		unsigned long long offset, void *payload, size_t length)
{
	int ext_tcode;

	if (tcode == TCODE_STREAM_DATA) {
		packet->header[0] =
			HEADER_DATA_LENGTH(length) |
			destination_id |
			HEADER_TCODE(TCODE_STREAM_DATA);
		packet->header_length = 4;
		packet->payload = payload;
		packet->payload_length = length;

		goto common;
	}

	if (tcode > 0x10) {
		ext_tcode = tcode & ~0x10;
		tcode = TCODE_LOCK_REQUEST;
	} else
		ext_tcode = 0;

	packet->header[0] =
		HEADER_RETRY(RETRY_X) |
		HEADER_TLABEL(tlabel) |
		HEADER_TCODE(tcode) |
		HEADER_DESTINATION(destination_id);
	packet->header[1] =
		HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
	packet->header[2] =
		offset;

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		packet->header[3] = *(u32 *)payload;
		packet->header_length = 16;
		packet->payload_length = 0;
		break;

	case TCODE_LOCK_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		packet->header[3] =
			HEADER_DATA_LENGTH(length) |
			HEADER_EXTENDED_TCODE(ext_tcode);
		packet->header_length = 16;
		packet->payload = payload;
		packet->payload_length = length;
		break;

	case TCODE_READ_QUADLET_REQUEST:
		packet->header_length = 12;
		packet->payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		packet->header[3] =
			HEADER_DATA_LENGTH(length) |
			HEADER_EXTENDED_TCODE(ext_tcode);
		packet->header_length = 16;
		packet->payload_length = 0;
		break;

	default:
		WARN(1, "wrong tcode %d\n", tcode);
	}
 common:
	packet->speed = speed;
	packet->generation = generation;
	packet->ack = 0;
	packet->payload_mapped = false;
}
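/*
 * Added note, illustrative only: lock subactions are requested with the
 * firewire-core specific extended tcodes (values above 0x10), which
 * fw_fill_request() folds back into TCODE_LOCK_REQUEST plus an extended
 * tcode in header[3].  For example, assuming the TCODE_LOCK_COMPARE_SWAP
 * constant from <linux/firewire.h>:
 *
 *	tcode = TCODE_LOCK_COMPARE_SWAP;	// 0x10 | EXTCODE_COMPARE_SWAP
 *	// fw_fill_request() then emits tcode TCODE_LOCK_REQUEST with
 *	// ext_tcode EXTCODE_COMPARE_SWAP and the arg/data values as payload.
 */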
static int allocate_tlabel(struct fw_card *card)
{
	int tlabel;

	tlabel = card->current_tlabel;
	while (card->tlabel_mask & (1ULL << tlabel)) {
		tlabel = (tlabel + 1) & 0x3f;
		if (tlabel == card->current_tlabel)
			return -EBUSY;
	}

	card->current_tlabel = (tlabel + 1) & 0x3f;
	card->tlabel_mask |= 1ULL << tlabel;

	return tlabel;
}

/**
 * fw_send_request() - submit a request packet for transmission
 * @card: interface to send the request at
 * @t: transaction instance to which the request belongs
 * @tcode: transaction code
 * @destination_id: destination node ID, consisting of bus_ID and phy_ID
 * @generation: bus generation in which request and response are valid
 * @speed: transmission speed
 * @offset: 48bit wide offset into destination's address space
 * @payload: data payload for the request subaction
 * @length: length of the payload, in bytes
 * @callback: function to be called when the transaction is completed
 * @callback_data: data to be passed to the transaction completion callback
 *
 * Submit a request packet into the asynchronous request transmission queue.
 * Can be called from atomic context.  If you prefer a blocking API, use
 * fw_run_transaction() in a context that can sleep.
 *
 * In case of lock requests, specify one of the firewire-core specific %TCODE_
 * constants instead of %TCODE_LOCK_REQUEST in @tcode.
 *
 * Make sure that the value in @destination_id is not older than the one in
 * @generation.  Otherwise the request is in danger of being sent to a wrong
 * node.
 *
 * In case of asynchronous stream packets i.e. %TCODE_STREAM_DATA, the caller
 * needs to synthesize @destination_id with fw_stream_packet_destination_id().
 * It will contain tag, channel, and sy data instead of a node ID then.
 *
 * The payload buffer at @payload is going to be DMA-mapped except in case of
 * @length <= 8 or of local (loopback) requests.  Hence make sure that the
 * buffer complies with the restrictions of the streaming DMA mapping API.
 * @payload must not be freed before the @callback is called.
 *
 * In case of request types without payload, @payload is NULL and @length is 0.
 *
 * After the transaction is completed successfully or unsuccessfully, the
 * @callback will be called.  Among its parameters is the response code which
 * is either one of the rcodes per IEEE 1394 or, in case of internal errors,
 * the firewire-core specific %RCODE_SEND_ERROR.  The other firewire-core
 * specific rcodes (%RCODE_CANCELLED, %RCODE_BUSY, %RCODE_GENERATION,
 * %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request
 * generation, or missing ACK respectively.
 *
 * Note some timing corner cases:  fw_send_request() may complete much earlier
 * than when the request packet actually hits the wire.  On the other hand,
 * transaction completion and hence execution of @callback may happen even
 * before fw_send_request() returns.
 */
void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
		     int destination_id, int generation, int speed,
		     unsigned long long offset, void *payload, size_t length,
		     fw_transaction_callback_t callback, void *callback_data)
{
	unsigned long flags;
	int tlabel;

	/*
	 * Allocate tlabel from the bitmap and put the transaction on
	 * the list while holding the card spinlock.
	 */

	spin_lock_irqsave(&card->lock, flags);

	tlabel = allocate_tlabel(card);
	if (tlabel < 0) {
		spin_unlock_irqrestore(&card->lock, flags);
		callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
		return;
	}

	t->node_id = destination_id;
	t->tlabel = tlabel;
	t->card = card;
	t->is_split_transaction = false;
	timer_setup(&t->split_timeout_timer,
		    split_transaction_timeout_callback, 0);
	t->callback.without_tstamp = callback;
	t->with_tstamp = false;
	t->callback_data = callback_data;

	fw_fill_request(&t->packet, tcode, t->tlabel,
			destination_id, card->node_id, generation,
			speed, offset, payload, length);
	t->packet.callback = transmit_complete_callback;

	list_add_tail(&t->link, &card->transaction_list);

	spin_unlock_irqrestore(&card->lock, flags);

	card->driver->send_request(card, &t->packet);
}
EXPORT_SYMBOL(fw_send_request);
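/*
 * Added note, illustrative only: a rough sketch (not firewire-core code) of
 * an asynchronous quadlet read from a driver's point of view.  The "struct
 * foo" wrapper, its completion handling and the chosen register offset are
 * assumptions made for the example; "device" is the struct fw_device the
 * driver is bound to.
 *
 *	struct foo {
 *		struct fw_transaction t;
 *		u32 buf;
 *		int rcode;
 *		struct completion done;
 *	};
 *
 *	static void foo_complete(struct fw_card *card, int rcode, void *payload,
 *				 size_t length, void *data)
 *	{
 *		struct foo *foo = data;
 *
 *		foo->rcode = rcode;
 *		if (rcode == RCODE_COMPLETE)
 *			memcpy(&foo->buf, payload, min_t(size_t, length, sizeof(foo->buf)));
 *		complete(&foo->done);
 *	}
 *
 *	// foo (and thus foo->t) must stay valid until foo_complete() has run:
 *	fw_send_request(device->card, &foo->t, TCODE_READ_QUADLET_REQUEST,
 *			device->node_id, device->generation, device->max_speed,
 *			CSR_REGISTER_BASE + CSR_CYCLE_TIME, NULL, 0,
 *			foo_complete, foo);
 */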
struct transaction_callback_data {
	struct completion done;
	void *payload;
	int rcode;
};

static void transaction_callback(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct transaction_callback_data *d = data;

	if (rcode == RCODE_COMPLETE)
		memcpy(d->payload, payload, length);
	d->rcode = rcode;
	complete(&d->done);
}

/**
 * fw_run_transaction() - send request and sleep until transaction is completed
 * @card: card interface for this request
 * @tcode: transaction code
 * @destination_id: destination node ID, consisting of bus_ID and phy_ID
 * @generation: bus generation in which request and response are valid
 * @speed: transmission speed
 * @offset: 48bit wide offset into destination's address space
 * @payload: data payload for the request subaction
 * @length: length of the payload, in bytes
 *
 * Returns the RCODE.  See fw_send_request() for parameter documentation.
 * Unlike fw_send_request(), @payload points to the payload of the request
 * or/and to the payload of the response.  DMA mapping restrictions apply to
 * outbound request payloads of >= 8 bytes but not to inbound response
 * payloads.
 */
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
		       int generation, int speed, unsigned long long offset,
		       void *payload, size_t length)
{
	struct transaction_callback_data d;
	struct fw_transaction t;

	timer_setup_on_stack(&t.split_timeout_timer, NULL, 0);
	init_completion(&d.done);
	d.payload = payload;
	fw_send_request(card, &t, tcode, destination_id, generation, speed,
			offset, payload, length, transaction_callback, &d);
	wait_for_completion(&d.done);
	destroy_timer_on_stack(&t.split_timeout_timer);

	return d.rcode;
}
EXPORT_SYMBOL(fw_run_transaction);
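/*
 * Added note, illustrative only: a minimal blocking read using
 * fw_run_transaction() from a context that may sleep.  The device pointer
 * and the choice of register are assumptions made for the example; byte
 * order handling of the returned quadlet is omitted.
 *
 *	u32 cycle_time;
 *	int rcode;
 *
 *	rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
 *				   device->node_id, device->generation,
 *				   device->max_speed,
 *				   CSR_REGISTER_BASE + CSR_CYCLE_TIME,
 *				   &cycle_time, sizeof(cycle_time));
 *	if (rcode != RCODE_COMPLETE)
 *		return -EIO;
 */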
static DEFINE_MUTEX(phy_config_mutex);
static DECLARE_COMPLETION(phy_config_done);

static void transmit_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	complete(&phy_config_done);
}

static struct fw_packet phy_config_packet = {
	.header_length	= 12,
	.header[0]	= TCODE_LINK_INTERNAL << 4,
	.payload_length	= 0,
	.speed		= SCODE_100,
	.callback	= transmit_phy_packet_callback,
};

void fw_send_phy_config(struct fw_card *card,
			int node_id, int generation, int gap_count)
{
	long timeout = DIV_ROUND_UP(HZ, 10);
	u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG);

	if (node_id != FW_PHY_CONFIG_NO_NODE_ID)
		data |= PHY_CONFIG_ROOT_ID(node_id);

	if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) {
		gap_count = card->driver->read_phy_reg(card, 1);
		if (gap_count < 0)
			return;

		gap_count &= 63;
		if (gap_count == 63)
			return;
	}
	data |= PHY_CONFIG_GAP_COUNT(gap_count);

	mutex_lock(&phy_config_mutex);

	phy_config_packet.header[1] = data;
	phy_config_packet.header[2] = ~data;
	phy_config_packet.generation = generation;
	reinit_completion(&phy_config_done);

	card->driver->send_request(card, &phy_config_packet);
	wait_for_completion_timeout(&phy_config_done, timeout);

	mutex_unlock(&phy_config_mutex);
}
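/*
 * Added note, illustrative only: fw_send_phy_config() is typically called by
 * bus management code after a bus reset, for instance to force a particular
 * root node and gap count (the values here are placeholders):
 *
 *	fw_send_phy_config(card, root_node_id, generation, gap_count);
 *
 * or, keeping the currently programmed gap count while only setting the root:
 *
 *	fw_send_phy_config(card, root_node_id, generation,
 *			   FW_PHY_CONFIG_CURRENT_GAP_COUNT);
 */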
static struct fw_address_handler *lookup_overlapping_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry_rcu(handler, list, link) {
		if (handler->offset < offset + length &&
		    offset < handler->offset + handler->length)
			return handler;
	}

	return NULL;
}

static bool is_enclosing_handler(struct fw_address_handler *handler,
				 unsigned long long offset, size_t length)
{
	return handler->offset <= offset &&
		offset + length <= handler->offset + handler->length;
}

static struct fw_address_handler *lookup_enclosing_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry_rcu(handler, list, link) {
		if (is_enclosing_handler(handler, offset, length))
			return handler;
	}

	return NULL;
}

static DEFINE_SPINLOCK(address_handler_list_lock);
static LIST_HEAD(address_handler_list);

const struct fw_address_region fw_high_memory_region =
	{ .start = FW_MAX_PHYSICAL_RANGE, .end = 0xffffe0000000ULL, };
EXPORT_SYMBOL(fw_high_memory_region);

static const struct fw_address_region low_memory_region =
	{ .start = 0x000000000000ULL, .end = FW_MAX_PHYSICAL_RANGE, };

#if 0
const struct fw_address_region fw_private_region =
	{ .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
const struct fw_address_region fw_csr_region =
	{ .start = CSR_REGISTER_BASE,
	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END, };
const struct fw_address_region fw_unit_space_region =
	{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif /* 0 */

/**
 * fw_core_add_address_handler() - register for incoming requests
 * @handler: callback
 * @region: region in the IEEE 1212 node space address range
 *
 * region->start, ->end, and handler->length have to be quadlet-aligned.
 *
 * When a request is received that falls within the specified address range,
 * the specified callback is invoked.  The parameters passed to the callback
 * give the details of the particular request.
 *
 * To be called in process context.
 * Return value:  0 on success, non-zero otherwise.
 *
 * The start offset of the handler's address region is determined by
 * fw_core_add_address_handler() and is returned in handler->offset.
 *
 * Address allocations are exclusive, except for the FCP registers.
 */
int fw_core_add_address_handler(struct fw_address_handler *handler,
				const struct fw_address_region *region)
{
	struct fw_address_handler *other;
	int ret = -EBUSY;

	if (region->start & 0xffff000000000003ULL ||
	    region->start >= region->end ||
	    region->end   > 0x0001000000000000ULL ||
	    handler->length & 3 ||
	    handler->length == 0)
		return -EINVAL;

	spin_lock(&address_handler_list_lock);

	handler->offset = region->start;
	while (handler->offset + handler->length <= region->end) {
		if (is_in_fcp_region(handler->offset, handler->length))
			other = NULL;
		else
			other = lookup_overlapping_address_handler
					(&address_handler_list,
					 handler->offset, handler->length);
		if (other != NULL) {
			handler->offset += other->length;
		} else {
			list_add_tail_rcu(&handler->link, &address_handler_list);
			ret = 0;
			break;
		}
	}

	spin_unlock(&address_handler_list_lock);

	return ret;
}
EXPORT_SYMBOL(fw_core_add_address_handler);
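/*
 * Added note, illustrative only: a rough sketch of registering a handler for
 * incoming requests.  The region, length and behaviour of the callback are
 * assumptions made for the example; a real handler would also be removed
 * again with fw_core_remove_address_handler() on teardown.
 *
 *	static void my_callback(struct fw_card *card, struct fw_request *request,
 *				int tcode, int destination, int source,
 *				int generation, unsigned long long offset,
 *				void *payload, size_t length, void *callback_data)
 *	{
 *		// Every request must be answered exactly once.
 *		fw_send_response(card, request, RCODE_COMPLETE);
 *	}
 *
 *	static const struct fw_address_region my_region =
 *		{ .start = 0xffffe0000000ULL, .end = 0xffffe0000100ULL, };
 *	static struct fw_address_handler my_handler = {
 *		.length = 0x100,
 *		.address_callback = my_callback,
 *	};
 *
 *	if (fw_core_add_address_handler(&my_handler, &my_region) < 0)
 *		return -EBUSY;
 *	// my_handler.offset now holds the allocated start address.
 */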
/**
 * fw_core_remove_address_handler() - unregister an address handler
 * @handler: callback
 *
 * To be called in process context.
 *
 * When fw_core_remove_address_handler() returns, @handler->callback() is
 * guaranteed to not run on any CPU anymore.
 */
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
	spin_lock(&address_handler_list_lock);
	list_del_rcu(&handler->link);
	spin_unlock(&address_handler_list_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(fw_core_remove_address_handler);

struct fw_request {
	struct kref kref;
	struct fw_packet response;
	u32 request_header[4];
	int ack;
	u32 timestamp;
	u32 length;
	u32 data[];
};

void fw_request_get(struct fw_request *request)
{
	kref_get(&request->kref);
}

static void release_request(struct kref *kref)
{
	struct fw_request *request = container_of(kref, struct fw_request, kref);

	kfree(request);
}

void fw_request_put(struct fw_request *request)
{
	kref_put(&request->kref, release_request);
}

static void free_response_callback(struct fw_packet *packet,
				   struct fw_card *card, int status)
{
	struct fw_request *request = container_of(packet, struct fw_request, response);

	// Decrease the reference count since the response is no longer in flight.
	fw_request_put(request);

	// Decrease the reference count to release the object.
	fw_request_put(request);
}

int fw_get_response_length(struct fw_request *r)
{
	int tcode, ext_tcode, data_length;

	tcode = HEADER_GET_TCODE(r->request_header[0]);

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		return 0;

	case TCODE_READ_QUADLET_REQUEST:
		return 4;

	case TCODE_READ_BLOCK_REQUEST:
		data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
		return data_length;

	case TCODE_LOCK_REQUEST:
		ext_tcode = HEADER_GET_EXTENDED_TCODE(r->request_header[3]);
		data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
		switch (ext_tcode) {
		case EXTCODE_FETCH_ADD:
		case EXTCODE_LITTLE_ADD:
			return data_length;
		default:
			return data_length / 2;
		}

	default:
		WARN(1, "wrong tcode %d\n", tcode);
		return 0;
	}
}

void fw_fill_response(struct fw_packet *response, u32 *request_header,
		      int rcode, void *payload, size_t length)
{
	int tcode, tlabel, extended_tcode, source, destination;

	tcode = HEADER_GET_TCODE(request_header[0]);
	tlabel = HEADER_GET_TLABEL(request_header[0]);
	source = HEADER_GET_DESTINATION(request_header[0]);
	destination = HEADER_GET_SOURCE(request_header[1]);
	extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);

	response->header[0] =
		HEADER_RETRY(RETRY_1) |
		HEADER_TLABEL(tlabel) |
		HEADER_DESTINATION(destination);
	response->header[1] =
		HEADER_SOURCE(source) |
		HEADER_RCODE(rcode);
	response->header[2] = 0;

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
		response->header_length = 12;
		response->payload_length = 0;
		break;

	case TCODE_READ_QUADLET_REQUEST:
		response->header[0] |=
			HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
		if (payload != NULL)
			response->header[3] = *(u32 *)payload;
		else
			response->header[3] = 0;
		response->header_length = 16;
		response->payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		response->header[0] |= HEADER_TCODE(tcode + 2);
		response->header[3] =
			HEADER_DATA_LENGTH(length) |
			HEADER_EXTENDED_TCODE(extended_tcode);
		response->header_length = 16;
		response->payload = payload;
		response->payload_length = length;
		break;

	default:
		WARN(1, "wrong tcode %d\n", tcode);
	}

	response->payload_mapped = false;
}
EXPORT_SYMBOL(fw_fill_response);
static u32 compute_split_timeout_timestamp(struct fw_card *card,
					   u32 request_timestamp)
{
	unsigned int cycles;
	u32 timestamp;

	cycles = card->split_timeout_cycles;
	cycles += request_timestamp & 0x1fff;

	timestamp = request_timestamp & ~0x1fff;
	timestamp += (cycles / 8000) << 13;
	timestamp |= cycles % 8000;

	return timestamp;
}
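/*
 * Added note, illustrative only: a worked example of the arithmetic above.
 * With a split timeout of 800 cycles (100 ms) and a request timestamp of
 * second 3, cycle 7500 (i.e. (3 << 13) | 7500), the sum 800 + 7500 = 8300
 * cycles carries one second, so the expiration timestamp becomes
 * (4 << 13) | 300: second 4, cycle 300.
 */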
static struct fw_request *allocate_request(struct fw_card *card,
					   struct fw_packet *p)
{
	struct fw_request *request;
	u32 *data, length;
	int request_tcode;

	request_tcode = HEADER_GET_TCODE(p->header[0]);
	switch (request_tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		data = &p->header[3];
		length = 4;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		data = p->payload;
		length = HEADER_GET_DATA_LENGTH(p->header[3]);
		break;

	case TCODE_READ_QUADLET_REQUEST:
		data = NULL;
		length = 4;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		data = NULL;
		length = HEADER_GET_DATA_LENGTH(p->header[3]);
		break;

	default:
		fw_notice(card, "ERROR - corrupt request received - %08x %08x %08x\n",
			  p->header[0], p->header[1], p->header[2]);
		return NULL;
	}

	request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
	if (request == NULL)
		return NULL;
	kref_init(&request->kref);

	request->response.speed = p->speed;
	request->response.timestamp =
			compute_split_timeout_timestamp(card, p->timestamp);
	request->response.generation = p->generation;
	request->response.ack = 0;
	request->response.callback = free_response_callback;
	request->ack = p->ack;
	request->timestamp = p->timestamp;
	request->length = length;
	if (data)
		memcpy(request->data, data, length);

	memcpy(request->request_header, p->header, sizeof(p->header));

	return request;
}

/**
 * fw_send_response() - send response packet for asynchronous transaction
 * @request: firewire request data for the transaction
 * @card: interface to send the response at
 * @rcode: response code to send
 *
 * Submit a response packet into the asynchronous response transmission queue. The @request
 * is going to be released when the transmission successfully finishes later.
 */
void fw_send_response(struct fw_card *card,
		      struct fw_request *request, int rcode)
{
	/* unified transaction or broadcast transaction: don't respond */
	if (request->ack != ACK_PENDING ||
	    HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) {
		fw_request_put(request);
		return;
	}

	if (rcode == RCODE_COMPLETE)
		fw_fill_response(&request->response, request->request_header,
				 rcode, request->data,
				 fw_get_response_length(request));
	else
		fw_fill_response(&request->response, request->request_header,
				 rcode, NULL, 0);

	// Increase the reference count so that the object is kept while the response is in flight.
	fw_request_get(request);

	card->driver->send_response(card, &request->response);
}
EXPORT_SYMBOL(fw_send_response);

/**
 * fw_get_request_speed() - returns speed at which the @request was received
 * @request: firewire request data
 */
int fw_get_request_speed(struct fw_request *request)
{
	return request->response.speed;
}
EXPORT_SYMBOL(fw_get_request_speed);

/**
 * fw_request_get_timestamp() - Get timestamp of the request.
 * @request: The opaque pointer to request structure.
 *
 * Get the timestamp at which the 1394 OHCI controller received the asynchronous request
 * subaction.  The timestamp consists of the low-order 3 bits of the second field and the full
 * 13 bits of the count field of the isochronous cycle time register.
 *
 * Returns: timestamp of the request.
 */
u32 fw_request_get_timestamp(const struct fw_request *request)
{
	return request->timestamp;
}
EXPORT_SYMBOL_GPL(fw_request_get_timestamp);

static void handle_exclusive_region_request(struct fw_card *card,
					    struct fw_packet *p,
					    struct fw_request *request,
					    unsigned long long offset)
{
	struct fw_address_handler *handler;
	int tcode, destination, source;

	destination = HEADER_GET_DESTINATION(p->header[0]);
	source = HEADER_GET_SOURCE(p->header[1]);
	tcode = HEADER_GET_TCODE(p->header[0]);
	if (tcode == TCODE_LOCK_REQUEST)
		tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]);

	rcu_read_lock();
	handler = lookup_enclosing_address_handler(&address_handler_list,
						   offset, request->length);
	if (handler)
		handler->address_callback(card, request,
					  tcode, destination, source,
					  p->generation, offset,
					  request->data, request->length,
					  handler->callback_data);
	rcu_read_unlock();

	if (!handler)
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
}

static void handle_fcp_region_request(struct fw_card *card,
				      struct fw_packet *p,
				      struct fw_request *request,
				      unsigned long long offset)
{
	struct fw_address_handler *handler;
	int tcode, destination, source;

	if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
	     offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
	    request->length > 0x200) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);

		return;
	}

	tcode = HEADER_GET_TCODE(p->header[0]);
	destination = HEADER_GET_DESTINATION(p->header[0]);
	source = HEADER_GET_SOURCE(p->header[1]);

	if (tcode != TCODE_WRITE_QUADLET_REQUEST &&
	    tcode != TCODE_WRITE_BLOCK_REQUEST) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);

		return;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(handler, &address_handler_list, link) {
		if (is_enclosing_handler(handler, offset, request->length))
			handler->address_callback(card, request, tcode,
						  destination, source,
						  p->generation, offset,
						  request->data,
						  request->length,
						  handler->callback_data);
	}
	rcu_read_unlock();

	fw_send_response(card, request, RCODE_COMPLETE);
}
void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
{
	struct fw_request *request;
	unsigned long long offset;

	if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
		return;

	if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) {
		fw_cdev_handle_phy_packet(card, p);
		return;
	}

	request = allocate_request(card, p);
	if (request == NULL) {
		/* FIXME: send statically allocated busy packet. */
		return;
	}

	offset = ((u64)HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) |
		p->header[2];

	if (!is_in_fcp_region(offset, request->length))
		handle_exclusive_region_request(card, p, request, offset);
	else
		handle_fcp_region_request(card, p, request, offset);

}
EXPORT_SYMBOL(fw_core_handle_request);

void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
	struct fw_transaction *t = NULL, *iter;
	unsigned long flags;
	u32 *data;
	size_t data_length;
	int tcode, tlabel, source, rcode;

	tcode = HEADER_GET_TCODE(p->header[0]);
	tlabel = HEADER_GET_TLABEL(p->header[0]);
	source = HEADER_GET_SOURCE(p->header[1]);
	rcode = HEADER_GET_RCODE(p->header[1]);

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iter, &card->transaction_list, link) {
		if (iter->node_id == source && iter->tlabel == tlabel) {
			if (!try_cancel_split_timeout(iter)) {
				spin_unlock_irqrestore(&card->lock, flags);
				goto timed_out;
			}
			list_del_init(&iter->link);
			card->tlabel_mask &= ~(1ULL << iter->tlabel);
			t = iter;
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!t) {
 timed_out:
		fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
			  source, tlabel);
		return;
	}

	/*
	 * FIXME: sanity check packet, is the length correct, do the
	 * tcode and addresses match?
	 */

	switch (tcode) {
	case TCODE_READ_QUADLET_RESPONSE:
		data = (u32 *) &p->header[3];
		data_length = 4;
		break;

	case TCODE_WRITE_RESPONSE:
		data = NULL;
		data_length = 0;
		break;

	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_RESPONSE:
		data = p->payload;
		data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
		break;

	default:
		/* Should never happen, this is just to shut up gcc. */
		data = NULL;
		data_length = 0;
		break;
	}

	/*
	 * The response handler may be executed while the request handler
	 * is still pending.  Cancel the request handler.
	 */
	card->driver->cancel_packet(card, &t->packet);

	if (!t->with_tstamp) {
		t->callback.without_tstamp(card, rcode, data, data_length, t->callback_data);
	} else {
		t->callback.with_tstamp(card, rcode, t->packet.timestamp, p->timestamp, data,
					data_length, t->callback_data);
	}
}
EXPORT_SYMBOL(fw_core_handle_response);

/**
 * fw_rcode_string - convert a firewire result code to an error description
 * @rcode: the result code
 */
const char *fw_rcode_string(int rcode)
{
	static const char *const names[] = {
		[RCODE_COMPLETE]       = "no error",
		[RCODE_CONFLICT_ERROR] = "conflict error",
		[RCODE_DATA_ERROR]     = "data error",
		[RCODE_TYPE_ERROR]     = "type error",
		[RCODE_ADDRESS_ERROR]  = "address error",
		[RCODE_SEND_ERROR]     = "send error",
		[RCODE_CANCELLED]      = "timeout",
		[RCODE_BUSY]           = "busy",
		[RCODE_GENERATION]     = "bus reset",
		[RCODE_NO_ACK]         = "no ack",
	};

	if ((unsigned int)rcode < ARRAY_SIZE(names) && names[rcode])
		return names[rcode];
	else
		return "unknown";
}
EXPORT_SYMBOL(fw_rcode_string);

static const struct fw_address_region topology_map_region =
	{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
	  .end   = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };

static void handle_topology_map(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	int start;

	if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	if ((offset & 3) > 0 || (length & 3) > 0) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
		return;
	}

	start = (offset - topology_map_region.start) / 4;
	memcpy(payload, &card->topology_map[start], length);

	fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler topology_map = {
	.length			= 0x400,
	.address_callback	= handle_topology_map,
};

static const struct fw_address_region registers_region =
	{ .start = CSR_REGISTER_BASE,
	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };

static void update_split_timeout(struct fw_card *card)
{
	unsigned int cycles;

	cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);

	/* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
	cycles = clamp(cycles, 800u, 3u * 8000u);

	card->split_timeout_cycles = cycles;
	card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
}
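/*
 * Added note, illustrative only: a worked example of the register encoding
 * handled above.  With SPLIT_TIMEOUT_HI = 0 and SPLIT_TIMEOUT_LO = 0x19000000
 * (i.e. 800 << 19), update_split_timeout() computes 0 * 8000 + 800 = 800
 * cycles, which stays within the clamp range, so split_timeout_cycles becomes
 * 800 (100 ms) and split_timeout_jiffies becomes DIV_ROUND_UP(800 * HZ, 8000),
 * i.e. roughly HZ / 10.
 */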
static void handle_registers(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	int reg = offset & ~CSR_REGISTER_BASE;
	__be32 *data = payload;
	int rcode = RCODE_COMPLETE;
	unsigned long flags;

	switch (reg) {
	case CSR_PRIORITY_BUDGET:
		if (!card->priority_budget_implemented) {
			rcode = RCODE_ADDRESS_ERROR;
			break;
		}
		fallthrough;

	case CSR_NODE_IDS:
		/*
		 * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8
		 * and 9.6, but interoperable with IEEE 1394.1-2004 bridges
		 */
		fallthrough;

	case CSR_STATE_CLEAR:
	case CSR_STATE_SET:
	case CSR_CYCLE_TIME:
	case CSR_BUS_TIME:
	case CSR_BUSY_TIMEOUT:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->driver->read_csr(card, reg));
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->driver->write_csr(card, reg, be32_to_cpu(*data));
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_RESET_START:
		if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->driver->write_csr(card, CSR_STATE_CLEAR,
						CSR_STATE_BIT_ABDICATE);
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_SPLIT_TIMEOUT_HI:
		if (tcode == TCODE_READ_QUADLET_REQUEST) {
			*data = cpu_to_be32(card->split_timeout_hi);
		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
			spin_lock_irqsave(&card->lock, flags);
			card->split_timeout_hi = be32_to_cpu(*data) & 7;
			update_split_timeout(card);
			spin_unlock_irqrestore(&card->lock, flags);
		} else {
			rcode = RCODE_TYPE_ERROR;
		}
		break;

	case CSR_SPLIT_TIMEOUT_LO:
		if (tcode == TCODE_READ_QUADLET_REQUEST) {
			*data = cpu_to_be32(card->split_timeout_lo);
		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
			spin_lock_irqsave(&card->lock, flags);
			card->split_timeout_lo =
				be32_to_cpu(*data) & 0xfff80000;
			update_split_timeout(card);
			spin_unlock_irqrestore(&card->lock, flags);
		} else {
			rcode = RCODE_TYPE_ERROR;
		}
		break;

	case CSR_MAINT_UTILITY:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = card->maint_utility_register;
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->maint_utility_register = *data;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BROADCAST_CHANNEL:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->broadcast_channel);
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->broadcast_channel =
			    (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) |
			    BROADCAST_CHANNEL_INITIAL;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		/*
		 * FIXME: these are handled by the OHCI hardware and
		 * the stack never sees these requests.  If we add
		 * support for a new type of controller that doesn't
		 * handle this in hardware we need to deal with these
		 * transactions.
		 */
		BUG();
		break;

	default:
		rcode = RCODE_ADDRESS_ERROR;
		break;
	}

	fw_send_response(card, request, rcode);
}

static struct fw_address_handler registers = {
	.length			= 0x400,
	.address_callback	= handle_registers,
};
static void handle_low_memory(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	/*
	 * This catches requests not handled by the physical DMA unit,
	 * i.e., wrong transaction types or unauthorized source nodes.
	 */
	fw_send_response(card, request, RCODE_TYPE_ERROR);
}

static struct fw_address_handler low_memory = {
	.length			= FW_MAX_PHYSICAL_RANGE,
	.address_callback	= handle_low_memory,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
MODULE_LICENSE("GPL");

static const u32 vendor_textual_descriptor[] = {
	/* textual descriptor leaf () */
	0x00060000,
	0x00000000,
	0x00000000,
	0x4c696e75,		/* L i n u */
	0x78204669,		/* x   F i */
	0x72657769,		/* r e w i */
	0x72650000,		/* r e     */
};

static const u32 model_textual_descriptor[] = {
	/* model descriptor leaf () */
	0x00030000,
	0x00000000,
	0x00000000,
	0x4a756a75,		/* J u j u */
};

static struct fw_descriptor vendor_id_descriptor = {
	.length = ARRAY_SIZE(vendor_textual_descriptor),
	.immediate = 0x03001f11,
	.key = 0x81000000,
	.data = vendor_textual_descriptor,
};

static struct fw_descriptor model_id_descriptor = {
	.length = ARRAY_SIZE(model_textual_descriptor),
	.immediate = 0x17023901,
	.key = 0x81000000,
	.data = model_textual_descriptor,
};

static int __init fw_core_init(void)
{
	int ret;

	fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM, 0);
	if (!fw_workqueue)
		return -ENOMEM;

	ret = bus_register(&fw_bus_type);
	if (ret < 0) {
		destroy_workqueue(fw_workqueue);
		return ret;
	}

	fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
	if (fw_cdev_major < 0) {
		bus_unregister(&fw_bus_type);
		destroy_workqueue(fw_workqueue);
		return fw_cdev_major;
	}

	fw_core_add_address_handler(&topology_map, &topology_map_region);
	fw_core_add_address_handler(&registers, &registers_region);
	fw_core_add_address_handler(&low_memory, &low_memory_region);
	fw_core_add_descriptor(&vendor_id_descriptor);
	fw_core_add_descriptor(&model_id_descriptor);

	return 0;
}

static void __exit fw_core_cleanup(void)
{
	unregister_chrdev(fw_cdev_major, "firewire");
	bus_unregister(&fw_bus_type);
	destroy_workqueue(fw_workqueue);
	idr_destroy(&fw_device_idr);
}

module_init(fw_core_init);
module_exit(fw_core_cleanup);