// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>
#include <asm/page.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "core.h"
#include "ohci.h"

#define ohci_info(ohci, f, args...)	dev_info(ohci->card.device, f, ##args)
#define ohci_notice(ohci, f, args...)	dev_notice(ohci->card.device, f, ##args)
#define ohci_err(ohci, f, args...)	dev_err(ohci->card.device, f, ##args)

#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)

#define DESCRIPTOR_CMD			(0xf << 12)

struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));

#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)

#define AR_BUFFER_SIZE	(32*1024)
#define AR_BUFFERS_MIN	DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
/* we need at least two pages for proper list management */
#define AR_BUFFERS	(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)

#define MAX_ASYNC_PAYLOAD	4096
#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)

struct ar_context {
	struct fw_ohci *ohci;
	struct page *pages[AR_BUFFERS];
	void *buffer;
	struct descriptor *descriptors;
	dma_addr_t descriptors_bus;
	void *pointer;
	unsigned int last_buffer_index;
	u32 regs;
	struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);

/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[0];
};

struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;
	u32 current_bus;
	bool running;
	bool flushing;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor block in the DMA program. It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;
	int prev_z;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};

#define IT_HEADER_SY(v)			((v) << 0)
#define IT_HEADER_TCODE(v)		((v) << 4)
#define IT_HEADER_CHANNEL(v)		((v) << 8)
#define IT_HEADER_TAG(v)		((v) << 14)
#define IT_HEADER_SPEED(v)		((v) << 16)
#define IT_HEADER_DATA_LENGTH(v)	((v) << 16)

struct iso_context {
	struct fw_iso_context base;
	struct context context;
	void *header;
	size_t header_length;
	unsigned long flushing_completions;
	u32 mc_buffer_bus;
	u16 mc_completed;
	u16 last_timestamp;
	u8 sync;
	u8 tags;
};

#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	int node_id;
	int generation;
	int request_generation;	/* for timestamping incoming requests */
	unsigned quirks;
	unsigned int pri_req_max;
	u32 bus_time;
	bool bus_time_running;
	bool is_root;
	bool csr_state_setclear_abdicate;
	int n_ir;
	int n_it;
	/*
	 * Spinlock for accessing fw_ohci data. Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;

	struct mutex phy_reg_mutex;

	void *misc_buffer;
	dma_addr_t misc_buffer_bus;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_support;
	u32 it_context_mask;	/* unoccupied IT contexts */
	struct iso_context *it_context_list;
	u64 ir_context_channels;	/* unoccupied channels */
	u32 ir_context_support;
	u32 ir_context_mask;	/* unoccupied IR contexts */
	struct iso_context *ir_context_list;
	u64 mc_channels;	/* channels in use by the multichannel IR context */
	bool mc_allocated;

	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	__be32 next_header;

	__le32 *self_id;
	dma_addr_t self_id_bus;
	struct work_struct bus_reset_work;

	u32 self_id_buffer[512];
};

static struct workqueue_struct *selfid_workqueue;

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010

static char ohci_driver_name[] = KBUILD_MODNAME;

#define PCI_VENDOR_ID_PINNACLE_SYSTEMS	0x11bd
#define PCI_DEVICE_ID_AGERE_FW643	0x5901
#define PCI_DEVICE_ID_CREATIVE_SB1394	0x4001
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW	0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22	0x8009
#define PCI_DEVICE_ID_TI_TSB12LV26	0x8020
#define PCI_DEVICE_ID_TI_TSB82AA2	0x8025
#define PCI_DEVICE_ID_VIA_VT630X	0x3044
#define PCI_REV_ID_VIA_VT6306		0x46
#define PCI_DEVICE_ID_VIA_VT6315	0x3403

#define QUIRK_CYCLE_TIMER	0x1
#define QUIRK_RESET_PACKET	0x2
#define QUIRK_BE_HEADERS	0x4
#define QUIRK_NO_1394A		0x8
#define QUIRK_NO_MSI		0x10
#define QUIRK_TI_SLLZ059	0x20
#define QUIRK_IR_WAKE		0x40

/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
	unsigned short vendor, device, revision, flags;
} ohci_quirks[] = {
	{PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
		QUIRK_BE_HEADERS},

	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306,
		QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
		QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
};

/* This overrides anything that was found in ohci_quirks[]. */
static int param_quirks;
module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
	", nonatomic cycle timer = "	__stringify(QUIRK_CYCLE_TIMER)
	", reset packet generation = "	__stringify(QUIRK_RESET_PACKET)
	", AR/selfID endianness = "	__stringify(QUIRK_BE_HEADERS)
	", no 1394a enhancements = "	__stringify(QUIRK_NO_1394A)
	", disable MSI = "		__stringify(QUIRK_NO_MSI)
	", TI SLLZ059 erratum = "	__stringify(QUIRK_TI_SLLZ059)
	", IR wake unreliable = "	__stringify(QUIRK_IR_WAKE)
	")");

#define OHCI_PARAM_DEBUG_AT_AR		1
#define OHCI_PARAM_DEBUG_SELFIDS	2
#define OHCI_PARAM_DEBUG_IRQS		4
#define OHCI_PARAM_DEBUG_BUSRESETS	8 /* only effective before chip init */

static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
	", AT/AR events = "	__stringify(OHCI_PARAM_DEBUG_AT_AR)
	", self-IDs = "		__stringify(OHCI_PARAM_DEBUG_SELFIDS)
	", IRQs = "		__stringify(OHCI_PARAM_DEBUG_IRQS)
	", busReset events = "	__stringify(OHCI_PARAM_DEBUG_BUSRESETS)
	", or a combination, or all = -1)");

static bool param_remote_dma;
module_param_named(remote_dma, param_remote_dma, bool, 0444);
MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");

static void log_irqs(struct fw_ohci *ohci, u32 evt)
{
	if (likely(!(param_debug &
			(OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
		return;

	if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
	    !(evt & OHCI1394_busReset))
		return;

	ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
		    evt & OHCI1394_selfIDComplete ? " selfID" : "",
		    evt & OHCI1394_RQPkt ? " AR_req" : "",
		    evt & OHCI1394_RSPkt ? " AR_resp" : "",
" AR_resp" : "", 380 evt & OHCI1394_reqTxComplete ? " AT_req" : "", 381 evt & OHCI1394_respTxComplete ? " AT_resp" : "", 382 evt & OHCI1394_isochRx ? " IR" : "", 383 evt & OHCI1394_isochTx ? " IT" : "", 384 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "", 385 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "", 386 evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "", 387 evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "", 388 evt & OHCI1394_regAccessFail ? " regAccessFail" : "", 389 evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "", 390 evt & OHCI1394_busReset ? " busReset" : "", 391 evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt | 392 OHCI1394_RSPkt | OHCI1394_reqTxComplete | 393 OHCI1394_respTxComplete | OHCI1394_isochRx | 394 OHCI1394_isochTx | OHCI1394_postedWriteErr | 395 OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds | 396 OHCI1394_cycleInconsistent | 397 OHCI1394_regAccessFail | OHCI1394_busReset) 398 ? " ?" : ""); 399 } 400 401 static const char *speed[] = { 402 [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta", 403 }; 404 static const char *power[] = { 405 [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W", 406 [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W", 407 }; 408 static const char port[] = { '.', '-', 'p', 'c', }; 409 410 static char _p(u32 *s, int shift) 411 { 412 return port[*s >> shift & 3]; 413 } 414 415 static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count) 416 { 417 u32 *s; 418 419 if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS))) 420 return; 421 422 ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n", 423 self_id_count, generation, ohci->node_id); 424 425 for (s = ohci->self_id_buffer; self_id_count--; ++s) 426 if ((*s & 1 << 23) == 0) 427 ohci_notice(ohci, 428 "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n", 429 *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2), 430 speed[*s >> 14 & 3], *s >> 16 & 63, 431 power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "", 432 *s >> 11 & 1 ? "c" : "", *s & 2 ? 
"i" : ""); 433 else 434 ohci_notice(ohci, 435 "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n", 436 *s, *s >> 24 & 63, 437 _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10), 438 _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2)); 439 } 440 441 static const char *evts[] = { 442 [0x00] = "evt_no_status", [0x01] = "-reserved-", 443 [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack", 444 [0x04] = "evt_underrun", [0x05] = "evt_overrun", 445 [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read", 446 [0x08] = "evt_data_write", [0x09] = "evt_bus_reset", 447 [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err", 448 [0x0c] = "-reserved-", [0x0d] = "-reserved-", 449 [0x0e] = "evt_unknown", [0x0f] = "evt_flushed", 450 [0x10] = "-reserved-", [0x11] = "ack_complete", 451 [0x12] = "ack_pending ", [0x13] = "-reserved-", 452 [0x14] = "ack_busy_X", [0x15] = "ack_busy_A", 453 [0x16] = "ack_busy_B", [0x17] = "-reserved-", 454 [0x18] = "-reserved-", [0x19] = "-reserved-", 455 [0x1a] = "-reserved-", [0x1b] = "ack_tardy", 456 [0x1c] = "-reserved-", [0x1d] = "ack_data_error", 457 [0x1e] = "ack_type_error", [0x1f] = "-reserved-", 458 [0x20] = "pending/cancelled", 459 }; 460 static const char *tcodes[] = { 461 [0x0] = "QW req", [0x1] = "BW req", 462 [0x2] = "W resp", [0x3] = "-reserved-", 463 [0x4] = "QR req", [0x5] = "BR req", 464 [0x6] = "QR resp", [0x7] = "BR resp", 465 [0x8] = "cycle start", [0x9] = "Lk req", 466 [0xa] = "async stream packet", [0xb] = "Lk resp", 467 [0xc] = "-reserved-", [0xd] = "-reserved-", 468 [0xe] = "link internal", [0xf] = "-reserved-", 469 }; 470 471 static void log_ar_at_event(struct fw_ohci *ohci, 472 char dir, int speed, u32 *header, int evt) 473 { 474 int tcode = header[0] >> 4 & 0xf; 475 char specific[12]; 476 477 if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR))) 478 return; 479 480 if (unlikely(evt >= ARRAY_SIZE(evts))) 481 evt = 0x1f; 482 483 if (evt == OHCI1394_evt_bus_reset) { 484 ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n", 485 dir, (header[2] >> 16) & 0xff); 486 return; 487 } 488 489 switch (tcode) { 490 case 0x0: case 0x6: case 0x8: 491 snprintf(specific, sizeof(specific), " = %08x", 492 be32_to_cpu((__force __be32)header[3])); 493 break; 494 case 0x1: case 0x5: case 0x7: case 0x9: case 0xb: 495 snprintf(specific, sizeof(specific), " %x,%x", 496 header[3] >> 16, header[3] & 0xffff); 497 break; 498 default: 499 specific[0] = '\0'; 500 } 501 502 switch (tcode) { 503 case 0xa: 504 ohci_notice(ohci, "A%c %s, %s\n", 505 dir, evts[evt], tcodes[tcode]); 506 break; 507 case 0xe: 508 ohci_notice(ohci, "A%c %s, PHY %08x %08x\n", 509 dir, evts[evt], header[1], header[2]); 510 break; 511 case 0x0: case 0x1: case 0x4: case 0x5: case 0x9: 512 ohci_notice(ohci, 513 "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n", 514 dir, speed, header[0] >> 10 & 0x3f, 515 header[1] >> 16, header[0] >> 16, evts[evt], 516 tcodes[tcode], header[1] & 0xffff, header[2], specific); 517 break; 518 default: 519 ohci_notice(ohci, 520 "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n", 521 dir, speed, header[0] >> 10 & 0x3f, 522 header[1] >> 16, header[0] >> 16, evts[evt], 523 tcodes[tcode], specific); 524 } 525 } 526 527 static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data) 528 { 529 writel(data, ohci->registers + offset); 530 } 531 532 static inline u32 reg_read(const struct fw_ohci *ohci, int offset) 533 { 534 return readl(ohci->registers + offset); 535 } 536 537 static inline void flush_writes(const struct fw_ohci *ohci) 538 { 539 /* Do a dummy read to flush writes. 
	reg_read(ohci, OHCI1394_Version);
}

/*
 * Beware! read_phy_reg(), write_phy_reg(), update_phy_reg(), and
 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
 * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
 * directly. Exceptions are intrinsically serialized contexts like pci_probe.
 */
static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
	u32 val;
	int i;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (val & OHCI1394_PhyControl_ReadDone)
			return OHCI1394_PhyControl_ReadData(val);

		/*
		 * Try a few times without waiting. Sleeping is necessary
		 * only when the link/PHY interface is busy.
		 */
		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to read phy reg %d\n", addr);
	dump_stack();

	return -EBUSY;
}

static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
{
	int i;

	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, val));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (!(val & OHCI1394_PhyControl_WritePending))
			return 0;

		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val);
	dump_stack();

	return -EBUSY;
}

static int update_phy_reg(struct fw_ohci *ohci, int addr,
			  int clear_bits, int set_bits)
{
	int ret = read_phy_reg(ohci, addr);
	if (ret < 0)
		return ret;

	/*
	 * The interrupt status bits are cleared by writing a one bit.
	 * Avoid clearing them unless explicitly requested in set_bits.
	 */
	if (addr == 5)
		clear_bits |= PHY_INT_STATUS_BITS;

	return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
}

static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
	int ret;

	ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
	if (ret < 0)
		return ret;

	return read_phy_reg(ohci, addr);
}

static int ohci_read_phy_reg(struct fw_card *card, int addr)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = read_phy_reg(ohci, addr);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}

static int ohci_update_phy_reg(struct fw_card *card, int addr,
			       int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}

static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
{
	return page_private(ctx->pages[i]);
}

static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
{
	struct descriptor *d;

	d = &ctx->descriptors[index];
	d->branch_address &= cpu_to_le32(~0xf);
	d->res_count = cpu_to_le16(PAGE_SIZE);
	d->transfer_status = 0;

	wmb(); /* finish init of new descriptors before branch_address update */
	d = &ctx->descriptors[ctx->last_buffer_index];
	d->branch_address |= cpu_to_le32(1);

	ctx->last_buffer_index = index;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}

static void ar_context_release(struct ar_context *ctx)
{
	unsigned int i;

	vunmap(ctx->buffer);

	for (i = 0; i < AR_BUFFERS; i++)
		if (ctx->pages[i]) {
			dma_unmap_page(ctx->ohci->card.device,
				       ar_buffer_bus(ctx, i),
				       PAGE_SIZE, DMA_FROM_DEVICE);
			__free_page(ctx->pages[i]);
		}
}

static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
{
	struct fw_ohci *ohci = ctx->ohci;

	if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
		reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
		flush_writes(ohci);

		ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg);
	}
	/* FIXME: restart? */
}

static inline unsigned int ar_next_buffer_index(unsigned int index)
{
	return (index + 1) % AR_BUFFERS;
}

static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
{
	return ar_next_buffer_index(ctx->last_buffer_index);
}

/*
 * We search for the buffer that contains the last AR packet DMA data written
 * by the controller.
 */
static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
						 unsigned int *buffer_offset)
{
	unsigned int i, next_i, last = ctx->last_buffer_index;
	__le16 res_count, next_res_count;

	i = ar_first_buffer_index(ctx);
	res_count = READ_ONCE(ctx->descriptors[i].res_count);

	/* A buffer that is not yet completely filled must be the last one. */
	while (i != last && res_count == 0) {

		/* Peek at the next descriptor. */
		next_i = ar_next_buffer_index(i);
		rmb(); /* read descriptors in order */
		next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
		/*
		 * If the next descriptor is still empty, we must stop at this
		 * descriptor.
		 */
		if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
			/*
			 * The exception is when the DMA data for one packet is
			 * split over three buffers; in this case, the middle
			 * buffer's descriptor might be never updated by the
			 * controller and look still empty, and we have to peek
			 * at the third one.
			 */
			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
				next_i = ar_next_buffer_index(next_i);
				rmb();
				next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
				if (next_res_count != cpu_to_le16(PAGE_SIZE))
					goto next_buffer_is_active;
			}

			break;
		}

next_buffer_is_active:
		i = next_i;
		res_count = next_res_count;
	}

	rmb(); /* read res_count before the DMA data */

	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
	if (*buffer_offset > PAGE_SIZE) {
		*buffer_offset = 0;
		ar_context_abort(ctx, "corrupted descriptor");
	}

	return i;
}

static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
				    unsigned int end_buffer_index,
				    unsigned int end_buffer_offset)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer_index) {
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					PAGE_SIZE, DMA_FROM_DEVICE);
		i = ar_next_buffer_index(i);
	}
	if (end_buffer_offset > 0)
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					end_buffer_offset, DMA_FROM_DEVICE);
}

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif

static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0]);
	p.header[1] = cond_le32_to_cpu(buffer[1]);
	p.header[2] = cond_le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
			ar_context_abort(ctx, "invalid packet length");
			return NULL;
		}
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		ar_context_abort(ctx, "invalid tcode");
		return NULL;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length]);
	evt = (status >> 16) & 0x1f;

	p.ack = evt - 16;
	p.speed = (status >> 21) & 0x7;
	p.timestamp = status & 0xffff;
	p.generation = ohci->request_generation;

	log_ar_at_event(ohci, 'R', p.speed, p.header, evt);

	/*
	 * Several controllers, notably from NEC and VIA, forget to
	 * write ack_complete status at PHY packet reception.
	 */
	if (evt == OHCI1394_evt_no_status &&
	    (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
		p.ack = ACK_COMPLETE;

	/*
	 * The OHCI bus reset handler synthesizes a PHY packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3). This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation. We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation. We set the correct generation for these
	 * at a slightly incorrect time (in bus_reset_work).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!(ohci->quirks & QUIRK_RESET_PACKET))
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}

static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
{
	void *next;

	while (p < end) {
		next = handle_ar_packet(ctx, p);
		if (!next)
			return p;
		p = next;
	}

	return p;
}

static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer) {
		dma_sync_single_for_device(ctx->ohci->card.device,
					   ar_buffer_bus(ctx, i),
					   PAGE_SIZE, DMA_FROM_DEVICE);
		ar_context_link_page(ctx, i);
		i = ar_next_buffer_index(i);
	}
}

static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	unsigned int end_buffer_index, end_buffer_offset;
	void *p, *end;

	p = ctx->pointer;
	if (!p)
		return;

	end_buffer_index = ar_search_last_active_buffer(ctx,
							&end_buffer_offset);
	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;

	if (end_buffer_index < ar_first_buffer_index(ctx)) {
		/*
		 * The filled part of the overall buffer wraps around; handle
		 * all packets up to the buffer end here. If the last packet
		 * wraps around, its tail will be visible after the buffer end
		 * because the buffer start pages are mapped there again.
		 */
		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
		p = handle_ar_packets(ctx, p, buffer_end);
		if (p < buffer_end)
			goto error;
		/* adjust p to point back into the actual buffer */
		p -= AR_BUFFERS * PAGE_SIZE;
	}

	p = handle_ar_packets(ctx, p, end);
	if (p != end) {
		if (p > end)
			ar_context_abort(ctx, "inconsistent descriptor");
		goto error;
	}

	ctx->pointer = p;
	ar_recycle_buffers(ctx, end_buffer_index);

	return;

error:
	ctx->pointer = NULL;
}

static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
			   unsigned int descriptors_offset, u32 regs)
{
	unsigned int i;
	dma_addr_t dma_addr;
	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
	struct descriptor *d;

	ctx->regs = regs;
	ctx->ohci = ohci;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	for (i = 0; i < AR_BUFFERS; i++) {
		ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
		if (!ctx->pages[i])
			goto out_of_memory;
		dma_addr = dma_map_page(ohci->card.device, ctx->pages[i],
					0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(ohci->card.device, dma_addr)) {
			__free_page(ctx->pages[i]);
			ctx->pages[i] = NULL;
			goto out_of_memory;
		}
		set_page_private(ctx->pages[i], dma_addr);
	}

	for (i = 0; i < AR_BUFFERS; i++)
		pages[i] = ctx->pages[i];
	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
		pages[AR_BUFFERS + i] = ctx->pages[i];
	ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
	if (!ctx->buffer)
		goto out_of_memory;

	ctx->descriptors = ohci->misc_buffer + descriptors_offset;
	ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;

	for (i = 0; i < AR_BUFFERS; i++) {
		d = &ctx->descriptors[i];
		d->req_count = cpu_to_le16(PAGE_SIZE);
		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
					 DESCRIPTOR_STATUS |
					 DESCRIPTOR_BRANCH_ALWAYS);
		d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i));
		d->branch_address = cpu_to_le32(ctx->descriptors_bus +
			ar_next_buffer_index(i) * sizeof(struct descriptor));
	}

	return 0;

out_of_memory:
	ar_context_release(ctx);

	return -ENOMEM;
}

static void ar_context_run(struct ar_context *ctx)
{
	unsigned int i;

	for (i = 0; i < AR_BUFFERS; i++)
		ar_context_link_page(ctx, i);

	ctx->pointer = ctx->buffer;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
}

static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
	__le16 branch;

	branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
		return d;
	else
		return d + z - 1;
}

static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			  struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;
		ctx->current_bus = address;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
		    address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					  struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		if (old_desc != desc) {
			/* If we've advanced to the next buffer, move the
			 * previous buffer to the free list. */
			unsigned long flags;
			old_desc->used = 0;
			spin_lock_irqsave(&ctx->ohci->lock, flags);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		}
		ctx->last = last;
	}
}

/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context. Must be called with ohci->lock held.
 */
static int context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t uninitialized_var(bus_addr);
	int offset;

	/*
	 * 16MB of descriptors should be far more than enough for any DMA
	 * program. This will catch run-away userspace or DoS attacks.
	 */
	if (ctx->total_allocation >= 16*1024*1024)
		return -ENOMEM;

	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
				  &bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	offset = (void *)&desc->buffer - (void *)desc;
	/*
	 * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads
	 * for descriptors, even 0x10-byte ones. This can cause page faults when
	 * an IOMMU is in use and the oversized read crosses a page boundary.
	 * Work around this by always leaving at least 0x10 bytes of padding.
	 */
	desc->buffer_size = PAGE_SIZE - offset - 0x10;
	desc->buffer_bus = bus_addr + offset;
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	ctx->total_allocation += PAGE_SIZE;

	return 0;
}

static int context_init(struct context *ctx, struct fw_ohci *ohci,
			u32 regs, descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->total_allocation = 0;

	INIT_LIST_HEAD(&ctx->buffer_list);
	if (context_add_buffer(ctx) < 0)
		return -ENOMEM;

	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
				      struct descriptor_buffer, list);

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent. That way we
	 * have a descriptor to append DMA programs to.
	 */
	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
	ctx->last = ctx->buffer_tail->buffer;
	ctx->prev = ctx->buffer_tail->buffer;
	ctx->prev_z = 1;

	return 0;
}

static void context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;
	struct descriptor_buffer *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
		dma_free_coherent(card->device, PAGE_SIZE, desc,
			desc->buffer_bus -
			((void *)&desc->buffer - (void *)desc));
}

/* Must be called with ohci->lock held */
static struct descriptor *context_get_descriptors(struct context *ctx,
						  int z, dma_addr_t *d_bus)
{
	struct descriptor *d = NULL;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	if (z * sizeof(*d) > desc->buffer_size)
		return NULL;

	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
		/* No room for the descriptor in this buffer, so advance to the
		 * next one. */

		if (desc->list.next == &ctx->buffer_list) {
			/* If there is no free buffer next in the list,
			 * allocate one. */
			if (context_add_buffer(ctx) < 0)
				return NULL;
		}
		desc = list_entry(desc->list.next,
				  struct descriptor_buffer, list);
		ctx->buffer_tail = desc;
	}

	d = desc->buffer + desc->used / sizeof(*d);
	memset(d, 0, z * sizeof(*d));
	*d_bus = desc->buffer_bus + desc->used;

	return d;
}

static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	ctx->running = true;
	flush_writes(ohci);
}

static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;
	struct descriptor *d_branch;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);

	wmb(); /* finish init of new descriptors before branch_address update */

	d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z);
	d_branch->branch_address = cpu_to_le32(d_bus | z);

	/*
	 * VT6306 incorrectly checks only the single descriptor at the
	 * CommandPtr when the wake bit is written, so if it's a
	 * multi-descriptor block starting with an INPUT_MORE, put a copy of
	 * the branch address in the first descriptor.
	 *
	 * Not doing this for transmit contexts since not sure how it interacts
	 * with skip addresses.
1246 */ 1247 if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) && 1248 d_branch != ctx->prev && 1249 (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) == 1250 cpu_to_le16(DESCRIPTOR_INPUT_MORE)) { 1251 ctx->prev->branch_address = cpu_to_le32(d_bus | z); 1252 } 1253 1254 ctx->prev = d; 1255 ctx->prev_z = z; 1256 } 1257 1258 static void context_stop(struct context *ctx) 1259 { 1260 struct fw_ohci *ohci = ctx->ohci; 1261 u32 reg; 1262 int i; 1263 1264 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); 1265 ctx->running = false; 1266 1267 for (i = 0; i < 1000; i++) { 1268 reg = reg_read(ohci, CONTROL_SET(ctx->regs)); 1269 if ((reg & CONTEXT_ACTIVE) == 0) 1270 return; 1271 1272 if (i) 1273 udelay(10); 1274 } 1275 ohci_err(ohci, "DMA context still active (0x%08x)\n", reg); 1276 } 1277 1278 struct driver_data { 1279 u8 inline_data[8]; 1280 struct fw_packet *packet; 1281 }; 1282 1283 /* 1284 * This function apppends a packet to the DMA queue for transmission. 1285 * Must always be called with the ochi->lock held to ensure proper 1286 * generation handling and locking around packet queue manipulation. 1287 */ 1288 static int at_context_queue_packet(struct context *ctx, 1289 struct fw_packet *packet) 1290 { 1291 struct fw_ohci *ohci = ctx->ohci; 1292 dma_addr_t d_bus, uninitialized_var(payload_bus); 1293 struct driver_data *driver_data; 1294 struct descriptor *d, *last; 1295 __le32 *header; 1296 int z, tcode; 1297 1298 d = context_get_descriptors(ctx, 4, &d_bus); 1299 if (d == NULL) { 1300 packet->ack = RCODE_SEND_ERROR; 1301 return -1; 1302 } 1303 1304 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); 1305 d[0].res_count = cpu_to_le16(packet->timestamp); 1306 1307 /* 1308 * The DMA format for asynchronous link packets is different 1309 * from the IEEE1394 layout, so shift the fields around 1310 * accordingly. 
1311 */ 1312 1313 tcode = (packet->header[0] >> 4) & 0x0f; 1314 header = (__le32 *) &d[1]; 1315 switch (tcode) { 1316 case TCODE_WRITE_QUADLET_REQUEST: 1317 case TCODE_WRITE_BLOCK_REQUEST: 1318 case TCODE_WRITE_RESPONSE: 1319 case TCODE_READ_QUADLET_REQUEST: 1320 case TCODE_READ_BLOCK_REQUEST: 1321 case TCODE_READ_QUADLET_RESPONSE: 1322 case TCODE_READ_BLOCK_RESPONSE: 1323 case TCODE_LOCK_REQUEST: 1324 case TCODE_LOCK_RESPONSE: 1325 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | 1326 (packet->speed << 16)); 1327 header[1] = cpu_to_le32((packet->header[1] & 0xffff) | 1328 (packet->header[0] & 0xffff0000)); 1329 header[2] = cpu_to_le32(packet->header[2]); 1330 1331 if (TCODE_IS_BLOCK_PACKET(tcode)) 1332 header[3] = cpu_to_le32(packet->header[3]); 1333 else 1334 header[3] = (__force __le32) packet->header[3]; 1335 1336 d[0].req_count = cpu_to_le16(packet->header_length); 1337 break; 1338 1339 case TCODE_LINK_INTERNAL: 1340 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) | 1341 (packet->speed << 16)); 1342 header[1] = cpu_to_le32(packet->header[1]); 1343 header[2] = cpu_to_le32(packet->header[2]); 1344 d[0].req_count = cpu_to_le16(12); 1345 1346 if (is_ping_packet(&packet->header[1])) 1347 d[0].control |= cpu_to_le16(DESCRIPTOR_PING); 1348 break; 1349 1350 case TCODE_STREAM_DATA: 1351 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | 1352 (packet->speed << 16)); 1353 header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); 1354 d[0].req_count = cpu_to_le16(8); 1355 break; 1356 1357 default: 1358 /* BUG(); */ 1359 packet->ack = RCODE_SEND_ERROR; 1360 return -1; 1361 } 1362 1363 BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor)); 1364 driver_data = (struct driver_data *) &d[3]; 1365 driver_data->packet = packet; 1366 packet->driver_data = driver_data; 1367 1368 if (packet->payload_length > 0) { 1369 if (packet->payload_length > sizeof(driver_data->inline_data)) { 1370 payload_bus = dma_map_single(ohci->card.device, 1371 packet->payload, 1372 packet->payload_length, 1373 DMA_TO_DEVICE); 1374 if (dma_mapping_error(ohci->card.device, payload_bus)) { 1375 packet->ack = RCODE_SEND_ERROR; 1376 return -1; 1377 } 1378 packet->payload_bus = payload_bus; 1379 packet->payload_mapped = true; 1380 } else { 1381 memcpy(driver_data->inline_data, packet->payload, 1382 packet->payload_length); 1383 payload_bus = d_bus + 3 * sizeof(*d); 1384 } 1385 1386 d[2].req_count = cpu_to_le16(packet->payload_length); 1387 d[2].data_address = cpu_to_le32(payload_bus); 1388 last = &d[2]; 1389 z = 3; 1390 } else { 1391 last = &d[0]; 1392 z = 2; 1393 } 1394 1395 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | 1396 DESCRIPTOR_IRQ_ALWAYS | 1397 DESCRIPTOR_BRANCH_ALWAYS); 1398 1399 /* FIXME: Document how the locking works. 
	if (ohci->generation != packet->generation) {
		if (packet->payload_mapped)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length, DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(ctx, d, z, 4 - z);

	if (ctx->running)
		reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	else
		context_run(ctx, 0);

	return 0;
}

static void at_context_flush(struct context *ctx)
{
	tasklet_disable(&ctx->tasklet);

	ctx->flushing = true;
	context_tasklet((unsigned long)ctx);
	ctx->flushing = false;

	tasklet_enable(&ctx->tasklet);
}

static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	int evt;

	if (last->transfer_status == 0 && !context->flushing)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * A flushed packet should give the same error as when
		 * we try to use a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		if (context->flushing)
			packet->ack = RCODE_GENERATION;
		else {
			/*
			 * Using a valid (current) generation count, but the
			 * node is not on the bus or not sending acks.
			 */
			packet->ack = RCODE_NO_ACK;
		}
		break;

	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	case OHCI1394_evt_no_status:
		if (context->flushing) {
			packet->ack = RCODE_GENERATION;
			break;
		}
		/* fall through */

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}

#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)

static void handle_local_rom(struct fw_ohci *ohci,
			     struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_lock(struct fw_ohci *ohci,
			      struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel, try;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	payload = packet->payload;
	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	for (try = 0; try < 20; try++)
		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
			lock_old = cpu_to_be32(reg_read(ohci,
							OHCI1394_CSRData));
			fw_fill_response(&response, packet->header,
					 RCODE_COMPLETE,
					 &lock_old, sizeof(lock_old));
			goto out;
		}

	ohci_err(ohci, "swap not done (CSR lock timeout)\n");
	fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);

 out:
	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset, csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	offset =
		((unsigned long long)
		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}

static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

	ret = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	if (ret < 0)
		packet->callback(packet, &ctx->ohci->card, packet->ack);

}

static void detect_dead_context(struct fw_ohci *ohci,
				const char *name, unsigned int regs)
{
	u32 ctl;

	ctl = reg_read(ohci, CONTROL_SET(regs));
	if (ctl & CONTEXT_DEAD)
		ohci_err(ohci, "DMA context %s has stopped, error code: %s\n",
			 name, evts[ctl & 0x1f]);
}

static void handle_dead_contexts(struct fw_ohci *ohci)
{
	unsigned int i;
	char name[8];

	detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
	detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
	detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
	detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
	for (i = 0; i < 32; ++i) {
		if (!(ohci->it_context_support & (1 << i)))
			continue;
		sprintf(name, "IT%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
	}
	for (i = 0; i < 32; ++i) {
		if (!(ohci->ir_context_support & (1 << i)))
			continue;
		sprintf(name, "IR%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
	}
	/* TODO: maybe try to flush and restart the dead contexts */
}

static u32 cycle_timer_ticks(u32 cycle_timer)
{
	u32 ticks;

	ticks = cycle_timer & 0xfff;
	ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
	ticks += (3072 * 8000) * (cycle_timer >> 25);

	return ticks;
}

/*
 * Some controllers exhibit one or more of the following bugs when updating the
 * iso cycle timer register:
 *  - When the lowest six bits are wrapping around to zero, a read that happens
 *    at the same time will return garbage in the lowest ten bits.
 *  - When the cycleOffset field wraps around to zero, the cycleCount field is
 *    not incremented for about 60 ns.
 *  - Occasionally, the entire register reads zero.
 *
 * To catch these, we read the register three times and ensure that the
 * difference between each two consecutive reads is approximately the same, i.e.
 * less than twice the other. Furthermore, any negative difference indicates an
 * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
 * execute, so we have enough precision to compute the ratio of the differences.)
 */
static u32 get_cycle_time(struct fw_ohci *ohci)
{
	u32 c0, c1, c2;
	u32 t0, t1, t2;
	s32 diff01, diff12;
	int i;

	c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);

	if (ohci->quirks & QUIRK_CYCLE_TIMER) {
		i = 0;
		c1 = c2;
		c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		do {
			c0 = c1;
			c1 = c2;
			c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
			t0 = cycle_timer_ticks(c0);
			t1 = cycle_timer_ticks(c1);
			t2 = cycle_timer_ticks(c2);
			diff01 = t1 - t0;
			diff12 = t2 - t1;
		} while ((diff01 <= 0 || diff12 <= 0 ||
			  diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
			 && i++ < 20);
	}

	return c2;
}

/*
 * This function has to be called at least every 64 seconds. The bus_time
 * field stores not only the upper 25 bits of the BUS_TIME register but also
 * the most significant bit of the cycle timer in bit 6 so that we can detect
 * changes in this bit.
 */
static u32 update_bus_time(struct fw_ohci *ohci)
{
	u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;

	if (unlikely(!ohci->bus_time_running)) {
		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
		ohci->bus_time = (lower_32_bits(get_seconds()) & ~0x7f) |
		                 (cycle_time_seconds & 0x40);
		ohci->bus_time_running = true;
	}

	if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
		ohci->bus_time += 0x40;

	return ohci->bus_time | cycle_time_seconds;
}

static int get_status_for_port(struct fw_ohci *ohci, int port_index)
{
	int reg;

	mutex_lock(&ohci->phy_reg_mutex);
	reg = write_phy_reg(ohci, 7, port_index);
	if (reg >= 0)
		reg = read_phy_reg(ohci, 8);
	mutex_unlock(&ohci->phy_reg_mutex);
	if (reg < 0)
		return reg;

	switch (reg & 0x0f) {
	case 0x06:
		return 2;	/* is child node (connected to parent node) */
	case 0x0e:
		return 3;	/* is parent node (connected to child node) */
	}
	return 1;		/* not connected */
}

static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
			   int self_id_count)
{
	int i;
	u32 entry;

	for (i = 0; i < self_id_count; i++) {
		entry = ohci->self_id_buffer[i];
		if ((self_id & 0xff000000) == (entry & 0xff000000))
			return -1;
		if ((self_id & 0xff000000) < (entry & 0xff000000))
			return i;
	}
	return i;
}

static int initiated_reset(struct fw_ohci *ohci)
{
	int reg;
	int ret = 0;

	mutex_lock(&ohci->phy_reg_mutex);
	reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */
	if (reg >= 0) {
		reg = read_phy_reg(ohci, 8);
		reg |= 0x40;
		reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */
		if (reg >= 0) {
			reg = read_phy_reg(ohci, 12); /* read register 12 */
			if (reg >= 0) {
				if ((reg & 0x08) == 0x08) {
					/* bit 3 indicates "initiated reset" */
					ret = 0x2;
				}
			}
		}
	}
	mutex_unlock(&ohci->phy_reg_mutex);
	return ret;
}
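
/*
 * Note: the self-ID quadlet assembled by find_and_insert_self_id() below
 * follows the IEEE 1394 self-ID packet #0 layout: bits 31..30 = 10b,
 * 29..24 phy ID, 23 = 0 (packet #0), 22 link active, 21..16 gap count,
 * 15..14 speed, 11 contender, 10..8 power class, 7..2 port status
 * (p0, p1, p2), 1 initiated reset, 0 more packets.
 */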

/*
 * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
 * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
 * Construct the selfID from phy register contents.
 */
static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
{
	int reg, i, pos, status;
	/* link active 1, speed 3, bridge 0, contender 1, more packets 0 */
	u32 self_id = 0x8040c800;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		ohci_notice(ohci,
			    "node ID not valid, new bus reset in progress\n");
		return -EBUSY;
	}
	self_id |= ((reg & 0x3f) << 24); /* phy ID */

	reg = ohci_read_phy_reg(&ohci->card, 4);
	if (reg < 0)
		return reg;
	self_id |= ((reg & 0x07) << 8); /* power class */

	reg = ohci_read_phy_reg(&ohci->card, 1);
	if (reg < 0)
		return reg;
	self_id |= ((reg & 0x3f) << 16); /* gap count */

	for (i = 0; i < 3; i++) {
		status = get_status_for_port(ohci, i);
		if (status < 0)
			return status;
		self_id |= ((status & 0x3) << (6 - (i * 2)));
	}

	self_id |= initiated_reset(ohci);

	pos = get_self_id_pos(ohci, self_id, self_id_count);
	if (pos >= 0) {
		memmove(&(ohci->self_id_buffer[pos+1]),
			&(ohci->self_id_buffer[pos]),
			(self_id_count - pos) * sizeof(*ohci->self_id_buffer));
		ohci->self_id_buffer[pos] = self_id;
		self_id_count++;
	}
	return self_id_count;
}

static void bus_reset_work(struct work_struct *work)
{
	struct fw_ohci *ohci =
		container_of(work, struct fw_ohci, bus_reset_work);
	int self_id_count, generation, new_generation, i, j;
	u32 reg;
	void *free_rom = NULL;
	dma_addr_t free_rom_bus = 0;
	bool is_new_root;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		ohci_notice(ohci,
			    "node ID not valid, new bus reset in progress\n");
		return;
	}
	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
		ohci_notice(ohci, "malconfigured bus\n");
		return;
	}
	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
			       OHCI1394_NodeID_nodeNumber);

	is_new_root = (reg & OHCI1394_NodeID_root) != 0;
	if (!(ohci->is_root && is_new_root))
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	ohci->is_root = is_new_root;

	reg = reg_read(ohci, OHCI1394_SelfIDCount);
	if (reg & OHCI1394_SelfIDCount_selfIDError) {
		ohci_notice(ohci, "self ID receive error\n");
		return;
	}
	/*
	 * The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer. Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs.
	 */
	self_id_count = (reg >> 3) & 0xff;

	if (self_id_count > 252) {
		ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
		return;
	}

	generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff;
	rmb();

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		u32 id  = cond_le32_to_cpu(ohci->self_id[i]);
		u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]);

		if (id != ~id2) {
			/*
			 * If the invalid data looks like a cycle start packet,
			 * it's likely to be the result of the cycle master
			 * having a wrong gap count. In this case, the self IDs
In this case, the self IDs 1936 * so far are valid and should be processed so that the 1937 * bus manager can then correct the gap count. 1938 */ 1939 if (id == 0xffff008f) { 1940 ohci_notice(ohci, "ignoring spurious self IDs\n"); 1941 self_id_count = j; 1942 break; 1943 } 1944 1945 ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n", 1946 j, self_id_count, id, id2); 1947 return; 1948 } 1949 ohci->self_id_buffer[j] = id; 1950 } 1951 1952 if (ohci->quirks & QUIRK_TI_SLLZ059) { 1953 self_id_count = find_and_insert_self_id(ohci, self_id_count); 1954 if (self_id_count < 0) { 1955 ohci_notice(ohci, 1956 "could not construct local self ID\n"); 1957 return; 1958 } 1959 } 1960 1961 if (self_id_count == 0) { 1962 ohci_notice(ohci, "no self IDs\n"); 1963 return; 1964 } 1965 rmb(); 1966 1967 /* 1968 * Check the consistency of the self IDs we just read. The 1969 * problem we face is that a new bus reset can start while we 1970 * read out the self IDs from the DMA buffer. If this happens, 1971 * the DMA buffer will be overwritten with new self IDs and we 1972 * will read out inconsistent data. The OHCI specification 1973 * (section 11.2) recommends a technique similar to 1974 * linux/seqlock.h, where we remember the generation of the 1975 * self IDs in the buffer before reading them out and compare 1976 * it to the current generation after reading them out. If 1977 * the two generations match we know we have a consistent set 1978 * of self IDs. 1979 */ 1980 1981 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff; 1982 if (new_generation != generation) { 1983 ohci_notice(ohci, "new bus reset, discarding self ids\n"); 1984 return; 1985 } 1986 1987 /* FIXME: Document how the locking works. */ 1988 spin_lock_irq(&ohci->lock); 1989 1990 ohci->generation = -1; /* prevent AT packet queueing */ 1991 context_stop(&ohci->at_request_ctx); 1992 context_stop(&ohci->at_response_ctx); 1993 1994 spin_unlock_irq(&ohci->lock); 1995 1996 /* 1997 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent 1998 * packets in the AT queues and software needs to drain them. 1999 * Some OHCI 1.1 controllers (JMicron) apparently require this too. 2000 */ 2001 at_context_flush(&ohci->at_request_ctx); 2002 at_context_flush(&ohci->at_response_ctx); 2003 2004 spin_lock_irq(&ohci->lock); 2005 2006 ohci->generation = generation; 2007 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); 2008 2009 if (ohci->quirks & QUIRK_RESET_PACKET) 2010 ohci->request_generation = generation; 2011 2012 /* 2013 * This next bit is unrelated to the AT context stuff but we 2014 * have to do it under the spinlock also. If a new config rom 2015 * was set up before this reset, the old one is now no longer 2016 * in use and we can free it. Update the config rom pointers 2017 * to point to the current config rom and clear the 2018 * next_config_rom pointer so a new update can take place. 2019 */ 2020 2021 if (ohci->next_config_rom != NULL) { 2022 if (ohci->next_config_rom != ohci->config_rom) { 2023 free_rom = ohci->config_rom; 2024 free_rom_bus = ohci->config_rom_bus; 2025 } 2026 ohci->config_rom = ohci->next_config_rom; 2027 ohci->config_rom_bus = ohci->next_config_rom_bus; 2028 ohci->next_config_rom = NULL; 2029 2030 /* 2031 * Restore config_rom image and manually update 2032 * config_rom registers. Writing the header quadlet 2033 * will indicate that the config rom is ready, so we 2034 * do that last. 
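	 * (Concretely, the code below first reloads BusOptions from
	 * config_rom[2] and only then writes ConfigROMhdr, which marks the
	 * config rom as ready.)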
2035 */ 2036 reg_write(ohci, OHCI1394_BusOptions, 2037 be32_to_cpu(ohci->config_rom[2])); 2038 ohci->config_rom[0] = ohci->next_header; 2039 reg_write(ohci, OHCI1394_ConfigROMhdr, 2040 be32_to_cpu(ohci->next_header)); 2041 } 2042 2043 if (param_remote_dma) { 2044 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0); 2045 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0); 2046 } 2047 2048 spin_unlock_irq(&ohci->lock); 2049 2050 if (free_rom) 2051 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2052 free_rom, free_rom_bus); 2053 2054 log_selfids(ohci, generation, self_id_count); 2055 2056 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, 2057 self_id_count, ohci->self_id_buffer, 2058 ohci->csr_state_setclear_abdicate); 2059 ohci->csr_state_setclear_abdicate = false; 2060 } 2061 2062 static irqreturn_t irq_handler(int irq, void *data) 2063 { 2064 struct fw_ohci *ohci = data; 2065 u32 event, iso_event; 2066 int i; 2067 2068 event = reg_read(ohci, OHCI1394_IntEventClear); 2069 2070 if (!event || !~event) 2071 return IRQ_NONE; 2072 2073 /* 2074 * busReset and postedWriteErr must not be cleared yet 2075 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1) 2076 */ 2077 reg_write(ohci, OHCI1394_IntEventClear, 2078 event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr)); 2079 log_irqs(ohci, event); 2080 2081 if (event & OHCI1394_selfIDComplete) 2082 queue_work(selfid_workqueue, &ohci->bus_reset_work); 2083 2084 if (event & OHCI1394_RQPkt) 2085 tasklet_schedule(&ohci->ar_request_ctx.tasklet); 2086 2087 if (event & OHCI1394_RSPkt) 2088 tasklet_schedule(&ohci->ar_response_ctx.tasklet); 2089 2090 if (event & OHCI1394_reqTxComplete) 2091 tasklet_schedule(&ohci->at_request_ctx.tasklet); 2092 2093 if (event & OHCI1394_respTxComplete) 2094 tasklet_schedule(&ohci->at_response_ctx.tasklet); 2095 2096 if (event & OHCI1394_isochRx) { 2097 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear); 2098 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event); 2099 2100 while (iso_event) { 2101 i = ffs(iso_event) - 1; 2102 tasklet_schedule( 2103 &ohci->ir_context_list[i].context.tasklet); 2104 iso_event &= ~(1 << i); 2105 } 2106 } 2107 2108 if (event & OHCI1394_isochTx) { 2109 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear); 2110 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event); 2111 2112 while (iso_event) { 2113 i = ffs(iso_event) - 1; 2114 tasklet_schedule( 2115 &ohci->it_context_list[i].context.tasklet); 2116 iso_event &= ~(1 << i); 2117 } 2118 } 2119 2120 if (unlikely(event & OHCI1394_regAccessFail)) 2121 ohci_err(ohci, "register access failure\n"); 2122 2123 if (unlikely(event & OHCI1394_postedWriteErr)) { 2124 reg_read(ohci, OHCI1394_PostedWriteAddressHi); 2125 reg_read(ohci, OHCI1394_PostedWriteAddressLo); 2126 reg_write(ohci, OHCI1394_IntEventClear, 2127 OHCI1394_postedWriteErr); 2128 if (printk_ratelimit()) 2129 ohci_err(ohci, "PCI posted write error\n"); 2130 } 2131 2132 if (unlikely(event & OHCI1394_cycleTooLong)) { 2133 if (printk_ratelimit()) 2134 ohci_notice(ohci, "isochronous cycle too long\n"); 2135 reg_write(ohci, OHCI1394_LinkControlSet, 2136 OHCI1394_LinkControl_cycleMaster); 2137 } 2138 2139 if (unlikely(event & OHCI1394_cycleInconsistent)) { 2140 /* 2141 * We need to clear this event bit in order to make 2142 * cycleMatch isochronous I/O work. In theory we should 2143 * stop active cycleMatch iso contexts now and restart 2144 * them at least two cycles later. (FIXME?) 
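	 * For now the bit is simply cleared together with the other events
	 * at the top of this handler, and a rate-limited notice is logged.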
2145 */ 2146 if (printk_ratelimit()) 2147 ohci_notice(ohci, "isochronous cycle inconsistent\n"); 2148 } 2149 2150 if (unlikely(event & OHCI1394_unrecoverableError)) 2151 handle_dead_contexts(ohci); 2152 2153 if (event & OHCI1394_cycle64Seconds) { 2154 spin_lock(&ohci->lock); 2155 update_bus_time(ohci); 2156 spin_unlock(&ohci->lock); 2157 } else 2158 flush_writes(ohci); 2159 2160 return IRQ_HANDLED; 2161 } 2162 2163 static int software_reset(struct fw_ohci *ohci) 2164 { 2165 u32 val; 2166 int i; 2167 2168 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset); 2169 for (i = 0; i < 500; i++) { 2170 val = reg_read(ohci, OHCI1394_HCControlSet); 2171 if (!~val) 2172 return -ENODEV; /* Card was ejected. */ 2173 2174 if (!(val & OHCI1394_HCControl_softReset)) 2175 return 0; 2176 2177 msleep(1); 2178 } 2179 2180 return -EBUSY; 2181 } 2182 2183 static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length) 2184 { 2185 size_t size = length * 4; 2186 2187 memcpy(dest, src, size); 2188 if (size < CONFIG_ROM_SIZE) 2189 memset(&dest[length], 0, CONFIG_ROM_SIZE - size); 2190 } 2191 2192 static int configure_1394a_enhancements(struct fw_ohci *ohci) 2193 { 2194 bool enable_1394a; 2195 int ret, clear, set, offset; 2196 2197 /* Check if the driver should configure link and PHY. */ 2198 if (!(reg_read(ohci, OHCI1394_HCControlSet) & 2199 OHCI1394_HCControl_programPhyEnable)) 2200 return 0; 2201 2202 /* Paranoia: check whether the PHY supports 1394a, too. */ 2203 enable_1394a = false; 2204 ret = read_phy_reg(ohci, 2); 2205 if (ret < 0) 2206 return ret; 2207 if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) { 2208 ret = read_paged_phy_reg(ohci, 1, 8); 2209 if (ret < 0) 2210 return ret; 2211 if (ret >= 1) 2212 enable_1394a = true; 2213 } 2214 2215 if (ohci->quirks & QUIRK_NO_1394A) 2216 enable_1394a = false; 2217 2218 /* Configure PHY and link consistently. */ 2219 if (enable_1394a) { 2220 clear = 0; 2221 set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; 2222 } else { 2223 clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI; 2224 set = 0; 2225 } 2226 ret = update_phy_reg(ohci, 5, clear, set); 2227 if (ret < 0) 2228 return ret; 2229 2230 if (enable_1394a) 2231 offset = OHCI1394_HCControlSet; 2232 else 2233 offset = OHCI1394_HCControlClear; 2234 reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable); 2235 2236 /* Clean up: configuration has been taken care of. */ 2237 reg_write(ohci, OHCI1394_HCControlClear, 2238 OHCI1394_HCControl_programPhyEnable); 2239 2240 return 0; 2241 } 2242 2243 static int probe_tsb41ba3d(struct fw_ohci *ohci) 2244 { 2245 /* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */ 2246 static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, }; 2247 int reg, i; 2248 2249 reg = read_phy_reg(ohci, 2); 2250 if (reg < 0) 2251 return reg; 2252 if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS) 2253 return 0; 2254 2255 for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) { 2256 reg = read_paged_phy_reg(ohci, 1, i + 10); 2257 if (reg < 0) 2258 return reg; 2259 if (reg != id[i]) 2260 return 0; 2261 } 2262 return 1; 2263 } 2264 2265 static int ohci_enable(struct fw_card *card, 2266 const __be32 *config_rom, size_t length) 2267 { 2268 struct fw_ohci *ohci = fw_ohci(card); 2269 u32 lps, version, irqs; 2270 int i, ret; 2271 2272 ret = software_reset(ohci); 2273 if (ret < 0) { 2274 ohci_err(ohci, "failed to reset ohci card\n"); 2275 return ret; 2276 } 2277 2278 /* 2279 * Now enable LPS, which we need in order to start accessing 2280 * most of the registers. 
In fact, on some cards (ALI M5251), 2281 * accessing registers in the SClk domain without LPS enabled 2282 * will lock up the machine. Wait 50msec to make sure we have 2283 * full link enabled. However, with some cards (well, at least 2284 * a JMicron PCIe card), we have to try again sometimes. 2285 * 2286 * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but 2287 * cannot actually use the phy at that time. These need tens of 2288 * millisecods pause between LPS write and first phy access too. 2289 */ 2290 2291 reg_write(ohci, OHCI1394_HCControlSet, 2292 OHCI1394_HCControl_LPS | 2293 OHCI1394_HCControl_postedWriteEnable); 2294 flush_writes(ohci); 2295 2296 for (lps = 0, i = 0; !lps && i < 3; i++) { 2297 msleep(50); 2298 lps = reg_read(ohci, OHCI1394_HCControlSet) & 2299 OHCI1394_HCControl_LPS; 2300 } 2301 2302 if (!lps) { 2303 ohci_err(ohci, "failed to set Link Power Status\n"); 2304 return -EIO; 2305 } 2306 2307 if (ohci->quirks & QUIRK_TI_SLLZ059) { 2308 ret = probe_tsb41ba3d(ohci); 2309 if (ret < 0) 2310 return ret; 2311 if (ret) 2312 ohci_notice(ohci, "local TSB41BA3D phy\n"); 2313 else 2314 ohci->quirks &= ~QUIRK_TI_SLLZ059; 2315 } 2316 2317 reg_write(ohci, OHCI1394_HCControlClear, 2318 OHCI1394_HCControl_noByteSwapData); 2319 2320 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); 2321 reg_write(ohci, OHCI1394_LinkControlSet, 2322 OHCI1394_LinkControl_cycleTimerEnable | 2323 OHCI1394_LinkControl_cycleMaster); 2324 2325 reg_write(ohci, OHCI1394_ATRetries, 2326 OHCI1394_MAX_AT_REQ_RETRIES | 2327 (OHCI1394_MAX_AT_RESP_RETRIES << 4) | 2328 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) | 2329 (200 << 16)); 2330 2331 ohci->bus_time_running = false; 2332 2333 for (i = 0; i < 32; i++) 2334 if (ohci->ir_context_support & (1 << i)) 2335 reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i), 2336 IR_CONTEXT_MULTI_CHANNEL_MODE); 2337 2338 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; 2339 if (version >= OHCI_VERSION_1_1) { 2340 reg_write(ohci, OHCI1394_InitialChannelsAvailableHi, 2341 0xfffffffe); 2342 card->broadcast_channel_auto_allocated = true; 2343 } 2344 2345 /* Get implemented bits of the priority arbitration request counter. */ 2346 reg_write(ohci, OHCI1394_FairnessControl, 0x3f); 2347 ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f; 2348 reg_write(ohci, OHCI1394_FairnessControl, 0); 2349 card->priority_budget_implemented = ohci->pri_req_max != 0; 2350 2351 reg_write(ohci, OHCI1394_PhyUpperBound, FW_MAX_PHYSICAL_RANGE >> 16); 2352 reg_write(ohci, OHCI1394_IntEventClear, ~0); 2353 reg_write(ohci, OHCI1394_IntMaskClear, ~0); 2354 2355 ret = configure_1394a_enhancements(ohci); 2356 if (ret < 0) 2357 return ret; 2358 2359 /* Activate link_on bit and contender bit in our self ID packets.*/ 2360 ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER); 2361 if (ret < 0) 2362 return ret; 2363 2364 /* 2365 * When the link is not yet enabled, the atomic config rom 2366 * update mechanism described below in ohci_set_config_rom() 2367 * is not active. We have to update ConfigRomHeader and 2368 * BusOptions manually, and the write to ConfigROMmap takes 2369 * effect immediately. We tie this to the enabling of the 2370 * link, so we have a valid config rom before enabling - the 2371 * OHCI requires that ConfigROMhdr and BusOptions have valid 2372 * values before enabling. 
2373 * 2374 * However, when the ConfigROMmap is written, some controllers 2375 * always read back quadlets 0 and 2 from the config rom to 2376 * the ConfigRomHeader and BusOptions registers on bus reset. 2377 * They shouldn't do that in this initial case where the link 2378 * isn't enabled. This means we have to use the same 2379 * workaround here, setting the bus header to 0 and then write 2380 * the right values in the bus reset tasklet. 2381 */ 2382 2383 if (config_rom) { 2384 ohci->next_config_rom = 2385 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, 2386 &ohci->next_config_rom_bus, 2387 GFP_KERNEL); 2388 if (ohci->next_config_rom == NULL) 2389 return -ENOMEM; 2390 2391 copy_config_rom(ohci->next_config_rom, config_rom, length); 2392 } else { 2393 /* 2394 * In the suspend case, config_rom is NULL, which 2395 * means that we just reuse the old config rom. 2396 */ 2397 ohci->next_config_rom = ohci->config_rom; 2398 ohci->next_config_rom_bus = ohci->config_rom_bus; 2399 } 2400 2401 ohci->next_header = ohci->next_config_rom[0]; 2402 ohci->next_config_rom[0] = 0; 2403 reg_write(ohci, OHCI1394_ConfigROMhdr, 0); 2404 reg_write(ohci, OHCI1394_BusOptions, 2405 be32_to_cpu(ohci->next_config_rom[2])); 2406 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); 2407 2408 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); 2409 2410 irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete | 2411 OHCI1394_RQPkt | OHCI1394_RSPkt | 2412 OHCI1394_isochTx | OHCI1394_isochRx | 2413 OHCI1394_postedWriteErr | 2414 OHCI1394_selfIDComplete | 2415 OHCI1394_regAccessFail | 2416 OHCI1394_cycleInconsistent | 2417 OHCI1394_unrecoverableError | 2418 OHCI1394_cycleTooLong | 2419 OHCI1394_masterIntEnable; 2420 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS) 2421 irqs |= OHCI1394_busReset; 2422 reg_write(ohci, OHCI1394_IntMaskSet, irqs); 2423 2424 reg_write(ohci, OHCI1394_HCControlSet, 2425 OHCI1394_HCControl_linkEnable | 2426 OHCI1394_HCControl_BIBimageValid); 2427 2428 reg_write(ohci, OHCI1394_LinkControlSet, 2429 OHCI1394_LinkControl_rcvSelfID | 2430 OHCI1394_LinkControl_rcvPhyPkt); 2431 2432 ar_context_run(&ohci->ar_request_ctx); 2433 ar_context_run(&ohci->ar_response_ctx); 2434 2435 flush_writes(ohci); 2436 2437 /* We are ready to go, reset bus to finish initialization. */ 2438 fw_schedule_bus_reset(&ohci->card, false, true); 2439 2440 return 0; 2441 } 2442 2443 static int ohci_set_config_rom(struct fw_card *card, 2444 const __be32 *config_rom, size_t length) 2445 { 2446 struct fw_ohci *ohci; 2447 __be32 *next_config_rom; 2448 dma_addr_t uninitialized_var(next_config_rom_bus); 2449 2450 ohci = fw_ohci(card); 2451 2452 /* 2453 * When the OHCI controller is enabled, the config rom update 2454 * mechanism is a bit tricky, but easy enough to use. See 2455 * section 5.5.6 in the OHCI specification. 2456 * 2457 * The OHCI controller caches the new config rom address in a 2458 * shadow register (ConfigROMmapNext) and needs a bus reset 2459 * for the changes to take place. When the bus reset is 2460 * detected, the controller loads the new values for the 2461 * ConfigRomHeader and BusOptions registers from the specified 2462 * config rom and loads ConfigROMmap from the ConfigROMmapNext 2463 * shadow register. All automatically and atomically. 2464 * 2465 * Now, there's a twist to this story. 
	 * The automatic load of ConfigRomHeader and BusOptions doesn't
	 * honor the noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values into these registers during
	 * the atomic update, even on little endian architectures.  The
	 * workaround we use is to put a 0 in the header quadlet; 0 is
	 * endian agnostic and means that the config rom isn't ready yet.
	 * In the bus reset tasklet we then set up the real values for the
	 * two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_work).
	 */

	next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	spin_lock_irq(&ohci->lock);

	/*
	 * If there is not an already pending config_rom update,
	 * push our new allocation into the ohci->next_config_rom
	 * and then mark the local variable as null so that we
	 * won't deallocate the new buffer.
	 *
	 * OTOH, if there is a pending config_rom update, just
	 * use that buffer with the new config_rom data, and
	 * let this routine free the unused DMA allocation.
	 */

	if (ohci->next_config_rom == NULL) {
		ohci->next_config_rom = next_config_rom;
		ohci->next_config_rom_bus = next_config_rom_bus;
		next_config_rom = NULL;
	}

	copy_config_rom(ohci->next_config_rom, config_rom, length);

	ohci->next_header = config_rom[0];
	ohci->next_config_rom[0] = 0;

	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

	spin_unlock_irq(&ohci->lock);

	/* If we didn't use the DMA allocation, delete it. */
	if (next_config_rom != NULL)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  next_config_rom, next_config_rom_bus);

	/*
	 * Now initiate a bus reset to have the changes take
	 * effect.  We clean up the old config rom memory and DMA
	 * mappings in the bus reset tasklet, since the OHCI
	 * controller could need to access it before the bus reset
	 * takes effect.
2524 */ 2525 2526 fw_schedule_bus_reset(&ohci->card, true, true); 2527 2528 return 0; 2529 } 2530 2531 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet) 2532 { 2533 struct fw_ohci *ohci = fw_ohci(card); 2534 2535 at_context_transmit(&ohci->at_request_ctx, packet); 2536 } 2537 2538 static void ohci_send_response(struct fw_card *card, struct fw_packet *packet) 2539 { 2540 struct fw_ohci *ohci = fw_ohci(card); 2541 2542 at_context_transmit(&ohci->at_response_ctx, packet); 2543 } 2544 2545 static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet) 2546 { 2547 struct fw_ohci *ohci = fw_ohci(card); 2548 struct context *ctx = &ohci->at_request_ctx; 2549 struct driver_data *driver_data = packet->driver_data; 2550 int ret = -ENOENT; 2551 2552 tasklet_disable(&ctx->tasklet); 2553 2554 if (packet->ack != 0) 2555 goto out; 2556 2557 if (packet->payload_mapped) 2558 dma_unmap_single(ohci->card.device, packet->payload_bus, 2559 packet->payload_length, DMA_TO_DEVICE); 2560 2561 log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20); 2562 driver_data->packet = NULL; 2563 packet->ack = RCODE_CANCELLED; 2564 packet->callback(packet, &ohci->card, packet->ack); 2565 ret = 0; 2566 out: 2567 tasklet_enable(&ctx->tasklet); 2568 2569 return ret; 2570 } 2571 2572 static int ohci_enable_phys_dma(struct fw_card *card, 2573 int node_id, int generation) 2574 { 2575 struct fw_ohci *ohci = fw_ohci(card); 2576 unsigned long flags; 2577 int n, ret = 0; 2578 2579 if (param_remote_dma) 2580 return 0; 2581 2582 /* 2583 * FIXME: Make sure this bitmask is cleared when we clear the busReset 2584 * interrupt bit. Clear physReqResourceAllBuses on bus reset. 2585 */ 2586 2587 spin_lock_irqsave(&ohci->lock, flags); 2588 2589 if (ohci->generation != generation) { 2590 ret = -ESTALE; 2591 goto out; 2592 } 2593 2594 /* 2595 * Note, if the node ID contains a non-local bus ID, physical DMA is 2596 * enabled for _all_ nodes on remote buses. 2597 */ 2598 2599 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63; 2600 if (n < 32) 2601 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n); 2602 else 2603 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32)); 2604 2605 flush_writes(ohci); 2606 out: 2607 spin_unlock_irqrestore(&ohci->lock, flags); 2608 2609 return ret; 2610 } 2611 2612 static u32 ohci_read_csr(struct fw_card *card, int csr_offset) 2613 { 2614 struct fw_ohci *ohci = fw_ohci(card); 2615 unsigned long flags; 2616 u32 value; 2617 2618 switch (csr_offset) { 2619 case CSR_STATE_CLEAR: 2620 case CSR_STATE_SET: 2621 if (ohci->is_root && 2622 (reg_read(ohci, OHCI1394_LinkControlSet) & 2623 OHCI1394_LinkControl_cycleMaster)) 2624 value = CSR_STATE_BIT_CMSTR; 2625 else 2626 value = 0; 2627 if (ohci->csr_state_setclear_abdicate) 2628 value |= CSR_STATE_BIT_ABDICATE; 2629 2630 return value; 2631 2632 case CSR_NODE_IDS: 2633 return reg_read(ohci, OHCI1394_NodeID) << 16; 2634 2635 case CSR_CYCLE_TIME: 2636 return get_cycle_time(ohci); 2637 2638 case CSR_BUS_TIME: 2639 /* 2640 * We might be called just after the cycle timer has wrapped 2641 * around but just before the cycle64Seconds handler, so we 2642 * better check here, too, if the bus time needs to be updated. 
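	 * update_bus_time() copes with that case: it only advances bus_time
	 * by 0x40 when the most significant bit of the cycle timer's seconds
	 * field has toggled since the last update.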
2643 */ 2644 spin_lock_irqsave(&ohci->lock, flags); 2645 value = update_bus_time(ohci); 2646 spin_unlock_irqrestore(&ohci->lock, flags); 2647 return value; 2648 2649 case CSR_BUSY_TIMEOUT: 2650 value = reg_read(ohci, OHCI1394_ATRetries); 2651 return (value >> 4) & 0x0ffff00f; 2652 2653 case CSR_PRIORITY_BUDGET: 2654 return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) | 2655 (ohci->pri_req_max << 8); 2656 2657 default: 2658 WARN_ON(1); 2659 return 0; 2660 } 2661 } 2662 2663 static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value) 2664 { 2665 struct fw_ohci *ohci = fw_ohci(card); 2666 unsigned long flags; 2667 2668 switch (csr_offset) { 2669 case CSR_STATE_CLEAR: 2670 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { 2671 reg_write(ohci, OHCI1394_LinkControlClear, 2672 OHCI1394_LinkControl_cycleMaster); 2673 flush_writes(ohci); 2674 } 2675 if (value & CSR_STATE_BIT_ABDICATE) 2676 ohci->csr_state_setclear_abdicate = false; 2677 break; 2678 2679 case CSR_STATE_SET: 2680 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { 2681 reg_write(ohci, OHCI1394_LinkControlSet, 2682 OHCI1394_LinkControl_cycleMaster); 2683 flush_writes(ohci); 2684 } 2685 if (value & CSR_STATE_BIT_ABDICATE) 2686 ohci->csr_state_setclear_abdicate = true; 2687 break; 2688 2689 case CSR_NODE_IDS: 2690 reg_write(ohci, OHCI1394_NodeID, value >> 16); 2691 flush_writes(ohci); 2692 break; 2693 2694 case CSR_CYCLE_TIME: 2695 reg_write(ohci, OHCI1394_IsochronousCycleTimer, value); 2696 reg_write(ohci, OHCI1394_IntEventSet, 2697 OHCI1394_cycleInconsistent); 2698 flush_writes(ohci); 2699 break; 2700 2701 case CSR_BUS_TIME: 2702 spin_lock_irqsave(&ohci->lock, flags); 2703 ohci->bus_time = (update_bus_time(ohci) & 0x40) | 2704 (value & ~0x7f); 2705 spin_unlock_irqrestore(&ohci->lock, flags); 2706 break; 2707 2708 case CSR_BUSY_TIMEOUT: 2709 value = (value & 0xf) | ((value & 0xf) << 4) | 2710 ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4); 2711 reg_write(ohci, OHCI1394_ATRetries, value); 2712 flush_writes(ohci); 2713 break; 2714 2715 case CSR_PRIORITY_BUDGET: 2716 reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f); 2717 flush_writes(ohci); 2718 break; 2719 2720 default: 2721 WARN_ON(1); 2722 break; 2723 } 2724 } 2725 2726 static void flush_iso_completions(struct iso_context *ctx) 2727 { 2728 ctx->base.callback.sc(&ctx->base, ctx->last_timestamp, 2729 ctx->header_length, ctx->header, 2730 ctx->base.callback_data); 2731 ctx->header_length = 0; 2732 } 2733 2734 static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr) 2735 { 2736 u32 *ctx_hdr; 2737 2738 if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) { 2739 if (ctx->base.drop_overflow_headers) 2740 return; 2741 flush_iso_completions(ctx); 2742 } 2743 2744 ctx_hdr = ctx->header + ctx->header_length; 2745 ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]); 2746 2747 /* 2748 * The two iso header quadlets are byteswapped to little 2749 * endian by the controller, but we want to present them 2750 * as big endian for consistency with the bus endianness. 
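	 * After the swaps below, ctx_hdr[0] holds the iso packet header and
	 * ctx_hdr[1] the timestamp quadlet; any remaining header bytes are
	 * copied through unchanged.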
2751 */ 2752 if (ctx->base.header_size > 0) 2753 ctx_hdr[0] = swab32(dma_hdr[1]); /* iso packet header */ 2754 if (ctx->base.header_size > 4) 2755 ctx_hdr[1] = swab32(dma_hdr[0]); /* timestamp */ 2756 if (ctx->base.header_size > 8) 2757 memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8); 2758 ctx->header_length += ctx->base.header_size; 2759 } 2760 2761 static int handle_ir_packet_per_buffer(struct context *context, 2762 struct descriptor *d, 2763 struct descriptor *last) 2764 { 2765 struct iso_context *ctx = 2766 container_of(context, struct iso_context, context); 2767 struct descriptor *pd; 2768 u32 buffer_dma; 2769 2770 for (pd = d; pd <= last; pd++) 2771 if (pd->transfer_status) 2772 break; 2773 if (pd > last) 2774 /* Descriptor(s) not done yet, stop iteration */ 2775 return 0; 2776 2777 while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) { 2778 d++; 2779 buffer_dma = le32_to_cpu(d->data_address); 2780 dma_sync_single_range_for_cpu(context->ohci->card.device, 2781 buffer_dma & PAGE_MASK, 2782 buffer_dma & ~PAGE_MASK, 2783 le16_to_cpu(d->req_count), 2784 DMA_FROM_DEVICE); 2785 } 2786 2787 copy_iso_headers(ctx, (u32 *) (last + 1)); 2788 2789 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) 2790 flush_iso_completions(ctx); 2791 2792 return 1; 2793 } 2794 2795 /* d == last because each descriptor block is only a single descriptor. */ 2796 static int handle_ir_buffer_fill(struct context *context, 2797 struct descriptor *d, 2798 struct descriptor *last) 2799 { 2800 struct iso_context *ctx = 2801 container_of(context, struct iso_context, context); 2802 unsigned int req_count, res_count, completed; 2803 u32 buffer_dma; 2804 2805 req_count = le16_to_cpu(last->req_count); 2806 res_count = le16_to_cpu(READ_ONCE(last->res_count)); 2807 completed = req_count - res_count; 2808 buffer_dma = le32_to_cpu(last->data_address); 2809 2810 if (completed > 0) { 2811 ctx->mc_buffer_bus = buffer_dma; 2812 ctx->mc_completed = completed; 2813 } 2814 2815 if (res_count != 0) 2816 /* Descriptor(s) not done yet, stop iteration */ 2817 return 0; 2818 2819 dma_sync_single_range_for_cpu(context->ohci->card.device, 2820 buffer_dma & PAGE_MASK, 2821 buffer_dma & ~PAGE_MASK, 2822 completed, DMA_FROM_DEVICE); 2823 2824 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) { 2825 ctx->base.callback.mc(&ctx->base, 2826 buffer_dma + completed, 2827 ctx->base.callback_data); 2828 ctx->mc_completed = 0; 2829 } 2830 2831 return 1; 2832 } 2833 2834 static void flush_ir_buffer_fill(struct iso_context *ctx) 2835 { 2836 dma_sync_single_range_for_cpu(ctx->context.ohci->card.device, 2837 ctx->mc_buffer_bus & PAGE_MASK, 2838 ctx->mc_buffer_bus & ~PAGE_MASK, 2839 ctx->mc_completed, DMA_FROM_DEVICE); 2840 2841 ctx->base.callback.mc(&ctx->base, 2842 ctx->mc_buffer_bus + ctx->mc_completed, 2843 ctx->base.callback_data); 2844 ctx->mc_completed = 0; 2845 } 2846 2847 static inline void sync_it_packet_for_cpu(struct context *context, 2848 struct descriptor *pd) 2849 { 2850 __le16 control; 2851 u32 buffer_dma; 2852 2853 /* only packets beginning with OUTPUT_MORE* have data buffers */ 2854 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) 2855 return; 2856 2857 /* skip over the OUTPUT_MORE_IMMEDIATE descriptor */ 2858 pd += 2; 2859 2860 /* 2861 * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's 2862 * data buffer is in the context program's coherent page and must not 2863 * be synced. 
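	 * (Detected below by checking whether the data address lies in the
	 * same page as the descriptor block that is currently being
	 * executed.)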
2864 */ 2865 if ((le32_to_cpu(pd->data_address) & PAGE_MASK) == 2866 (context->current_bus & PAGE_MASK)) { 2867 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) 2868 return; 2869 pd++; 2870 } 2871 2872 do { 2873 buffer_dma = le32_to_cpu(pd->data_address); 2874 dma_sync_single_range_for_cpu(context->ohci->card.device, 2875 buffer_dma & PAGE_MASK, 2876 buffer_dma & ~PAGE_MASK, 2877 le16_to_cpu(pd->req_count), 2878 DMA_TO_DEVICE); 2879 control = pd->control; 2880 pd++; 2881 } while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))); 2882 } 2883 2884 static int handle_it_packet(struct context *context, 2885 struct descriptor *d, 2886 struct descriptor *last) 2887 { 2888 struct iso_context *ctx = 2889 container_of(context, struct iso_context, context); 2890 struct descriptor *pd; 2891 __be32 *ctx_hdr; 2892 2893 for (pd = d; pd <= last; pd++) 2894 if (pd->transfer_status) 2895 break; 2896 if (pd > last) 2897 /* Descriptor(s) not done yet, stop iteration */ 2898 return 0; 2899 2900 sync_it_packet_for_cpu(context, d); 2901 2902 if (ctx->header_length + 4 > PAGE_SIZE) { 2903 if (ctx->base.drop_overflow_headers) 2904 return 1; 2905 flush_iso_completions(ctx); 2906 } 2907 2908 ctx_hdr = ctx->header + ctx->header_length; 2909 ctx->last_timestamp = le16_to_cpu(last->res_count); 2910 /* Present this value as big-endian to match the receive code */ 2911 *ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) | 2912 le16_to_cpu(pd->res_count)); 2913 ctx->header_length += 4; 2914 2915 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) 2916 flush_iso_completions(ctx); 2917 2918 return 1; 2919 } 2920 2921 static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels) 2922 { 2923 u32 hi = channels >> 32, lo = channels; 2924 2925 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi); 2926 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo); 2927 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi); 2928 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo); 2929 ohci->mc_channels = channels; 2930 } 2931 2932 static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card, 2933 int type, int channel, size_t header_size) 2934 { 2935 struct fw_ohci *ohci = fw_ohci(card); 2936 struct iso_context *uninitialized_var(ctx); 2937 descriptor_callback_t uninitialized_var(callback); 2938 u64 *uninitialized_var(channels); 2939 u32 *uninitialized_var(mask), uninitialized_var(regs); 2940 int index, ret = -EBUSY; 2941 2942 spin_lock_irq(&ohci->lock); 2943 2944 switch (type) { 2945 case FW_ISO_CONTEXT_TRANSMIT: 2946 mask = &ohci->it_context_mask; 2947 callback = handle_it_packet; 2948 index = ffs(*mask) - 1; 2949 if (index >= 0) { 2950 *mask &= ~(1 << index); 2951 regs = OHCI1394_IsoXmitContextBase(index); 2952 ctx = &ohci->it_context_list[index]; 2953 } 2954 break; 2955 2956 case FW_ISO_CONTEXT_RECEIVE: 2957 channels = &ohci->ir_context_channels; 2958 mask = &ohci->ir_context_mask; 2959 callback = handle_ir_packet_per_buffer; 2960 index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; 2961 if (index >= 0) { 2962 *channels &= ~(1ULL << channel); 2963 *mask &= ~(1 << index); 2964 regs = OHCI1394_IsoRcvContextBase(index); 2965 ctx = &ohci->ir_context_list[index]; 2966 } 2967 break; 2968 2969 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 2970 mask = &ohci->ir_context_mask; 2971 callback = handle_ir_buffer_fill; 2972 index = !ohci->mc_allocated ? 
ffs(*mask) - 1 : -1; 2973 if (index >= 0) { 2974 ohci->mc_allocated = true; 2975 *mask &= ~(1 << index); 2976 regs = OHCI1394_IsoRcvContextBase(index); 2977 ctx = &ohci->ir_context_list[index]; 2978 } 2979 break; 2980 2981 default: 2982 index = -1; 2983 ret = -ENOSYS; 2984 } 2985 2986 spin_unlock_irq(&ohci->lock); 2987 2988 if (index < 0) 2989 return ERR_PTR(ret); 2990 2991 memset(ctx, 0, sizeof(*ctx)); 2992 ctx->header_length = 0; 2993 ctx->header = (void *) __get_free_page(GFP_KERNEL); 2994 if (ctx->header == NULL) { 2995 ret = -ENOMEM; 2996 goto out; 2997 } 2998 ret = context_init(&ctx->context, ohci, regs, callback); 2999 if (ret < 0) 3000 goto out_with_header; 3001 3002 if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) { 3003 set_multichannel_mask(ohci, 0); 3004 ctx->mc_completed = 0; 3005 } 3006 3007 return &ctx->base; 3008 3009 out_with_header: 3010 free_page((unsigned long)ctx->header); 3011 out: 3012 spin_lock_irq(&ohci->lock); 3013 3014 switch (type) { 3015 case FW_ISO_CONTEXT_RECEIVE: 3016 *channels |= 1ULL << channel; 3017 break; 3018 3019 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 3020 ohci->mc_allocated = false; 3021 break; 3022 } 3023 *mask |= 1 << index; 3024 3025 spin_unlock_irq(&ohci->lock); 3026 3027 return ERR_PTR(ret); 3028 } 3029 3030 static int ohci_start_iso(struct fw_iso_context *base, 3031 s32 cycle, u32 sync, u32 tags) 3032 { 3033 struct iso_context *ctx = container_of(base, struct iso_context, base); 3034 struct fw_ohci *ohci = ctx->context.ohci; 3035 u32 control = IR_CONTEXT_ISOCH_HEADER, match; 3036 int index; 3037 3038 /* the controller cannot start without any queued packets */ 3039 if (ctx->context.last->branch_address == 0) 3040 return -ENODATA; 3041 3042 switch (ctx->base.type) { 3043 case FW_ISO_CONTEXT_TRANSMIT: 3044 index = ctx - ohci->it_context_list; 3045 match = 0; 3046 if (cycle >= 0) 3047 match = IT_CONTEXT_CYCLE_MATCH_ENABLE | 3048 (cycle & 0x7fff) << 16; 3049 3050 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index); 3051 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index); 3052 context_run(&ctx->context, match); 3053 break; 3054 3055 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 3056 control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE; 3057 /* fall through */ 3058 case FW_ISO_CONTEXT_RECEIVE: 3059 index = ctx - ohci->ir_context_list; 3060 match = (tags << 28) | (sync << 8) | ctx->base.channel; 3061 if (cycle >= 0) { 3062 match |= (cycle & 0x07fff) << 12; 3063 control |= IR_CONTEXT_CYCLE_MATCH_ENABLE; 3064 } 3065 3066 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index); 3067 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index); 3068 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); 3069 context_run(&ctx->context, control); 3070 3071 ctx->sync = sync; 3072 ctx->tags = tags; 3073 3074 break; 3075 } 3076 3077 return 0; 3078 } 3079 3080 static int ohci_stop_iso(struct fw_iso_context *base) 3081 { 3082 struct fw_ohci *ohci = fw_ohci(base->card); 3083 struct iso_context *ctx = container_of(base, struct iso_context, base); 3084 int index; 3085 3086 switch (ctx->base.type) { 3087 case FW_ISO_CONTEXT_TRANSMIT: 3088 index = ctx - ohci->it_context_list; 3089 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index); 3090 break; 3091 3092 case FW_ISO_CONTEXT_RECEIVE: 3093 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 3094 index = ctx - ohci->ir_context_list; 3095 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index); 3096 break; 3097 } 3098 flush_writes(ohci); 3099 context_stop(&ctx->context); 3100 
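	/*
	 * Wait for a possibly still running completion tasklet to finish;
	 * callers such as ohci_free_iso_context() release the context right
	 * after this returns.
	 */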
tasklet_kill(&ctx->context.tasklet); 3101 3102 return 0; 3103 } 3104 3105 static void ohci_free_iso_context(struct fw_iso_context *base) 3106 { 3107 struct fw_ohci *ohci = fw_ohci(base->card); 3108 struct iso_context *ctx = container_of(base, struct iso_context, base); 3109 unsigned long flags; 3110 int index; 3111 3112 ohci_stop_iso(base); 3113 context_release(&ctx->context); 3114 free_page((unsigned long)ctx->header); 3115 3116 spin_lock_irqsave(&ohci->lock, flags); 3117 3118 switch (base->type) { 3119 case FW_ISO_CONTEXT_TRANSMIT: 3120 index = ctx - ohci->it_context_list; 3121 ohci->it_context_mask |= 1 << index; 3122 break; 3123 3124 case FW_ISO_CONTEXT_RECEIVE: 3125 index = ctx - ohci->ir_context_list; 3126 ohci->ir_context_mask |= 1 << index; 3127 ohci->ir_context_channels |= 1ULL << base->channel; 3128 break; 3129 3130 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 3131 index = ctx - ohci->ir_context_list; 3132 ohci->ir_context_mask |= 1 << index; 3133 ohci->ir_context_channels |= ohci->mc_channels; 3134 ohci->mc_channels = 0; 3135 ohci->mc_allocated = false; 3136 break; 3137 } 3138 3139 spin_unlock_irqrestore(&ohci->lock, flags); 3140 } 3141 3142 static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels) 3143 { 3144 struct fw_ohci *ohci = fw_ohci(base->card); 3145 unsigned long flags; 3146 int ret; 3147 3148 switch (base->type) { 3149 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 3150 3151 spin_lock_irqsave(&ohci->lock, flags); 3152 3153 /* Don't allow multichannel to grab other contexts' channels. */ 3154 if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) { 3155 *channels = ohci->ir_context_channels; 3156 ret = -EBUSY; 3157 } else { 3158 set_multichannel_mask(ohci, *channels); 3159 ret = 0; 3160 } 3161 3162 spin_unlock_irqrestore(&ohci->lock, flags); 3163 3164 break; 3165 default: 3166 ret = -EINVAL; 3167 } 3168 3169 return ret; 3170 } 3171 3172 #ifdef CONFIG_PM 3173 static void ohci_resume_iso_dma(struct fw_ohci *ohci) 3174 { 3175 int i; 3176 struct iso_context *ctx; 3177 3178 for (i = 0 ; i < ohci->n_ir ; i++) { 3179 ctx = &ohci->ir_context_list[i]; 3180 if (ctx->context.running) 3181 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); 3182 } 3183 3184 for (i = 0 ; i < ohci->n_it ; i++) { 3185 ctx = &ohci->it_context_list[i]; 3186 if (ctx->context.running) 3187 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); 3188 } 3189 } 3190 #endif 3191 3192 static int queue_iso_transmit(struct iso_context *ctx, 3193 struct fw_iso_packet *packet, 3194 struct fw_iso_buffer *buffer, 3195 unsigned long payload) 3196 { 3197 struct descriptor *d, *last, *pd; 3198 struct fw_iso_packet *p; 3199 __le32 *header; 3200 dma_addr_t d_bus, page_bus; 3201 u32 z, header_z, payload_z, irq; 3202 u32 payload_index, payload_end_index, next_page_index; 3203 int page, end_page, i, length, offset; 3204 3205 p = packet; 3206 payload_index = payload; 3207 3208 if (p->skip) 3209 z = 1; 3210 else 3211 z = 2; 3212 if (p->header_length > 0) 3213 z++; 3214 3215 /* Determine the first page the payload isn't contained in. */ 3216 end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT; 3217 if (p->payload_length > 0) 3218 payload_z = end_page - (payload_index >> PAGE_SHIFT); 3219 else 3220 payload_z = 0; 3221 3222 z += payload_z; 3223 3224 /* Get header size in number of descriptors. 
*/ 3225 header_z = DIV_ROUND_UP(p->header_length, sizeof(*d)); 3226 3227 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus); 3228 if (d == NULL) 3229 return -ENOMEM; 3230 3231 if (!p->skip) { 3232 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE); 3233 d[0].req_count = cpu_to_le16(8); 3234 /* 3235 * Link the skip address to this descriptor itself. This causes 3236 * a context to skip a cycle whenever lost cycles or FIFO 3237 * overruns occur, without dropping the data. The application 3238 * should then decide whether this is an error condition or not. 3239 * FIXME: Make the context's cycle-lost behaviour configurable? 3240 */ 3241 d[0].branch_address = cpu_to_le32(d_bus | z); 3242 3243 header = (__le32 *) &d[1]; 3244 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) | 3245 IT_HEADER_TAG(p->tag) | 3246 IT_HEADER_TCODE(TCODE_STREAM_DATA) | 3247 IT_HEADER_CHANNEL(ctx->base.channel) | 3248 IT_HEADER_SPEED(ctx->base.speed)); 3249 header[1] = 3250 cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length + 3251 p->payload_length)); 3252 } 3253 3254 if (p->header_length > 0) { 3255 d[2].req_count = cpu_to_le16(p->header_length); 3256 d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d)); 3257 memcpy(&d[z], p->header, p->header_length); 3258 } 3259 3260 pd = d + z - payload_z; 3261 payload_end_index = payload_index + p->payload_length; 3262 for (i = 0; i < payload_z; i++) { 3263 page = payload_index >> PAGE_SHIFT; 3264 offset = payload_index & ~PAGE_MASK; 3265 next_page_index = (page + 1) << PAGE_SHIFT; 3266 length = 3267 min(next_page_index, payload_end_index) - payload_index; 3268 pd[i].req_count = cpu_to_le16(length); 3269 3270 page_bus = page_private(buffer->pages[page]); 3271 pd[i].data_address = cpu_to_le32(page_bus + offset); 3272 3273 dma_sync_single_range_for_device(ctx->context.ohci->card.device, 3274 page_bus, offset, length, 3275 DMA_TO_DEVICE); 3276 3277 payload_index += length; 3278 } 3279 3280 if (p->interrupt) 3281 irq = DESCRIPTOR_IRQ_ALWAYS; 3282 else 3283 irq = DESCRIPTOR_NO_IRQ; 3284 3285 last = z == 2 ? d : d + z - 1; 3286 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | 3287 DESCRIPTOR_STATUS | 3288 DESCRIPTOR_BRANCH_ALWAYS | 3289 irq); 3290 3291 context_append(&ctx->context, d, z, header_z); 3292 3293 return 0; 3294 } 3295 3296 static int queue_iso_packet_per_buffer(struct iso_context *ctx, 3297 struct fw_iso_packet *packet, 3298 struct fw_iso_buffer *buffer, 3299 unsigned long payload) 3300 { 3301 struct device *device = ctx->context.ohci->card.device; 3302 struct descriptor *d, *pd; 3303 dma_addr_t d_bus, page_bus; 3304 u32 z, header_z, rest; 3305 int i, j, length; 3306 int page, offset, packet_count, header_size, payload_per_buffer; 3307 3308 /* 3309 * The OHCI controller puts the isochronous header and trailer in the 3310 * buffer, so we need at least 8 bytes. 3311 */ 3312 packet_count = packet->header_length / ctx->base.header_size; 3313 header_size = max(ctx->base.header_size, (size_t)8); 3314 3315 /* Get header size in number of descriptors. 
*/ 3316 header_z = DIV_ROUND_UP(header_size, sizeof(*d)); 3317 page = payload >> PAGE_SHIFT; 3318 offset = payload & ~PAGE_MASK; 3319 payload_per_buffer = packet->payload_length / packet_count; 3320 3321 for (i = 0; i < packet_count; i++) { 3322 /* d points to the header descriptor */ 3323 z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1; 3324 d = context_get_descriptors(&ctx->context, 3325 z + header_z, &d_bus); 3326 if (d == NULL) 3327 return -ENOMEM; 3328 3329 d->control = cpu_to_le16(DESCRIPTOR_STATUS | 3330 DESCRIPTOR_INPUT_MORE); 3331 if (packet->skip && i == 0) 3332 d->control |= cpu_to_le16(DESCRIPTOR_WAIT); 3333 d->req_count = cpu_to_le16(header_size); 3334 d->res_count = d->req_count; 3335 d->transfer_status = 0; 3336 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d))); 3337 3338 rest = payload_per_buffer; 3339 pd = d; 3340 for (j = 1; j < z; j++) { 3341 pd++; 3342 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | 3343 DESCRIPTOR_INPUT_MORE); 3344 3345 if (offset + rest < PAGE_SIZE) 3346 length = rest; 3347 else 3348 length = PAGE_SIZE - offset; 3349 pd->req_count = cpu_to_le16(length); 3350 pd->res_count = pd->req_count; 3351 pd->transfer_status = 0; 3352 3353 page_bus = page_private(buffer->pages[page]); 3354 pd->data_address = cpu_to_le32(page_bus + offset); 3355 3356 dma_sync_single_range_for_device(device, page_bus, 3357 offset, length, 3358 DMA_FROM_DEVICE); 3359 3360 offset = (offset + length) & ~PAGE_MASK; 3361 rest -= length; 3362 if (offset == 0) 3363 page++; 3364 } 3365 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | 3366 DESCRIPTOR_INPUT_LAST | 3367 DESCRIPTOR_BRANCH_ALWAYS); 3368 if (packet->interrupt && i == packet_count - 1) 3369 pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); 3370 3371 context_append(&ctx->context, d, z, header_z); 3372 } 3373 3374 return 0; 3375 } 3376 3377 static int queue_iso_buffer_fill(struct iso_context *ctx, 3378 struct fw_iso_packet *packet, 3379 struct fw_iso_buffer *buffer, 3380 unsigned long payload) 3381 { 3382 struct descriptor *d; 3383 dma_addr_t d_bus, page_bus; 3384 int page, offset, rest, z, i, length; 3385 3386 page = payload >> PAGE_SHIFT; 3387 offset = payload & ~PAGE_MASK; 3388 rest = packet->payload_length; 3389 3390 /* We need one descriptor for each page in the buffer. 
*/ 3391 z = DIV_ROUND_UP(offset + rest, PAGE_SIZE); 3392 3393 if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count)) 3394 return -EFAULT; 3395 3396 for (i = 0; i < z; i++) { 3397 d = context_get_descriptors(&ctx->context, 1, &d_bus); 3398 if (d == NULL) 3399 return -ENOMEM; 3400 3401 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | 3402 DESCRIPTOR_BRANCH_ALWAYS); 3403 if (packet->skip && i == 0) 3404 d->control |= cpu_to_le16(DESCRIPTOR_WAIT); 3405 if (packet->interrupt && i == z - 1) 3406 d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); 3407 3408 if (offset + rest < PAGE_SIZE) 3409 length = rest; 3410 else 3411 length = PAGE_SIZE - offset; 3412 d->req_count = cpu_to_le16(length); 3413 d->res_count = d->req_count; 3414 d->transfer_status = 0; 3415 3416 page_bus = page_private(buffer->pages[page]); 3417 d->data_address = cpu_to_le32(page_bus + offset); 3418 3419 dma_sync_single_range_for_device(ctx->context.ohci->card.device, 3420 page_bus, offset, length, 3421 DMA_FROM_DEVICE); 3422 3423 rest -= length; 3424 offset = 0; 3425 page++; 3426 3427 context_append(&ctx->context, d, 1, 0); 3428 } 3429 3430 return 0; 3431 } 3432 3433 static int ohci_queue_iso(struct fw_iso_context *base, 3434 struct fw_iso_packet *packet, 3435 struct fw_iso_buffer *buffer, 3436 unsigned long payload) 3437 { 3438 struct iso_context *ctx = container_of(base, struct iso_context, base); 3439 unsigned long flags; 3440 int ret = -ENOSYS; 3441 3442 spin_lock_irqsave(&ctx->context.ohci->lock, flags); 3443 switch (base->type) { 3444 case FW_ISO_CONTEXT_TRANSMIT: 3445 ret = queue_iso_transmit(ctx, packet, buffer, payload); 3446 break; 3447 case FW_ISO_CONTEXT_RECEIVE: 3448 ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload); 3449 break; 3450 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 3451 ret = queue_iso_buffer_fill(ctx, packet, buffer, payload); 3452 break; 3453 } 3454 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); 3455 3456 return ret; 3457 } 3458 3459 static void ohci_flush_queue_iso(struct fw_iso_context *base) 3460 { 3461 struct context *ctx = 3462 &container_of(base, struct iso_context, base)->context; 3463 3464 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); 3465 } 3466 3467 static int ohci_flush_iso_completions(struct fw_iso_context *base) 3468 { 3469 struct iso_context *ctx = container_of(base, struct iso_context, base); 3470 int ret = 0; 3471 3472 tasklet_disable(&ctx->context.tasklet); 3473 3474 if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) { 3475 context_tasklet((unsigned long)&ctx->context); 3476 3477 switch (base->type) { 3478 case FW_ISO_CONTEXT_TRANSMIT: 3479 case FW_ISO_CONTEXT_RECEIVE: 3480 if (ctx->header_length != 0) 3481 flush_iso_completions(ctx); 3482 break; 3483 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL: 3484 if (ctx->mc_completed != 0) 3485 flush_ir_buffer_fill(ctx); 3486 break; 3487 default: 3488 ret = -ENOSYS; 3489 } 3490 3491 clear_bit_unlock(0, &ctx->flushing_completions); 3492 smp_mb__after_atomic(); 3493 } 3494 3495 tasklet_enable(&ctx->context.tasklet); 3496 3497 return ret; 3498 } 3499 3500 static const struct fw_card_driver ohci_driver = { 3501 .enable = ohci_enable, 3502 .read_phy_reg = ohci_read_phy_reg, 3503 .update_phy_reg = ohci_update_phy_reg, 3504 .set_config_rom = ohci_set_config_rom, 3505 .send_request = ohci_send_request, 3506 .send_response = ohci_send_response, 3507 .cancel_packet = ohci_cancel_packet, 3508 .enable_phys_dma = ohci_enable_phys_dma, 3509 .read_csr = ohci_read_csr, 3510 .write_csr = ohci_write_csr, 3511 
3512 .allocate_iso_context = ohci_allocate_iso_context, 3513 .free_iso_context = ohci_free_iso_context, 3514 .set_iso_channels = ohci_set_iso_channels, 3515 .queue_iso = ohci_queue_iso, 3516 .flush_queue_iso = ohci_flush_queue_iso, 3517 .flush_iso_completions = ohci_flush_iso_completions, 3518 .start_iso = ohci_start_iso, 3519 .stop_iso = ohci_stop_iso, 3520 }; 3521 3522 #ifdef CONFIG_PPC_PMAC 3523 static void pmac_ohci_on(struct pci_dev *dev) 3524 { 3525 if (machine_is(powermac)) { 3526 struct device_node *ofn = pci_device_to_OF_node(dev); 3527 3528 if (ofn) { 3529 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1); 3530 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1); 3531 } 3532 } 3533 } 3534 3535 static void pmac_ohci_off(struct pci_dev *dev) 3536 { 3537 if (machine_is(powermac)) { 3538 struct device_node *ofn = pci_device_to_OF_node(dev); 3539 3540 if (ofn) { 3541 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0); 3542 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0); 3543 } 3544 } 3545 } 3546 #else 3547 static inline void pmac_ohci_on(struct pci_dev *dev) {} 3548 static inline void pmac_ohci_off(struct pci_dev *dev) {} 3549 #endif /* CONFIG_PPC_PMAC */ 3550 3551 static int pci_probe(struct pci_dev *dev, 3552 const struct pci_device_id *ent) 3553 { 3554 struct fw_ohci *ohci; 3555 u32 bus_options, max_receive, link_speed, version; 3556 u64 guid; 3557 int i, err; 3558 size_t size; 3559 3560 if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) { 3561 dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n"); 3562 return -ENOSYS; 3563 } 3564 3565 ohci = kzalloc(sizeof(*ohci), GFP_KERNEL); 3566 if (ohci == NULL) { 3567 err = -ENOMEM; 3568 goto fail; 3569 } 3570 3571 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); 3572 3573 pmac_ohci_on(dev); 3574 3575 err = pci_enable_device(dev); 3576 if (err) { 3577 dev_err(&dev->dev, "failed to enable OHCI hardware\n"); 3578 goto fail_free; 3579 } 3580 3581 pci_set_master(dev); 3582 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0); 3583 pci_set_drvdata(dev, ohci); 3584 3585 spin_lock_init(&ohci->lock); 3586 mutex_init(&ohci->phy_reg_mutex); 3587 3588 INIT_WORK(&ohci->bus_reset_work, bus_reset_work); 3589 3590 if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) || 3591 pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) { 3592 ohci_err(ohci, "invalid MMIO resource\n"); 3593 err = -ENXIO; 3594 goto fail_disable; 3595 } 3596 3597 err = pci_request_region(dev, 0, ohci_driver_name); 3598 if (err) { 3599 ohci_err(ohci, "MMIO resource unavailable\n"); 3600 goto fail_disable; 3601 } 3602 3603 ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE); 3604 if (ohci->registers == NULL) { 3605 ohci_err(ohci, "failed to remap registers\n"); 3606 err = -ENXIO; 3607 goto fail_iomem; 3608 } 3609 3610 for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++) 3611 if ((ohci_quirks[i].vendor == dev->vendor) && 3612 (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID || 3613 ohci_quirks[i].device == dev->device) && 3614 (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID || 3615 ohci_quirks[i].revision >= dev->revision)) { 3616 ohci->quirks = ohci_quirks[i].flags; 3617 break; 3618 } 3619 if (param_quirks) 3620 ohci->quirks = param_quirks; 3621 3622 /* 3623 * Because dma_alloc_coherent() allocates at least one page, 3624 * we save space by using a common buffer for the AR request/ 3625 * response descriptors and the self IDs buffer. 
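	 * The layout of the shared page set up below (and checked by the
	 * BUILD_BUG_ON()s) is:
	 *   [0,           PAGE_SIZE/4)  AR request context descriptors
	 *   [PAGE_SIZE/4, PAGE_SIZE/2)  AR response context descriptors
	 *   [PAGE_SIZE/2, PAGE_SIZE)    self ID receive buffer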
3626 */ 3627 BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4); 3628 BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2); 3629 ohci->misc_buffer = dma_alloc_coherent(ohci->card.device, 3630 PAGE_SIZE, 3631 &ohci->misc_buffer_bus, 3632 GFP_KERNEL); 3633 if (!ohci->misc_buffer) { 3634 err = -ENOMEM; 3635 goto fail_iounmap; 3636 } 3637 3638 err = ar_context_init(&ohci->ar_request_ctx, ohci, 0, 3639 OHCI1394_AsReqRcvContextControlSet); 3640 if (err < 0) 3641 goto fail_misc_buf; 3642 3643 err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4, 3644 OHCI1394_AsRspRcvContextControlSet); 3645 if (err < 0) 3646 goto fail_arreq_ctx; 3647 3648 err = context_init(&ohci->at_request_ctx, ohci, 3649 OHCI1394_AsReqTrContextControlSet, handle_at_packet); 3650 if (err < 0) 3651 goto fail_arrsp_ctx; 3652 3653 err = context_init(&ohci->at_response_ctx, ohci, 3654 OHCI1394_AsRspTrContextControlSet, handle_at_packet); 3655 if (err < 0) 3656 goto fail_atreq_ctx; 3657 3658 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0); 3659 ohci->ir_context_channels = ~0ULL; 3660 ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); 3661 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0); 3662 ohci->ir_context_mask = ohci->ir_context_support; 3663 ohci->n_ir = hweight32(ohci->ir_context_mask); 3664 size = sizeof(struct iso_context) * ohci->n_ir; 3665 ohci->ir_context_list = kzalloc(size, GFP_KERNEL); 3666 3667 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0); 3668 ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); 3669 /* JMicron JMB38x often shows 0 at first read, just ignore it */ 3670 if (!ohci->it_context_support) { 3671 ohci_notice(ohci, "overriding IsoXmitIntMask\n"); 3672 ohci->it_context_support = 0xf; 3673 } 3674 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0); 3675 ohci->it_context_mask = ohci->it_context_support; 3676 ohci->n_it = hweight32(ohci->it_context_mask); 3677 size = sizeof(struct iso_context) * ohci->n_it; 3678 ohci->it_context_list = kzalloc(size, GFP_KERNEL); 3679 3680 if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) { 3681 err = -ENOMEM; 3682 goto fail_contexts; 3683 } 3684 3685 ohci->self_id = ohci->misc_buffer + PAGE_SIZE/2; 3686 ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2; 3687 3688 bus_options = reg_read(ohci, OHCI1394_BusOptions); 3689 max_receive = (bus_options >> 12) & 0xf; 3690 link_speed = bus_options & 0x7; 3691 guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) | 3692 reg_read(ohci, OHCI1394_GUIDLo); 3693 3694 if (!(ohci->quirks & QUIRK_NO_MSI)) 3695 pci_enable_msi(dev); 3696 if (request_irq(dev->irq, irq_handler, 3697 pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, 3698 ohci_driver_name, ohci)) { 3699 ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq); 3700 err = -EIO; 3701 goto fail_msi; 3702 } 3703 3704 err = fw_card_add(&ohci->card, max_receive, link_speed, guid); 3705 if (err) 3706 goto fail_irq; 3707 3708 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; 3709 ohci_notice(ohci, 3710 "added OHCI v%x.%x device as card %d, " 3711 "%d IR + %d IT contexts, quirks 0x%x%s\n", 3712 version >> 16, version & 0xff, ohci->card.index, 3713 ohci->n_ir, ohci->n_it, ohci->quirks, 3714 reg_read(ohci, OHCI1394_PhyUpperBound) ? 
3715 ", physUB" : ""); 3716 3717 return 0; 3718 3719 fail_irq: 3720 free_irq(dev->irq, ohci); 3721 fail_msi: 3722 pci_disable_msi(dev); 3723 fail_contexts: 3724 kfree(ohci->ir_context_list); 3725 kfree(ohci->it_context_list); 3726 context_release(&ohci->at_response_ctx); 3727 fail_atreq_ctx: 3728 context_release(&ohci->at_request_ctx); 3729 fail_arrsp_ctx: 3730 ar_context_release(&ohci->ar_response_ctx); 3731 fail_arreq_ctx: 3732 ar_context_release(&ohci->ar_request_ctx); 3733 fail_misc_buf: 3734 dma_free_coherent(ohci->card.device, PAGE_SIZE, 3735 ohci->misc_buffer, ohci->misc_buffer_bus); 3736 fail_iounmap: 3737 pci_iounmap(dev, ohci->registers); 3738 fail_iomem: 3739 pci_release_region(dev, 0); 3740 fail_disable: 3741 pci_disable_device(dev); 3742 fail_free: 3743 kfree(ohci); 3744 pmac_ohci_off(dev); 3745 fail: 3746 return err; 3747 } 3748 3749 static void pci_remove(struct pci_dev *dev) 3750 { 3751 struct fw_ohci *ohci = pci_get_drvdata(dev); 3752 3753 /* 3754 * If the removal is happening from the suspend state, LPS won't be 3755 * enabled and host registers (eg., IntMaskClear) won't be accessible. 3756 */ 3757 if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) { 3758 reg_write(ohci, OHCI1394_IntMaskClear, ~0); 3759 flush_writes(ohci); 3760 } 3761 cancel_work_sync(&ohci->bus_reset_work); 3762 fw_core_remove_card(&ohci->card); 3763 3764 /* 3765 * FIXME: Fail all pending packets here, now that the upper 3766 * layers can't queue any more. 3767 */ 3768 3769 software_reset(ohci); 3770 free_irq(dev->irq, ohci); 3771 3772 if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom) 3773 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 3774 ohci->next_config_rom, ohci->next_config_rom_bus); 3775 if (ohci->config_rom) 3776 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, 3777 ohci->config_rom, ohci->config_rom_bus); 3778 ar_context_release(&ohci->ar_request_ctx); 3779 ar_context_release(&ohci->ar_response_ctx); 3780 dma_free_coherent(ohci->card.device, PAGE_SIZE, 3781 ohci->misc_buffer, ohci->misc_buffer_bus); 3782 context_release(&ohci->at_request_ctx); 3783 context_release(&ohci->at_response_ctx); 3784 kfree(ohci->it_context_list); 3785 kfree(ohci->ir_context_list); 3786 pci_disable_msi(dev); 3787 pci_iounmap(dev, ohci->registers); 3788 pci_release_region(dev, 0); 3789 pci_disable_device(dev); 3790 kfree(ohci); 3791 pmac_ohci_off(dev); 3792 3793 dev_notice(&dev->dev, "removed fw-ohci device\n"); 3794 } 3795 3796 #ifdef CONFIG_PM 3797 static int pci_suspend(struct pci_dev *dev, pm_message_t state) 3798 { 3799 struct fw_ohci *ohci = pci_get_drvdata(dev); 3800 int err; 3801 3802 software_reset(ohci); 3803 err = pci_save_state(dev); 3804 if (err) { 3805 ohci_err(ohci, "pci_save_state failed\n"); 3806 return err; 3807 } 3808 err = pci_set_power_state(dev, pci_choose_state(dev, state)); 3809 if (err) 3810 ohci_err(ohci, "pci_set_power_state failed with %d\n", err); 3811 pmac_ohci_off(dev); 3812 3813 return 0; 3814 } 3815 3816 static int pci_resume(struct pci_dev *dev) 3817 { 3818 struct fw_ohci *ohci = pci_get_drvdata(dev); 3819 int err; 3820 3821 pmac_ohci_on(dev); 3822 pci_set_power_state(dev, PCI_D0); 3823 pci_restore_state(dev); 3824 err = pci_enable_device(dev); 3825 if (err) { 3826 ohci_err(ohci, "pci_enable_device failed\n"); 3827 return err; 3828 } 3829 3830 /* Some systems don't setup GUID register on resume from ram */ 3831 if (!reg_read(ohci, OHCI1394_GUIDLo) && 3832 !reg_read(ohci, OHCI1394_GUIDHi)) { 3833 reg_write(ohci, OHCI1394_GUIDLo, 
(u32)ohci->card.guid); 3834 reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32)); 3835 } 3836 3837 err = ohci_enable(&ohci->card, NULL, 0); 3838 if (err) 3839 return err; 3840 3841 ohci_resume_iso_dma(ohci); 3842 3843 return 0; 3844 } 3845 #endif 3846 3847 static const struct pci_device_id pci_table[] = { 3848 { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) }, 3849 { } 3850 }; 3851 3852 MODULE_DEVICE_TABLE(pci, pci_table); 3853 3854 static struct pci_driver fw_ohci_pci_driver = { 3855 .name = ohci_driver_name, 3856 .id_table = pci_table, 3857 .probe = pci_probe, 3858 .remove = pci_remove, 3859 #ifdef CONFIG_PM 3860 .resume = pci_resume, 3861 .suspend = pci_suspend, 3862 #endif 3863 }; 3864 3865 static int __init fw_ohci_init(void) 3866 { 3867 selfid_workqueue = alloc_workqueue(KBUILD_MODNAME, WQ_MEM_RECLAIM, 0); 3868 if (!selfid_workqueue) 3869 return -ENOMEM; 3870 3871 return pci_register_driver(&fw_ohci_pci_driver); 3872 } 3873 3874 static void __exit fw_ohci_cleanup(void) 3875 { 3876 pci_unregister_driver(&fw_ohci_pci_driver); 3877 destroy_workqueue(selfid_workqueue); 3878 } 3879 3880 module_init(fw_ohci_init); 3881 module_exit(fw_ohci_cleanup); 3882 3883 MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>"); 3884 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers"); 3885 MODULE_LICENSE("GPL"); 3886 3887 /* Provide a module alias so root-on-sbp2 initrds don't break. */ 3888 MODULE_ALIAS("ohci1394"); 3889
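/*
 * Usage sketch: param_debug, param_quirks and param_remote_dma used above
 * are module parameters.  Assuming they are exposed under the names "debug",
 * "quirks" and "remote_dma" (an assumption, not verified here), the driver
 * can be loaded for example as:
 *
 *	modprobe firewire-ohci debug=1 quirks=0 remote_dma=0
 *
 * with purely illustrative values.
 */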