Lines matching: lock, detect, precision, 6, ns, enable

1 // SPDX-License-Identifier: GPL-2.0-or-later
5 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
13 #include <linux/dma-mapping.h>
15 #include <linux/firewire-constants.h>
44 #define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args)
45 #define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args)
46 #define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args)
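The three wrappers above forward to the driver-model dev_info()/dev_notice()/dev_err() helpers, prefixing every message with the card's struct device. A minimal, compilable sketch of the same GNU variadic-macro pattern, with plain printf() standing in for dev_info() and the struct card/card_info() names invented for the demo:

#include <stdio.h>

struct card { const char *name; };

/* ##args drops the trailing comma when no variadic arguments are
 * given, which is what the ohci_info()/ohci_err() macros rely on. */
#define card_info(c, f, args...) printf("%s: " f, (c)->name, ##args)

int main(void)
{
        struct card c = { "fw0" };

        card_info(&c, "enabled\n");            /* no extra arguments */
        card_info(&c, "quirks 0x%x\n", 6);     /* with arguments */
        return 0;
}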
55 #define DESCRIPTOR_YY (1 << 6)
106 * A buffer that contains a block of DMA-able coherent memory used for
126 * List of page-sized buffers for storing DMA descriptors.
195 * this driver with this lock held.
197 spinlock_t lock; member
293 return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ); in has_reboot_by_cycle_timer_read_quirk()
302 // Detect any type of AMD Ryzen machine. in detect_vt630x_with_asm1083_on_amd_ryzen_machine()
306 // Detect VIA VT6306/6307/6308. in detect_vt630x_with_asm1083_on_amd_ryzen_machine()
307 if (pdev->vendor != PCI_VENDOR_ID_VIA) in detect_vt630x_with_asm1083_on_amd_ryzen_machine()
309 if (pdev->device != PCI_DEVICE_ID_VIA_VT630X) in detect_vt630x_with_asm1083_on_amd_ryzen_machine()
312 // Detect Asmedia ASM1083/1085. in detect_vt630x_with_asm1083_on_amd_ryzen_machine()
313 pcie_to_pci_bridge = pdev->bus->self; in detect_vt630x_with_asm1083_on_amd_ryzen_machine()
314 if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA) in detect_vt630x_with_asm1083_on_amd_ryzen_machine()
316 if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X) in detect_vt630x_with_asm1083_on_amd_ryzen_machine()
337 {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
402 ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
405 ", or a combination, or all = -1)");
409 MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");
451 [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
453 static const char port[] = { '.', '-', 'p', 'c', };
468 self_id_count, generation, ohci->node_id); in log_selfids()
470 for (s = ohci->self_id_buffer; self_id_count--; ++s) in log_selfids()
474 *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2), in log_selfids()
483 _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2)); in log_selfids()
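The self-ID logging above unpacks one quadlet per line: bits 29..24 hold the phy ID (hence *s >> 24 & 63) and 2-bit port states sit at even bit offsets, indexed into the port[] glyph table. A small sketch of that decode (the quadlet value is made up; decode_port() mirrors what the driver's _p() helper appears to do):

#include <stdint.h>
#include <stdio.h>

/* '.' no port, '-' unconnected, 'p' parent, 'c' child */
static const char port_glyph[] = { '.', '-', 'p', 'c' };

/* Extract a 2-bit port-state field at bit offset m. */
static char decode_port(uint32_t self_id, unsigned int m)
{
        return port_glyph[(self_id >> m) & 3];
}

int main(void)
{
        uint32_t self_id = 0x807f8cde;  /* made-up example quadlet */

        printf("phy %u, ports %c%c%c\n",
               (unsigned)((self_id >> 24) & 63),
               decode_port(self_id, 6),   /* p0 */
               decode_port(self_id, 4),   /* p1 */
               decode_port(self_id, 2));  /* p2 */
        return 0;
}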
487 [0x00] = "evt_no_status", [0x01] = "-reserved-",
493 [0x0c] = "-reserved-", [0x0d] = "-reserved-",
495 [0x10] = "-reserved-", [0x11] = "ack_complete",
496 [0x12] = "ack_pending ", [0x13] = "-reserved-",
498 [0x16] = "ack_busy_B", [0x17] = "-reserved-",
499 [0x18] = "-reserved-", [0x19] = "-reserved-",
500 [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
501 [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
502 [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
507 [0x2] = "W resp", [0x3] = "-reserved-",
512 [0xc] = "-reserved-", [0xd] = "-reserved-",
513 [0xe] = "link internal", [0xf] = "-reserved-",
558 "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n", in log_ar_at_event()
565 "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n", in log_ar_at_event()
574 writel(data, ohci->registers + offset); in reg_write()
579 return readl(ohci->registers + offset); in reg_read()
590 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
603 return -ENODEV; /* Card was ejected. */ in read_phy_reg()
618 return -EBUSY; in read_phy_reg()
630 return -ENODEV; /* Card was ejected. */ in write_phy_reg()
641 return -EBUSY; in write_phy_reg()
677 mutex_lock(&ohci->phy_reg_mutex); in ohci_read_phy_reg()
679 mutex_unlock(&ohci->phy_reg_mutex); in ohci_read_phy_reg()
690 mutex_lock(&ohci->phy_reg_mutex); in ohci_update_phy_reg()
692 mutex_unlock(&ohci->phy_reg_mutex); in ohci_update_phy_reg()
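read_phy_reg()/write_phy_reg() can fail with -ENODEV (card ejected) or -EBUSY (the PhyControl register never became ready), and the public entry points serialize them under phy_reg_mutex. A userspace sketch of the same read-modify-write-under-a-mutex shape, using pthreads and a toy register file in place of the real PHY helpers (all names here are stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t phy_reg_mutex = PTHREAD_MUTEX_INITIALIZER;
static int phy_regs[16] = { [4] = 0x3f };       /* toy register file */

static int read_phy_reg(int addr)               { return phy_regs[addr]; }
static int write_phy_reg(int addr, int val)     { phy_regs[addr] = val; return 0; }

/* Clear/set bits atomically with respect to other PHY users, the
 * same sequence ohci_update_phy_reg() performs under the mutex. */
static int update_phy_reg(int addr, int clear_bits, int set_bits)
{
        int ret;

        pthread_mutex_lock(&phy_reg_mutex);
        ret = read_phy_reg(addr);
        if (ret >= 0)
                ret = write_phy_reg(addr, (ret & ~clear_bits) | set_bits);
        pthread_mutex_unlock(&phy_reg_mutex);
        return ret;
}

int main(void)
{
        update_phy_reg(4, 0x0f, 0x01);
        printf("reg 4 = 0x%02x\n", read_phy_reg(4));
        return 0;
}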
699 return page_private(ctx->pages[i]); in ar_buffer_bus()
706 d = &ctx->descriptors[index]; in ar_context_link_page()
707 d->branch_address &= cpu_to_le32(~0xf); in ar_context_link_page()
708 d->res_count = cpu_to_le16(PAGE_SIZE); in ar_context_link_page()
709 d->transfer_status = 0; in ar_context_link_page()
712 d = &ctx->descriptors[ctx->last_buffer_index]; in ar_context_link_page()
713 d->branch_address |= cpu_to_le32(1); in ar_context_link_page()
715 ctx->last_buffer_index = index; in ar_context_link_page()
717 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); in ar_context_link_page()
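ar_context_link_page() re-arms one receive buffer: res_count is reset to a full page, the Z field in the low four bits of its branch_address is cleared so the controller stops there, and the previous descriptor gets Z=1 to splice the buffer back into the ring before CONTEXT_WAKE is written. A plain-C sketch of just the branch_address bookkeeping (endianness conversions dropped, names hypothetical):

#include <stdint.h>
#include <stdio.h>

#define N_BUFFERS 4

struct descriptor { uint32_t branch_address; uint16_t res_count; };

static struct descriptor descs[N_BUFFERS];

/* Re-arm buffer `index` and splice it in after buffer `last`. */
static void link_page(unsigned int index, unsigned int last,
                      uint16_t page_size)
{
        descs[index].branch_address &= ~0xfu;   /* Z = 0: list ends here */
        descs[index].res_count = page_size;

        descs[last].branch_address |= 1;        /* Z = 1: one more block */
}

int main(void)
{
        descs[0].branch_address = 0x1000;
        descs[3].branch_address = 0x4000;
        link_page(0, 3, 4096);
        printf("buf3 -> %#x, buf0 -> %#x\n",
               (unsigned)descs[3].branch_address,
               (unsigned)descs[0].branch_address);
        return 0;
}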
722 struct device *dev = ctx->ohci->card.device; in ar_context_release()
725 if (!ctx->buffer) in ar_context_release()
728 vunmap(ctx->buffer); in ar_context_release()
731 if (ctx->pages[i]) in ar_context_release()
732 dma_free_pages(dev, PAGE_SIZE, ctx->pages[i], in ar_context_release()
739 struct fw_ohci *ohci = ctx->ohci; in ar_context_abort()
741 if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) { in ar_context_abort()
742 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); in ar_context_abort()
757 return ar_next_buffer_index(ctx->last_buffer_index); in ar_first_buffer_index()
767 unsigned int i, next_i, last = ctx->last_buffer_index; in ar_search_last_active_buffer()
771 res_count = READ_ONCE(ctx->descriptors[i].res_count); in ar_search_last_active_buffer()
779 next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count); in ar_search_last_active_buffer()
795 next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count); in ar_search_last_active_buffer()
810 *buffer_offset = PAGE_SIZE - le16_to_cpu(res_count); in ar_search_last_active_buffer()
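The search above relies on the controller decrementing each descriptor's res_count as data lands: res_count == 0 means the buffer filled and DMA moved on, 0 < res_count < PAGE_SIZE means DMA stopped inside it, and the received byte count is PAGE_SIZE - res_count. A sketch of that scan over a small ring (the real function also re-reads res_count to close a race with in-flight DMA, which this single-threaded version skips):

#include <stdint.h>
#include <stdio.h>

#define N_BUFFERS 8
#define BUF_SIZE  4096u

static uint16_t res_count[N_BUFFERS];

/* Walk the ring from `first` and total up the bytes received. */
static unsigned int bytes_received(unsigned int first)
{
        unsigned int i = first, total = 0;

        for (;;) {
                uint16_t rc = res_count[i];

                total += BUF_SIZE - rc;
                if (rc != 0)            /* DMA stopped in this buffer */
                        break;
                i = (i + 1) % N_BUFFERS;
                if (i == first)         /* every buffer is full */
                        break;
        }
        return total;
}

int main(void)
{
        res_count[0] = 0;               /* full page */
        res_count[1] = BUF_SIZE - 100;  /* 100 bytes received */
        res_count[2] = BUF_SIZE;        /* untouched */
        printf("%u bytes received\n", bytes_received(0));
        return 0;
}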
827 dma_sync_single_for_cpu(ctx->ohci->card.device, in ar_sync_buffers_for_cpu()
833 dma_sync_single_for_cpu(ctx->ohci->card.device, in ar_sync_buffers_for_cpu()
840 (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
847 struct fw_ohci *ohci = ctx->ohci; in handle_ar_packet()
903 p.ack = evt - 16; in handle_ar_packet()
906 p.generation = ohci->request_generation; in handle_ar_packet()
932 if (!(ohci->quirks & QUIRK_RESET_PACKET)) in handle_ar_packet()
933 ohci->request_generation = (p.header[2] >> 16) & 0xff; in handle_ar_packet()
934 } else if (ctx == &ohci->ar_request_ctx) { in handle_ar_packet()
935 fw_core_handle_request(&ohci->card, &p); in handle_ar_packet()
937 fw_core_handle_response(&ohci->card, &p); in handle_ar_packet()
963 dma_sync_single_for_device(ctx->ohci->card.device, in ar_recycle_buffers()
977 p = ctx->pointer; in ar_context_tasklet()
984 end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset; in ar_context_tasklet()
993 void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE; in ar_context_tasklet()
998 p -= AR_BUFFERS * PAGE_SIZE; in ar_context_tasklet()
1008 ctx->pointer = p; in ar_context_tasklet()
1014 ctx->pointer = NULL; in ar_context_tasklet()
1020 struct device *dev = ohci->card.device; in ar_context_init()
1026 ctx->regs = regs; in ar_context_init()
1027 ctx->ohci = ohci; in ar_context_init()
1028 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx); in ar_context_init()
1031 ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr, in ar_context_init()
1033 if (!ctx->pages[i]) in ar_context_init()
1035 set_page_private(ctx->pages[i], dma_addr); in ar_context_init()
1041 pages[i] = ctx->pages[i]; in ar_context_init()
1043 pages[AR_BUFFERS + i] = ctx->pages[i]; in ar_context_init()
1044 ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL); in ar_context_init()
1045 if (!ctx->buffer) in ar_context_init()
1048 ctx->descriptors = ohci->misc_buffer + descriptors_offset; in ar_context_init()
1049 ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset; in ar_context_init()
1052 d = &ctx->descriptors[i]; in ar_context_init()
1053 d->req_count = cpu_to_le16(PAGE_SIZE); in ar_context_init()
1054 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | in ar_context_init()
1057 d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i)); in ar_context_init()
1058 d->branch_address = cpu_to_le32(ctx->descriptors_bus + in ar_context_init()
1067 return -ENOMEM; in ar_context_init()
1077 ctx->pointer = ctx->buffer; in ar_context_run()
1079 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1); in ar_context_run()
1080 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN); in ar_context_run()
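ar_context_init() passes each page to vmap() twice in a row, so a packet that wraps from the last AR buffer back to the first is still virtually contiguous and the tasklet can simply rewind with p -= AR_BUFFERS * PAGE_SIZE. The classic userspace analogue maps one memfd into two adjacent halves of a reservation; a Linux-only sketch (error handling trimmed, not the driver's code):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        size_t size = 4096;                     /* one page for the demo */
        int fd = memfd_create("ring", 0);
        char *base;

        if (fd < 0 || ftruncate(fd, size) < 0)
                return 1;

        /* Reserve 2*size of address space, then map the same memory
         * into both halves - the userspace cousin of the driver's
         * vmap(pages, 2 * AR_BUFFERS, ...) double mapping. */
        base = mmap(NULL, 2 * size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (base == MAP_FAILED)
                return 1;
        mmap(base, size, PROT_READ | PROT_WRITE,
             MAP_SHARED | MAP_FIXED, fd, 0);
        mmap(base + size, size, PROT_READ | PROT_WRITE,
             MAP_SHARED | MAP_FIXED, fd, 0);

        strcpy(base + size - 2, "wrap");        /* write across the seam */
        printf("buffer start reads back: \"%s\"\n", base);     /* "ap" */
        return 0;
}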
1087 branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS); in find_branch_descriptor()
1093 return d + z - 1; in find_branch_descriptor()
1104 desc = list_entry(ctx->buffer_list.next, in context_tasklet()
1106 last = ctx->last; in context_tasklet()
1107 while (last->branch_address != 0) { in context_tasklet()
1109 address = le32_to_cpu(last->branch_address); in context_tasklet()
1112 ctx->current_bus = address; in context_tasklet()
1116 if (address < desc->buffer_bus || in context_tasklet()
1117 address >= desc->buffer_bus + desc->used) in context_tasklet()
1118 desc = list_entry(desc->list.next, in context_tasklet()
1120 d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d); in context_tasklet()
1123 if (!ctx->callback(ctx, d, last)) in context_tasklet()
1130 old_desc->used = 0; in context_tasklet()
1131 spin_lock_irqsave(&ctx->ohci->lock, flags); in context_tasklet()
1132 list_move_tail(&old_desc->list, &ctx->buffer_list); in context_tasklet()
1133 spin_unlock_irqrestore(&ctx->ohci->lock, flags); in context_tasklet()
1135 ctx->last = last; in context_tasklet()
1141 * context. Must be called with ohci->lock held.
1151 * program. This will catch run-away userspace or DoS attacks. in context_add_buffer()
1153 if (ctx->total_allocation >= 16*1024*1024) in context_add_buffer()
1154 return -ENOMEM; in context_add_buffer()
1156 desc = dmam_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, &bus_addr, GFP_ATOMIC); in context_add_buffer()
1158 return -ENOMEM; in context_add_buffer()
1160 offset = (void *)&desc->buffer - (void *)desc; in context_add_buffer()
1162 * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads in context_add_buffer()
1163 * for descriptors, even 0x10-byte ones. This can cause page faults when in context_add_buffer()
1167 desc->buffer_size = PAGE_SIZE - offset - 0x10; in context_add_buffer()
1168 desc->buffer_bus = bus_addr + offset; in context_add_buffer()
1169 desc->used = 0; in context_add_buffer()
1171 list_add_tail(&desc->list, &ctx->buffer_list); in context_add_buffer()
1172 ctx->total_allocation += PAGE_SIZE; in context_add_buffer()
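Two sizing rules meet here: a context may never hold more than 16 MB of descriptor memory, and each page gives up its last 0x10 bytes so a controller that always prefetches 0x20 bytes per descriptor read cannot run off the end of the page. Combined with the fit check in context_get_descriptors() further down, this amounts to a per-page bump allocator; a sketch with assumed sizes (4 KB page, 16-byte descriptors, 32-byte header):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define DESC_SIZE 16u           /* one OHCI descriptor */

struct page_buf {
        uint32_t used;
        uint32_t buffer_size;   /* already excludes the 0x10-byte guard */
};

/* Bump-allocate z descriptors from the page, or fail. */
static int get_descriptors(struct page_buf *p, unsigned int z)
{
        if (z * DESC_SIZE > p->buffer_size - p->used)
                return -1;
        p->used += z * DESC_SIZE;
        return 0;
}

int main(void)
{
        struct page_buf p = { 0, PAGE_SIZE - 32 - 0x10 };
        unsigned int blocks = 0;

        while (get_descriptors(&p, 4) == 0)     /* 4-descriptor blocks */
                blocks++;
        printf("%u blocks fit in one page (%u bytes used)\n",
               blocks, p.used);
        return 0;
}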
1180 ctx->ohci = ohci; in context_init()
1181 ctx->regs = regs; in context_init()
1182 ctx->total_allocation = 0; in context_init()
1184 INIT_LIST_HEAD(&ctx->buffer_list); in context_init()
1186 return -ENOMEM; in context_init()
1188 ctx->buffer_tail = list_entry(ctx->buffer_list.next, in context_init()
1191 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx); in context_init()
1192 ctx->callback = callback; in context_init()
1199 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer)); in context_init()
1200 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST); in context_init()
1201 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011); in context_init()
1202 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer); in context_init()
1203 ctx->last = ctx->buffer_tail->buffer; in context_init()
1204 ctx->prev = ctx->buffer_tail->buffer; in context_init()
1205 ctx->prev_z = 1; in context_init()
1212 struct fw_card *card = &ctx->ohci->card; in context_release()
1215 list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) { in context_release()
1216 dmam_free_coherent(card->device, PAGE_SIZE, desc, in context_release()
1217 desc->buffer_bus - ((void *)&desc->buffer - (void *)desc)); in context_release()
1221 /* Must be called with ohci->lock held */
1226 struct descriptor_buffer *desc = ctx->buffer_tail; in context_get_descriptors()
1228 if (z * sizeof(*d) > desc->buffer_size) in context_get_descriptors()
1231 if (z * sizeof(*d) > desc->buffer_size - desc->used) { in context_get_descriptors()
1235 if (desc->list.next == &ctx->buffer_list) { in context_get_descriptors()
1241 desc = list_entry(desc->list.next, in context_get_descriptors()
1243 ctx->buffer_tail = desc; in context_get_descriptors()
1246 d = desc->buffer + desc->used / sizeof(*d); in context_get_descriptors()
1248 *d_bus = desc->buffer_bus + desc->used; in context_get_descriptors()
1255 struct fw_ohci *ohci = ctx->ohci; in context_run()
1257 reg_write(ohci, COMMAND_PTR(ctx->regs), in context_run()
1258 le32_to_cpu(ctx->last->branch_address)); in context_run()
1259 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0); in context_run()
1260 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra); in context_run()
1261 ctx->running = true; in context_run()
1269 struct descriptor_buffer *desc = ctx->buffer_tail; in context_append()
1272 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d); in context_append()
1274 desc->used += (z + extra) * sizeof(*d); in context_append()
1278 d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z); in context_append()
1279 d_branch->branch_address = cpu_to_le32(d_bus | z); in context_append()
1284 * multi-descriptor block starting with an INPUT_MORE, put a copy of in context_append()
1290 if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) && in context_append()
1291 d_branch != ctx->prev && in context_append()
1292 (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) == in context_append()
1294 ctx->prev->branch_address = cpu_to_le32(d_bus | z); in context_append()
1297 ctx->prev = d; in context_append()
1298 ctx->prev_z = z; in context_append()
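context_append() publishes a finished block by storing its bus address, with the block's descriptor count Z packed into the low four bits, into the previous block's branch_address; on QUIRK_IR_WAKE controllers the same value is also copied into the preceding INPUT_MORE descriptor. Since descriptors are 16-byte aligned, the low bits are free to carry Z; a sketch of the encoding:

#include <stdint.h>
#include <stdio.h>

/* Branch addresses are 16-byte aligned, so the low 4 bits carry Z,
 * the number of descriptors in the block being branched to. */
static uint32_t make_branch(uint32_t bus_addr, unsigned int z)
{
        return (bus_addr & ~0xfu) | (z & 0xfu);
}

int main(void)
{
        uint32_t branch = make_branch(0x12340, 3);

        printf("branch %#x -> address %#x, z = %u\n",
               (unsigned)branch,
               (unsigned)(branch & ~0xfu),
               (unsigned)(branch & 0xfu));
        return 0;
}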
1303 struct fw_ohci *ohci = ctx->ohci; in context_stop()
1307 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); in context_stop()
1308 ctx->running = false; in context_stop()
1311 reg = reg_read(ohci, CONTROL_SET(ctx->regs)); in context_stop()
1328 * Must always be called with the ohci->lock held to ensure proper
1334 struct fw_ohci *ohci = ctx->ohci; in at_context_queue_packet()
1343 packet->ack = RCODE_SEND_ERROR; in at_context_queue_packet()
1344 return -1; in at_context_queue_packet()
1348 d[0].res_count = cpu_to_le16(packet->timestamp); in at_context_queue_packet()
1356 tcode = (packet->header[0] >> 4) & 0x0f; in at_context_queue_packet()
1368 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | in at_context_queue_packet()
1369 (packet->speed << 16)); in at_context_queue_packet()
1370 header[1] = cpu_to_le32((packet->header[1] & 0xffff) | in at_context_queue_packet()
1371 (packet->header[0] & 0xffff0000)); in at_context_queue_packet()
1372 header[2] = cpu_to_le32(packet->header[2]); in at_context_queue_packet()
1375 header[3] = cpu_to_le32(packet->header[3]); in at_context_queue_packet()
1377 header[3] = (__force __le32) packet->header[3]; in at_context_queue_packet()
1379 d[0].req_count = cpu_to_le16(packet->header_length); in at_context_queue_packet()
1384 (packet->speed << 16)); in at_context_queue_packet()
1385 header[1] = cpu_to_le32(packet->header[1]); in at_context_queue_packet()
1386 header[2] = cpu_to_le32(packet->header[2]); in at_context_queue_packet()
1389 if (is_ping_packet(&packet->header[1])) in at_context_queue_packet()
1394 header[0] = cpu_to_le32((packet->header[0] & 0xffff) | in at_context_queue_packet()
1395 (packet->speed << 16)); in at_context_queue_packet()
1396 header[1] = cpu_to_le32(packet->header[0] & 0xffff0000); in at_context_queue_packet()
1402 packet->ack = RCODE_SEND_ERROR; in at_context_queue_packet()
1403 return -1; in at_context_queue_packet()
1408 driver_data->packet = packet; in at_context_queue_packet()
1409 packet->driver_data = driver_data; in at_context_queue_packet()
1411 if (packet->payload_length > 0) { in at_context_queue_packet()
1412 if (packet->payload_length > sizeof(driver_data->inline_data)) { in at_context_queue_packet()
1413 payload_bus = dma_map_single(ohci->card.device, in at_context_queue_packet()
1414 packet->payload, in at_context_queue_packet()
1415 packet->payload_length, in at_context_queue_packet()
1417 if (dma_mapping_error(ohci->card.device, payload_bus)) { in at_context_queue_packet()
1418 packet->ack = RCODE_SEND_ERROR; in at_context_queue_packet()
1419 return -1; in at_context_queue_packet()
1421 packet->payload_bus = payload_bus; in at_context_queue_packet()
1422 packet->payload_mapped = true; in at_context_queue_packet()
1424 memcpy(driver_data->inline_data, packet->payload, in at_context_queue_packet()
1425 packet->payload_length); in at_context_queue_packet()
1429 d[2].req_count = cpu_to_le16(packet->payload_length); in at_context_queue_packet()
1438 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | in at_context_queue_packet()
1443 if (ohci->generation != packet->generation) { in at_context_queue_packet()
1444 if (packet->payload_mapped) in at_context_queue_packet()
1445 dma_unmap_single(ohci->card.device, payload_bus, in at_context_queue_packet()
1446 packet->payload_length, DMA_TO_DEVICE); in at_context_queue_packet()
1447 packet->ack = RCODE_GENERATION; in at_context_queue_packet()
1448 return -1; in at_context_queue_packet()
1451 context_append(ctx, d, z, 4 - z); in at_context_queue_packet()
1453 if (ctx->running) in at_context_queue_packet()
1454 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); in at_context_queue_packet()
1463 tasklet_disable(&ctx->tasklet); in at_context_flush()
1465 ctx->flushing = true; in at_context_flush()
1467 ctx->flushing = false; in at_context_flush()
1469 tasklet_enable(&ctx->tasklet); in at_context_flush()
1478 struct fw_ohci *ohci = context->ohci; in handle_at_packet()
1481 if (last->transfer_status == 0 && !context->flushing) in handle_at_packet()
1486 packet = driver_data->packet; in handle_at_packet()
1491 if (packet->payload_mapped) in handle_at_packet()
1492 dma_unmap_single(ohci->card.device, packet->payload_bus, in handle_at_packet()
1493 packet->payload_length, DMA_TO_DEVICE); in handle_at_packet()
1495 evt = le16_to_cpu(last->transfer_status) & 0x1f; in handle_at_packet()
1496 packet->timestamp = le16_to_cpu(last->res_count); in handle_at_packet()
1498 log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt); in handle_at_packet()
1503 packet->ack = RCODE_CANCELLED; in handle_at_packet()
1511 packet->ack = RCODE_GENERATION; in handle_at_packet()
1515 if (context->flushing) in handle_at_packet()
1516 packet->ack = RCODE_GENERATION; in handle_at_packet()
1522 packet->ack = RCODE_NO_ACK; in handle_at_packet()
1533 packet->ack = evt - 0x10; in handle_at_packet()
1537 if (context->flushing) { in handle_at_packet()
1538 packet->ack = RCODE_GENERATION; in handle_at_packet()
1544 packet->ack = RCODE_SEND_ERROR; in handle_at_packet()
1548 packet->callback(packet, &ohci->card, packet->ack); in handle_at_packet()
1567 tcode = HEADER_GET_TCODE(packet->header[0]); in handle_local_rom()
1569 length = HEADER_GET_DATA_LENGTH(packet->header[3]); in handle_local_rom()
1573 i = csr - CSR_CONFIG_ROM; in handle_local_rom()
1575 fw_fill_response(&response, packet->header, in handle_local_rom()
1578 fw_fill_response(&response, packet->header, in handle_local_rom()
1581 fw_fill_response(&response, packet->header, RCODE_COMPLETE, in handle_local_rom()
1582 (void *) ohci->config_rom + i, length); in handle_local_rom()
1587 fw_core_handle_response(&ohci->card, &response); in handle_local_rom()
1598 tcode = HEADER_GET_TCODE(packet->header[0]); in handle_local_lock()
1599 length = HEADER_GET_DATA_LENGTH(packet->header[3]); in handle_local_lock()
1600 payload = packet->payload; in handle_local_lock()
1601 ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]); in handle_local_lock()
1611 fw_fill_response(&response, packet->header, in handle_local_lock()
1616 sel = (csr - CSR_BUS_MANAGER_ID) / 4; in handle_local_lock()
1625 fw_fill_response(&response, packet->header, in handle_local_lock()
1631 ohci_err(ohci, "swap not done (CSR lock timeout)\n"); in handle_local_lock()
1632 fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); in handle_local_lock()
1637 fw_core_handle_response(&ohci->card, &response); in handle_local_lock()
1644 if (ctx == &ctx->ohci->at_request_ctx) { in handle_local_request()
1645 packet->ack = ACK_PENDING; in handle_local_request()
1646 packet->callback(packet, &ctx->ohci->card, packet->ack); in handle_local_request()
1651 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) | in handle_local_request()
1652 packet->header[2]; in handle_local_request()
1653 csr = offset - CSR_REGISTER_BASE; in handle_local_request()
1657 handle_local_rom(ctx->ohci, packet, csr); in handle_local_request()
1663 handle_local_lock(ctx->ohci, packet, csr); in handle_local_request()
1666 if (ctx == &ctx->ohci->at_request_ctx) in handle_local_request()
1667 fw_core_handle_request(&ctx->ohci->card, packet); in handle_local_request()
1669 fw_core_handle_response(&ctx->ohci->card, packet); in handle_local_request()
1673 if (ctx == &ctx->ohci->at_response_ctx) { in handle_local_request()
1674 packet->ack = ACK_COMPLETE; in handle_local_request()
1675 packet->callback(packet, &ctx->ohci->card, packet->ack); in handle_local_request()
1684 spin_lock_irqsave(&ctx->ohci->lock, flags); in at_context_transmit()
1686 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id && in at_context_transmit()
1687 ctx->ohci->generation == packet->generation) { in at_context_transmit()
1688 spin_unlock_irqrestore(&ctx->ohci->lock, flags); in at_context_transmit()
1691 packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci)); in at_context_transmit()
1698 spin_unlock_irqrestore(&ctx->ohci->lock, flags); in at_context_transmit()
1702 packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci)); in at_context_transmit()
1704 packet->callback(packet, &ctx->ohci->card, packet->ack); in at_context_transmit()
1729 if (!(ohci->it_context_support & (1 << i))) in handle_dead_contexts()
1735 if (!(ohci->ir_context_support & (1 << i))) in handle_dead_contexts()
1757 * - When the lowest six bits are wrapping around to zero, a read that happens
1759 * - When the cycleOffset field wraps around to zero, the cycleCount field is
1760 * not incremented for about 60 ns.
1761 * - Occasionally, the entire register reads zero.
1767 * execute, so we have enough precision to compute the ratio of the differences.)
1781 if (ohci->quirks & QUIRK_CYCLE_TIMER) { in get_cycle_time()
1792 diff01 = t1 - t0; in get_cycle_time()
1793 diff12 = t2 - t1; in get_cycle_time()
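To dodge the bugs listed above, get_cycle_time() reads the register three times on QUIRK_CYCLE_TIMER hardware and only trusts the middle value when both gaps are positive and neither is at least twice the other. A standalone sketch of that plausibility check (the fake reader just ticks a counter; the real driver first converts the seconds/count/offset fields into ticks):

#include <stdint.h>
#include <stdio.h>

static uint32_t read_cycle_timer_raw(void)
{
        static uint32_t fake = 1000;    /* stand-in for the MMIO read */
        return fake += 25;
}

static uint32_t get_cycle_time_checked(void)
{
        uint32_t t0 = 0, t1 = 0, t2 = 0;
        int32_t diff01, diff12;
        int i;

        for (i = 0; i < 5; i++) {       /* bounded retries */
                t0 = read_cycle_timer_raw();
                t1 = read_cycle_timer_raw();
                t2 = read_cycle_timer_raw();
                diff01 = t1 - t0;
                diff12 = t2 - t1;
                if (diff01 > 0 && diff12 > 0 &&
                    diff01 < 2 * diff12 && diff12 < 2 * diff01)
                        return t1;      /* consistent: accept middle read */
        }
        return t1;                      /* give up, return the last one */
}

int main(void)
{
        printf("cycle time %u\n", (unsigned)get_cycle_time_checked());
        return 0;
}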
1805 * the most significant bit of the cycle timer in bit 6 so that we can detect
1812 if (unlikely(!ohci->bus_time_running)) { in update_bus_time()
1814 ohci->bus_time = (lower_32_bits(ktime_get_seconds()) & ~0x7f) | in update_bus_time()
1816 ohci->bus_time_running = true; in update_bus_time()
1819 if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40)) in update_bus_time()
1820 ohci->bus_time += 0x40; in update_bus_time()
1822 return ohci->bus_time | cycle_time_seconds; in update_bus_time()
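update_bus_time() widens the hardware's 7-bit seconds counter into a 32-bit bus time kept in software: bit 6 acts as a phase bit, and whenever the hardware's bit 6 disagrees with the stored one the software part advances by 0x40. This works as long as the function runs at least once per 64 seconds. The core of it, extracted into a sketch:

#include <stdint.h>
#include <stdio.h>

static uint32_t bus_time;       /* upper bits maintained in software */

/* cycle_time_seconds: the 7-bit seconds field read from hardware. */
static uint32_t update_bus_time_sketch(uint32_t cycle_time_seconds)
{
        /* A flipped bit 6 means the 7-bit counter passed 0x40 or 0. */
        if ((bus_time & 0x40) != (cycle_time_seconds & 0x40))
                bus_time += 0x40;
        return bus_time | cycle_time_seconds;
}

int main(void)
{
        uint32_t s;

        bus_time = 0x1000;      /* pretend we are somewhere mid-run */
        for (s = 0x3e; s <= 0x42; s++)
                printf("hw %#04x -> bus time %#06x\n",
                       (unsigned)(s & 0x7f),
                       (unsigned)update_bus_time_sketch(s & 0x7f));
        return 0;
}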
1829 mutex_lock(&ohci->phy_reg_mutex); in get_status_for_port()
1833 mutex_unlock(&ohci->phy_reg_mutex); in get_status_for_port()
1853 entry = ohci->self_id_buffer[i]; in get_self_id_pos()
1855 return -1; in get_self_id_pos()
1867 mutex_lock(&ohci->phy_reg_mutex); in initiated_reset()
1883 mutex_unlock(&ohci->phy_reg_mutex); in initiated_reset()
1902 return -EBUSY; in find_and_insert_self_id()
1906 reg = ohci_read_phy_reg(&ohci->card, 4); in find_and_insert_self_id()
1911 reg = ohci_read_phy_reg(&ohci->card, 1); in find_and_insert_self_id()
1920 self_id |= ((status & 0x3) << (6 - (i * 2))); in find_and_insert_self_id()
1927 memmove(&(ohci->self_id_buffer[pos+1]), in find_and_insert_self_id()
1928 &(ohci->self_id_buffer[pos]), in find_and_insert_self_id()
1929 (self_id_count - pos) * sizeof(*ohci->self_id_buffer)); in find_and_insert_self_id()
1930 ohci->self_id_buffer[pos] = self_id; in find_and_insert_self_id()
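find_and_insert_self_id() fabricates a self-ID for a TSB41BA3D phy that failed to transmit one: the three 2-bit port states are packed at bit offsets 6, 4 and 2, and the quadlet is slotted into the buffer, which stays sorted by phy ID, with a memmove(). A sketch of the packing and the sorted insert (all quadlet values made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_IDS 16

static uint32_t buf[MAX_IDS];

/* Open a hole at `pos` and drop the new quadlet in. */
static int insert_self_id(int count, int pos, uint32_t self_id)
{
        memmove(&buf[pos + 1], &buf[pos], (count - pos) * sizeof(*buf));
        buf[pos] = self_id;
        return count + 1;
}

int main(void)
{
        uint32_t self_id = 0x80000000u | (1u << 24);  /* made-up phy 1 */
        unsigned int status = 2;        /* pretend each port says "parent" */
        int n = 2, i;

        buf[0] = 0x80000000u;                   /* phy 0 */
        buf[1] = 0x80000000u | (2u << 24);      /* phy 2 */

        for (i = 0; i < 3; i++)
                self_id |= (status & 0x3) << (6 - i * 2);

        n = insert_self_id(n, 1, self_id);      /* keep sorted by phy ID */
        for (i = 0; i < n; i++)
                printf("self_id[%d] = %#010x\n", i, (unsigned)buf[i]);
        return 0;
}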
1956 ohci->node_id = reg & (OHCI1394_NodeID_busNumber | in bus_reset_work()
1960 if (!(ohci->is_root && is_new_root)) in bus_reset_work()
1963 ohci->is_root = is_new_root; in bus_reset_work()
1983 generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff; in bus_reset_work()
1987 u32 id = cond_le32_to_cpu(ohci->self_id[i]); in bus_reset_work()
1988 u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]); in bus_reset_work()
2008 ohci->self_id_buffer[j] = id; in bus_reset_work()
2011 if (ohci->quirks & QUIRK_TI_SLLZ059) { in bus_reset_work()
2047 spin_lock_irq(&ohci->lock); in bus_reset_work()
2049 ohci->generation = -1; /* prevent AT packet queueing */ in bus_reset_work()
2050 context_stop(&ohci->at_request_ctx); in bus_reset_work()
2051 context_stop(&ohci->at_response_ctx); in bus_reset_work()
2053 spin_unlock_irq(&ohci->lock); in bus_reset_work()
2060 at_context_flush(&ohci->at_request_ctx); in bus_reset_work()
2061 at_context_flush(&ohci->at_response_ctx); in bus_reset_work()
2063 spin_lock_irq(&ohci->lock); in bus_reset_work()
2065 ohci->generation = generation; in bus_reset_work()
2070 if (ohci->quirks & QUIRK_RESET_PACKET) in bus_reset_work()
2071 ohci->request_generation = generation; in bus_reset_work()
2082 if (ohci->next_config_rom != NULL) { in bus_reset_work()
2083 if (ohci->next_config_rom != ohci->config_rom) { in bus_reset_work()
2084 free_rom = ohci->config_rom; in bus_reset_work()
2085 free_rom_bus = ohci->config_rom_bus; in bus_reset_work()
2087 ohci->config_rom = ohci->next_config_rom; in bus_reset_work()
2088 ohci->config_rom_bus = ohci->next_config_rom_bus; in bus_reset_work()
2089 ohci->next_config_rom = NULL; in bus_reset_work()
2098 be32_to_cpu(ohci->config_rom[2])); in bus_reset_work()
2099 ohci->config_rom[0] = ohci->next_header; in bus_reset_work()
2101 be32_to_cpu(ohci->next_header)); in bus_reset_work()
2109 spin_unlock_irq(&ohci->lock); in bus_reset_work()
2112 dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus); in bus_reset_work()
2116 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation, in bus_reset_work()
2117 self_id_count, ohci->self_id_buffer, in bus_reset_work()
2118 ohci->csr_state_setclear_abdicate); in bus_reset_work()
2119 ohci->csr_state_setclear_abdicate = false; in bus_reset_work()
2144 queue_work(selfid_workqueue, &ohci->bus_reset_work); in irq_handler()
2147 tasklet_schedule(&ohci->ar_request_ctx.tasklet); in irq_handler()
2150 tasklet_schedule(&ohci->ar_response_ctx.tasklet); in irq_handler()
2153 tasklet_schedule(&ohci->at_request_ctx.tasklet); in irq_handler()
2156 tasklet_schedule(&ohci->at_response_ctx.tasklet); in irq_handler()
2163 i = ffs(iso_event) - 1; in irq_handler()
2165 &ohci->ir_context_list[i].context.tasklet); in irq_handler()
2175 i = ffs(iso_event) - 1; in irq_handler()
2177 &ohci->it_context_list[i].context.tasklet); in irq_handler()
2216 spin_lock(&ohci->lock); in irq_handler()
2218 spin_unlock(&ohci->lock); in irq_handler()
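The isochronous dispatch in the interrupt handler above is the standard "iterate over set bits" loop: ffs(iso_event) - 1 yields the index of the lowest pending context, whose tasklet gets scheduled, and that bit is cleared before looking again. In isolation (using the userspace ffs() from strings.h):

#include <stdio.h>
#include <strings.h>    /* ffs() */

static void schedule_context(int i)
{
        printf("scheduling context %d\n", i);
}

int main(void)
{
        unsigned int iso_event = 0x25;  /* contexts 0, 2 and 5 pending */

        while (iso_event) {
                int i = ffs(iso_event) - 1;     /* lowest set bit */

                schedule_context(i);
                iso_event &= iso_event - 1;     /* clear that bit */
        }
        return 0;
}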
2234 return -ENODEV; /* Card was ejected. */ in software_reset()
2242 return -EBUSY; in software_reset()
2251 memset(&dest[length], 0, CONFIG_ROM_SIZE - size); in copy_config_rom()
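copy_config_rom() copies `length` quadlets and zero-fills the rest of the fixed-size ROM buffer, so stale quadlets from a previous configuration never appear on the bus. Its logic, reconstructed as a standalone sketch (the driver works on __be32 quadlets and CONFIG_ROM_SIZE is 1024 bytes there):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CONFIG_ROM_SIZE 1024

static void copy_config_rom(uint32_t *dest, const uint32_t *src,
                            size_t length /* in quadlets */)
{
        size_t size = length * 4;

        memcpy(dest, src, size);
        if (size < CONFIG_ROM_SIZE)
                memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
}

int main(void)
{
        uint32_t rom[CONFIG_ROM_SIZE / 4];
        uint32_t src[2] = { 0x04041234, 0x31333934 };

        copy_config_rom(rom, src, 2);
        printf("rom[2] = %u (zero-padded)\n", (unsigned)rom[2]);
        return 0;
}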
2277 if (ohci->quirks & QUIRK_NO_1394A) in configure_1394a_enhancements()
2317 for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) { in probe_tsb41ba3d()
2341 * Now enable LPS, which we need in order to start accessing in ohci_enable()
2344 * will lock up the machine. Wait 50msec to make sure we have in ohci_enable()
2366 return -EIO; in ohci_enable()
2369 if (ohci->quirks & QUIRK_TI_SLLZ059) { in ohci_enable()
2376 ohci->quirks &= ~QUIRK_TI_SLLZ059; in ohci_enable()
2382 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus); in ohci_enable()
2393 ohci->bus_time_running = false; in ohci_enable()
2396 if (ohci->ir_context_support & (1 << i)) in ohci_enable()
2404 card->broadcast_channel_auto_allocated = true; in ohci_enable()
2409 ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f; in ohci_enable()
2411 card->priority_budget_implemented = ohci->pri_req_max != 0; in ohci_enable()
2432 * link, so we have a valid config rom before enabling - the in ohci_enable()
2446 ohci->next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, in ohci_enable()
2447 &ohci->next_config_rom_bus, GFP_KERNEL); in ohci_enable()
2448 if (ohci->next_config_rom == NULL) in ohci_enable()
2449 return -ENOMEM; in ohci_enable()
2451 copy_config_rom(ohci->next_config_rom, config_rom, length); in ohci_enable()
2457 ohci->next_config_rom = ohci->config_rom; in ohci_enable()
2458 ohci->next_config_rom_bus = ohci->config_rom_bus; in ohci_enable()
2461 ohci->next_header = ohci->next_config_rom[0]; in ohci_enable()
2462 ohci->next_config_rom[0] = 0; in ohci_enable()
2465 be32_to_cpu(ohci->next_config_rom[2])); in ohci_enable()
2466 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); in ohci_enable()
2492 ar_context_run(&ohci->ar_request_ctx); in ohci_enable()
2493 ar_context_run(&ohci->ar_response_ctx); in ohci_enable()
2498 fw_schedule_bus_reset(&ohci->card, false, true); in ohci_enable()
2515 * section 5.5.6 in the OHCI specification. in ohci_set_config_rom()
2535 * We use ohci->lock to avoid racing with the code that sets in ohci_set_config_rom()
2536 * ohci->next_config_rom to NULL (see bus_reset_work). in ohci_set_config_rom()
2539 next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE, in ohci_set_config_rom()
2542 return -ENOMEM; in ohci_set_config_rom()
2544 spin_lock_irq(&ohci->lock); in ohci_set_config_rom()
2548 * push our new allocation into the ohci->next_config_rom in ohci_set_config_rom()
2557 if (ohci->next_config_rom == NULL) { in ohci_set_config_rom()
2558 ohci->next_config_rom = next_config_rom; in ohci_set_config_rom()
2559 ohci->next_config_rom_bus = next_config_rom_bus; in ohci_set_config_rom()
2563 copy_config_rom(ohci->next_config_rom, config_rom, length); in ohci_set_config_rom()
2565 ohci->next_header = config_rom[0]; in ohci_set_config_rom()
2566 ohci->next_config_rom[0] = 0; in ohci_set_config_rom()
2568 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus); in ohci_set_config_rom()
2570 spin_unlock_irq(&ohci->lock); in ohci_set_config_rom()
2574 dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, next_config_rom, in ohci_set_config_rom()
2586 fw_schedule_bus_reset(&ohci->card, true, true); in ohci_set_config_rom()
2595 at_context_transmit(&ohci->at_request_ctx, packet); in ohci_send_request()
2602 at_context_transmit(&ohci->at_response_ctx, packet); in ohci_send_response()
2608 struct context *ctx = &ohci->at_request_ctx; in ohci_cancel_packet()
2609 struct driver_data *driver_data = packet->driver_data; in ohci_cancel_packet()
2610 int ret = -ENOENT; in ohci_cancel_packet()
2612 tasklet_disable_in_atomic(&ctx->tasklet); in ohci_cancel_packet()
2614 if (packet->ack != 0) in ohci_cancel_packet()
2617 if (packet->payload_mapped) in ohci_cancel_packet()
2618 dma_unmap_single(ohci->card.device, packet->payload_bus, in ohci_cancel_packet()
2619 packet->payload_length, DMA_TO_DEVICE); in ohci_cancel_packet()
2621 log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20); in ohci_cancel_packet()
2622 driver_data->packet = NULL; in ohci_cancel_packet()
2623 packet->ack = RCODE_CANCELLED; in ohci_cancel_packet()
2626 packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci)); in ohci_cancel_packet()
2628 packet->callback(packet, &ohci->card, packet->ack); in ohci_cancel_packet()
2631 tasklet_enable(&ctx->tasklet); in ohci_cancel_packet()
2651 spin_lock_irqsave(&ohci->lock, flags); in ohci_enable_phys_dma()
2653 if (ohci->generation != generation) { in ohci_enable_phys_dma()
2654 ret = -ESTALE; in ohci_enable_phys_dma()
2659 * Note, if the node ID contains a non-local bus ID, physical DMA is in ohci_enable_phys_dma()
2667 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32)); in ohci_enable_phys_dma()
2671 spin_unlock_irqrestore(&ohci->lock, flags); in ohci_enable_phys_dma()
2685 if (ohci->is_root && in ohci_read_csr()
2691 if (ohci->csr_state_setclear_abdicate) in ohci_read_csr()
2708 spin_lock_irqsave(&ohci->lock, flags); in ohci_read_csr()
2710 spin_unlock_irqrestore(&ohci->lock, flags); in ohci_read_csr()
2719 (ohci->pri_req_max << 8); in ohci_read_csr()
2734 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { in ohci_write_csr()
2740 ohci->csr_state_setclear_abdicate = false; in ohci_write_csr()
2744 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) { in ohci_write_csr()
2750 ohci->csr_state_setclear_abdicate = true; in ohci_write_csr()
2766 spin_lock_irqsave(&ohci->lock, flags); in ohci_write_csr()
2767 ohci->bus_time = (update_bus_time(ohci) & 0x40) | in ohci_write_csr()
2769 spin_unlock_irqrestore(&ohci->lock, flags); in ohci_write_csr()
2792 ctx->base.callback.sc(&ctx->base, ctx->last_timestamp, in flush_iso_completions()
2793 ctx->header_length, ctx->header, in flush_iso_completions()
2794 ctx->base.callback_data); in flush_iso_completions()
2795 ctx->header_length = 0; in flush_iso_completions()
2802 if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) { in copy_iso_headers()
2803 if (ctx->base.drop_overflow_headers) in copy_iso_headers()
2808 ctx_hdr = ctx->header + ctx->header_length; in copy_iso_headers()
2809 ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]); in copy_iso_headers()
2816 if (ctx->base.header_size > 0) in copy_iso_headers()
2818 if (ctx->base.header_size > 4) in copy_iso_headers()
2820 if (ctx->base.header_size > 8) in copy_iso_headers()
2821 memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8); in copy_iso_headers()
2822 ctx->header_length += ctx->base.header_size; in copy_iso_headers()
2835 if (pd->transfer_status) in handle_ir_packet_per_buffer()
2841 while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) { in handle_ir_packet_per_buffer()
2843 buffer_dma = le32_to_cpu(d->data_address); in handle_ir_packet_per_buffer()
2844 dma_sync_single_range_for_cpu(context->ohci->card.device, in handle_ir_packet_per_buffer()
2847 le16_to_cpu(d->req_count), in handle_ir_packet_per_buffer()
2853 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) in handle_ir_packet_per_buffer()
2869 req_count = le16_to_cpu(last->req_count); in handle_ir_buffer_fill()
2870 res_count = le16_to_cpu(READ_ONCE(last->res_count)); in handle_ir_buffer_fill()
2871 completed = req_count - res_count; in handle_ir_buffer_fill()
2872 buffer_dma = le32_to_cpu(last->data_address); in handle_ir_buffer_fill()
2875 ctx->mc_buffer_bus = buffer_dma; in handle_ir_buffer_fill()
2876 ctx->mc_completed = completed; in handle_ir_buffer_fill()
2883 dma_sync_single_range_for_cpu(context->ohci->card.device, in handle_ir_buffer_fill()
2888 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) { in handle_ir_buffer_fill()
2889 ctx->base.callback.mc(&ctx->base, in handle_ir_buffer_fill()
2891 ctx->base.callback_data); in handle_ir_buffer_fill()
2892 ctx->mc_completed = 0; in handle_ir_buffer_fill()
2900 dma_sync_single_range_for_cpu(ctx->context.ohci->card.device, in flush_ir_buffer_fill()
2901 ctx->mc_buffer_bus & PAGE_MASK, in flush_ir_buffer_fill()
2902 ctx->mc_buffer_bus & ~PAGE_MASK, in flush_ir_buffer_fill()
2903 ctx->mc_completed, DMA_FROM_DEVICE); in flush_ir_buffer_fill()
2905 ctx->base.callback.mc(&ctx->base, in flush_ir_buffer_fill()
2906 ctx->mc_buffer_bus + ctx->mc_completed, in flush_ir_buffer_fill()
2907 ctx->base.callback_data); in flush_ir_buffer_fill()
2908 ctx->mc_completed = 0; in flush_ir_buffer_fill()
2918 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) in sync_it_packet_for_cpu()
2929 if ((le32_to_cpu(pd->data_address) & PAGE_MASK) == in sync_it_packet_for_cpu()
2930 (context->current_bus & PAGE_MASK)) { in sync_it_packet_for_cpu()
2931 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)) in sync_it_packet_for_cpu()
2937 buffer_dma = le32_to_cpu(pd->data_address); in sync_it_packet_for_cpu()
2938 dma_sync_single_range_for_cpu(context->ohci->card.device, in sync_it_packet_for_cpu()
2941 le16_to_cpu(pd->req_count), in sync_it_packet_for_cpu()
2943 control = pd->control; in sync_it_packet_for_cpu()
2958 if (pd->transfer_status) in handle_it_packet()
2966 if (ctx->header_length + 4 > PAGE_SIZE) { in handle_it_packet()
2967 if (ctx->base.drop_overflow_headers) in handle_it_packet()
2972 ctx_hdr = ctx->header + ctx->header_length; in handle_it_packet()
2973 ctx->last_timestamp = le16_to_cpu(last->res_count); in handle_it_packet()
2974 /* Present this value as big-endian to match the receive code */ in handle_it_packet()
2975 *ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) | in handle_it_packet()
2976 le16_to_cpu(pd->res_count)); in handle_it_packet()
2977 ctx->header_length += 4; in handle_it_packet()
2979 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) in handle_it_packet()
2993 ohci->mc_channels = channels; in set_multichannel_mask()
3004 int index, ret = -EBUSY; in ohci_allocate_iso_context()
3006 spin_lock_irq(&ohci->lock); in ohci_allocate_iso_context()
3010 mask = &ohci->it_context_mask; in ohci_allocate_iso_context()
3012 index = ffs(*mask) - 1; in ohci_allocate_iso_context()
3016 ctx = &ohci->it_context_list[index]; in ohci_allocate_iso_context()
3021 channels = &ohci->ir_context_channels; in ohci_allocate_iso_context()
3022 mask = &ohci->ir_context_mask; in ohci_allocate_iso_context()
3024 index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1; in ohci_allocate_iso_context()
3029 ctx = &ohci->ir_context_list[index]; in ohci_allocate_iso_context()
3034 mask = &ohci->ir_context_mask; in ohci_allocate_iso_context()
3036 index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1; in ohci_allocate_iso_context()
3038 ohci->mc_allocated = true; in ohci_allocate_iso_context()
3041 ctx = &ohci->ir_context_list[index]; in ohci_allocate_iso_context()
3046 index = -1; in ohci_allocate_iso_context()
3047 ret = -ENOSYS; in ohci_allocate_iso_context()
3050 spin_unlock_irq(&ohci->lock); in ohci_allocate_iso_context()
3056 ctx->header_length = 0; in ohci_allocate_iso_context()
3057 ctx->header = (void *) __get_free_page(GFP_KERNEL); in ohci_allocate_iso_context()
3058 if (ctx->header == NULL) { in ohci_allocate_iso_context()
3059 ret = -ENOMEM; in ohci_allocate_iso_context()
3062 ret = context_init(&ctx->context, ohci, regs, callback); in ohci_allocate_iso_context()
3068 ctx->mc_completed = 0; in ohci_allocate_iso_context()
3071 return &ctx->base; in ohci_allocate_iso_context()
3074 free_page((unsigned long)ctx->header); in ohci_allocate_iso_context()
3076 spin_lock_irq(&ohci->lock); in ohci_allocate_iso_context()
3084 ohci->mc_allocated = false; in ohci_allocate_iso_context()
3089 spin_unlock_irq(&ohci->lock); in ohci_allocate_iso_context()
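Context indexes are handed out from a bitmask while holding ohci->lock: ffs(*mask) - 1 picks the lowest free slot (or -1 if none), the bit is cleared on allocation, and ohci_free_iso_context() sets it again. The bookkeeping alone, as a single-threaded sketch:

#include <stdio.h>
#include <strings.h>    /* ffs() */

static unsigned int context_mask = 0xf;         /* four free contexts */

static int alloc_context(void)
{
        int index = ffs(context_mask) - 1;      /* -1 when mask is empty */

        if (index >= 0)
                context_mask &= ~(1u << index);
        return index;
}

static void free_context(int index)
{
        context_mask |= 1u << index;
}

int main(void)
{
        int a = alloc_context(), b = alloc_context();

        printf("got contexts %d and %d\n", a, b);
        free_context(a);
        printf("next allocation: %d\n", alloc_context());  /* reuses a */
        return 0;
}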
3098 struct fw_ohci *ohci = ctx->context.ohci; in ohci_start_iso()
3103 if (ctx->context.last->branch_address == 0) in ohci_start_iso()
3104 return -ENODATA; in ohci_start_iso()
3106 switch (ctx->base.type) { in ohci_start_iso()
3108 index = ctx - ohci->it_context_list; in ohci_start_iso()
3116 context_run(&ctx->context, match); in ohci_start_iso()
3123 index = ctx - ohci->ir_context_list; in ohci_start_iso()
3124 match = (tags << 28) | (sync << 8) | ctx->base.channel; in ohci_start_iso()
3132 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); in ohci_start_iso()
3133 context_run(&ctx->context, control); in ohci_start_iso()
3135 ctx->sync = sync; in ohci_start_iso()
3136 ctx->tags = tags; in ohci_start_iso()
3146 struct fw_ohci *ohci = fw_ohci(base->card); in ohci_stop_iso()
3150 switch (ctx->base.type) { in ohci_stop_iso()
3152 index = ctx - ohci->it_context_list; in ohci_stop_iso()
3158 index = ctx - ohci->ir_context_list; in ohci_stop_iso()
3163 context_stop(&ctx->context); in ohci_stop_iso()
3164 tasklet_kill(&ctx->context.tasklet); in ohci_stop_iso()
3171 struct fw_ohci *ohci = fw_ohci(base->card); in ohci_free_iso_context()
3177 context_release(&ctx->context); in ohci_free_iso_context()
3178 free_page((unsigned long)ctx->header); in ohci_free_iso_context()
3180 spin_lock_irqsave(&ohci->lock, flags); in ohci_free_iso_context()
3182 switch (base->type) { in ohci_free_iso_context()
3184 index = ctx - ohci->it_context_list; in ohci_free_iso_context()
3185 ohci->it_context_mask |= 1 << index; in ohci_free_iso_context()
3189 index = ctx - ohci->ir_context_list; in ohci_free_iso_context()
3190 ohci->ir_context_mask |= 1 << index; in ohci_free_iso_context()
3191 ohci->ir_context_channels |= 1ULL << base->channel; in ohci_free_iso_context()
3195 index = ctx - ohci->ir_context_list; in ohci_free_iso_context()
3196 ohci->ir_context_mask |= 1 << index; in ohci_free_iso_context()
3197 ohci->ir_context_channels |= ohci->mc_channels; in ohci_free_iso_context()
3198 ohci->mc_channels = 0; in ohci_free_iso_context()
3199 ohci->mc_allocated = false; in ohci_free_iso_context()
3203 spin_unlock_irqrestore(&ohci->lock, flags); in ohci_free_iso_context()
3208 struct fw_ohci *ohci = fw_ohci(base->card); in ohci_set_iso_channels()
3212 switch (base->type) { in ohci_set_iso_channels()
3215 spin_lock_irqsave(&ohci->lock, flags); in ohci_set_iso_channels()
3218 if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) { in ohci_set_iso_channels()
3219 *channels = ohci->ir_context_channels; in ohci_set_iso_channels()
3220 ret = -EBUSY; in ohci_set_iso_channels()
3226 spin_unlock_irqrestore(&ohci->lock, flags); in ohci_set_iso_channels()
3230 ret = -EINVAL; in ohci_set_iso_channels()
3242 for (i = 0 ; i < ohci->n_ir ; i++) { in ohci_resume_iso_dma()
3243 ctx = &ohci->ir_context_list[i]; in ohci_resume_iso_dma()
3244 if (ctx->context.running) in ohci_resume_iso_dma()
3245 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); in ohci_resume_iso_dma()
3248 for (i = 0 ; i < ohci->n_it ; i++) { in ohci_resume_iso_dma()
3249 ctx = &ohci->it_context_list[i]; in ohci_resume_iso_dma()
3250 if (ctx->context.running) in ohci_resume_iso_dma()
3251 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); in ohci_resume_iso_dma()
3272 if (p->skip) in queue_iso_transmit()
3276 if (p->header_length > 0) in queue_iso_transmit()
3280 end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT; in queue_iso_transmit()
3281 if (p->payload_length > 0) in queue_iso_transmit()
3282 payload_z = end_page - (payload_index >> PAGE_SHIFT); in queue_iso_transmit()
3289 header_z = DIV_ROUND_UP(p->header_length, sizeof(*d)); in queue_iso_transmit()
3291 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus); in queue_iso_transmit()
3293 return -ENOMEM; in queue_iso_transmit()
3295 if (!p->skip) { in queue_iso_transmit()
3303 * FIXME: Make the context's cycle-lost behaviour configurable? in queue_iso_transmit()
3308 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) | in queue_iso_transmit()
3309 IT_HEADER_TAG(p->tag) | in queue_iso_transmit()
3311 IT_HEADER_CHANNEL(ctx->base.channel) | in queue_iso_transmit()
3312 IT_HEADER_SPEED(ctx->base.speed)); in queue_iso_transmit()
3314 cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length + in queue_iso_transmit()
3315 p->payload_length)); in queue_iso_transmit()
3318 if (p->header_length > 0) { in queue_iso_transmit()
3319 d[2].req_count = cpu_to_le16(p->header_length); in queue_iso_transmit()
3321 memcpy(&d[z], p->header, p->header_length); in queue_iso_transmit()
3324 pd = d + z - payload_z; in queue_iso_transmit()
3325 payload_end_index = payload_index + p->payload_length; in queue_iso_transmit()
3331 min(next_page_index, payload_end_index) - payload_index; in queue_iso_transmit()
3334 page_bus = page_private(buffer->pages[page]); in queue_iso_transmit()
3337 dma_sync_single_range_for_device(ctx->context.ohci->card.device, in queue_iso_transmit()
3344 if (p->interrupt) in queue_iso_transmit()
3349 last = z == 2 ? d : d + z - 1; in queue_iso_transmit()
3350 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST | in queue_iso_transmit()
3355 context_append(&ctx->context, d, z, header_z); in queue_iso_transmit()
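queue_iso_transmit() cuts the payload into one descriptor per page: each chunk runs from the current offset either to the end of its page or to the end of the payload, whichever comes first, which is exactly the min(next_page_index, payload_end_index) - payload_index expression above. The chunking arithmetic on its own (4 KB pages assumed):

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
        unsigned int payload_index = 4000;      /* offset into iso buffer */
        unsigned int payload_length = 9000;
        unsigned int payload_end_index = payload_index + payload_length;
        unsigned int next_page_index, length;

        while (payload_index < payload_end_index) {
                next_page_index =
                        (payload_index + PAGE_SIZE) & ~(PAGE_SIZE - 1);
                length = (next_page_index < payload_end_index ?
                          next_page_index : payload_end_index) - payload_index;

                printf("descriptor: offset %u, length %u\n",
                       payload_index, length);
                payload_index += length;
        }
        return 0;
}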
3365 struct device *device = ctx->context.ohci->card.device; in queue_iso_packet_per_buffer()
3376 packet_count = packet->header_length / ctx->base.header_size; in queue_iso_packet_per_buffer()
3377 header_size = max(ctx->base.header_size, (size_t)8); in queue_iso_packet_per_buffer()
3383 payload_per_buffer = packet->payload_length / packet_count; in queue_iso_packet_per_buffer()
3388 d = context_get_descriptors(&ctx->context, in queue_iso_packet_per_buffer()
3391 return -ENOMEM; in queue_iso_packet_per_buffer()
3393 d->control = cpu_to_le16(DESCRIPTOR_STATUS | in queue_iso_packet_per_buffer()
3395 if (packet->skip && i == 0) in queue_iso_packet_per_buffer()
3396 d->control |= cpu_to_le16(DESCRIPTOR_WAIT); in queue_iso_packet_per_buffer()
3397 d->req_count = cpu_to_le16(header_size); in queue_iso_packet_per_buffer()
3398 d->res_count = d->req_count; in queue_iso_packet_per_buffer()
3399 d->transfer_status = 0; in queue_iso_packet_per_buffer()
3400 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d))); in queue_iso_packet_per_buffer()
3406 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | in queue_iso_packet_per_buffer()
3412 length = PAGE_SIZE - offset; in queue_iso_packet_per_buffer()
3413 pd->req_count = cpu_to_le16(length); in queue_iso_packet_per_buffer()
3414 pd->res_count = pd->req_count; in queue_iso_packet_per_buffer()
3415 pd->transfer_status = 0; in queue_iso_packet_per_buffer()
3417 page_bus = page_private(buffer->pages[page]); in queue_iso_packet_per_buffer()
3418 pd->data_address = cpu_to_le32(page_bus + offset); in queue_iso_packet_per_buffer()
3425 rest -= length; in queue_iso_packet_per_buffer()
3429 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | in queue_iso_packet_per_buffer()
3432 if (packet->interrupt && i == packet_count - 1) in queue_iso_packet_per_buffer()
3433 pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); in queue_iso_packet_per_buffer()
3435 context_append(&ctx->context, d, z, header_z); in queue_iso_packet_per_buffer()
3452 rest = packet->payload_length; in queue_iso_buffer_fill()
3457 if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count)) in queue_iso_buffer_fill()
3458 return -EFAULT; in queue_iso_buffer_fill()
3461 d = context_get_descriptors(&ctx->context, 1, &d_bus); in queue_iso_buffer_fill()
3463 return -ENOMEM; in queue_iso_buffer_fill()
3465 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE | in queue_iso_buffer_fill()
3467 if (packet->skip && i == 0) in queue_iso_buffer_fill()
3468 d->control |= cpu_to_le16(DESCRIPTOR_WAIT); in queue_iso_buffer_fill()
3469 if (packet->interrupt && i == z - 1) in queue_iso_buffer_fill()
3470 d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS); in queue_iso_buffer_fill()
3475 length = PAGE_SIZE - offset; in queue_iso_buffer_fill()
3476 d->req_count = cpu_to_le16(length); in queue_iso_buffer_fill()
3477 d->res_count = d->req_count; in queue_iso_buffer_fill()
3478 d->transfer_status = 0; in queue_iso_buffer_fill()
3480 page_bus = page_private(buffer->pages[page]); in queue_iso_buffer_fill()
3481 d->data_address = cpu_to_le32(page_bus + offset); in queue_iso_buffer_fill()
3483 dma_sync_single_range_for_device(ctx->context.ohci->card.device, in queue_iso_buffer_fill()
3487 rest -= length; in queue_iso_buffer_fill()
3491 context_append(&ctx->context, d, 1, 0); in queue_iso_buffer_fill()
3504 int ret = -ENOSYS; in ohci_queue_iso()
3506 spin_lock_irqsave(&ctx->context.ohci->lock, flags); in ohci_queue_iso()
3507 switch (base->type) { in ohci_queue_iso()
3518 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); in ohci_queue_iso()
3526 &container_of(base, struct iso_context, base)->context; in ohci_flush_queue_iso()
3528 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); in ohci_flush_queue_iso()
3536 tasklet_disable_in_atomic(&ctx->context.tasklet); in ohci_flush_iso_completions()
3538 if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) { in ohci_flush_iso_completions()
3539 context_tasklet((unsigned long)&ctx->context); in ohci_flush_iso_completions()
3541 switch (base->type) { in ohci_flush_iso_completions()
3544 if (ctx->header_length != 0) in ohci_flush_iso_completions()
3548 if (ctx->mc_completed != 0) in ohci_flush_iso_completions()
3552 ret = -ENOSYS; in ohci_flush_iso_completions()
3555 clear_bit_unlock(0, &ctx->flushing_completions); in ohci_flush_iso_completions()
3559 tasklet_enable(&ctx->context.tasklet); in ohci_flush_iso_completions()
3565 .enable = ohci_enable,
3622 ar_context_release(&ohci->ar_response_ctx); in release_ohci()
3623 ar_context_release(&ohci->ar_request_ctx); in release_ohci()
3625 dev_notice(dev, "removed fw-ohci device\n"); in release_ohci()
3637 if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) { in pci_probe()
3638 dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n"); in pci_probe()
3639 return -ENOSYS; in pci_probe()
3644 return -ENOMEM; in pci_probe()
3645 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev); in pci_probe()
3648 devres_add(&dev->dev, ohci); in pci_probe()
3652 dev_err(&dev->dev, "failed to enable OHCI hardware\n"); in pci_probe()
3659 spin_lock_init(&ohci->lock); in pci_probe()
3660 mutex_init(&ohci->phy_reg_mutex); in pci_probe()
3662 INIT_WORK(&ohci->bus_reset_work, bus_reset_work); in pci_probe()
3667 return -ENXIO; in pci_probe()
3673 return -ENXIO; in pci_probe()
3675 ohci->registers = pcim_iomap_table(dev)[0]; in pci_probe()
3678 if ((ohci_quirks[i].vendor == dev->vendor) && in pci_probe()
3680 ohci_quirks[i].device == dev->device) && in pci_probe()
3682 ohci_quirks[i].revision >= dev->revision)) { in pci_probe()
3683 ohci->quirks = ohci_quirks[i].flags; in pci_probe()
3687 ohci->quirks = param_quirks; in pci_probe()
3690 ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ; in pci_probe()
3699 ohci->misc_buffer = dmam_alloc_coherent(&dev->dev, PAGE_SIZE, &ohci->misc_buffer_bus, in pci_probe()
3701 if (!ohci->misc_buffer) in pci_probe()
3702 return -ENOMEM; in pci_probe()
3704 err = ar_context_init(&ohci->ar_request_ctx, ohci, 0, in pci_probe()
3709 err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4, in pci_probe()
3714 err = context_init(&ohci->at_request_ctx, ohci, in pci_probe()
3719 err = context_init(&ohci->at_response_ctx, ohci, in pci_probe()
3725 ohci->ir_context_channels = ~0ULL; in pci_probe()
3726 ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet); in pci_probe()
3728 ohci->ir_context_mask = ohci->ir_context_support; in pci_probe()
3729 ohci->n_ir = hweight32(ohci->ir_context_mask); in pci_probe()
3730 size = sizeof(struct iso_context) * ohci->n_ir; in pci_probe()
3731 ohci->ir_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL); in pci_probe()
3732 if (!ohci->ir_context_list) in pci_probe()
3733 return -ENOMEM; in pci_probe()
3736 ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet); in pci_probe()
3738 if (!ohci->it_context_support) { in pci_probe()
3740 ohci->it_context_support = 0xf; in pci_probe()
3743 ohci->it_context_mask = ohci->it_context_support; in pci_probe()
3744 ohci->n_it = hweight32(ohci->it_context_mask); in pci_probe()
3745 size = sizeof(struct iso_context) * ohci->n_it; in pci_probe()
3746 ohci->it_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL); in pci_probe()
3747 if (!ohci->it_context_list) in pci_probe()
3748 return -ENOMEM; in pci_probe()
3750 ohci->self_id = ohci->misc_buffer + PAGE_SIZE/2; in pci_probe()
3751 ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2; in pci_probe()
3759 if (!(ohci->quirks & QUIRK_NO_MSI)) in pci_probe()
3761 err = devm_request_irq(&dev->dev, dev->irq, irq_handler, in pci_probe()
3764 ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq); in pci_probe()
3768 err = fw_card_add(&ohci->card, max_receive, link_speed, guid); in pci_probe()
3776 version >> 16, version & 0xff, ohci->card.index, in pci_probe()
3777 ohci->n_ir, ohci->n_it, ohci->quirks, in pci_probe()
3784 devm_free_irq(&dev->dev, dev->irq, ohci); in pci_probe()
3802 cancel_work_sync(&ohci->bus_reset_work); in pci_remove()
3803 fw_core_remove_card(&ohci->card); in pci_remove()
3812 devm_free_irq(&dev->dev, dev->irq, ohci); in pci_remove()
3815 dev_notice(&dev->dev, "removing fw-ohci device\n"); in pci_remove()
3855 reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid); in pci_resume()
3856 reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32)); in pci_resume()
3859 err = ohci_enable(&ohci->card, NULL, 0); in pci_resume()
3891 return -ENOMEM; in fw_ohci_init()
3909 /* Provide a module alias so root-on-sbp2 initrds don't break. */