Lines matching refs:mhba
References to the per-adapter handle (struct mvumi_hba *mhba) in the Marvell UMI SCSI driver, drivers/scsi/mvumi.c. The leading number on each line is the source line in the driver; "argument" or "local" notes how each function obtains mhba.

53 static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)  in tag_get_one()  argument
59 static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st, in tag_release_one() argument
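
These two helpers manage the firmware tag pool as a LIFO stack of free tags: tag_get_one() pops, tag_release_one() pushes. A minimal sketch consistent with the fragments above (the struct layout is an assumption, not the driver's verbatim definition):

    struct mvumi_tag {
            unsigned short *stack;  /* free tags; top of stack at stack[top - 1] */
            unsigned short top;     /* number of free tags */
            unsigned short size;    /* pool capacity */
    };

    static unsigned short tag_get_one(struct mvumi_hba *mhba,
                                      struct mvumi_tag *st)
    {
            BUG_ON(st->top <= 0);
            return st->stack[--st->top];    /* pop a free tag */
    }

    static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
                                unsigned short tag)
    {
            BUG_ON(st->top >= st->size);
            st->stack[st->top++] = tag;     /* push the tag back */
    }
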
106 static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, in mvumi_alloc_mem_resource() argument
112 dev_err(&mhba->pdev->dev, in mvumi_alloc_mem_resource()
121 dev_err(&mhba->pdev->dev, in mvumi_alloc_mem_resource()
130 res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, in mvumi_alloc_mem_resource()
134 dev_err(&mhba->pdev->dev, in mvumi_alloc_mem_resource()
143 dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type); in mvumi_alloc_mem_resource()
151 list_add_tail(&res->entry, &mhba->res_list); in mvumi_alloc_mem_resource()
156 static void mvumi_release_mem_resource(struct mvumi_hba *mhba) in mvumi_release_mem_resource() argument
160 list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) { in mvumi_release_mem_resource()
163 dma_free_coherent(&mhba->pdev->dev, res->size, in mvumi_release_mem_resource()
170 dev_err(&mhba->pdev->dev, in mvumi_release_mem_resource()
177 mhba->fw_flag &= ~MVUMI_FW_ALLOC; in mvumi_release_mem_resource()
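
mvumi_alloc_mem_resource() and mvumi_release_mem_resource() form a tracked-allocation pair: every region, whether kmalloc'd or DMA-coherent, is recorded on mhba->res_list so teardown is a single list walk. A hedged sketch of the release side, with a simplified struct mvumi_res:

    struct mvumi_res {
            struct list_head entry;
            dma_addr_t bus_addr;
            void *virt_addr;
            unsigned int size;
            unsigned short type;    /* RESOURCE_UNCACHED_MEMORY etc. (assumed) */
    };

    static void release_all_resources(struct mvumi_hba *mhba)
    {
            struct mvumi_res *res, *tmp;

            list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
                    if (res->type == RESOURCE_UNCACHED_MEMORY)
                            dma_free_coherent(&mhba->pdev->dev, res->size,
                                              res->virt_addr, res->bus_addr);
                    else
                            kfree(res->virt_addr);
                    list_del(&res->entry);
                    kfree(res);
            }
            mhba->fw_flag &= ~MVUMI_FW_ALLOC;
    }
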
189 static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd, in mvumi_make_sgl() argument
198 *sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum, in mvumi_make_sgl()
200 if (*sg_count > mhba->max_sge) { in mvumi_make_sgl()
201 dev_err(&mhba->pdev->dev, in mvumi_make_sgl()
203 *sg_count, mhba->max_sge); in mvumi_make_sgl()
204 dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum, in mvumi_make_sgl()
213 sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg))); in mvumi_make_sgl()
215 m_sg->flags |= 1U << mhba->eot_flag; in mvumi_make_sgl()
217 sgd_inc(mhba, m_sg); in mvumi_make_sgl()
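
mvumi_make_sgl() maps the midlayer scatterlist for DMA and copies each segment into the firmware's descriptor format; the final descriptor is marked with an end-of-table bit whose position (mhba->eot_flag, bit 22 or 27, see the page-1 handling below) depends on whether the compact SG layout is in use. A hedged sketch of the copy loop; descriptor field names are assumptions:

    static int make_sgl_sketch(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
                               struct mvumi_sgl *m_sg)
    {
            struct scatterlist *sg;
            int i, sg_count;

            sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd),
                                  scsi_sg_count(scmd),
                                  scmd->sc_data_direction);
            if (sg_count <= 0)
                    return -EIO;

            scsi_for_each_sg(scmd, sg, sg_count, i) {
                    m_sg->baseaddr_l =
                            cpu_to_le32(lower_32_bits(sg_dma_address(sg)));
                    m_sg->baseaddr_h =
                            cpu_to_le32(upper_32_bits(sg_dma_address(sg)));
                    m_sg->flags = 0;
                    sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg)));
                    if (i == sg_count - 1)
                            m_sg->flags |= 1U << mhba->eot_flag; /* end of table */
                    sgd_inc(mhba, m_sg);    /* advance to the next descriptor */
            }
            return 0;
    }
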
223 static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, in mvumi_internal_cmd_sgl() argument
233 virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr, in mvumi_internal_cmd_sgl()
244 m_sg->flags = 1U << mhba->eot_flag; in mvumi_internal_cmd_sgl()
245 sgd_setsz(mhba, m_sg, cpu_to_le32(size)); in mvumi_internal_cmd_sgl()
250 static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba, in mvumi_create_internal_cmd() argument
257 dev_err(&mhba->pdev->dev, "failed to create an internal cmd\n"); in mvumi_create_internal_cmd()
262 cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size, in mvumi_create_internal_cmd()
265 dev_err(&mhba->pdev->dev, "failed to allocate memory for FW" in mvumi_create_internal_cmd()
266 " frame,size = %d.\n", mhba->ib_max_size); in mvumi_create_internal_cmd()
272 if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) { in mvumi_create_internal_cmd()
273 dev_err(&mhba->pdev->dev, "failed to allocate memory" in mvumi_create_internal_cmd()
275 dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size, in mvumi_create_internal_cmd()
286 static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba, in mvumi_delete_internal_cmd() argument
296 sgd_getsz(mhba, m_sg, size); in mvumi_delete_internal_cmd()
301 dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf, in mvumi_delete_internal_cmd()
304 dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size, in mvumi_delete_internal_cmd()
316 static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba) in mvumi_get_cmd() argument
320 if (likely(!list_empty(&mhba->cmd_pool))) { in mvumi_get_cmd()
321 cmd = list_entry((&mhba->cmd_pool)->next, in mvumi_get_cmd()
325 dev_warn(&mhba->pdev->dev, "command pool is empty!\n"); in mvumi_get_cmd()
335 static inline void mvumi_return_cmd(struct mvumi_hba *mhba, in mvumi_return_cmd() argument
339 list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool); in mvumi_return_cmd()
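
mvumi_get_cmd() and mvumi_return_cmd() treat mhba->cmd_pool as a free list of pre-allocated struct mvumi_cmd, so the I/O path never allocates memory. Sketch of the pair; locking is the caller's job, as the fragments imply:

    static struct mvumi_cmd *pool_get(struct mvumi_hba *mhba)
    {
            struct mvumi_cmd *cmd = NULL;

            if (likely(!list_empty(&mhba->cmd_pool))) {
                    cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
                                           queue_pointer);
                    list_del_init(&cmd->queue_pointer);
            } else
                    dev_warn(&mhba->pdev->dev, "command pool is empty!\n");

            return cmd;
    }

    static void pool_put(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
    {
            cmd->scmd = NULL;       /* drop the scsi_cmnd link (assumed field) */
            list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
    }
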
346 static void mvumi_free_cmds(struct mvumi_hba *mhba) in mvumi_free_cmds() argument
350 while (!list_empty(&mhba->cmd_pool)) { in mvumi_free_cmds()
351 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, in mvumi_free_cmds()
354 if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)) in mvumi_free_cmds()
365 static int mvumi_alloc_cmds(struct mvumi_hba *mhba) in mvumi_alloc_cmds() argument
370 for (i = 0; i < mhba->max_io; i++) { in mvumi_alloc_cmds()
376 list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool); in mvumi_alloc_cmds()
377 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { in mvumi_alloc_cmds()
378 cmd->frame = mhba->ib_frame + i * mhba->ib_max_size; in mvumi_alloc_cmds()
379 cmd->frame_phys = mhba->ib_frame_phys in mvumi_alloc_cmds()
380 + i * mhba->ib_max_size; in mvumi_alloc_cmds()
382 cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL); in mvumi_alloc_cmds()
389 dev_err(&mhba->pdev->dev, in mvumi_alloc_cmds()
391 while (!list_empty(&mhba->cmd_pool)) { in mvumi_alloc_cmds()
392 cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, in mvumi_alloc_cmds()
395 if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)) in mvumi_alloc_cmds()
402 static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba) in mvumi_check_ib_list_9143() argument
405 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_check_ib_list_9143()
407 ib_rp_reg = ioread32(mhba->regs->inb_read_pointer); in mvumi_check_ib_list_9143()
410 (mhba->ib_cur_slot & regs->cl_slot_num_mask)) && in mvumi_check_ib_list_9143()
412 != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) { in mvumi_check_ib_list_9143()
413 dev_warn(&mhba->pdev->dev, "no free slot to use.\n"); in mvumi_check_ib_list_9143()
416 if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) { in mvumi_check_ib_list_9143()
417 dev_warn(&mhba->pdev->dev, "firmware io overflow.\n"); in mvumi_check_ib_list_9143()
420 return mhba->max_io - atomic_read(&mhba->fw_outstanding); in mvumi_check_ib_list_9143()
424 static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba) in mvumi_check_ib_list_9580() argument
427 if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1)) in mvumi_check_ib_list_9580()
429 count = ioread32(mhba->ib_shadow); in mvumi_check_ib_list_9580()
435 static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry) in mvumi_get_ib_list_entry() argument
439 cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask; in mvumi_get_ib_list_entry()
441 if (cur_ib_entry >= mhba->list_num_io) { in mvumi_get_ib_list_entry()
442 cur_ib_entry -= mhba->list_num_io; in mvumi_get_ib_list_entry()
443 mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle; in mvumi_get_ib_list_entry()
445 mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask; in mvumi_get_ib_list_entry()
446 mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask); in mvumi_get_ib_list_entry()
447 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { in mvumi_get_ib_list_entry()
448 *ib_entry = mhba->ib_list + cur_ib_entry * in mvumi_get_ib_list_entry()
451 *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size; in mvumi_get_ib_list_entry()
453 atomic_inc(&mhba->fw_outstanding); in mvumi_get_ib_list_entry()
456 static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba) in mvumi_send_ib_list_entry() argument
458 iowrite32(0xffff, mhba->ib_shadow); in mvumi_send_ib_list_entry()
459 iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer); in mvumi_send_ib_list_entry()
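
The inbound list is a ring whose pointers pack a slot index (cl_slot_num_mask) together with a wrap "toggle" bit (cl_pointer_toggle); the toggle flips on every wrap, so a full ring can be told apart from an empty one even when the indexes match, which is exactly what mvumi_check_ib_list_9143() tests. Sketch of claiming one slot and publishing it (non-DYN_SRC path):

    static void *claim_ib_slot(struct mvumi_hba *mhba)
    {
            struct mvumi_hw_regs *regs = mhba->regs;
            unsigned int cur = (mhba->ib_cur_slot & regs->cl_slot_num_mask) + 1;

            if (cur >= mhba->list_num_io) {
                    cur -= mhba->list_num_io;       /* wrap around */
                    mhba->ib_cur_slot ^= regs->cl_pointer_toggle;
            }
            mhba->ib_cur_slot &= ~regs->cl_slot_num_mask;
            mhba->ib_cur_slot |= (cur & regs->cl_slot_num_mask);
            atomic_inc(&mhba->fw_outstanding);
            return mhba->ib_list + cur * mhba->ib_max_size;
    }

    static void publish_ib_slot(struct mvumi_hba *mhba)
    {
            iowrite32(0xffff, mhba->ib_shadow);
            iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
    }
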
462 static char mvumi_check_ob_frame(struct mvumi_hba *mhba, in mvumi_check_ob_frame() argument
468 p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size; in mvumi_check_ob_frame()
471 if (tag > mhba->tag_pool.size) { in mvumi_check_ob_frame()
472 dev_err(&mhba->pdev->dev, "ob frame data error\n"); in mvumi_check_ob_frame()
475 if (mhba->tag_cmd[tag] == NULL) { in mvumi_check_ob_frame()
476 dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag); in mvumi_check_ob_frame()
478 } else if (mhba->tag_cmd[tag]->request_id != request_id && in mvumi_check_ob_frame()
479 mhba->request_id_enabled) { in mvumi_check_ob_frame()
480 dev_err(&mhba->pdev->dev, "request ID from FW:0x%x," in mvumi_check_ob_frame()
482 mhba->tag_cmd[tag]->request_id); in mvumi_check_ob_frame()
489 static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba, in mvumi_check_ob_list_9143() argument
493 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_check_ob_list_9143()
497 ob_write_shadow = ioread32(mhba->ob_shadow); in mvumi_check_ob_list_9143()
500 *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask; in mvumi_check_ob_list_9143()
501 *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask; in mvumi_check_ob_list_9143()
504 (mhba->ob_cur_slot & regs->cl_pointer_toggle)) { in mvumi_check_ob_list_9143()
505 *assign_obf_end += mhba->list_num_io; in mvumi_check_ob_list_9143()
510 static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba, in mvumi_check_ob_list_9580() argument
514 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_check_ob_list_9580()
518 *cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask; in mvumi_check_ob_list_9580()
519 *assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask; in mvumi_check_ob_list_9580()
521 *assign_obf_end += mhba->list_num_io; in mvumi_check_ob_list_9580()
527 static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba) in mvumi_receive_ob_list_entry() argument
532 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_receive_ob_list_entry()
534 if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end)) in mvumi_receive_ob_list_entry()
539 if (cur_obf >= mhba->list_num_io) { in mvumi_receive_ob_list_entry()
540 cur_obf -= mhba->list_num_io; in mvumi_receive_ob_list_entry()
541 mhba->ob_cur_slot ^= regs->cl_pointer_toggle; in mvumi_receive_ob_list_entry()
544 p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size; in mvumi_receive_ob_list_entry()
549 if (unlikely(p_outb_frame->tag > mhba->tag_pool.size || in mvumi_receive_ob_list_entry()
550 mhba->tag_cmd[p_outb_frame->tag] == NULL || in mvumi_receive_ob_list_entry()
552 mhba->tag_cmd[p_outb_frame->tag]->request_id)) in mvumi_receive_ob_list_entry()
553 if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame)) in mvumi_receive_ob_list_entry()
556 if (!list_empty(&mhba->ob_data_list)) { in mvumi_receive_ob_list_entry()
558 list_first_entry(&mhba->ob_data_list, in mvumi_receive_ob_list_entry()
564 cur_obf = mhba->list_num_io - 1; in mvumi_receive_ob_list_entry()
565 mhba->ob_cur_slot ^= regs->cl_pointer_toggle; in mvumi_receive_ob_list_entry()
571 memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size); in mvumi_receive_ob_list_entry()
574 list_add_tail(&ob_data->list, &mhba->free_ob_list); in mvumi_receive_ob_list_entry()
576 mhba->ob_cur_slot &= ~regs->cl_slot_num_mask; in mvumi_receive_ob_list_entry()
577 mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask); in mvumi_receive_ob_list_entry()
578 iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer); in mvumi_receive_ob_list_entry()
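
mvumi_receive_ob_list_entry() drains the outbound ring between the driver's read pointer and the write pointer reported by check_ob_list(); check_ob_list() may extend the end index past list_num_io to encode a wrap. Each ready frame is copied out of the DMA ring into a driver-owned ob_data buffer so the slot can be handed back to firmware (outb_read_pointer) before completions run in mvumi_handle_clob(). Condensed sketch; the frame-validation and out-of-buffers back-off paths are omitted:

    static void drain_ob_ring(struct mvumi_hba *mhba)
    {
            struct mvumi_hw_regs *regs = mhba->regs;
            unsigned int cur_obf, assign_obf_end, num;
            struct mvumi_ob_data *ob_data;
            void *p_outb_frame;

            if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
                    return;

            num = assign_obf_end - cur_obf;
            while (num--) {
                    if (++cur_obf >= mhba->list_num_io) {
                            cur_obf -= mhba->list_num_io;   /* wrap */
                            mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
                    }
                    p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
                    ob_data = list_first_entry(&mhba->ob_data_list,
                                               struct mvumi_ob_data, list);
                    list_del_init(&ob_data->list);
                    memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
                    list_add_tail(&ob_data->list, &mhba->free_ob_list);
            }
            mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
            mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
            iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
    }
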
581 static void mvumi_reset(struct mvumi_hba *mhba) in mvumi_reset() argument
583 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_reset()
592 static unsigned char mvumi_start(struct mvumi_hba *mhba);
594 static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba) in mvumi_wait_for_outstanding() argument
596 mhba->fw_state = FW_STATE_ABORT; in mvumi_wait_for_outstanding()
597 mvumi_reset(mhba); in mvumi_wait_for_outstanding()
599 if (mvumi_start(mhba)) in mvumi_wait_for_outstanding()
605 static int mvumi_wait_for_fw(struct mvumi_hba *mhba) in mvumi_wait_for_fw() argument
607 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_wait_for_fw()
617 dev_err(&mhba->pdev->dev, in mvumi_wait_for_fw()
630 static void mvumi_backup_bar_addr(struct mvumi_hba *mhba) in mvumi_backup_bar_addr() argument
635 pci_read_config_dword(mhba->pdev, 0x10 + i * 4, in mvumi_backup_bar_addr()
636 &mhba->pci_base[i]); in mvumi_backup_bar_addr()
640 static void mvumi_restore_bar_addr(struct mvumi_hba *mhba) in mvumi_restore_bar_addr() argument
645 if (mhba->pci_base[i]) in mvumi_restore_bar_addr()
646 pci_write_config_dword(mhba->pdev, 0x10 + i * 4, in mvumi_restore_bar_addr()
647 mhba->pci_base[i]); in mvumi_restore_bar_addr()
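
The backup/restore pair reads and rewrites the 32-bit BARs directly from PCI config space so they survive the chip reset in mvumi_reset_host_9580(); offset 0x10 is PCI_BASE_ADDRESS_0 and each BAR occupies 4 bytes. Equivalent sketch (the loop bound of 6 BARs is an assumption):

    #include <linux/pci.h>

    #define MAX_BASE_ADDRESS 6

    static void save_bars(struct pci_dev *pdev, u32 *pci_base)
    {
            int i;

            for (i = 0; i < MAX_BASE_ADDRESS; i++)
                    pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0 + i * 4,
                                          &pci_base[i]);
    }

    static void restore_bars(struct pci_dev *pdev, const u32 *pci_base)
    {
            int i;

            for (i = 0; i < MAX_BASE_ADDRESS; i++)
                    if (pci_base[i])        /* only rewrite BARs that were set */
                            pci_write_config_dword(pdev,
                                                   PCI_BASE_ADDRESS_0 + i * 4,
                                                   pci_base[i]);
    }
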
666 static int mvumi_reset_host_9580(struct mvumi_hba *mhba) in mvumi_reset_host_9580() argument
668 mhba->fw_state = FW_STATE_ABORT; in mvumi_reset_host_9580()
670 iowrite32(0, mhba->regs->reset_enable); in mvumi_reset_host_9580()
671 iowrite32(0xf, mhba->regs->reset_request); in mvumi_reset_host_9580()
673 iowrite32(0x10, mhba->regs->reset_enable); in mvumi_reset_host_9580()
674 iowrite32(0x10, mhba->regs->reset_request); in mvumi_reset_host_9580()
676 pci_disable_device(mhba->pdev); in mvumi_reset_host_9580()
678 if (pci_enable_device(mhba->pdev)) { in mvumi_reset_host_9580()
679 dev_err(&mhba->pdev->dev, "enable device failed\n"); in mvumi_reset_host_9580()
682 if (mvumi_pci_set_master(mhba->pdev)) { in mvumi_reset_host_9580()
683 dev_err(&mhba->pdev->dev, "set master failed\n"); in mvumi_reset_host_9580()
686 mvumi_restore_bar_addr(mhba); in mvumi_reset_host_9580()
687 if (mvumi_wait_for_fw(mhba) == FAILED) in mvumi_reset_host_9580()
690 return mvumi_wait_for_outstanding(mhba); in mvumi_reset_host_9580()
693 static int mvumi_reset_host_9143(struct mvumi_hba *mhba) in mvumi_reset_host_9143() argument
695 return mvumi_wait_for_outstanding(mhba); in mvumi_reset_host_9143()
700 struct mvumi_hba *mhba; in mvumi_host_reset() local
702 mhba = (struct mvumi_hba *) scmd->device->host->hostdata; in mvumi_host_reset()
707 return mhba->instancet->reset_host(mhba); in mvumi_host_reset()
710 static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba, in mvumi_issue_blocked_cmd() argument
718 dev_err(&mhba->pdev->dev, in mvumi_issue_blocked_cmd()
725 spin_lock_irqsave(mhba->shost->host_lock, flags); in mvumi_issue_blocked_cmd()
726 mhba->instancet->fire_cmd(mhba, cmd); in mvumi_issue_blocked_cmd()
727 spin_unlock_irqrestore(mhba->shost->host_lock, flags); in mvumi_issue_blocked_cmd()
729 wait_event_timeout(mhba->int_cmd_wait_q, in mvumi_issue_blocked_cmd()
735 spin_lock_irqsave(mhba->shost->host_lock, flags); in mvumi_issue_blocked_cmd()
737 if (mhba->tag_cmd[cmd->frame->tag]) { in mvumi_issue_blocked_cmd()
738 mhba->tag_cmd[cmd->frame->tag] = NULL; in mvumi_issue_blocked_cmd()
739 dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n", in mvumi_issue_blocked_cmd()
741 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag); in mvumi_issue_blocked_cmd()
744 dev_warn(&mhba->pdev->dev, in mvumi_issue_blocked_cmd()
748 atomic_dec(&mhba->fw_outstanding); in mvumi_issue_blocked_cmd()
750 spin_unlock_irqrestore(mhba->shost->host_lock, flags); in mvumi_issue_blocked_cmd()
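
mvumi_issue_blocked_cmd() is the synchronous path used by internal commands: fire under the host lock, sleep on int_cmd_wait_q until the ISR completes the command or the timeout expires, then on timeout reclaim the tag and the outstanding count under the lock. Hedged sketch; the status field and timeout constant are assumptions:

    static void issue_blocked(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
    {
            unsigned long flags;

            cmd->cmd_status = REQ_STATUS_PENDING;

            spin_lock_irqsave(mhba->shost->host_lock, flags);
            mhba->instancet->fire_cmd(mhba, cmd);
            spin_unlock_irqrestore(mhba->shost->host_lock, flags);

            wait_event_timeout(mhba->int_cmd_wait_q,
                               cmd->cmd_status != REQ_STATUS_PENDING,
                               MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);

            if (cmd->cmd_status != REQ_STATUS_PENDING)
                    return;         /* completed normally */

            spin_lock_irqsave(mhba->shost->host_lock, flags);
            if (mhba->tag_cmd[cmd->frame->tag]) {
                    /* firmware still owns the tag: break the association */
                    mhba->tag_cmd[cmd->frame->tag] = NULL;
                    tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
            }
            if (!list_empty(&cmd->queue_pointer))
                    list_del_init(&cmd->queue_pointer); /* never sent */
            else
                    atomic_dec(&mhba->fw_outstanding);  /* sent, not answered */
            spin_unlock_irqrestore(mhba->shost->host_lock, flags);
    }
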
755 static void mvumi_release_fw(struct mvumi_hba *mhba) in mvumi_release_fw() argument
757 mvumi_free_cmds(mhba); in mvumi_release_fw()
758 mvumi_release_mem_resource(mhba); in mvumi_release_fw()
759 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); in mvumi_release_fw()
760 dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE, in mvumi_release_fw()
761 mhba->handshake_page, mhba->handshake_page_phys); in mvumi_release_fw()
762 kfree(mhba->regs); in mvumi_release_fw()
763 pci_release_regions(mhba->pdev); in mvumi_release_fw()
766 static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba) in mvumi_flush_cache() argument
773 for (device_id = 0; device_id < mhba->max_target_id; device_id++) { in mvumi_flush_cache()
774 if (!(mhba->target_map[device_id / bitcount] & in mvumi_flush_cache()
777 get_cmd: cmd = mvumi_create_internal_cmd(mhba, 0); in mvumi_flush_cache()
780 dev_err(&mhba->pdev->dev, "failed to get memory" in mvumi_flush_cache()
802 mvumi_issue_blocked_cmd(mhba, cmd); in mvumi_flush_cache()
804 dev_err(&mhba->pdev->dev, in mvumi_flush_cache()
809 mvumi_delete_internal_cmd(mhba, cmd); in mvumi_flush_cache()
830 static void mvumi_hs_build_page(struct mvumi_hba *mhba, in mvumi_hs_build_page() argument
845 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) in mvumi_hs_build_page()
874 hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys); in mvumi_hs_build_page()
875 hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys); in mvumi_hs_build_page()
877 hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys); in mvumi_hs_build_page()
878 hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys); in mvumi_hs_build_page()
879 hs_page4->ib_entry_size = mhba->ib_max_size_setting; in mvumi_hs_build_page()
880 hs_page4->ob_entry_size = mhba->ob_max_size_setting; in mvumi_hs_build_page()
881 if (mhba->hba_capability in mvumi_hs_build_page()
884 &mhba->list_num_io, in mvumi_hs_build_page()
887 &mhba->list_num_io, in mvumi_hs_build_page()
890 hs_page4->ob_depth = (u8) mhba->list_num_io; in mvumi_hs_build_page()
891 hs_page4->ib_depth = (u8) mhba->list_num_io; in mvumi_hs_build_page()
898 dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n", in mvumi_hs_build_page()
908 static int mvumi_init_data(struct mvumi_hba *mhba) in mvumi_init_data() argument
916 if (mhba->fw_flag & MVUMI_FW_ALLOC) in mvumi_init_data()
919 tmp_size = mhba->ib_max_size * mhba->max_io; in mvumi_init_data()
920 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) in mvumi_init_data()
921 tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io; in mvumi_init_data()
923 tmp_size += 128 + mhba->ob_max_size * mhba->max_io; in mvumi_init_data()
926 res_mgnt = mvumi_alloc_mem_resource(mhba, in mvumi_init_data()
929 dev_err(&mhba->pdev->dev, in mvumi_init_data()
940 mhba->ib_list = v; in mvumi_init_data()
941 mhba->ib_list_phys = p; in mvumi_init_data()
942 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { in mvumi_init_data()
943 v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io; in mvumi_init_data()
944 p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io; in mvumi_init_data()
945 mhba->ib_frame = v; in mvumi_init_data()
946 mhba->ib_frame_phys = p; in mvumi_init_data()
948 v += mhba->ib_max_size * mhba->max_io; in mvumi_init_data()
949 p += mhba->ib_max_size * mhba->max_io; in mvumi_init_data()
955 mhba->ib_shadow = v; in mvumi_init_data()
956 mhba->ib_shadow_phys = p; in mvumi_init_data()
960 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) { in mvumi_init_data()
964 mhba->ob_shadow = v; in mvumi_init_data()
965 mhba->ob_shadow_phys = p; in mvumi_init_data()
972 mhba->ob_shadow = v; in mvumi_init_data()
973 mhba->ob_shadow_phys = p; in mvumi_init_data()
983 mhba->ob_list = v; in mvumi_init_data()
984 mhba->ob_list_phys = p; in mvumi_init_data()
987 tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool)); in mvumi_init_data()
990 res_mgnt = mvumi_alloc_mem_resource(mhba, in mvumi_init_data()
993 dev_err(&mhba->pdev->dev, in mvumi_init_data()
999 for (i = mhba->max_io; i != 0; i--) { in mvumi_init_data()
1001 list_add_tail(&ob_pool->list, &mhba->ob_data_list); in mvumi_init_data()
1002 virmem += mhba->ob_max_size + sizeof(*ob_pool); in mvumi_init_data()
1005 tmp_size = sizeof(unsigned short) * mhba->max_io + in mvumi_init_data()
1006 sizeof(struct mvumi_cmd *) * mhba->max_io; in mvumi_init_data()
1007 tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) / in mvumi_init_data()
1010 res_mgnt = mvumi_alloc_mem_resource(mhba, in mvumi_init_data()
1013 dev_err(&mhba->pdev->dev, in mvumi_init_data()
1019 mhba->tag_pool.stack = virmem; in mvumi_init_data()
1020 mhba->tag_pool.size = mhba->max_io; in mvumi_init_data()
1021 tag_init(&mhba->tag_pool, mhba->max_io); in mvumi_init_data()
1022 virmem += sizeof(unsigned short) * mhba->max_io; in mvumi_init_data()
1024 mhba->tag_cmd = virmem; in mvumi_init_data()
1025 virmem += sizeof(struct mvumi_cmd *) * mhba->max_io; in mvumi_init_data()
1027 mhba->target_map = virmem; in mvumi_init_data()
1029 mhba->fw_flag |= MVUMI_FW_ALLOC; in mvumi_init_data()
1033 mvumi_release_mem_resource(mhba); in mvumi_init_data()
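
mvumi_init_data() carves one large uncached allocation into the inbound list, the shadow registers, and the outbound list, aligning each sub-region by rounding the bus address up and advancing the virtual pointer by the same offset (the "+ 128" in tmp_size pays for that padding). The alignment idiom, sketched with an assumed 128-byte boundary:

    v = res_mgnt->virt_addr;
    p = res_mgnt->bus_addr;

    offset = round_up(p, 128) - p;  /* round_up needs a power-of-2 boundary */
    v += offset;
    p += offset;
    mhba->ib_list = v;
    mhba->ib_list_phys = p;

    /* skip max_io inbound entries to reach the next sub-region */
    v += mhba->ib_max_size * mhba->max_io;
    p += mhba->ib_max_size * mhba->max_io;
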
1037 static int mvumi_hs_process_page(struct mvumi_hba *mhba, in mvumi_hs_process_page() argument
1046 dev_err(&mhba->pdev->dev, "checksum error\n"); in mvumi_hs_process_page()
1054 mhba->max_io = hs_page1->max_io_support; in mvumi_hs_process_page()
1055 mhba->list_num_io = hs_page1->cl_inout_list_depth; in mvumi_hs_process_page()
1056 mhba->max_transfer_size = hs_page1->max_transfer_size; in mvumi_hs_process_page()
1057 mhba->max_target_id = hs_page1->max_devices_support; in mvumi_hs_process_page()
1058 mhba->hba_capability = hs_page1->capability; in mvumi_hs_process_page()
1059 mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size; in mvumi_hs_process_page()
1060 mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2; in mvumi_hs_process_page()
1062 mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size; in mvumi_hs_process_page()
1063 mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2; in mvumi_hs_process_page()
1065 dev_dbg(&mhba->pdev->dev, "FW version:%d\n", in mvumi_hs_process_page()
1068 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG) in mvumi_hs_process_page()
1069 mhba->eot_flag = 22; in mvumi_hs_process_page()
1071 mhba->eot_flag = 27; in mvumi_hs_process_page()
1072 if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) in mvumi_hs_process_page()
1073 mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth; in mvumi_hs_process_page()
1076 dev_err(&mhba->pdev->dev, "handshake: page code error\n"); in mvumi_hs_process_page()
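
Handshake page 1 carries the list entry sizes as a log2 setting in 4-byte units; the driver keeps both the raw setting (echoed back in page 4) and the expanded byte size. Worked example of the conversion:

    mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
    /* e.g. cl_in_max_entry_size == 5  ->  (1 << 5) << 2 == 128 bytes */
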
1091 static int mvumi_handshake(struct mvumi_hba *mhba) in mvumi_handshake() argument
1095 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_handshake()
1097 if (mhba->fw_state == FW_STATE_STARTING) in mvumi_handshake()
1102 dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state); in mvumi_handshake()
1104 mhba->fw_state = FW_STATE_STARTING; in mvumi_handshake()
1112 mhba->fw_state = FW_STATE_HANDSHAKING; in mvumi_handshake()
1121 iowrite32(lower_32_bits(mhba->handshake_page_phys), in mvumi_handshake()
1123 iowrite32(upper_32_bits(mhba->handshake_page_phys), in mvumi_handshake()
1134 hs_header = (struct mvumi_hs_header *) mhba->handshake_page; in mvumi_handshake()
1136 mhba->hba_total_pages = in mvumi_handshake()
1139 if (mhba->hba_total_pages == 0) in mvumi_handshake()
1140 mhba->hba_total_pages = HS_PAGE_TOTAL-1; in mvumi_handshake()
1144 if (mvumi_hs_process_page(mhba, hs_header)) { in mvumi_handshake()
1148 if (mvumi_init_data(mhba)) { in mvumi_handshake()
1154 mhba->hba_total_pages = HS_PAGE_TOTAL-1; in mvumi_handshake()
1157 if ((hs_header->page_code + 1) <= mhba->hba_total_pages) { in mvumi_handshake()
1160 mvumi_hs_build_page(mhba, hs_header); in mvumi_handshake()
1177 iowrite32(mhba->list_num_io, mhba->ib_shadow); in mvumi_handshake()
1179 iowrite32(lower_32_bits(mhba->ib_shadow_phys), in mvumi_handshake()
1181 iowrite32(upper_32_bits(mhba->ib_shadow_phys), in mvumi_handshake()
1184 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) { in mvumi_handshake()
1186 iowrite32((mhba->list_num_io-1) | in mvumi_handshake()
1188 mhba->ob_shadow); in mvumi_handshake()
1189 iowrite32(lower_32_bits(mhba->ob_shadow_phys), in mvumi_handshake()
1191 iowrite32(upper_32_bits(mhba->ob_shadow_phys), in mvumi_handshake()
1195 mhba->ib_cur_slot = (mhba->list_num_io - 1) | in mvumi_handshake()
1197 mhba->ob_cur_slot = (mhba->list_num_io - 1) | in mvumi_handshake()
1199 mhba->fw_state = FW_STATE_STARTED; in mvumi_handshake()
1203 dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n", in mvumi_handshake()
1210 static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba) in mvumi_handshake_event() argument
1216 mvumi_handshake(mhba); in mvumi_handshake_event()
1218 isr_status = mhba->instancet->read_fw_status_reg(mhba); in mvumi_handshake_event()
1220 if (mhba->fw_state == FW_STATE_STARTED) in mvumi_handshake_event()
1223 dev_err(&mhba->pdev->dev, in mvumi_handshake_event()
1225 mhba->fw_state); in mvumi_handshake_event()
1226 dev_err(&mhba->pdev->dev, in mvumi_handshake_event()
1228 mhba->global_isr, isr_status); in mvumi_handshake_event()
1238 static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba) in mvumi_check_handshake() argument
1244 tmp = ioread32(mhba->regs->arm_to_pciea_msg1); in mvumi_check_handshake()
1248 mhba->regs->pciea_to_arm_drbl_reg); in mvumi_check_handshake()
1250 dev_err(&mhba->pdev->dev, in mvumi_check_handshake()
1256 tmp = ioread32(mhba->regs->arm_to_pciea_msg1); in mvumi_check_handshake()
1259 mhba->fw_state = FW_STATE_STARTING; in mvumi_check_handshake()
1260 dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n"); in mvumi_check_handshake()
1262 if (mvumi_handshake_event(mhba)) { in mvumi_check_handshake()
1263 dev_err(&mhba->pdev->dev, in mvumi_check_handshake()
1265 mhba->fw_state); in mvumi_check_handshake()
1268 } while (mhba->fw_state != FW_STATE_STARTED); in mvumi_check_handshake()
1270 dev_dbg(&mhba->pdev->dev, "firmware handshake done\n"); in mvumi_check_handshake()
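
mvumi_check_handshake() polls arm_to_pciea_msg1 for the ready state, kicks the firmware through the doorbell register if needed, and then drives the page-exchange state machine until the firmware reports started. The driving loop, condensed (retry bound and pacing omitted):

    static unsigned char drive_handshake(struct mvumi_hba *mhba)
    {
            mhba->fw_state = FW_STATE_STARTING;
            do {
                    if (mvumi_handshake_event(mhba)) {
                            dev_err(&mhba->pdev->dev,
                                    "handshake failed at state 0x%x.\n",
                                    mhba->fw_state);
                            return -1;
                    }
            } while (mhba->fw_state != FW_STATE_STARTED);
            return 0;
    }
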
1275 static unsigned char mvumi_start(struct mvumi_hba *mhba) in mvumi_start() argument
1278 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_start()
1288 if (mvumi_check_handshake(mhba)) in mvumi_start()
1300 static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, in mvumi_complete_cmd() argument
1328 dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), in mvumi_complete_cmd()
1332 mvumi_return_cmd(mhba, cmd); in mvumi_complete_cmd()
1335 static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba, in mvumi_complete_internal_cmd() argument
1349 wake_up(&mhba->int_cmd_wait_q); in mvumi_complete_internal_cmd()
1353 static void mvumi_show_event(struct mvumi_hba *mhba, in mvumi_show_event() argument
1358 dev_warn(&mhba->pdev->dev, in mvumi_show_event()
1379 static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status) in mvumi_handle_hotplug() argument
1385 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0); in mvumi_handle_hotplug()
1387 dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0, in mvumi_handle_hotplug()
1393 dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n", in mvumi_handle_hotplug()
1396 sdev = scsi_device_lookup(mhba->shost, 0, devid, 0); in mvumi_handle_hotplug()
1398 scsi_add_device(mhba->shost, 0, devid, 0); in mvumi_handle_hotplug()
1399 dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0, in mvumi_handle_hotplug()
1403 dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n", in mvumi_handle_hotplug()
1411 static u64 mvumi_inquiry(struct mvumi_hba *mhba, in mvumi_inquiry() argument
1420 cmd = mvumi_create_internal_cmd(mhba, data_buf_len); in mvumi_inquiry()
1441 mvumi_issue_blocked_cmd(mhba, cmd); in mvumi_inquiry()
1444 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) in mvumi_inquiry()
1450 dev_dbg(&mhba->pdev->dev, in mvumi_inquiry()
1456 mvumi_delete_internal_cmd(mhba, cmd); in mvumi_inquiry()
1461 static void mvumi_detach_devices(struct mvumi_hba *mhba) in mvumi_detach_devices() argument
1466 mutex_lock(&mhba->device_lock); in mvumi_detach_devices()
1470 &mhba->shost_dev_list, list) { in mvumi_detach_devices()
1471 mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE); in mvumi_detach_devices()
1473 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n", in mvumi_detach_devices()
1477 list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) { in mvumi_detach_devices()
1479 dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n", in mvumi_detach_devices()
1485 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) in mvumi_detach_devices()
1486 sdev = scsi_device_lookup(mhba->shost, 0, in mvumi_detach_devices()
1487 mhba->max_target_id - 1, 0); in mvumi_detach_devices()
1494 mutex_unlock(&mhba->device_lock); in mvumi_detach_devices()
1497 static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id) in mvumi_rescan_devices() argument
1501 sdev = scsi_device_lookup(mhba->shost, 0, id, 0); in mvumi_rescan_devices()
1508 static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid) in mvumi_match_devices() argument
1512 list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) { in mvumi_match_devices()
1515 dev_err(&mhba->pdev->dev, in mvumi_match_devices()
1521 if (mhba->pdev->device == in mvumi_match_devices()
1523 mvumi_rescan_devices(mhba, id); in mvumi_match_devices()
1531 static void mvumi_remove_devices(struct mvumi_hba *mhba, int id) in mvumi_remove_devices() argument
1536 &mhba->shost_dev_list, list) { in mvumi_remove_devices()
1538 dev_dbg(&mhba->pdev->dev, in mvumi_remove_devices()
1541 mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE); in mvumi_remove_devices()
1548 static int mvumi_probe_devices(struct mvumi_hba *mhba) in mvumi_probe_devices() argument
1556 cmd = mvumi_create_internal_cmd(mhba, 64); in mvumi_probe_devices()
1560 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) in mvumi_probe_devices()
1561 maxid = mhba->max_target_id; in mvumi_probe_devices()
1563 maxid = mhba->max_target_id - 1; in mvumi_probe_devices()
1566 wwid = mvumi_inquiry(mhba, id, cmd); in mvumi_probe_devices()
1569 mvumi_remove_devices(mhba, id); in mvumi_probe_devices()
1572 found = mvumi_match_devices(mhba, id, wwid); in mvumi_probe_devices()
1574 mvumi_remove_devices(mhba, id); in mvumi_probe_devices()
1578 dev_err(&mhba->pdev->dev, in mvumi_probe_devices()
1588 &mhba->mhba_dev_list); in mvumi_probe_devices()
1589 dev_dbg(&mhba->pdev->dev, in mvumi_probe_devices()
1600 mvumi_delete_internal_cmd(mhba, cmd); in mvumi_probe_devices()
1608 struct mvumi_hba *mhba = (struct mvumi_hba *) data; in mvumi_rescan_bus() local
1614 if (!atomic_read(&mhba->pnp_count)) in mvumi_rescan_bus()
1617 atomic_set(&mhba->pnp_count, 0); in mvumi_rescan_bus()
1620 mutex_lock(&mhba->device_lock); in mvumi_rescan_bus()
1621 ret = mvumi_probe_devices(mhba); in mvumi_rescan_bus()
1624 &mhba->mhba_dev_list, list) { in mvumi_rescan_bus()
1625 if (mvumi_handle_hotplug(mhba, mv_dev->id, in mvumi_rescan_bus()
1627 dev_err(&mhba->pdev->dev, in mvumi_rescan_bus()
1636 &mhba->shost_dev_list); in mvumi_rescan_bus()
1640 mutex_unlock(&mhba->device_lock); in mvumi_rescan_bus()
1645 static void mvumi_proc_msg(struct mvumi_hba *mhba, in mvumi_proc_msg() argument
1653 if (mhba->fw_flag & MVUMI_FW_ATTACH) { in mvumi_proc_msg()
1658 mutex_lock(&mhba->sas_discovery_mutex); in mvumi_proc_msg()
1663 mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE); in mvumi_proc_msg()
1671 mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE); in mvumi_proc_msg()
1673 mutex_unlock(&mhba->sas_discovery_mutex); in mvumi_proc_msg()
1677 static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer) in mvumi_notification() argument
1685 dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger" in mvumi_notification()
1692 mvumi_show_event(mhba, param); in mvumi_notification()
1695 mvumi_proc_msg(mhba, buffer); in mvumi_notification()
1699 static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg) in mvumi_get_event() argument
1704 cmd = mvumi_create_internal_cmd(mhba, 512); in mvumi_get_event()
1719 mvumi_issue_blocked_cmd(mhba, cmd); in mvumi_get_event()
1722 dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n", in mvumi_get_event()
1725 mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf); in mvumi_get_event()
1727 mvumi_delete_internal_cmd(mhba, cmd); in mvumi_get_event()
1736 mvumi_get_event(mu_ev->mhba, mu_ev->event); in mvumi_scan_events()
1740 static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status) in mvumi_launch_events() argument
1746 atomic_inc(&mhba->pnp_count); in mvumi_launch_events()
1747 wake_up_process(mhba->dm_thread); in mvumi_launch_events()
1755 mu_ev->mhba = mhba; in mvumi_launch_events()
1764 static void mvumi_handle_clob(struct mvumi_hba *mhba) in mvumi_handle_clob() argument
1770 while (!list_empty(&mhba->free_ob_list)) { in mvumi_handle_clob()
1771 pool = list_first_entry(&mhba->free_ob_list, in mvumi_handle_clob()
1774 list_add_tail(&pool->list, &mhba->ob_data_list); in mvumi_handle_clob()
1777 cmd = mhba->tag_cmd[ob_frame->tag]; in mvumi_handle_clob()
1779 atomic_dec(&mhba->fw_outstanding); in mvumi_handle_clob()
1780 mhba->tag_cmd[ob_frame->tag] = NULL; in mvumi_handle_clob()
1781 tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag); in mvumi_handle_clob()
1783 mvumi_complete_cmd(mhba, cmd, ob_frame); in mvumi_handle_clob()
1785 mvumi_complete_internal_cmd(mhba, cmd, ob_frame); in mvumi_handle_clob()
1787 mhba->instancet->fire_cmd(mhba, NULL); in mvumi_handle_clob()
1792 struct mvumi_hba *mhba = (struct mvumi_hba *) devp; in mvumi_isr_handler() local
1795 spin_lock_irqsave(mhba->shost->host_lock, flags); in mvumi_isr_handler()
1796 if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) { in mvumi_isr_handler()
1797 spin_unlock_irqrestore(mhba->shost->host_lock, flags); in mvumi_isr_handler()
1801 if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) { in mvumi_isr_handler()
1802 if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) in mvumi_isr_handler()
1803 mvumi_launch_events(mhba, mhba->isr_status); in mvumi_isr_handler()
1804 if (mhba->isr_status & DRBL_HANDSHAKE_ISR) { in mvumi_isr_handler()
1805 dev_warn(&mhba->pdev->dev, "enter handshake again!\n"); in mvumi_isr_handler()
1806 mvumi_handshake(mhba); in mvumi_isr_handler()
1811 if (mhba->global_isr & mhba->regs->int_comaout) in mvumi_isr_handler()
1812 mvumi_receive_ob_list_entry(mhba); in mvumi_isr_handler()
1814 mhba->global_isr = 0; in mvumi_isr_handler()
1815 mhba->isr_status = 0; in mvumi_isr_handler()
1816 if (mhba->fw_state == FW_STATE_STARTED) in mvumi_isr_handler()
1817 mvumi_handle_clob(mhba); in mvumi_isr_handler()
1818 spin_unlock_irqrestore(mhba->shost->host_lock, flags); in mvumi_isr_handler()
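
The interrupt handler follows the standard shared-IRQ discipline: take the host lock, let clear_intr() identify and acknowledge the interrupt (a nonzero return means the line fired for another device), then dispatch doorbell events and outbound completions. Condensed sketch assembled from the fragments above:

    static irqreturn_t isr(int irq, void *devp)
    {
            struct mvumi_hba *mhba = devp;
            unsigned long flags;

            spin_lock_irqsave(mhba->shost->host_lock, flags);
            if (unlikely(mhba->instancet->clear_intr(mhba) ||
                         !mhba->global_isr)) {
                    spin_unlock_irqrestore(mhba->shost->host_lock, flags);
                    return IRQ_NONE;        /* not our interrupt */
            }
            if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
                    if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
                            mvumi_launch_events(mhba, mhba->isr_status);
                    if (mhba->isr_status & DRBL_HANDSHAKE_ISR)
                            mvumi_handshake(mhba);
            }
            if (mhba->global_isr & mhba->regs->int_comaout)
                    mvumi_receive_ob_list_entry(mhba);

            mhba->global_isr = 0;
            mhba->isr_status = 0;
            if (mhba->fw_state == FW_STATE_STARTED)
                    mvumi_handle_clob(mhba);
            spin_unlock_irqrestore(mhba->shost->host_lock, flags);
            return IRQ_HANDLED;
    }
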
1822 static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba, in mvumi_send_command() argument
1830 if (unlikely(mhba->fw_state != FW_STATE_STARTED)) { in mvumi_send_command()
1831 dev_dbg(&mhba->pdev->dev, "firmware not ready.\n"); in mvumi_send_command()
1834 if (tag_is_empty(&mhba->tag_pool)) { in mvumi_send_command()
1835 dev_dbg(&mhba->pdev->dev, "no free tag.\n"); in mvumi_send_command()
1838 mvumi_get_ib_list_entry(mhba, &ib_entry); in mvumi_send_command()
1840 cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool); in mvumi_send_command()
1841 cmd->frame->request_id = mhba->io_seq++; in mvumi_send_command()
1843 mhba->tag_cmd[cmd->frame->tag] = cmd; in mvumi_send_command()
1846 if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) { in mvumi_send_command()
1860 static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) in mvumi_fire_cmd() argument
1867 list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list); in mvumi_fire_cmd()
1868 count = mhba->instancet->check_ib_list(mhba); in mvumi_fire_cmd()
1869 if (list_empty(&mhba->waiting_req_list) || !count) in mvumi_fire_cmd()
1873 cmd = list_first_entry(&mhba->waiting_req_list, in mvumi_fire_cmd()
1876 result = mvumi_send_command(mhba, cmd); in mvumi_fire_cmd()
1882 list_add(&cmd->queue_pointer, &mhba->waiting_req_list); in mvumi_fire_cmd()
1884 mvumi_send_ib_list_entry(mhba); in mvumi_fire_cmd()
1888 } while (!list_empty(&mhba->waiting_req_list) && count--); in mvumi_fire_cmd()
1891 mvumi_send_ib_list_entry(mhba); in mvumi_fire_cmd()
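
mvumi_fire_cmd() batches submissions: the new command joins waiting_req_list, up to check_ib_list() free slots are then drained, and the doorbell is rung once for the whole batch rather than once per command. Condensed sketch (the result constant's exact name is an assumption):

    static void drain_waiting_list(struct mvumi_hba *mhba)
    {
            struct mvumi_cmd *cmd;
            unsigned int sent = 0;
            unsigned int count = mhba->instancet->check_ib_list(mhba);

            if (list_empty(&mhba->waiting_req_list) || !count)
                    return;
            do {
                    cmd = list_first_entry(&mhba->waiting_req_list,
                                           struct mvumi_cmd, queue_pointer);
                    list_del_init(&cmd->queue_pointer);
                    if (mvumi_send_command(mhba, cmd) !=
                                    MV_QUEUE_COMMAND_RESULT_SENT) {
                            /* no tag or no slot: requeue at head and stop */
                            list_add(&cmd->queue_pointer,
                                     &mhba->waiting_req_list);
                            break;
                    }
                    sent++;
            } while (!list_empty(&mhba->waiting_req_list) && count--);

            if (sent > 0)
                    mvumi_send_ib_list_entry(mhba); /* one doorbell per batch */
    }
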
1898 static void mvumi_enable_intr(struct mvumi_hba *mhba) in mvumi_enable_intr() argument
1901 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_enable_intr()
1913 static void mvumi_disable_intr(struct mvumi_hba *mhba) in mvumi_disable_intr() argument
1916 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_disable_intr()
1927 struct mvumi_hba *mhba = (struct mvumi_hba *) extend; in mvumi_clear_intr() local
1929 struct mvumi_hw_regs *regs = mhba->regs; in mvumi_clear_intr()
1936 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) { in mvumi_clear_intr()
1947 status ^= mhba->regs->int_comaerr; in mvumi_clear_intr()
1961 mhba->global_isr = status; in mvumi_clear_intr()
1962 mhba->isr_status = isr_status; in mvumi_clear_intr()
1971 static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba) in mvumi_read_fw_status_reg() argument
1975 status = ioread32(mhba->regs->arm_to_pciea_drbl_reg); in mvumi_read_fw_status_reg()
1977 iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg); in mvumi_read_fw_status_reg()
2005 struct mvumi_hba *mhba; in mvumi_slave_configure() local
2008 mhba = (struct mvumi_hba *) sdev->host->hostdata; in mvumi_slave_configure()
2009 if (sdev->id >= mhba->max_target_id) in mvumi_slave_configure()
2012 mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount)); in mvumi_slave_configure()
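
mvumi_slave_configure() records each discovered target in a presence bitmap, one bit per target id, which mvumi_flush_cache() later consults so that only present devices receive a flush command:

    static int configure_target(struct scsi_device *sdev)
    {
            struct mvumi_hba *mhba = shost_priv(sdev->host);
            unsigned int bitcount = 8;      /* bits per byte of target_map */

            if (sdev->id >= mhba->max_target_id)
                    return -EINVAL;
            mhba->target_map[sdev->id / bitcount] |=
                            1 << (sdev->id % bitcount);
            return 0;
    }
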
2025 static unsigned char mvumi_build_frame(struct mvumi_hba *mhba, in mvumi_build_frame() argument
2049 dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] " in mvumi_build_frame()
2058 if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0], in mvumi_build_frame()
2083 struct mvumi_hba *mhba; in mvumi_queue_command() local
2088 mhba = (struct mvumi_hba *) shost->hostdata; in mvumi_queue_command()
2090 cmd = mvumi_get_cmd(mhba); in mvumi_queue_command()
2096 if (unlikely(mvumi_build_frame(mhba, scmd, cmd))) in mvumi_queue_command()
2101 mhba->instancet->fire_cmd(mhba, cmd); in mvumi_queue_command()
2106 mvumi_return_cmd(mhba, cmd); in mvumi_queue_command()
2116 struct mvumi_hba *mhba = shost_priv(host); in mvumi_timed_out() local
2119 spin_lock_irqsave(mhba->shost->host_lock, flags); in mvumi_timed_out()
2121 if (mhba->tag_cmd[cmd->frame->tag]) { in mvumi_timed_out()
2122 mhba->tag_cmd[cmd->frame->tag] = NULL; in mvumi_timed_out()
2123 tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag); in mvumi_timed_out()
2128 atomic_dec(&mhba->fw_outstanding); in mvumi_timed_out()
2133 dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), in mvumi_timed_out()
2137 mvumi_return_cmd(mhba, cmd); in mvumi_timed_out()
2138 spin_unlock_irqrestore(mhba->shost->host_lock, flags); in mvumi_timed_out()
2185 static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba) in mvumi_cfg_hw_reg() argument
2190 switch (mhba->pdev->device) { in mvumi_cfg_hw_reg()
2192 mhba->mmio = mhba->base_addr[0]; in mvumi_cfg_hw_reg()
2193 base = mhba->mmio; in mvumi_cfg_hw_reg()
2194 if (!mhba->regs) { in mvumi_cfg_hw_reg()
2195 mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL); in mvumi_cfg_hw_reg()
2196 if (mhba->regs == NULL) in mvumi_cfg_hw_reg()
2199 regs = mhba->regs; in mvumi_cfg_hw_reg()
2244 mhba->mmio = mhba->base_addr[2]; in mvumi_cfg_hw_reg()
2245 base = mhba->mmio; in mvumi_cfg_hw_reg()
2246 if (!mhba->regs) { in mvumi_cfg_hw_reg()
2247 mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL); in mvumi_cfg_hw_reg()
2248 if (mhba->regs == NULL) in mvumi_cfg_hw_reg()
2251 regs = mhba->regs; in mvumi_cfg_hw_reg()
2308 static int mvumi_init_fw(struct mvumi_hba *mhba) in mvumi_init_fw() argument
2312 if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) { in mvumi_init_fw()
2313 dev_err(&mhba->pdev->dev, "IO memory region busy!\n"); in mvumi_init_fw()
2316 ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr); in mvumi_init_fw()
2320 switch (mhba->pdev->device) { in mvumi_init_fw()
2322 mhba->instancet = &mvumi_instance_9143; in mvumi_init_fw()
2323 mhba->io_seq = 0; in mvumi_init_fw()
2324 mhba->max_sge = MVUMI_MAX_SG_ENTRY; in mvumi_init_fw()
2325 mhba->request_id_enabled = 1; in mvumi_init_fw()
2328 mhba->instancet = &mvumi_instance_9580; in mvumi_init_fw()
2329 mhba->io_seq = 0; in mvumi_init_fw()
2330 mhba->max_sge = MVUMI_MAX_SG_ENTRY; in mvumi_init_fw()
2333 dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n", in mvumi_init_fw()
2334 mhba->pdev->device); in mvumi_init_fw()
2335 mhba->instancet = NULL; in mvumi_init_fw()
2339 dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n", in mvumi_init_fw()
2340 mhba->pdev->device); in mvumi_init_fw()
2341 ret = mvumi_cfg_hw_reg(mhba); in mvumi_init_fw()
2343 dev_err(&mhba->pdev->dev, in mvumi_init_fw()
2348 mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev, in mvumi_init_fw()
2349 HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL); in mvumi_init_fw()
2350 if (!mhba->handshake_page) { in mvumi_init_fw()
2351 dev_err(&mhba->pdev->dev, in mvumi_init_fw()
2357 if (mvumi_start(mhba)) { in mvumi_init_fw()
2361 ret = mvumi_alloc_cmds(mhba); in mvumi_init_fw()
2368 mvumi_release_mem_resource(mhba); in mvumi_init_fw()
2369 dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE, in mvumi_init_fw()
2370 mhba->handshake_page, mhba->handshake_page_phys); in mvumi_init_fw()
2372 kfree(mhba->regs); in mvumi_init_fw()
2374 mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); in mvumi_init_fw()
2376 pci_release_regions(mhba->pdev); in mvumi_init_fw()
2385 static int mvumi_io_attach(struct mvumi_hba *mhba) in mvumi_io_attach() argument
2387 struct Scsi_Host *host = mhba->shost; in mvumi_io_attach()
2390 unsigned int max_sg = (mhba->ib_max_size - in mvumi_io_attach()
2393 host->irq = mhba->pdev->irq; in mvumi_io_attach()
2394 host->unique_id = mhba->unique_id; in mvumi_io_attach()
2395 host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; in mvumi_io_attach()
2396 host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge; in mvumi_io_attach()
2397 host->max_sectors = mhba->max_transfer_size / 512; in mvumi_io_attach()
2398 host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; in mvumi_io_attach()
2399 host->max_id = mhba->max_target_id; in mvumi_io_attach()
2402 ret = scsi_add_host(host, &mhba->pdev->dev); in mvumi_io_attach()
2404 dev_err(&mhba->pdev->dev, "scsi_add_host failed\n"); in mvumi_io_attach()
2407 mhba->fw_flag |= MVUMI_FW_ATTACH; in mvumi_io_attach()
2409 mutex_lock(&mhba->sas_discovery_mutex); in mvumi_io_attach()
2410 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) in mvumi_io_attach()
2411 ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0); in mvumi_io_attach()
2415 dev_err(&mhba->pdev->dev, "add virtual device failed\n"); in mvumi_io_attach()
2416 mutex_unlock(&mhba->sas_discovery_mutex); in mvumi_io_attach()
2420 mhba->dm_thread = kthread_create(mvumi_rescan_bus, in mvumi_io_attach()
2421 mhba, "mvumi_scanthread"); in mvumi_io_attach()
2422 if (IS_ERR(mhba->dm_thread)) { in mvumi_io_attach()
2423 dev_err(&mhba->pdev->dev, in mvumi_io_attach()
2425 ret = PTR_ERR(mhba->dm_thread); in mvumi_io_attach()
2426 mutex_unlock(&mhba->sas_discovery_mutex); in mvumi_io_attach()
2429 atomic_set(&mhba->pnp_count, 1); in mvumi_io_attach()
2430 wake_up_process(mhba->dm_thread); in mvumi_io_attach()
2432 mutex_unlock(&mhba->sas_discovery_mutex); in mvumi_io_attach()
2436 if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) in mvumi_io_attach()
2437 sdev = scsi_device_lookup(mhba->shost, 0, in mvumi_io_attach()
2438 mhba->max_target_id - 1, 0); in mvumi_io_attach()
2444 scsi_remove_host(mhba->shost); in mvumi_io_attach()
2456 struct mvumi_hba *mhba; in mvumi_probe_one() local
2471 host = scsi_host_alloc(&mvumi_template, sizeof(*mhba)); in mvumi_probe_one()
2477 mhba = shost_priv(host); in mvumi_probe_one()
2479 INIT_LIST_HEAD(&mhba->cmd_pool); in mvumi_probe_one()
2480 INIT_LIST_HEAD(&mhba->ob_data_list); in mvumi_probe_one()
2481 INIT_LIST_HEAD(&mhba->free_ob_list); in mvumi_probe_one()
2482 INIT_LIST_HEAD(&mhba->res_list); in mvumi_probe_one()
2483 INIT_LIST_HEAD(&mhba->waiting_req_list); in mvumi_probe_one()
2484 mutex_init(&mhba->device_lock); in mvumi_probe_one()
2485 INIT_LIST_HEAD(&mhba->mhba_dev_list); in mvumi_probe_one()
2486 INIT_LIST_HEAD(&mhba->shost_dev_list); in mvumi_probe_one()
2487 atomic_set(&mhba->fw_outstanding, 0); in mvumi_probe_one()
2488 init_waitqueue_head(&mhba->int_cmd_wait_q); in mvumi_probe_one()
2489 mutex_init(&mhba->sas_discovery_mutex); in mvumi_probe_one()
2491 mhba->pdev = pdev; in mvumi_probe_one()
2492 mhba->shost = host; in mvumi_probe_one()
2493 mhba->unique_id = pci_dev_id(pdev); in mvumi_probe_one()
2495 ret = mvumi_init_fw(mhba); in mvumi_probe_one()
2499 ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED, in mvumi_probe_one()
2500 "mvumi", mhba); in mvumi_probe_one()
2506 mhba->instancet->enable_intr(mhba); in mvumi_probe_one()
2507 pci_set_drvdata(pdev, mhba); in mvumi_probe_one()
2509 ret = mvumi_io_attach(mhba); in mvumi_probe_one()
2513 mvumi_backup_bar_addr(mhba); in mvumi_probe_one()
2519 mhba->instancet->disable_intr(mhba); in mvumi_probe_one()
2520 free_irq(mhba->pdev->irq, mhba); in mvumi_probe_one()
2522 mvumi_release_fw(mhba); in mvumi_probe_one()
2536 struct mvumi_hba *mhba; in mvumi_detach_one() local
2538 mhba = pci_get_drvdata(pdev); in mvumi_detach_one()
2539 if (mhba->dm_thread) { in mvumi_detach_one()
2540 kthread_stop(mhba->dm_thread); in mvumi_detach_one()
2541 mhba->dm_thread = NULL; in mvumi_detach_one()
2544 mvumi_detach_devices(mhba); in mvumi_detach_one()
2545 host = mhba->shost; in mvumi_detach_one()
2546 scsi_remove_host(mhba->shost); in mvumi_detach_one()
2547 mvumi_flush_cache(mhba); in mvumi_detach_one()
2549 mhba->instancet->disable_intr(mhba); in mvumi_detach_one()
2550 free_irq(mhba->pdev->irq, mhba); in mvumi_detach_one()
2551 mvumi_release_fw(mhba); in mvumi_detach_one()
2563 struct mvumi_hba *mhba = pci_get_drvdata(pdev); in mvumi_shutdown() local
2565 mvumi_flush_cache(mhba); in mvumi_shutdown()
2571 struct mvumi_hba *mhba = pci_get_drvdata(pdev); in mvumi_suspend() local
2573 mvumi_flush_cache(mhba); in mvumi_suspend()
2575 mhba->instancet->disable_intr(mhba); in mvumi_suspend()
2576 mvumi_unmap_pci_addr(pdev, mhba->base_addr); in mvumi_suspend()
2585 struct mvumi_hba *mhba = pci_get_drvdata(pdev); in mvumi_resume() local
2590 ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr); in mvumi_resume()
2594 if (mvumi_cfg_hw_reg(mhba)) { in mvumi_resume()
2599 mhba->mmio = mhba->base_addr[0]; in mvumi_resume()
2600 mvumi_reset(mhba); in mvumi_resume()
2602 if (mvumi_start(mhba)) { in mvumi_resume()
2607 mhba->instancet->enable_intr(mhba); in mvumi_resume()
2612 mvumi_unmap_pci_addr(pdev, mhba->base_addr); in mvumi_resume()