drivers/scsi/vmw_pvscsi.c — lines matching full-text search: +full:deep +full:- +full:touch

2  * Linux driver for VMware's para-virtualized SCSI HBA.
4 * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
56 * 1-to-1 mapping completions back to requests.
114 MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
121 MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
125 MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
129 MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
132 MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
135 MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
139 MODULE_PARM_DESC(use_req_threshold, "Use driver-based request coalescing if configured - (default=1…
151 return &(adapter->dev->dev); in pvscsi_dev()
159 end = &adapter->cmd_map[adapter->req_depth]; in pvscsi_find_context()
160 for (ctx = adapter->cmd_map; ctx < end; ctx++) in pvscsi_find_context()
161 if (ctx->cmd == cmd) in pvscsi_find_context()
172 if (list_empty(&adapter->cmd_pool)) in pvscsi_acquire_context()
175 ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list); in pvscsi_acquire_context()
176 ctx->cmd = cmd; in pvscsi_acquire_context()
177 list_del(&ctx->list); in pvscsi_acquire_context()
185 ctx->cmd = NULL; in pvscsi_release_context()
186 ctx->abort_cmp = NULL; in pvscsi_release_context()
187 list_add(&ctx->list, &adapter->cmd_pool); in pvscsi_release_context()
192 * non-zero integer. ctx always points to an entry in cmd_map array, hence
198 return ctx - adapter->cmd_map + 1; in pvscsi_map_context()
204 return &adapter->cmd_map[context - 1]; in pvscsi_get_context()
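The pair of helpers above converts a command context into the integer handle handed to the device and back: the handle is the cmd_map index plus one, so zero stays available to mean "no context". A minimal sketch of the same scheme (type and field names follow the fragments above, the rest is illustrative):

        /* Sketch: 1-based handles so that 0 can mean "invalid/none". */
        static u64 map_context(struct pvscsi_ctx *base, struct pvscsi_ctx *ctx)
        {
                return ctx - base + 1;          /* array index + 1, never zero */
        }

        static struct pvscsi_ctx *get_context(struct pvscsi_ctx *base, u64 context)
        {
                return &base[context - 1];      /* undo the +1 */
        }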
210 writel(val, adapter->mmioBase + offset); in pvscsi_reg_write()
215 return readl(adapter->mmioBase + offset); in pvscsi_reg_read()
234 if (adapter->use_msg) in pvscsi_unmask_intr()
263 cmd.target = ctx->cmd->device->id; in pvscsi_abort_cmd()
291 struct PVSCSIRingsState *s = adapter->rings_state; in pvscsi_kick_io()
293 if (!adapter->use_req_threshold || in pvscsi_kick_io()
294 s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold) in pvscsi_kick_io()
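pvscsi_kick_io() only rings the device doorbell when coalescing is disabled or enough requests are outstanding; the indices are free-running unsigned counters, so the subtraction stays correct across wraparound. A hedged sketch of the decision (the KICK_RW_IO register name is an assumption from the driver's headers):

        /* Sketch: suppress doorbell writes below the coalescing threshold. */
        static void kick_io_sketch(const struct pvscsi_adapter *adapter)
        {
                struct PVSCSIRingsState *s = adapter->rings_state;

                if (!adapter->use_req_threshold ||
                    s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold)
                        pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
        }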
335 sge = &ctx->sgl->sge[0]; in pvscsi_create_sg()
355 e->dataLen = bufflen; in pvscsi_map_buffers()
356 e->dataAddr = 0; in pvscsi_map_buffers()
365 if (segs == -ENOMEM) { in pvscsi_map_buffers()
368 return -ENOMEM; in pvscsi_map_buffers()
372 e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST; in pvscsi_map_buffers()
373 ctx->sglPA = dma_map_single(&adapter->dev->dev, in pvscsi_map_buffers()
374 ctx->sgl, SGL_SIZE, DMA_TO_DEVICE); in pvscsi_map_buffers()
375 if (dma_mapping_error(&adapter->dev->dev, ctx->sglPA)) { in pvscsi_map_buffers()
379 ctx->sglPA = 0; in pvscsi_map_buffers()
380 return -ENOMEM; in pvscsi_map_buffers()
382 e->dataAddr = ctx->sglPA; in pvscsi_map_buffers()
384 e->dataAddr = sg_dma_address(sg); in pvscsi_map_buffers()
390 ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen, in pvscsi_map_buffers()
391 cmd->sc_data_direction); in pvscsi_map_buffers()
392 if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) { in pvscsi_map_buffers()
395 return -ENOMEM; in pvscsi_map_buffers()
397 e->dataAddr = ctx->dataPA; in pvscsi_map_buffers()
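pvscsi_map_buffers() picks between two DMA shapes: multi-segment commands get a driver-built SG list whose page is itself DMA-mapped, while a single mapped segment goes straight into the descriptor. A condensed sketch of the branch, with error unwinding elided (a simplification, not the exact control flow):

        /* Sketch: SG list for many segments, direct address otherwise. */
        if (segs > 1) {
                pvscsi_create_sg(ctx, sg, segs);    /* fill ctx->sgl */
                e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
                ctx->sglPA = dma_map_single(&adapter->dev->dev, ctx->sgl,
                                            SGL_SIZE, DMA_TO_DEVICE);
                e->dataAddr = ctx->sglPA;           /* device walks the list */
        } else {
                e->dataAddr = sg_dma_address(sg);   /* single mapped segment */
        }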
410 if (cmd->sense_buffer) in pvscsi_patch_sense()
411 cmd->sense_buffer[0] = 0; in pvscsi_patch_sense()
420 cmd = ctx->cmd; in pvscsi_unmap_buffers()
428 if (ctx->sglPA) { in pvscsi_unmap_buffers()
429 dma_unmap_single(&adapter->dev->dev, ctx->sglPA, in pvscsi_unmap_buffers()
431 ctx->sglPA = 0; in pvscsi_unmap_buffers()
434 dma_unmap_single(&adapter->dev->dev, ctx->dataPA, in pvscsi_unmap_buffers()
435 bufflen, cmd->sc_data_direction); in pvscsi_unmap_buffers()
437 if (cmd->sense_buffer) in pvscsi_unmap_buffers()
438 dma_unmap_single(&adapter->dev->dev, ctx->sensePA, in pvscsi_unmap_buffers()
444 adapter->rings_state = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE, in pvscsi_allocate_rings()
445 &adapter->ringStatePA, GFP_KERNEL); in pvscsi_allocate_rings()
446 if (!adapter->rings_state) in pvscsi_allocate_rings()
447 return -ENOMEM; in pvscsi_allocate_rings()
449 adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING, in pvscsi_allocate_rings()
451 adapter->req_depth = adapter->req_pages in pvscsi_allocate_rings()
453 adapter->req_ring = dma_alloc_coherent(&adapter->dev->dev, in pvscsi_allocate_rings()
454 adapter->req_pages * PAGE_SIZE, &adapter->reqRingPA, in pvscsi_allocate_rings()
456 if (!adapter->req_ring) in pvscsi_allocate_rings()
457 return -ENOMEM; in pvscsi_allocate_rings()
459 adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING, in pvscsi_allocate_rings()
461 adapter->cmp_ring = dma_alloc_coherent(&adapter->dev->dev, in pvscsi_allocate_rings()
462 adapter->cmp_pages * PAGE_SIZE, &adapter->cmpRingPA, in pvscsi_allocate_rings()
464 if (!adapter->cmp_ring) in pvscsi_allocate_rings()
465 return -ENOMEM; in pvscsi_allocate_rings()
467 BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE)); in pvscsi_allocate_rings()
468 BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE)); in pvscsi_allocate_rings()
469 BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE)); in pvscsi_allocate_rings()
471 if (!adapter->use_msg) in pvscsi_allocate_rings()
474 adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING, in pvscsi_allocate_rings()
476 adapter->msg_ring = dma_alloc_coherent(&adapter->dev->dev, in pvscsi_allocate_rings()
477 adapter->msg_pages * PAGE_SIZE, &adapter->msgRingPA, in pvscsi_allocate_rings()
479 if (!adapter->msg_ring) in pvscsi_allocate_rings()
480 return -ENOMEM; in pvscsi_allocate_rings()
481 BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE)); in pvscsi_allocate_rings()
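All three rings plus the shared state page come from dma_alloc_coherent() and must be page-aligned, because the device is later told about them by physical page number. The request depth is simply how many descriptors fit in the ring pages; roughly (the driver uses a per-page-entries constant, this is the expanded equivalent):

        /* Sketch: ring depth = pages * descriptors-per-page. */
        adapter->req_depth = adapter->req_pages *
                             (PAGE_SIZE / sizeof(struct PVSCSIRingReqDesc));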
492 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT; in pvscsi_setup_all_rings()
493 cmd.reqRingNumPages = adapter->req_pages; in pvscsi_setup_all_rings()
494 cmd.cmpRingNumPages = adapter->cmp_pages; in pvscsi_setup_all_rings()
496 base = adapter->reqRingPA; in pvscsi_setup_all_rings()
497 for (i = 0; i < adapter->req_pages; i++) { in pvscsi_setup_all_rings()
502 base = adapter->cmpRingPA; in pvscsi_setup_all_rings()
503 for (i = 0; i < adapter->cmp_pages; i++) { in pvscsi_setup_all_rings()
508 memset(adapter->rings_state, 0, PAGE_SIZE); in pvscsi_setup_all_rings()
509 memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE); in pvscsi_setup_all_rings()
510 memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE); in pvscsi_setup_all_rings()
515 if (adapter->use_msg) { in pvscsi_setup_all_rings()
518 cmd_msg.numPages = adapter->msg_pages; in pvscsi_setup_all_rings()
520 base = adapter->msgRingPA; in pvscsi_setup_all_rings()
521 for (i = 0; i < adapter->msg_pages; i++) { in pvscsi_setup_all_rings()
525 memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE); in pvscsi_setup_all_rings()
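Ring setup tells the device where the pages live: the setup command carries the page counts plus an array of physical page numbers, filled one page at a time. A sketch of the per-ring loop (the reqRingPPNs field name is assumed from the command layout):

        /* Sketch: convert each ring page's physical address to a PPN. */
        base = adapter->reqRingPA;
        for (i = 0; i < adapter->req_pages; i++) {
                cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;  /* PPN = PA / PAGE_SIZE */
                base += PAGE_SIZE;
        }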
534 if (!sdev->tagged_supported) in pvscsi_change_queue_depth()
549 u32 btstat = e->hostStatus; in pvscsi_complete_request()
550 u32 sdstat = e->scsiStatus; in pvscsi_complete_request()
552 ctx = pvscsi_get_context(adapter, e->context); in pvscsi_complete_request()
553 cmd = ctx->cmd; in pvscsi_complete_request()
554 abort_cmp = ctx->abort_cmp; in pvscsi_complete_request()
570 cmd->result = 0; in pvscsi_complete_request()
576 cmd->result = (DID_RESET << 16); in pvscsi_complete_request()
578 cmd->result = (DID_OK << 16) | sdstat; in pvscsi_complete_request()
590 * returns zero dataLen with non-zero data - do not set in pvscsi_complete_request()
593 if (e->dataLen && (e->dataLen < scsi_bufflen(cmd))) in pvscsi_complete_request()
594 scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen); in pvscsi_complete_request()
595 cmd->result = (DID_OK << 16); in pvscsi_complete_request()
601 scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen); in pvscsi_complete_request()
602 cmd->result = (DID_ERROR << 16); in pvscsi_complete_request()
606 /* Our emulation returns this for non-connected devs */ in pvscsi_complete_request()
607 cmd->result = (DID_BAD_TARGET << 16); in pvscsi_complete_request()
621 cmd->result |= (DID_ERROR << 16); in pvscsi_complete_request()
627 cmd->result = (DID_RESET << 16); in pvscsi_complete_request()
631 cmd->result = (DID_BUS_BUSY << 16); in pvscsi_complete_request()
635 cmd->result = (DID_PARITY << 16); in pvscsi_complete_request()
639 cmd->result = (DID_ERROR << 16); in pvscsi_complete_request()
645 dev_dbg(&cmd->device->sdev_gendev, in pvscsi_complete_request()
647 cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat); in pvscsi_complete_request()
661 struct PVSCSIRingsState *s = adapter->rings_state; in pvscsi_process_completion_ring()
662 struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring; in pvscsi_process_completion_ring()
663 u32 cmp_entries = s->cmpNumEntriesLog2; in pvscsi_process_completion_ring()
665 while (s->cmpConsIdx != s->cmpProdIdx) { in pvscsi_process_completion_ring()
666 struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx & in pvscsi_process_completion_ring()
671 * Since the device emulation advances s->cmpProdIdx only after in pvscsi_process_completion_ring()
678 * to s->cmpConsIdx before the read of (*e) inside in pvscsi_process_completion_ring()
683 s->cmpConsIdx++; in pvscsi_process_completion_ring()
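The completion loop is a single-producer/single-consumer ring: the emulation advances cmpProdIdx only after filling a slot, and the driver publishes cmpConsIdx only after it is done reading the slot, exactly as the comment fragments above describe. Compiler barriers suffice because producer and consumer share coherent memory. A sketch of the ordering (function signature simplified):

        /* Sketch: SPSC ring consumption with compiler barriers. */
        while (s->cmpConsIdx != s->cmpProdIdx) {
                struct PVSCSIRingCmpDesc *e =
                        ring + (s->cmpConsIdx & MASK(cmp_entries));

                barrier();      /* read *e only after seeing cmpProdIdx */
                pvscsi_complete_request(adapter, e);
                barrier();      /* finish reading *e, then publish */
                s->cmpConsIdx++;
        }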
698 s = adapter->rings_state; in pvscsi_queue_ring()
699 sdev = cmd->device; in pvscsi_queue_ring()
700 req_entries = s->reqNumEntriesLog2; in pvscsi_queue_ring()
705 * However, we have already ruled out this possibility - we would not in pvscsi_queue_ring()
710 if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) { in pvscsi_queue_ring()
713 s->reqProdIdx, s->cmpConsIdx); in pvscsi_queue_ring()
714 return -1; in pvscsi_queue_ring()
717 e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries)); in pvscsi_queue_ring()
719 e->bus = sdev->channel; in pvscsi_queue_ring()
720 e->target = sdev->id; in pvscsi_queue_ring()
721 memset(e->lun, 0, sizeof(e->lun)); in pvscsi_queue_ring()
722 e->lun[1] = sdev->lun; in pvscsi_queue_ring()
724 if (cmd->sense_buffer) { in pvscsi_queue_ring()
725 ctx->sensePA = dma_map_single(&adapter->dev->dev, in pvscsi_queue_ring()
726 cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE, in pvscsi_queue_ring()
728 if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) { in pvscsi_queue_ring()
731 ctx->sensePA = 0; in pvscsi_queue_ring()
732 return -ENOMEM; in pvscsi_queue_ring()
734 e->senseAddr = ctx->sensePA; in pvscsi_queue_ring()
735 e->senseLen = SCSI_SENSE_BUFFERSIZE; in pvscsi_queue_ring()
737 e->senseLen = 0; in pvscsi_queue_ring()
738 e->senseAddr = 0; in pvscsi_queue_ring()
740 e->cdbLen = cmd->cmd_len; in pvscsi_queue_ring()
741 e->vcpuHint = smp_processor_id(); in pvscsi_queue_ring()
742 memcpy(e->cdb, cmd->cmnd, e->cdbLen); in pvscsi_queue_ring()
744 e->tag = SIMPLE_QUEUE_TAG; in pvscsi_queue_ring()
746 if (cmd->sc_data_direction == DMA_FROM_DEVICE) in pvscsi_queue_ring()
747 e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST; in pvscsi_queue_ring()
748 else if (cmd->sc_data_direction == DMA_TO_DEVICE) in pvscsi_queue_ring()
749 e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE; in pvscsi_queue_ring()
750 else if (cmd->sc_data_direction == DMA_NONE) in pvscsi_queue_ring()
751 e->flags = PVSCSI_FLAG_CMD_DIR_NONE; in pvscsi_queue_ring()
753 e->flags = 0; in pvscsi_queue_ring()
756 if (cmd->sense_buffer) { in pvscsi_queue_ring()
757 dma_unmap_single(&adapter->dev->dev, ctx->sensePA, in pvscsi_queue_ring()
760 ctx->sensePA = 0; in pvscsi_queue_ring()
762 return -ENOMEM; in pvscsi_queue_ring()
765 e->context = pvscsi_map_context(adapter, ctx); in pvscsi_queue_ring()
769 s->reqProdIdx++; in pvscsi_queue_ring()
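Ring indices throughout the driver are free-running u32 counters: a slot is chosen by masking with the power-of-two ring size, and fullness is the plain difference of producer and consumer. The MASK macro as defined in the driver captures both idioms:

        #define MASK(n) ((1 << (n)) - 1)   /* n-bit mask; ring size is 2^n */

        /* Slot selection: the mask wraps the free-running index. */
        e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));

        /* Fullness: produced minus consumed, valid across wraparound. */
        if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries)
                return -1;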
776 struct Scsi_Host *host = cmd->device->host; in pvscsi_queue_lck()
782 spin_lock_irqsave(&adapter->hw_lock, flags); in pvscsi_queue_lck()
788 spin_unlock_irqrestore(&adapter->hw_lock, flags); in pvscsi_queue_lck()
792 op = cmd->cmnd[0]; in pvscsi_queue_lck()
794 dev_dbg(&cmd->device->sdev_gendev, in pvscsi_queue_lck()
797 spin_unlock_irqrestore(&adapter->hw_lock, flags); in pvscsi_queue_lck()
808 struct pvscsi_adapter *adapter = shost_priv(cmd->device->host); in pvscsi_abort()
816 adapter->host->host_no, cmd); in pvscsi_abort()
818 spin_lock_irqsave(&adapter->hw_lock, flags); in pvscsi_abort()
821 * Poll the completion ring first - we might be trying to abort in pvscsi_abort()
840 ctx->abort_cmp = &abort_cmp; in pvscsi_abort()
843 spin_unlock_irqrestore(&adapter->hw_lock, flags); in pvscsi_abort()
846 spin_lock_irqsave(&adapter->hw_lock, flags); in pvscsi_abort()
853 ctx->abort_cmp = NULL; in pvscsi_abort()
864 cmd->result = (DID_ABORT << 16); in pvscsi_abort()
868 spin_unlock_irqrestore(&adapter->hw_lock, flags); in pvscsi_abort()
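The abort handler parks an on-stack struct completion in the command's context, sends the abort, then drops the hardware lock and waits; the completion path fires it when the device returns the aborted command. A hedged sketch of the handshake (the 2000 ms timeout and the result variable are illustrative):

        /* Sketch: synchronize abort with the command's completion. */
        DECLARE_COMPLETION_ONSTACK(abort_cmp);

        ctx->abort_cmp = &abort_cmp;    /* completion path will complete() this */
        pvscsi_abort_cmd(adapter, ctx);
        spin_unlock_irqrestore(&adapter->hw_lock, flags);

        /* Completion side: if (ctx->abort_cmp) complete(ctx->abort_cmp); */
        if (!wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000)))
                result = FAILED;        /* device never returned the command */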
875 * destroys the 1-1 mapping between context field passed to emulation and our
882 for (i = 0; i < adapter->req_depth; i++) { in pvscsi_reset_all()
883 struct pvscsi_ctx *ctx = &adapter->cmd_map[i]; in pvscsi_reset_all()
884 struct scsi_cmnd *cmd = ctx->cmd; in pvscsi_reset_all()
891 cmd->result = (DID_RESET << 16); in pvscsi_reset_all()
899 struct Scsi_Host *host = cmd->device->host; in pvscsi_host_reset()
906 spin_lock_irqsave(&adapter->hw_lock, flags); in pvscsi_host_reset()
908 use_msg = adapter->use_msg; in pvscsi_host_reset()
911 adapter->use_msg = false; in pvscsi_host_reset()
912 spin_unlock_irqrestore(&adapter->hw_lock, flags); in pvscsi_host_reset()
918 flush_workqueue(adapter->workqueue); in pvscsi_host_reset()
919 spin_lock_irqsave(&adapter->hw_lock, flags); in pvscsi_host_reset()
936 * not touch the ring memory after reset, so the immediately pre-reset in pvscsi_host_reset()
942 adapter->use_msg = use_msg; in pvscsi_host_reset()
946 spin_unlock_irqrestore(&adapter->hw_lock, flags); in pvscsi_host_reset()
953 struct Scsi_Host *host = cmd->device->host; in pvscsi_bus_reset()
965 spin_lock_irqsave(&adapter->hw_lock, flags); in pvscsi_bus_reset()
971 spin_unlock_irqrestore(&adapter->hw_lock, flags); in pvscsi_bus_reset()
978 struct Scsi_Host *host = cmd->device->host; in pvscsi_device_reset()
983 host->host_no, cmd->device->id); in pvscsi_device_reset()
990 spin_lock_irqsave(&adapter->hw_lock, flags); in pvscsi_device_reset()
993 ll_device_reset(adapter, cmd->device->id); in pvscsi_device_reset()
996 spin_unlock_irqrestore(&adapter->hw_lock, flags); in pvscsi_device_reset()
1009 "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev, in pvscsi_info()
1010 adapter->req_pages, adapter->cmp_pages, adapter->msg_pages, in pvscsi_info()
1022 .this_id = -1,
1036 struct PVSCSIRingsState *s = adapter->rings_state; in pvscsi_process_msg()
1037 struct Scsi_Host *host = adapter->host; in pvscsi_process_msg()
1040 printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u)\n", in pvscsi_process_msg()
1041 e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2); in pvscsi_process_msg()
1045 if (e->type == PVSCSI_MSG_DEV_ADDED) { in pvscsi_process_msg()
1051 desc->bus, desc->target, desc->lun[1]); in pvscsi_process_msg()
1056 sdev = scsi_device_lookup(host, desc->bus, desc->target, in pvscsi_process_msg()
1057 desc->lun[1]); in pvscsi_process_msg()
1062 scsi_add_device(adapter->host, desc->bus, in pvscsi_process_msg()
1063 desc->target, desc->lun[1]); in pvscsi_process_msg()
1066 } else if (e->type == PVSCSI_MSG_DEV_REMOVED) { in pvscsi_process_msg()
1072 desc->bus, desc->target, desc->lun[1]); in pvscsi_process_msg()
1077 sdev = scsi_device_lookup(host, desc->bus, desc->target, in pvscsi_process_msg()
1078 desc->lun[1]); in pvscsi_process_msg()
1085 desc->bus, desc->target, desc->lun[1]); in pvscsi_process_msg()
1093 struct PVSCSIRingsState *s = adapter->rings_state; in pvscsi_msg_pending()
1095 return s->msgProdIdx != s->msgConsIdx; in pvscsi_msg_pending()
1100 struct PVSCSIRingsState *s = adapter->rings_state; in pvscsi_process_msg_ring()
1101 struct PVSCSIRingMsgDesc *ring = adapter->msg_ring; in pvscsi_process_msg_ring()
1102 u32 msg_entries = s->msgNumEntriesLog2; in pvscsi_process_msg_ring()
1105 struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx & in pvscsi_process_msg_ring()
1111 s->msgConsIdx++; in pvscsi_process_msg_ring()
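Message-ring processing drives SCSI hot-plug: a DEV_ADDED message triggers scsi_add_device() unless the device is already known, and DEV_REMOVED looks the device up and removes it. A condensed sketch of the two cases (these are the standard midlayer calls visible in the fragments above):

        /* Sketch: translate device messages into SCSI hotplug calls. */
        if (e->type == PVSCSI_MSG_DEV_ADDED) {
                sdev = scsi_device_lookup(host, desc->bus, desc->target,
                                          desc->lun[1]);
                if (sdev)
                        scsi_device_put(sdev);          /* already known */
                else
                        scsi_add_device(host, desc->bus, desc->target,
                                        desc->lun[1]);
        } else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
                sdev = scsi_device_lookup(host, desc->bus, desc->target,
                                          desc->lun[1]);
                if (sdev) {
                        scsi_remove_device(sdev);
                        scsi_device_put(sdev);
                }
        }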
1134 if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1) in pvscsi_setup_msg_workqueue()
1138 "vmw_pvscsi_wq_%u", adapter->host->host_no); in pvscsi_setup_msg_workqueue()
1140 adapter->workqueue = create_singlethread_workqueue(name); in pvscsi_setup_msg_workqueue()
1141 if (!adapter->workqueue) { in pvscsi_setup_msg_workqueue()
1145 INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler); in pvscsi_setup_msg_workqueue()
1161 if (val == -1) { in pvscsi_setup_req_threshold()
1183 spin_lock_irqsave(&adapter->hw_lock, flags); in pvscsi_isr()
1185 if (adapter->use_msg && pvscsi_msg_pending(adapter)) in pvscsi_isr()
1186 queue_work(adapter->workqueue, &adapter->work); in pvscsi_isr()
1187 spin_unlock_irqrestore(&adapter->hw_lock, flags); in pvscsi_isr()
1205 struct pvscsi_ctx *ctx = adapter->cmd_map; in pvscsi_free_sgls()
1208 for (i = 0; i < adapter->req_depth; ++i, ++ctx) in pvscsi_free_sgls()
1209 free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE)); in pvscsi_free_sgls()
1214 free_irq(pci_irq_vector(adapter->dev, 0), adapter); in pvscsi_shutdown_intr()
1215 pci_free_irq_vectors(adapter->dev); in pvscsi_shutdown_intr()
1220 if (adapter->workqueue) in pvscsi_release_resources()
1221 destroy_workqueue(adapter->workqueue); in pvscsi_release_resources()
1223 if (adapter->mmioBase) in pvscsi_release_resources()
1224 pci_iounmap(adapter->dev, adapter->mmioBase); in pvscsi_release_resources()
1226 pci_release_regions(adapter->dev); in pvscsi_release_resources()
1228 if (adapter->cmd_map) { in pvscsi_release_resources()
1230 kfree(adapter->cmd_map); in pvscsi_release_resources()
1233 if (adapter->rings_state) in pvscsi_release_resources()
1234 dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, in pvscsi_release_resources()
1235 adapter->rings_state, adapter->ringStatePA); in pvscsi_release_resources()
1237 if (adapter->req_ring) in pvscsi_release_resources()
1238 dma_free_coherent(&adapter->dev->dev, in pvscsi_release_resources()
1239 adapter->req_pages * PAGE_SIZE, in pvscsi_release_resources()
1240 adapter->req_ring, adapter->reqRingPA); in pvscsi_release_resources()
1242 if (adapter->cmp_ring) in pvscsi_release_resources()
1243 dma_free_coherent(&adapter->dev->dev, in pvscsi_release_resources()
1244 adapter->cmp_pages * PAGE_SIZE, in pvscsi_release_resources()
1245 adapter->cmp_ring, adapter->cmpRingPA); in pvscsi_release_resources()
1247 if (adapter->msg_ring) in pvscsi_release_resources()
1248 dma_free_coherent(&adapter->dev->dev, in pvscsi_release_resources()
1249 adapter->msg_pages * PAGE_SIZE, in pvscsi_release_resources()
1250 adapter->msg_ring, adapter->msgRingPA); in pvscsi_release_resources()
1258 * Dynamic allocation can fail, and we can't go deep into the memory
1261 * in that case because we can't get an allocation - the I/O could be
1271 ctx = adapter->cmd_map; in pvscsi_allocate_sg()
1274 for (i = 0; i < adapter->req_depth; ++i, ++ctx) { in pvscsi_allocate_sg()
1275 ctx->sgl = (void *)__get_free_pages(GFP_KERNEL, in pvscsi_allocate_sg()
1277 ctx->sglPA = 0; in pvscsi_allocate_sg()
1278 BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE)); in pvscsi_allocate_sg()
1279 if (!ctx->sgl) { in pvscsi_allocate_sg()
1280 for (; i >= 0; --i, --ctx) { in pvscsi_allocate_sg()
1281 free_pages((unsigned long)ctx->sgl, in pvscsi_allocate_sg()
1283 ctx->sgl = NULL; in pvscsi_allocate_sg()
1285 return -ENOMEM; in pvscsi_allocate_sg()
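Per the comment fragments above (the "can't go deep into the memory" rationale), SG lists are preallocated at probe time so the I/O path never needs a fresh allocation while the system may be paging out over this very adapter. A sketch of the allocate-or-unwind loop (unwinding tidied slightly relative to the fragments):

        /* Sketch: preallocate one page-aligned SGL per request slot,
         * unwinding everything on the first failure. */
        for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
                ctx->sgl = (void *)__get_free_pages(GFP_KERNEL,
                                                    get_order(SGL_SIZE));
                if (!ctx->sgl) {
                        while (--i >= 0) {
                                --ctx;
                                free_pages((unsigned long)ctx->sgl,
                                           get_order(SGL_SIZE));
                                ctx->sgl = NULL;
                        }
                        return -ENOMEM;
                }
        }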
1307 config_page = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE, in pvscsi_get_max_targets()
1327 header->hostStatus = BTSTAT_INVPARAM; in pvscsi_get_max_targets()
1328 header->scsiStatus = SDSTAT_CHECK; in pvscsi_get_max_targets()
1332 if (header->hostStatus == BTSTAT_SUCCESS && in pvscsi_get_max_targets()
1333 header->scsiStatus == SDSTAT_GOOD) { in pvscsi_get_max_targets()
1337 numPhys = config->numPhys; in pvscsi_get_max_targets()
1340 header->hostStatus, header->scsiStatus); in pvscsi_get_max_targets()
1341 dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, config_page, in pvscsi_get_max_targets()
1357 error = -ENODEV; in pvscsi_probe()
1362 if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { in pvscsi_probe()
1364 } else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) { in pvscsi_probe()
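Probe negotiates the DMA mask up front: dma_set_mask_and_coherent() returns 0 when a mask is accepted, so the "!" reads as "this mask works". The idiom, with an illustrative error label:

        /* Sketch: prefer a 64-bit DMA mask, fall back to 32-bit. */
        if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
                /* 64-bit DMA accepted */
        } else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
                /* 32-bit DMA accepted */
        } else {
                goto out_disable_device;        /* no usable mask */
        }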
1378 adapter->dev = pdev; in pvscsi_probe()
1379 adapter->rev = pdev->revision; in pvscsi_probe()
1402 adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE); in pvscsi_probe()
1404 if (!adapter->mmioBase) { in pvscsi_probe()
1448 adapter->dev = pdev; in pvscsi_probe()
1449 adapter->host = host; in pvscsi_probe()
1453 adapter->rev = adapter_temp.rev; in pvscsi_probe()
1454 adapter->mmioBase = adapter_temp.mmioBase; in pvscsi_probe()
1456 spin_lock_init(&adapter->hw_lock); in pvscsi_probe()
1457 host->max_channel = 0; in pvscsi_probe()
1458 host->max_lun = 1; in pvscsi_probe()
1459 host->max_cmd_len = 16; in pvscsi_probe()
1460 host->max_id = max_id; in pvscsi_probe()
1466 adapter->use_msg = pvscsi_setup_msg_workqueue(adapter); in pvscsi_probe()
1480 adapter->cmd_map = kcalloc(adapter->req_depth, in pvscsi_probe()
1482 if (!adapter->cmd_map) { in pvscsi_probe()
1484 error = -ENOMEM; in pvscsi_probe()
1488 INIT_LIST_HEAD(&adapter->cmd_pool); in pvscsi_probe()
1489 for (i = 0; i < adapter->req_depth; i++) { in pvscsi_probe()
1490 struct pvscsi_ctx *ctx = adapter->cmd_map + i; in pvscsi_probe()
1491 list_add(&ctx->list, &adapter->cmd_pool); in pvscsi_probe()
1505 error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag); in pvscsi_probe()
1509 adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true); in pvscsi_probe()
1510 printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n", in pvscsi_probe()
1511 adapter->use_req_threshold ? "en" : "dis"); in pvscsi_probe()
1513 if (adapter->dev->msix_enabled || adapter->dev->msi_enabled) { in pvscsi_probe()
1515 adapter->dev->msix_enabled ? "-X" : ""); in pvscsi_probe()
1530 error = scsi_add_host(host, &pdev->dev); in pvscsi_probe()
1537 dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n", in pvscsi_probe()
1538 adapter->rev, host->host_no); in pvscsi_probe()
1567 if (adapter->workqueue) in __pvscsi_shutdown()
1568 flush_workqueue(adapter->workqueue); in __pvscsi_shutdown()
1610 pr_info("%s - version %s\n", in pvscsi_init()