Lines Matching +full:sig +full:dir +full:cmd (drivers/nvme/host/rdma.c)

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
14 #include <linux/blk-mq.h>
15 #include <linux/blk-integrity.h>
25 #include <linux/nvme-rdma.h>
161 return queue - queue->ctrl->queues; in nvme_rdma_queue_idx()
167 queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_rdma_poll_queue()
168 queue->ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_poll_queue()
173 return queue->cmnd_capsule_len - sizeof(struct nvme_command); in nvme_rdma_inline_data_size()
177 size_t capsule_size, enum dma_data_direction dir) in nvme_rdma_free_qe() argument
179 ib_dma_unmap_single(ibdev, qe->dma, capsule_size, dir); in nvme_rdma_free_qe()
180 kfree(qe->data); in nvme_rdma_free_qe()
184 size_t capsule_size, enum dma_data_direction dir) in nvme_rdma_alloc_qe() argument
186 qe->data = kzalloc(capsule_size, GFP_KERNEL); in nvme_rdma_alloc_qe()
187 if (!qe->data) in nvme_rdma_alloc_qe()
188 return -ENOMEM; in nvme_rdma_alloc_qe()
190 qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir); in nvme_rdma_alloc_qe()
191 if (ib_dma_mapping_error(ibdev, qe->dma)) { in nvme_rdma_alloc_qe()
192 kfree(qe->data); in nvme_rdma_alloc_qe()
193 qe->data = NULL; in nvme_rdma_alloc_qe()
194 return -ENOMEM; in nvme_rdma_alloc_qe()
202 size_t capsule_size, enum dma_data_direction dir) in nvme_rdma_free_ring() argument
207 nvme_rdma_free_qe(ibdev, &ring[i], capsule_size, dir); in nvme_rdma_free_ring()
213 enum dma_data_direction dir) in nvme_rdma_alloc_ring() argument
225 * will issue error recovery and queue re-creation. in nvme_rdma_alloc_ring()
228 if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir)) in nvme_rdma_alloc_ring()
235 nvme_rdma_free_ring(ibdev, ring, i, capsule_size, dir); in nvme_rdma_alloc_ring()
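The ring allocator above unwinds only the elements that were successfully set up before the failure: note that line 235 frees exactly i entries. A minimal standalone sketch of the same partial-failure idiom, with elem_init()/elem_fini() as hypothetical stand-ins for nvme_rdma_alloc_qe()/nvme_rdma_free_qe():

	/* Roll back elements [0, i) when element i fails to initialize. */
	static int ring_init(struct elem *ring, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			if (elem_init(&ring[i]))
				goto out_unwind;
		}
		return 0;

	out_unwind:
		while (--i >= 0)
			elem_fini(&ring[i]);
		return -ENOMEM;
	}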
242 ib_event_msg(event->event), event->event); in nvme_rdma_qp_event()
250 ret = wait_for_completion_interruptible(&queue->cm_done); in nvme_rdma_wait_for_cm()
253 WARN_ON_ONCE(queue->cm_error > 0); in nvme_rdma_wait_for_cm()
254 return queue->cm_error; in nvme_rdma_wait_for_cm()
259 struct nvme_rdma_device *dev = queue->device; in nvme_rdma_create_qp()
266 init_attr.cap.max_send_wr = factor * queue->queue_size + 1; in nvme_rdma_create_qp()
268 init_attr.cap.max_recv_wr = queue->queue_size + 1; in nvme_rdma_create_qp()
270 init_attr.cap.max_send_sge = 1 + dev->num_inline_segments; in nvme_rdma_create_qp()
273 init_attr.send_cq = queue->ib_cq; in nvme_rdma_create_qp()
274 init_attr.recv_cq = queue->ib_cq; in nvme_rdma_create_qp()
275 if (queue->pi_support) in nvme_rdma_create_qp()
279 ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr); in nvme_rdma_create_qp()
281 queue->qp = queue->cm_id->qp; in nvme_rdma_create_qp()
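Collected, the fields above populate a struct ib_qp_init_attr before rdma_create_qp() attaches the QP to the CM ID. A minimal sketch, with factor, queue_size, num_inline_segments, ib_cq, pd and cm_id standing in for the queue's own fields (the +1 on each work-request count plausibly leaves room for the drain WR used by ib_drain_qp()):

	struct ib_qp_init_attr init_attr = {};
	int ret;

	init_attr.event_handler = nvme_rdma_qp_event;
	init_attr.cap.max_send_wr = factor * queue_size + 1;	/* +1 for drain */
	init_attr.cap.max_recv_wr = queue_size + 1;		/* +1 for drain */
	init_attr.cap.max_recv_sge = 1;
	init_attr.cap.max_send_sge = 1 + num_inline_segments;
	init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	init_attr.qp_type = IB_QPT_RC;
	init_attr.send_cq = ib_cq;
	init_attr.recv_cq = ib_cq;
	if (pi_support)
		init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;	/* T10-PI offload */

	ret = rdma_create_qp(cm_id, pd, &init_attr);
	if (!ret)
		qp = cm_id->qp;		/* the QP lives on the CM ID, as line 281 shows */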
290 kfree(req->sqe.data); in nvme_rdma_exit_request()
297 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data); in nvme_rdma_init_request()
299 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_rdma_init_request()
300 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; in nvme_rdma_init_request()
302 nvme_req(rq)->ctrl = &ctrl->ctrl; in nvme_rdma_init_request()
303 req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL); in nvme_rdma_init_request()
304 if (!req->sqe.data) in nvme_rdma_init_request()
305 return -ENOMEM; in nvme_rdma_init_request()
308 if (queue->pi_support) in nvme_rdma_init_request()
309 req->metadata_sgl = (void *)nvme_req(rq) + in nvme_rdma_init_request()
313 req->queue = queue; in nvme_rdma_init_request()
314 nvme_req(rq)->cmd = req->sqe.data; in nvme_rdma_init_request()
323 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_rdma_init_hctx()
325 BUG_ON(hctx_idx >= ctrl->ctrl.queue_count); in nvme_rdma_init_hctx()
327 hctx->driver_data = queue; in nvme_rdma_init_hctx()
335 struct nvme_rdma_queue *queue = &ctrl->queues[0]; in nvme_rdma_init_admin_hctx()
339 hctx->driver_data = queue; in nvme_rdma_init_admin_hctx()
349 list_del(&ndev->entry); in nvme_rdma_free_dev()
352 ib_dealloc_pd(ndev->pd); in nvme_rdma_free_dev()
358 kref_put(&dev->ref, nvme_rdma_free_dev); in nvme_rdma_dev_put()
363 return kref_get_unless_zero(&dev->ref); in nvme_rdma_dev_get()
373 if (ndev->dev->node_guid == cm_id->device->node_guid && in nvme_rdma_find_get_device()
382 ndev->dev = cm_id->device; in nvme_rdma_find_get_device()
383 kref_init(&ndev->ref); in nvme_rdma_find_get_device()
385 ndev->pd = ib_alloc_pd(ndev->dev, in nvme_rdma_find_get_device()
387 if (IS_ERR(ndev->pd)) in nvme_rdma_find_get_device()
390 if (!(ndev->dev->attrs.device_cap_flags & in nvme_rdma_find_get_device()
392 dev_err(&ndev->dev->dev, in nvme_rdma_find_get_device()
397 ndev->num_inline_segments = min(NVME_RDMA_MAX_INLINE_SEGMENTS, in nvme_rdma_find_get_device()
398 ndev->dev->attrs.max_send_sge - 1); in nvme_rdma_find_get_device()
399 list_add(&ndev->entry, &device_list); in nvme_rdma_find_get_device()
405 ib_dealloc_pd(ndev->pd); in nvme_rdma_find_get_device()
416 ib_free_cq(queue->ib_cq); in nvme_rdma_free_cq()
418 ib_cq_pool_put(queue->ib_cq, queue->cq_size); in nvme_rdma_free_cq()
426 if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags)) in nvme_rdma_destroy_queue_ib()
429 dev = queue->device; in nvme_rdma_destroy_queue_ib()
430 ibdev = dev->dev; in nvme_rdma_destroy_queue_ib()
432 if (queue->pi_support) in nvme_rdma_destroy_queue_ib()
433 ib_mr_pool_destroy(queue->qp, &queue->qp->sig_mrs); in nvme_rdma_destroy_queue_ib()
434 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); in nvme_rdma_destroy_queue_ib()
441 ib_destroy_qp(queue->qp); in nvme_rdma_destroy_queue_ib()
444 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, in nvme_rdma_destroy_queue_ib()
455 max_page_list_len = ibdev->attrs.max_pi_fast_reg_page_list_len; in nvme_rdma_get_max_fr_pages()
457 max_page_list_len = ibdev->attrs.max_fast_reg_page_list_len; in nvme_rdma_get_max_fr_pages()
459 return min_t(u32, NVME_RDMA_MAX_SEGMENTS, max_page_list_len - 1); in nvme_rdma_get_max_fr_pages()
471 comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors; in nvme_rdma_create_cq()
475 queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size, in nvme_rdma_create_cq()
478 queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size, in nvme_rdma_create_cq()
481 if (IS_ERR(queue->ib_cq)) { in nvme_rdma_create_cq()
482 ret = PTR_ERR(queue->ib_cq); in nvme_rdma_create_cq()
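Lines 475 and 478 are the two arms of the CQ allocation choice: a polled queue gets a dedicated CQ so the block layer can reap completions directly, while interrupt-driven queues draw from the device-wide CQ pool. A sketch of the branch, assuming the conventional poll contexts (IB_POLL_DIRECT for the dedicated CQ, IB_POLL_SOFTIRQ for the pooled one):

	if (nvme_rdma_poll_queue(queue))
		queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,
					   comp_vector, IB_POLL_DIRECT);
	else
		queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,
					      comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(queue->ib_cq))
		return PTR_ERR(queue->ib_cq);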
496 queue->device = nvme_rdma_find_get_device(queue->cm_id); in nvme_rdma_create_queue_ib()
497 if (!queue->device) { in nvme_rdma_create_queue_ib()
498 dev_err(queue->cm_id->device->dev.parent, in nvme_rdma_create_queue_ib()
500 return -ECONNREFUSED; in nvme_rdma_create_queue_ib()
502 ibdev = queue->device->dev; in nvme_rdma_create_queue_ib()
505 queue->cq_size = cq_factor * queue->queue_size + 1; in nvme_rdma_create_queue_ib()
515 queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size, in nvme_rdma_create_queue_ib()
517 if (!queue->rsp_ring) { in nvme_rdma_create_queue_ib()
518 ret = -ENOMEM; in nvme_rdma_create_queue_ib()
527 pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev, queue->pi_support) + 1; in nvme_rdma_create_queue_ib()
528 ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs, in nvme_rdma_create_queue_ib()
529 queue->queue_size, in nvme_rdma_create_queue_ib()
533 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_create_queue_ib()
535 queue->queue_size, nvme_rdma_queue_idx(queue)); in nvme_rdma_create_queue_ib()
539 if (queue->pi_support) { in nvme_rdma_create_queue_ib()
540 ret = ib_mr_pool_init(queue->qp, &queue->qp->sig_mrs, in nvme_rdma_create_queue_ib()
541 queue->queue_size, IB_MR_TYPE_INTEGRITY, in nvme_rdma_create_queue_ib()
544 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_create_queue_ib()
546 queue->queue_size, nvme_rdma_queue_idx(queue)); in nvme_rdma_create_queue_ib()
551 set_bit(NVME_RDMA_Q_TR_READY, &queue->flags); in nvme_rdma_create_queue_ib()
556 ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs); in nvme_rdma_create_queue_ib()
558 nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size, in nvme_rdma_create_queue_ib()
561 rdma_destroy_qp(queue->cm_id); in nvme_rdma_create_queue_ib()
565 nvme_rdma_dev_put(queue->device); in nvme_rdma_create_queue_ib()
576 queue = &ctrl->queues[idx]; in nvme_rdma_alloc_queue()
577 mutex_init(&queue->queue_lock); in nvme_rdma_alloc_queue()
578 queue->ctrl = ctrl; in nvme_rdma_alloc_queue()
579 if (idx && ctrl->ctrl.max_integrity_segments) in nvme_rdma_alloc_queue()
580 queue->pi_support = true; in nvme_rdma_alloc_queue()
582 queue->pi_support = false; in nvme_rdma_alloc_queue()
583 init_completion(&queue->cm_done); in nvme_rdma_alloc_queue()
586 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; in nvme_rdma_alloc_queue()
588 queue->cmnd_capsule_len = sizeof(struct nvme_command); in nvme_rdma_alloc_queue()
590 queue->queue_size = queue_size; in nvme_rdma_alloc_queue()
592 queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue, in nvme_rdma_alloc_queue()
594 if (IS_ERR(queue->cm_id)) { in nvme_rdma_alloc_queue()
595 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_queue()
596 "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id)); in nvme_rdma_alloc_queue()
597 ret = PTR_ERR(queue->cm_id); in nvme_rdma_alloc_queue()
601 if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) in nvme_rdma_alloc_queue()
602 src_addr = (struct sockaddr *)&ctrl->src_addr; in nvme_rdma_alloc_queue()
604 queue->cm_error = -ETIMEDOUT; in nvme_rdma_alloc_queue()
605 ret = rdma_resolve_addr(queue->cm_id, src_addr, in nvme_rdma_alloc_queue()
606 (struct sockaddr *)&ctrl->addr, in nvme_rdma_alloc_queue()
609 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_queue()
616 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_queue()
621 set_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags); in nvme_rdma_alloc_queue()
626 rdma_destroy_id(queue->cm_id); in nvme_rdma_alloc_queue()
629 mutex_destroy(&queue->queue_lock); in nvme_rdma_alloc_queue()
635 rdma_disconnect(queue->cm_id); in __nvme_rdma_stop_queue()
636 ib_drain_qp(queue->qp); in __nvme_rdma_stop_queue()
641 if (!test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) in nvme_rdma_stop_queue()
644 mutex_lock(&queue->queue_lock); in nvme_rdma_stop_queue()
645 if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) in nvme_rdma_stop_queue()
647 mutex_unlock(&queue->queue_lock); in nvme_rdma_stop_queue()
652 if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) in nvme_rdma_free_queue()
655 rdma_destroy_id(queue->cm_id); in nvme_rdma_free_queue()
657 mutex_destroy(&queue->queue_lock); in nvme_rdma_free_queue()
664 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_rdma_free_io_queues()
665 nvme_rdma_free_queue(&ctrl->queues[i]); in nvme_rdma_free_io_queues()
672 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_rdma_stop_io_queues()
673 nvme_rdma_stop_queue(&ctrl->queues[i]); in nvme_rdma_stop_io_queues()
678 struct nvme_rdma_queue *queue = &ctrl->queues[idx]; in nvme_rdma_start_queue()
682 ret = nvmf_connect_io_queue(&ctrl->ctrl, idx); in nvme_rdma_start_queue()
684 ret = nvmf_connect_admin_queue(&ctrl->ctrl); in nvme_rdma_start_queue()
687 set_bit(NVME_RDMA_Q_LIVE, &queue->flags); in nvme_rdma_start_queue()
689 if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) in nvme_rdma_start_queue()
691 dev_info(ctrl->ctrl.device, in nvme_rdma_start_queue()
711 for (i--; i >= first; i--) in nvme_rdma_start_io_queues()
712 nvme_rdma_stop_queue(&ctrl->queues[i]); in nvme_rdma_start_io_queues()
718 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_rdma_alloc_io_queues()
723 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); in nvme_rdma_alloc_io_queues()
728 dev_err(ctrl->ctrl.device, in nvme_rdma_alloc_io_queues()
730 return -ENOMEM; in nvme_rdma_alloc_io_queues()
733 ctrl->ctrl.queue_count = nr_io_queues + 1; in nvme_rdma_alloc_io_queues()
734 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_io_queues()
737 nvmf_set_io_queues(opts, nr_io_queues, ctrl->io_queues); in nvme_rdma_alloc_io_queues()
738 for (i = 1; i < ctrl->ctrl.queue_count; i++) { in nvme_rdma_alloc_io_queues()
740 ctrl->ctrl.sqsize + 1); in nvme_rdma_alloc_io_queues()
748 for (i--; i >= 1; i--) in nvme_rdma_alloc_io_queues()
749 nvme_rdma_free_queue(&ctrl->queues[i]); in nvme_rdma_alloc_io_queues()
759 if (ctrl->max_integrity_segments) in nvme_rdma_alloc_tag_set()
763 return nvme_alloc_io_tag_set(ctrl, &to_rdma_ctrl(ctrl)->tag_set, in nvme_rdma_alloc_tag_set()
765 ctrl->opts->nr_poll_queues ? HCTX_MAX_TYPES : 2, in nvme_rdma_alloc_tag_set()
771 if (ctrl->async_event_sqe.data) { in nvme_rdma_destroy_admin_queue()
772 cancel_work_sync(&ctrl->ctrl.async_event_work); in nvme_rdma_destroy_admin_queue()
773 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, in nvme_rdma_destroy_admin_queue()
775 ctrl->async_event_sqe.data = NULL; in nvme_rdma_destroy_admin_queue()
777 nvme_rdma_free_queue(&ctrl->queues[0]); in nvme_rdma_destroy_admin_queue()
790 ctrl->device = ctrl->queues[0].device; in nvme_rdma_configure_admin_queue()
791 ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev); in nvme_rdma_configure_admin_queue()
793 /* T10-PI support */ in nvme_rdma_configure_admin_queue()
794 if (ctrl->device->dev->attrs.kernel_cap_flags & in nvme_rdma_configure_admin_queue()
798 ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev, in nvme_rdma_configure_admin_queue()
804 * error recovery and queue re-creation. in nvme_rdma_configure_admin_queue()
806 error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe, in nvme_rdma_configure_admin_queue()
812 error = nvme_alloc_admin_tag_set(&ctrl->ctrl, in nvme_rdma_configure_admin_queue()
813 &ctrl->admin_tag_set, &nvme_rdma_admin_mq_ops, in nvme_rdma_configure_admin_queue()
825 error = nvme_enable_ctrl(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
829 ctrl->ctrl.max_segments = ctrl->max_fr_pages; in nvme_rdma_configure_admin_queue()
830 ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9); in nvme_rdma_configure_admin_queue()
832 ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages; in nvme_rdma_configure_admin_queue()
834 ctrl->ctrl.max_integrity_segments = 0; in nvme_rdma_configure_admin_queue()
836 nvme_unquiesce_admin_queue(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
838 error = nvme_init_ctrl_finish(&ctrl->ctrl, false); in nvme_rdma_configure_admin_queue()
845 nvme_quiesce_admin_queue(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
846 blk_sync_queue(ctrl->ctrl.admin_q); in nvme_rdma_configure_admin_queue()
848 nvme_rdma_stop_queue(&ctrl->queues[0]); in nvme_rdma_configure_admin_queue()
849 nvme_cancel_admin_tagset(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
852 nvme_remove_admin_tag_set(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
854 if (ctrl->async_event_sqe.data) { in nvme_rdma_configure_admin_queue()
855 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, in nvme_rdma_configure_admin_queue()
857 ctrl->async_event_sqe.data = NULL; in nvme_rdma_configure_admin_queue()
860 nvme_rdma_free_queue(&ctrl->queues[0]); in nvme_rdma_configure_admin_queue()
873 ret = nvme_rdma_alloc_tag_set(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
883 nr_queues = min(ctrl->tag_set.nr_hw_queues + 1, ctrl->ctrl.queue_count); in nvme_rdma_configure_io_queues()
889 nvme_start_freeze(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
890 nvme_unquiesce_io_queues(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
891 if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) { in nvme_rdma_configure_io_queues()
897 ret = -ENODEV; in nvme_rdma_configure_io_queues()
898 nvme_unfreeze(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
901 blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset, in nvme_rdma_configure_io_queues()
902 ctrl->ctrl.queue_count - 1); in nvme_rdma_configure_io_queues()
903 nvme_unfreeze(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
911 ctrl->tag_set.nr_hw_queues + 1); in nvme_rdma_configure_io_queues()
918 nvme_quiesce_io_queues(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
919 nvme_sync_io_queues(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
922 nvme_cancel_tagset(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
924 nvme_remove_io_tag_set(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
933 nvme_quiesce_admin_queue(&ctrl->ctrl); in nvme_rdma_teardown_admin_queue()
934 blk_sync_queue(ctrl->ctrl.admin_q); in nvme_rdma_teardown_admin_queue()
935 nvme_rdma_stop_queue(&ctrl->queues[0]); in nvme_rdma_teardown_admin_queue()
936 nvme_cancel_admin_tagset(&ctrl->ctrl); in nvme_rdma_teardown_admin_queue()
938 nvme_unquiesce_admin_queue(&ctrl->ctrl); in nvme_rdma_teardown_admin_queue()
939 nvme_remove_admin_tag_set(&ctrl->ctrl); in nvme_rdma_teardown_admin_queue()
947 if (ctrl->ctrl.queue_count > 1) { in nvme_rdma_teardown_io_queues()
948 nvme_quiesce_io_queues(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
949 nvme_sync_io_queues(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
951 nvme_cancel_tagset(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
953 nvme_unquiesce_io_queues(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
954 nvme_remove_io_tag_set(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
964 flush_work(&ctrl->err_work); in nvme_rdma_stop_ctrl()
965 cancel_delayed_work_sync(&ctrl->reconnect_work); in nvme_rdma_stop_ctrl()
972 if (list_empty(&ctrl->list)) in nvme_rdma_free_ctrl()
976 list_del(&ctrl->list); in nvme_rdma_free_ctrl()
979 nvmf_free_options(nctrl->opts); in nvme_rdma_free_ctrl()
981 kfree(ctrl->queues); in nvme_rdma_free_ctrl()
987 enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl); in nvme_rdma_reconnect_or_remove()
995 if (nvmf_should_reconnect(&ctrl->ctrl)) { in nvme_rdma_reconnect_or_remove()
996 dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n", in nvme_rdma_reconnect_or_remove()
997 ctrl->ctrl.opts->reconnect_delay); in nvme_rdma_reconnect_or_remove()
998 queue_delayed_work(nvme_wq, &ctrl->reconnect_work, in nvme_rdma_reconnect_or_remove()
999 ctrl->ctrl.opts->reconnect_delay * HZ); in nvme_rdma_reconnect_or_remove()
1001 nvme_delete_ctrl(&ctrl->ctrl); in nvme_rdma_reconnect_or_remove()
1014 if (ctrl->ctrl.icdoff) { in nvme_rdma_setup_ctrl()
1015 ret = -EOPNOTSUPP; in nvme_rdma_setup_ctrl()
1016 dev_err(ctrl->ctrl.device, "icdoff is not supported!\n"); in nvme_rdma_setup_ctrl()
1020 if (!(ctrl->ctrl.sgls & (1 << 2))) { in nvme_rdma_setup_ctrl()
1021 ret = -EOPNOTSUPP; in nvme_rdma_setup_ctrl()
1022 dev_err(ctrl->ctrl.device, in nvme_rdma_setup_ctrl()
1027 if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) { in nvme_rdma_setup_ctrl()
1028 dev_warn(ctrl->ctrl.device, in nvme_rdma_setup_ctrl()
1030 ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1); in nvme_rdma_setup_ctrl()
1033 if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) { in nvme_rdma_setup_ctrl()
1034 dev_warn(ctrl->ctrl.device, in nvme_rdma_setup_ctrl()
1036 ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE); in nvme_rdma_setup_ctrl()
1037 ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1; in nvme_rdma_setup_ctrl()
1040 if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) { in nvme_rdma_setup_ctrl()
1041 dev_warn(ctrl->ctrl.device, in nvme_rdma_setup_ctrl()
1043 ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd); in nvme_rdma_setup_ctrl()
1044 ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1; in nvme_rdma_setup_ctrl()
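Put concretely, the clamping above operates on a zero-based sqsize (queue depth minus one). A worked example, taking NVME_RDMA_MAX_QUEUE_SIZE as 128 for illustration:

	/*
	 * opts->queue_size = 256, so sqsize starts at 255:
	 *   sqsize + 1 = 256 > 128 (transport max)  ->  sqsize = 128 - 1 = 127
	 *   maxcmd = 64, sqsize + 1 = 128 > 64      ->  sqsize = 64 - 1  = 63
	 * resulting in an effective queue depth of 64.
	 */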
1047 if (ctrl->ctrl.sgls & (1 << 20)) in nvme_rdma_setup_ctrl()
1048 ctrl->use_inline_data = true; in nvme_rdma_setup_ctrl()
1050 if (ctrl->ctrl.queue_count > 1) { in nvme_rdma_setup_ctrl()
1056 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); in nvme_rdma_setup_ctrl()
1063 enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1068 ret = -EINVAL; in nvme_rdma_setup_ctrl()
1072 nvme_start_ctrl(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1076 if (ctrl->ctrl.queue_count > 1) { in nvme_rdma_setup_ctrl()
1077 nvme_quiesce_io_queues(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1078 nvme_sync_io_queues(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1080 nvme_cancel_tagset(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1082 nvme_remove_io_tag_set(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1086 nvme_quiesce_admin_queue(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1087 blk_sync_queue(ctrl->ctrl.admin_q); in nvme_rdma_setup_ctrl()
1088 nvme_rdma_stop_queue(&ctrl->queues[0]); in nvme_rdma_setup_ctrl()
1089 nvme_cancel_admin_tagset(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1091 nvme_remove_admin_tag_set(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1101 ++ctrl->ctrl.nr_reconnects; in nvme_rdma_reconnect_ctrl_work()
1106 dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n", in nvme_rdma_reconnect_ctrl_work()
1107 ctrl->ctrl.nr_reconnects); in nvme_rdma_reconnect_ctrl_work()
1109 ctrl->ctrl.nr_reconnects = 0; in nvme_rdma_reconnect_ctrl_work()
1114 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", in nvme_rdma_reconnect_ctrl_work()
1115 ctrl->ctrl.nr_reconnects); in nvme_rdma_reconnect_ctrl_work()
1124 nvme_stop_keep_alive(&ctrl->ctrl); in nvme_rdma_error_recovery_work()
1125 flush_work(&ctrl->ctrl.async_event_work); in nvme_rdma_error_recovery_work()
1127 nvme_unquiesce_io_queues(&ctrl->ctrl); in nvme_rdma_error_recovery_work()
1129 nvme_unquiesce_admin_queue(&ctrl->ctrl); in nvme_rdma_error_recovery_work()
1130 nvme_auth_stop(&ctrl->ctrl); in nvme_rdma_error_recovery_work()
1132 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_rdma_error_recovery_work()
1134 enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl); in nvme_rdma_error_recovery_work()
1146 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) in nvme_rdma_error_recovery()
1149 dev_warn(ctrl->ctrl.device, "starting error recovery\n"); in nvme_rdma_error_recovery()
1150 queue_work(nvme_reset_wq, &ctrl->err_work); in nvme_rdma_error_recovery()
1157 if (!refcount_dec_and_test(&req->ref)) in nvme_rdma_end_request()
1159 if (!nvme_try_complete_req(rq, req->status, req->result)) in nvme_rdma_end_request()
1166 struct nvme_rdma_queue *queue = wc->qp->qp_context; in nvme_rdma_wr_error()
1167 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_wr_error()
1169 if (nvme_ctrl_state(&ctrl->ctrl) == NVME_CTRL_LIVE) in nvme_rdma_wr_error()
1170 dev_info(ctrl->ctrl.device, in nvme_rdma_wr_error()
1172 op, wc->wr_cqe, in nvme_rdma_wr_error()
1173 ib_wc_status_msg(wc->status), wc->status); in nvme_rdma_wr_error()
1179 if (unlikely(wc->status != IB_WC_SUCCESS)) in nvme_rdma_memreg_done()
1186 container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe); in nvme_rdma_inv_rkey_done()
1188 if (unlikely(wc->status != IB_WC_SUCCESS)) in nvme_rdma_inv_rkey_done()
1202 .ex.invalidate_rkey = req->mr->rkey, in nvme_rdma_inv_rkey()
1205 req->reg_cqe.done = nvme_rdma_inv_rkey_done; in nvme_rdma_inv_rkey()
1206 wr.wr_cqe = &req->reg_cqe; in nvme_rdma_inv_rkey()
1208 return ib_post_send(queue->qp, &wr, NULL); in nvme_rdma_inv_rkey()
1216 ib_dma_unmap_sg(ibdev, req->metadata_sgl->sg_table.sgl, in nvme_rdma_dma_unmap_req()
1217 req->metadata_sgl->nents, rq_dma_dir(rq)); in nvme_rdma_dma_unmap_req()
1218 sg_free_table_chained(&req->metadata_sgl->sg_table, in nvme_rdma_dma_unmap_req()
1222 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents, in nvme_rdma_dma_unmap_req()
1224 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT); in nvme_rdma_dma_unmap_req()
1231 struct nvme_rdma_device *dev = queue->device; in nvme_rdma_unmap_data()
1232 struct ib_device *ibdev = dev->dev; in nvme_rdma_unmap_data()
1233 struct list_head *pool = &queue->qp->rdma_mrs; in nvme_rdma_unmap_data()
1238 if (req->use_sig_mr) in nvme_rdma_unmap_data()
1239 pool = &queue->qp->sig_mrs; in nvme_rdma_unmap_data()
1241 if (req->mr) { in nvme_rdma_unmap_data()
1242 ib_mr_pool_put(queue->qp, pool, req->mr); in nvme_rdma_unmap_data()
1243 req->mr = NULL; in nvme_rdma_unmap_data()
1251 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; in nvme_rdma_set_sg_null()
1253 sg->addr = 0; in nvme_rdma_set_sg_null()
1254 put_unaligned_le24(0, sg->length); in nvme_rdma_set_sg_null()
1255 put_unaligned_le32(0, sg->key); in nvme_rdma_set_sg_null()
1256 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; in nvme_rdma_set_sg_null()
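The put_unaligned_le24()/put_unaligned_le32() pair follows from the wire layout of the keyed SGL descriptor, whose length field is only three bytes; for reference (from include/linux/nvme.h):

	struct nvme_keyed_sgl_desc {
		__le64	addr;
		__u8	length[3];	/* 24-bit byte count: put_unaligned_le24() */
		__u8	key[4];		/* remote key: put_unaligned_le32() */
		__u8	type;		/* descriptor format in the high nibble */
	};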
1264 struct nvme_sgl_desc *sg = &c->common.dptr.sgl; in nvme_rdma_map_sg_inline()
1265 struct ib_sge *sge = &req->sge[1]; in nvme_rdma_map_sg_inline()
1270 for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) { in nvme_rdma_map_sg_inline()
1271 sge->addr = sg_dma_address(sgl); in nvme_rdma_map_sg_inline()
1272 sge->length = sg_dma_len(sgl); in nvme_rdma_map_sg_inline()
1273 sge->lkey = queue->device->pd->local_dma_lkey; in nvme_rdma_map_sg_inline()
1274 len += sge->length; in nvme_rdma_map_sg_inline()
1278 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_rdma_map_sg_inline()
1279 sg->length = cpu_to_le32(len); in nvme_rdma_map_sg_inline()
1280 sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; in nvme_rdma_map_sg_inline()
1282 req->num_sge += count; in nvme_rdma_map_sg_inline()
1289 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; in nvme_rdma_map_sg_single()
1291 sg->addr = cpu_to_le64(sg_dma_address(req->data_sgl.sg_table.sgl)); in nvme_rdma_map_sg_single()
1292 put_unaligned_le24(sg_dma_len(req->data_sgl.sg_table.sgl), sg->length); in nvme_rdma_map_sg_single()
1293 put_unaligned_le32(queue->device->pd->unsafe_global_rkey, sg->key); in nvme_rdma_map_sg_single()
1294 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; in nvme_rdma_map_sg_single()
1302 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; in nvme_rdma_map_sg_fr()
1305 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs); in nvme_rdma_map_sg_fr()
1306 if (WARN_ON_ONCE(!req->mr)) in nvme_rdma_map_sg_fr()
1307 return -EAGAIN; in nvme_rdma_map_sg_fr()
1313 nr = ib_map_mr_sg(req->mr, req->data_sgl.sg_table.sgl, count, NULL, in nvme_rdma_map_sg_fr()
1316 ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr); in nvme_rdma_map_sg_fr()
1317 req->mr = NULL; in nvme_rdma_map_sg_fr()
1320 return -EINVAL; in nvme_rdma_map_sg_fr()
1323 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); in nvme_rdma_map_sg_fr()
1325 req->reg_cqe.done = nvme_rdma_memreg_done; in nvme_rdma_map_sg_fr()
1326 memset(&req->reg_wr, 0, sizeof(req->reg_wr)); in nvme_rdma_map_sg_fr()
1327 req->reg_wr.wr.opcode = IB_WR_REG_MR; in nvme_rdma_map_sg_fr()
1328 req->reg_wr.wr.wr_cqe = &req->reg_cqe; in nvme_rdma_map_sg_fr()
1329 req->reg_wr.wr.num_sge = 0; in nvme_rdma_map_sg_fr()
1330 req->reg_wr.mr = req->mr; in nvme_rdma_map_sg_fr()
1331 req->reg_wr.key = req->mr->rkey; in nvme_rdma_map_sg_fr()
1332 req->reg_wr.access = IB_ACCESS_LOCAL_WRITE | in nvme_rdma_map_sg_fr()
1336 sg->addr = cpu_to_le64(req->mr->iova); in nvme_rdma_map_sg_fr()
1337 put_unaligned_le24(req->mr->length, sg->length); in nvme_rdma_map_sg_fr()
1338 put_unaligned_le32(req->mr->rkey, sg->key); in nvme_rdma_map_sg_fr()
1339 sg->type = (NVME_KEY_SGL_FMT_DATA_DESC << 4) | in nvme_rdma_map_sg_fr()
1346 struct nvme_command *cmd, struct ib_sig_domain *domain, in nvme_rdma_set_sig_domain() argument
1349 domain->sig_type = IB_SIG_TYPE_T10_DIF; in nvme_rdma_set_sig_domain()
1350 domain->sig.dif.bg_type = IB_T10DIF_CRC; in nvme_rdma_set_sig_domain()
1351 domain->sig.dif.pi_interval = 1 << bi->interval_exp; in nvme_rdma_set_sig_domain()
1352 domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag); in nvme_rdma_set_sig_domain()
1354 domain->sig.dif.ref_remap = true; in nvme_rdma_set_sig_domain()
1356 domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag); in nvme_rdma_set_sig_domain()
1357 domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask); in nvme_rdma_set_sig_domain()
1358 domain->sig.dif.app_escape = true; in nvme_rdma_set_sig_domain()
1360 domain->sig.dif.ref_escape = true; in nvme_rdma_set_sig_domain()
1364 struct nvme_command *cmd, struct ib_sig_attrs *sig_attrs, in nvme_rdma_set_sig_attrs() argument
1367 u16 control = le16_to_cpu(cmd->rw.control); in nvme_rdma_set_sig_attrs()
1372 sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE; in nvme_rdma_set_sig_attrs()
1373 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control, in nvme_rdma_set_sig_attrs()
1377 cmd->rw.control = cpu_to_le16(control); in nvme_rdma_set_sig_attrs()
1380 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control, in nvme_rdma_set_sig_attrs()
1382 nvme_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control, in nvme_rdma_set_sig_attrs()
1387 static void nvme_rdma_set_prot_checks(struct nvme_command *cmd, u8 *mask) in nvme_rdma_set_prot_checks() argument
1390 if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_REF) in nvme_rdma_set_prot_checks()
1392 if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_GUARD) in nvme_rdma_set_prot_checks()
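The two tests above translate the command's PRINFO check bits into the IB signature check mask. A sketch of the complete helper, filled in to stay consistent with the matched lines (the mask bits are the standard ib_verbs ones):

	static void nvme_rdma_set_prot_checks(struct nvme_command *cmd, u8 *mask)
	{
		*mask = 0;
		if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_REF)
			*mask |= IB_SIG_CHECK_REFTAG;
		if (le16_to_cpu(cmd->rw.control) & NVME_RW_PRINFO_PRCHK_GUARD)
			*mask |= IB_SIG_CHECK_GUARD;
	}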
1398 if (unlikely(wc->status != IB_WC_SUCCESS)) in nvme_rdma_sig_done()
1399 nvme_rdma_wr_error(cq, wc, "SIG"); in nvme_rdma_sig_done()
1406 struct nvme_rdma_sgl *sgl = &req->data_sgl; in nvme_rdma_map_sg_pi()
1407 struct ib_reg_wr *wr = &req->reg_wr; in nvme_rdma_map_sg_pi()
1409 struct nvme_ns *ns = rq->q->queuedata; in nvme_rdma_map_sg_pi()
1410 struct bio *bio = rq->bio; in nvme_rdma_map_sg_pi()
1411 struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl; in nvme_rdma_map_sg_pi()
1414 req->mr = ib_mr_pool_get(queue->qp, &queue->qp->sig_mrs); in nvme_rdma_map_sg_pi()
1415 if (WARN_ON_ONCE(!req->mr)) in nvme_rdma_map_sg_pi()
1416 return -EAGAIN; in nvme_rdma_map_sg_pi()
1418 nr = ib_map_mr_sg_pi(req->mr, sgl->sg_table.sgl, count, NULL, in nvme_rdma_map_sg_pi()
1419 req->metadata_sgl->sg_table.sgl, pi_count, NULL, in nvme_rdma_map_sg_pi()
1424 nvme_rdma_set_sig_attrs(blk_get_integrity(bio->bi_bdev->bd_disk), c, in nvme_rdma_map_sg_pi()
1425 req->mr->sig_attrs, ns->pi_type); in nvme_rdma_map_sg_pi()
1426 nvme_rdma_set_prot_checks(c, &req->mr->sig_attrs->check_mask); in nvme_rdma_map_sg_pi()
1428 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); in nvme_rdma_map_sg_pi()
1430 req->reg_cqe.done = nvme_rdma_sig_done; in nvme_rdma_map_sg_pi()
1432 wr->wr.opcode = IB_WR_REG_MR_INTEGRITY; in nvme_rdma_map_sg_pi()
1433 wr->wr.wr_cqe = &req->reg_cqe; in nvme_rdma_map_sg_pi()
1434 wr->wr.num_sge = 0; in nvme_rdma_map_sg_pi()
1435 wr->wr.send_flags = 0; in nvme_rdma_map_sg_pi()
1436 wr->mr = req->mr; in nvme_rdma_map_sg_pi()
1437 wr->key = req->mr->rkey; in nvme_rdma_map_sg_pi()
1438 wr->access = IB_ACCESS_LOCAL_WRITE | in nvme_rdma_map_sg_pi()
1442 sg->addr = cpu_to_le64(req->mr->iova); in nvme_rdma_map_sg_pi()
1443 put_unaligned_le24(req->mr->length, sg->length); in nvme_rdma_map_sg_pi()
1444 put_unaligned_le32(req->mr->rkey, sg->key); in nvme_rdma_map_sg_pi()
1445 sg->type = NVME_KEY_SGL_FMT_DATA_DESC << 4; in nvme_rdma_map_sg_pi()
1450 ib_mr_pool_put(queue->qp, &queue->qp->sig_mrs, req->mr); in nvme_rdma_map_sg_pi()
1451 req->mr = NULL; in nvme_rdma_map_sg_pi()
1454 return -EINVAL; in nvme_rdma_map_sg_pi()
1463 req->data_sgl.sg_table.sgl = (struct scatterlist *)(req + 1); in nvme_rdma_dma_map_req()
1464 ret = sg_alloc_table_chained(&req->data_sgl.sg_table, in nvme_rdma_dma_map_req()
1465 blk_rq_nr_phys_segments(rq), req->data_sgl.sg_table.sgl, in nvme_rdma_dma_map_req()
1468 return -ENOMEM; in nvme_rdma_dma_map_req()
1470 req->data_sgl.nents = blk_rq_map_sg(rq->q, rq, in nvme_rdma_dma_map_req()
1471 req->data_sgl.sg_table.sgl); in nvme_rdma_dma_map_req()
1473 *count = ib_dma_map_sg(ibdev, req->data_sgl.sg_table.sgl, in nvme_rdma_dma_map_req()
1474 req->data_sgl.nents, rq_dma_dir(rq)); in nvme_rdma_dma_map_req()
1476 ret = -EIO; in nvme_rdma_dma_map_req()
1481 req->metadata_sgl->sg_table.sgl = in nvme_rdma_dma_map_req()
1482 (struct scatterlist *)(req->metadata_sgl + 1); in nvme_rdma_dma_map_req()
1483 ret = sg_alloc_table_chained(&req->metadata_sgl->sg_table, in nvme_rdma_dma_map_req()
1484 blk_rq_count_integrity_sg(rq->q, rq->bio), in nvme_rdma_dma_map_req()
1485 req->metadata_sgl->sg_table.sgl, in nvme_rdma_dma_map_req()
1488 ret = -ENOMEM; in nvme_rdma_dma_map_req()
1492 req->metadata_sgl->nents = blk_rq_map_integrity_sg(rq->q, in nvme_rdma_dma_map_req()
1493 rq->bio, req->metadata_sgl->sg_table.sgl); in nvme_rdma_dma_map_req()
1495 req->metadata_sgl->sg_table.sgl, in nvme_rdma_dma_map_req()
1496 req->metadata_sgl->nents, in nvme_rdma_dma_map_req()
1499 ret = -EIO; in nvme_rdma_dma_map_req()
1507 sg_free_table_chained(&req->metadata_sgl->sg_table, in nvme_rdma_dma_map_req()
1510 ib_dma_unmap_sg(ibdev, req->data_sgl.sg_table.sgl, req->data_sgl.nents, in nvme_rdma_dma_map_req()
1513 sg_free_table_chained(&req->data_sgl.sg_table, NVME_INLINE_SG_CNT); in nvme_rdma_dma_map_req()
1521 struct nvme_rdma_device *dev = queue->device; in nvme_rdma_map_data()
1522 struct ib_device *ibdev = dev->dev; in nvme_rdma_map_data()
1526 req->num_sge = 1; in nvme_rdma_map_data()
1527 refcount_set(&req->ref, 2); /* send and recv completions */ in nvme_rdma_map_data()
1529 c->common.flags |= NVME_CMD_SGL_METABUF; in nvme_rdma_map_data()
1538 if (req->use_sig_mr) { in nvme_rdma_map_data()
1543 if (count <= dev->num_inline_segments) { in nvme_rdma_map_data()
1545 queue->ctrl->use_inline_data && in nvme_rdma_map_data()
1552 if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) { in nvme_rdma_map_data()
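Lines 1538-1552 are the heart of the data-mapping policy: a signature MR for PI commands, inline data for small writes when the controller supports it, a single keyed SGL when one mapped segment can ride on the PD's global rkey, and fast-registration MRs for everything else. A sketch of the selection, paraphrasing the elided conditions:

	if (req->use_sig_mr)
		return nvme_rdma_map_sg_pi(queue, req, c, count, pi_count);

	if (count <= dev->num_inline_segments) {
		if (rq_data_dir(rq) == WRITE && queue->ctrl->use_inline_data &&
		    blk_rq_payload_bytes(rq) <= nvme_rdma_inline_data_size(queue))
			return nvme_rdma_map_sg_inline(queue, req, c, count);

		if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			return nvme_rdma_map_sg_single(queue, req, c);
	}

	return nvme_rdma_map_sg_fr(queue, req, c, count);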
1573 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); in nvme_rdma_send_done()
1577 if (unlikely(wc->status != IB_WC_SUCCESS)) in nvme_rdma_send_done()
1590 sge->addr = qe->dma; in nvme_rdma_post_send()
1591 sge->length = sizeof(struct nvme_command); in nvme_rdma_post_send()
1592 sge->lkey = queue->device->pd->local_dma_lkey; in nvme_rdma_post_send()
1595 wr.wr_cqe = &qe->cqe; in nvme_rdma_post_send()
1602 first->next = &wr; in nvme_rdma_post_send()
1606 ret = ib_post_send(queue->qp, first, NULL); in nvme_rdma_post_send()
1608 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_post_send()
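nvme_rdma_post_send()'s first parameter lets a memory-registration WR be chained ahead of the command capsule, so both reach the HCA in one ib_post_send() call. A sketch of the chaining, with wr and sge filled as the matched lines show:

	struct ib_send_wr wr = {
		.wr_cqe     = &qe->cqe,
		.sg_list    = sge,
		.num_sge    = num_sge,
		.opcode     = IB_WR_SEND,
		.send_flags = IB_SEND_SIGNALED,
	};

	if (first)
		first->next = &wr;	/* e.g. the IB_WR_REG_MR built in map_sg_fr() */
	else
		first = &wr;

	ret = ib_post_send(queue->qp, first, NULL);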
1621 list.addr = qe->dma; in nvme_rdma_post_recv()
1623 list.lkey = queue->device->pd->local_dma_lkey; in nvme_rdma_post_recv()
1625 qe->cqe.done = nvme_rdma_recv_done; in nvme_rdma_post_recv()
1628 wr.wr_cqe = &qe->cqe; in nvme_rdma_post_recv()
1632 ret = ib_post_recv(queue->qp, &wr, NULL); in nvme_rdma_post_recv()
1634 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_post_recv()
1645 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_rdma_tagset()
1646 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_rdma_tagset()
1651 if (unlikely(wc->status != IB_WC_SUCCESS)) in nvme_rdma_async_done()
1658 struct nvme_rdma_queue *queue = &ctrl->queues[0]; in nvme_rdma_submit_async_event()
1659 struct ib_device *dev = queue->device->dev; in nvme_rdma_submit_async_event()
1660 struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe; in nvme_rdma_submit_async_event()
1661 struct nvme_command *cmd = sqe->data; in nvme_rdma_submit_async_event() local
1665 ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE); in nvme_rdma_submit_async_event()
1667 memset(cmd, 0, sizeof(*cmd)); in nvme_rdma_submit_async_event()
1668 cmd->common.opcode = nvme_admin_async_event; in nvme_rdma_submit_async_event()
1669 cmd->common.command_id = NVME_AQ_BLK_MQ_DEPTH; in nvme_rdma_submit_async_event()
1670 cmd->common.flags |= NVME_CMD_SGL_METABUF; in nvme_rdma_submit_async_event()
1671 nvme_rdma_set_sg_null(cmd); in nvme_rdma_submit_async_event()
1673 sqe->cqe.done = nvme_rdma_async_done; in nvme_rdma_submit_async_event()
1675 ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd), in nvme_rdma_submit_async_event()
1688 rq = nvme_find_rq(nvme_rdma_tagset(queue), cqe->command_id); in nvme_rdma_process_nvme_rsp()
1690 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1692 cqe->command_id, queue->qp->qp_num); in nvme_rdma_process_nvme_rsp()
1693 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1698 req->status = cqe->status; in nvme_rdma_process_nvme_rsp()
1699 req->result = cqe->result; in nvme_rdma_process_nvme_rsp()
1701 if (wc->wc_flags & IB_WC_WITH_INVALIDATE) { in nvme_rdma_process_nvme_rsp()
1702 if (unlikely(!req->mr || in nvme_rdma_process_nvme_rsp()
1703 wc->ex.invalidate_rkey != req->mr->rkey)) { in nvme_rdma_process_nvme_rsp()
1704 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1706 req->mr ? req->mr->rkey : 0); in nvme_rdma_process_nvme_rsp()
1707 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1709 } else if (req->mr) { in nvme_rdma_process_nvme_rsp()
1714 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1716 req->mr->rkey, ret); in nvme_rdma_process_nvme_rsp()
1717 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1729 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); in nvme_rdma_recv_done()
1730 struct nvme_rdma_queue *queue = wc->qp->qp_context; in nvme_rdma_recv_done()
1731 struct ib_device *ibdev = queue->device->dev; in nvme_rdma_recv_done()
1732 struct nvme_completion *cqe = qe->data; in nvme_rdma_recv_done()
1735 if (unlikely(wc->status != IB_WC_SUCCESS)) { in nvme_rdma_recv_done()
1741 if (unlikely(wc->byte_len < len)) { in nvme_rdma_recv_done()
1742 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_recv_done()
1743 "Unexpected nvme completion length(%d)\n", wc->byte_len); in nvme_rdma_recv_done()
1744 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_recv_done()
1748 ib_dma_sync_single_for_cpu(ibdev, qe->dma, len, DMA_FROM_DEVICE); in nvme_rdma_recv_done()
1756 cqe->command_id))) in nvme_rdma_recv_done()
1757 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_rdma_recv_done()
1758 &cqe->result); in nvme_rdma_recv_done()
1761 ib_dma_sync_single_for_device(ibdev, qe->dma, len, DMA_FROM_DEVICE); in nvme_rdma_recv_done()
1770 for (i = 0; i < queue->queue_size; i++) { in nvme_rdma_conn_established()
1771 ret = nvme_rdma_post_recv(queue, &queue->rsp_ring[i]); in nvme_rdma_conn_established()
1782 struct rdma_cm_id *cm_id = queue->cm_id; in nvme_rdma_conn_rejected()
1783 int status = ev->status; in nvme_rdma_conn_rejected()
1792 u16 sts = le16_to_cpu(rej_data->sts); in nvme_rdma_conn_rejected()
1794 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_conn_rejected()
1798 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_conn_rejected()
1802 return -ECONNRESET; in nvme_rdma_conn_rejected()
1807 struct nvme_ctrl *ctrl = &queue->ctrl->ctrl; in nvme_rdma_addr_resolved()
1814 if (ctrl->opts->tos >= 0) in nvme_rdma_addr_resolved()
1815 rdma_set_service_type(queue->cm_id, ctrl->opts->tos); in nvme_rdma_addr_resolved()
1816 ret = rdma_resolve_route(queue->cm_id, NVME_RDMA_CM_TIMEOUT_MS); in nvme_rdma_addr_resolved()
1818 dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n", in nvme_rdma_addr_resolved()
1819 queue->cm_error); in nvme_rdma_addr_resolved()
1832 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_route_resolved()
1837 param.qp_num = queue->qp->qp_num; in nvme_rdma_route_resolved()
1840 param.responder_resources = queue->device->dev->attrs.max_qp_rd_atom; in nvme_rdma_route_resolved()
1855 priv.hsqsize = cpu_to_le16(NVME_AQ_DEPTH - 1); in nvme_rdma_route_resolved()
1862 priv.hrqsize = cpu_to_le16(queue->queue_size); in nvme_rdma_route_resolved()
1863 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize); in nvme_rdma_route_resolved()
1866 ret = rdma_connect_locked(queue->cm_id, &param); in nvme_rdma_route_resolved()
1868 dev_err(ctrl->ctrl.device, in nvme_rdma_route_resolved()
1879 struct nvme_rdma_queue *queue = cm_id->context; in nvme_rdma_cm_handler()
1882 dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n", in nvme_rdma_cm_handler()
1883 rdma_event_msg(ev->event), ev->event, in nvme_rdma_cm_handler()
1884 ev->status, cm_id); in nvme_rdma_cm_handler()
1886 switch (ev->event) { in nvme_rdma_cm_handler()
1894 queue->cm_error = nvme_rdma_conn_established(queue); in nvme_rdma_cm_handler()
1896 complete(&queue->cm_done); in nvme_rdma_cm_handler()
1905 dev_dbg(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1906 "CM error event %d\n", ev->event); in nvme_rdma_cm_handler()
1907 cm_error = -ECONNRESET; in nvme_rdma_cm_handler()
1912 dev_dbg(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1913 "disconnect received - connection closed\n"); in nvme_rdma_cm_handler()
1914 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_cm_handler()
1920 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1921 "Unexpected RDMA CM event (%d)\n", ev->event); in nvme_rdma_cm_handler()
1922 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_cm_handler()
1927 queue->cm_error = cm_error; in nvme_rdma_cm_handler()
1928 complete(&queue->cm_done); in nvme_rdma_cm_handler()
1937 struct nvme_rdma_queue *queue = req->queue; in nvme_rdma_complete_timed_out()
1946 struct nvme_rdma_queue *queue = req->queue; in nvme_rdma_timeout()
1947 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_timeout()
1949 dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n", in nvme_rdma_timeout()
1950 rq->tag, nvme_rdma_queue_idx(queue)); in nvme_rdma_timeout()
1952 if (nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_LIVE) { in nvme_rdma_timeout()
1957 * - ctrl disable/shutdown fabrics requests in nvme_rdma_timeout()
1958 * - connect requests in nvme_rdma_timeout()
1959 * - initialization admin requests in nvme_rdma_timeout()
1960 * - I/O requests that entered after unquiescing and in nvme_rdma_timeout()
1981 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_rdma_queue_rq()
1982 struct nvme_rdma_queue *queue = hctx->driver_data; in nvme_rdma_queue_rq()
1983 struct request *rq = bd->rq; in nvme_rdma_queue_rq()
1985 struct nvme_rdma_qe *sqe = &req->sqe; in nvme_rdma_queue_rq()
1986 struct nvme_command *c = nvme_req(rq)->cmd; in nvme_rdma_queue_rq()
1988 bool queue_ready = test_bit(NVME_RDMA_Q_LIVE, &queue->flags); in nvme_rdma_queue_rq()
1992 WARN_ON_ONCE(rq->tag < 0); in nvme_rdma_queue_rq()
1994 if (!nvme_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_rdma_queue_rq()
1995 return nvme_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_rdma_queue_rq()
1997 dev = queue->device->dev; in nvme_rdma_queue_rq()
1999 req->sqe.dma = ib_dma_map_single(dev, req->sqe.data, in nvme_rdma_queue_rq()
2002 err = ib_dma_mapping_error(dev, req->sqe.dma); in nvme_rdma_queue_rq()
2006 ib_dma_sync_single_for_cpu(dev, sqe->dma, in nvme_rdma_queue_rq()
2016 queue->pi_support && in nvme_rdma_queue_rq()
2017 (c->common.opcode == nvme_cmd_write || in nvme_rdma_queue_rq()
2018 c->common.opcode == nvme_cmd_read) && in nvme_rdma_queue_rq()
2020 req->use_sig_mr = true; in nvme_rdma_queue_rq()
2022 req->use_sig_mr = false; in nvme_rdma_queue_rq()
2026 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_queue_rq()
2031 sqe->cqe.done = nvme_rdma_send_done; in nvme_rdma_queue_rq()
2033 ib_dma_sync_single_for_device(dev, sqe->dma, in nvme_rdma_queue_rq()
2036 err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, in nvme_rdma_queue_rq()
2037 req->mr ? &req->reg_wr.wr : NULL); in nvme_rdma_queue_rq()
2046 if (err == -EIO) in nvme_rdma_queue_rq()
2048 else if (err == -ENOMEM || err == -EAGAIN) in nvme_rdma_queue_rq()
2054 ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command), in nvme_rdma_queue_rq()
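The submission path above brackets every CPU access to the command capsule with DMA syncs: sync-for-cpu before the SQE is written (line 2006), sync-for-device before it is posted (line 2033). Condensed, as a sketch:

	ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(struct nvme_command),
				   DMA_TO_DEVICE);
	/* ... CPU fills the command and maps the data here ... */
	ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(struct nvme_command),
				      DMA_TO_DEVICE);
	err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
				  req->mr ? &req->reg_wr.wr : NULL);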
2061 struct nvme_rdma_queue *queue = hctx->driver_data; in nvme_rdma_poll()
2063 return ib_process_cq_direct(queue->ib_cq, -1); in nvme_rdma_poll()
2072 ret = ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status); in nvme_rdma_check_pi_status()
2075 nvme_req(rq)->status = NVME_SC_INVALID_PI; in nvme_rdma_check_pi_status()
2082 nvme_req(rq)->status = NVME_SC_GUARD_CHECK; in nvme_rdma_check_pi_status()
2085 nvme_req(rq)->status = NVME_SC_REFTAG_CHECK; in nvme_rdma_check_pi_status()
2088 nvme_req(rq)->status = NVME_SC_APPTAG_CHECK; in nvme_rdma_check_pi_status()
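The status assignments above map IB signature errors back to NVMe protection-information status codes. A sketch of the whole check, assuming the usual ib_check_mr_status() contract, with the elided switch filled in to match the visible assignments:

	static void nvme_rdma_check_pi_status(struct nvme_rdma_request *req)
	{
		struct request *rq = blk_mq_rq_from_pdu(req);
		struct ib_mr_status mr_status;

		if (ib_check_mr_status(req->mr, IB_MR_CHECK_SIG_STATUS, &mr_status)) {
			nvme_req(rq)->status = NVME_SC_INVALID_PI;	/* query failed */
			return;
		}

		if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
			switch (mr_status.sig_err.err_type) {
			case IB_SIG_BAD_GUARD:
				nvme_req(rq)->status = NVME_SC_GUARD_CHECK;
				break;
			case IB_SIG_BAD_REFTAG:
				nvme_req(rq)->status = NVME_SC_REFTAG_CHECK;
				break;
			case IB_SIG_BAD_APPTAG:
				nvme_req(rq)->status = NVME_SC_APPTAG_CHECK;
				break;
			}
		}
	}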
2100 struct nvme_rdma_queue *queue = req->queue; in nvme_rdma_complete_rq()
2101 struct ib_device *ibdev = queue->device->dev; in nvme_rdma_complete_rq()
2103 if (req->use_sig_mr) in nvme_rdma_complete_rq()
2107 ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command), in nvme_rdma_complete_rq()
2114 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(set->driver_data); in nvme_rdma_map_queues()
2116 nvmf_map_queues(set, &ctrl->ctrl, ctrl->io_queues); in nvme_rdma_map_queues()
2142 nvme_quiesce_admin_queue(&ctrl->ctrl); in nvme_rdma_shutdown_ctrl()
2143 nvme_disable_ctrl(&ctrl->ctrl, shutdown); in nvme_rdma_shutdown_ctrl()
2157 nvme_stop_ctrl(&ctrl->ctrl); in nvme_rdma_reset_ctrl_work()
2160 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_rdma_reset_ctrl_work()
2172 ++ctrl->ctrl.nr_reconnects; in nvme_rdma_reset_ctrl_work()
2210 found = nvmf_ip_options_match(&ctrl->ctrl, opts); in nvme_rdma_existing_controller()
2228 return ERR_PTR(-ENOMEM); in nvme_rdma_create_ctrl()
2229 ctrl->ctrl.opts = opts; in nvme_rdma_create_ctrl()
2230 INIT_LIST_HEAD(&ctrl->list); in nvme_rdma_create_ctrl()
2232 if (!(opts->mask & NVMF_OPT_TRSVCID)) { in nvme_rdma_create_ctrl()
2233 opts->trsvcid = in nvme_rdma_create_ctrl()
2235 if (!opts->trsvcid) { in nvme_rdma_create_ctrl()
2236 ret = -ENOMEM; in nvme_rdma_create_ctrl()
2239 opts->mask |= NVMF_OPT_TRSVCID; in nvme_rdma_create_ctrl()
2243 opts->traddr, opts->trsvcid, &ctrl->addr); in nvme_rdma_create_ctrl()
2246 opts->traddr, opts->trsvcid); in nvme_rdma_create_ctrl()
2250 if (opts->mask & NVMF_OPT_HOST_TRADDR) { in nvme_rdma_create_ctrl()
2252 opts->host_traddr, NULL, &ctrl->src_addr); in nvme_rdma_create_ctrl()
2255 opts->host_traddr); in nvme_rdma_create_ctrl()
2260 if (!opts->duplicate_connect && nvme_rdma_existing_controller(opts)) { in nvme_rdma_create_ctrl()
2261 ret = -EALREADY; in nvme_rdma_create_ctrl()
2265 INIT_DELAYED_WORK(&ctrl->reconnect_work, in nvme_rdma_create_ctrl()
2267 INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work); in nvme_rdma_create_ctrl()
2268 INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work); in nvme_rdma_create_ctrl()
2270 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + in nvme_rdma_create_ctrl()
2271 opts->nr_poll_queues + 1; in nvme_rdma_create_ctrl()
2272 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_rdma_create_ctrl()
2273 ctrl->ctrl.kato = opts->kato; in nvme_rdma_create_ctrl()
2275 ret = -ENOMEM; in nvme_rdma_create_ctrl()
2276 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), in nvme_rdma_create_ctrl()
2278 if (!ctrl->queues) in nvme_rdma_create_ctrl()
2281 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops, in nvme_rdma_create_ctrl()
2286 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING); in nvme_rdma_create_ctrl()
2293 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n", in nvme_rdma_create_ctrl()
2294 nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr); in nvme_rdma_create_ctrl()
2297 list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list); in nvme_rdma_create_ctrl()
2300 return &ctrl->ctrl; in nvme_rdma_create_ctrl()
2303 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_rdma_create_ctrl()
2304 nvme_put_ctrl(&ctrl->ctrl); in nvme_rdma_create_ctrl()
2306 ret = -EIO; in nvme_rdma_create_ctrl()
2309 kfree(ctrl->queues); in nvme_rdma_create_ctrl()
2334 if (ndev->dev == ib_device) { in nvme_rdma_remove_one()
2347 if (ctrl->device->dev != ib_device) in nvme_rdma_remove_one()
2349 nvme_delete_ctrl(&ctrl->ctrl); in nvme_rdma_remove_one()
2389 nvme_delete_ctrl(&ctrl->ctrl); in nvme_rdma_cleanup_module()