--- rdma.c (3eb66e91a25497065c5322b1268cbc3953642227)
+++ rdma.c (5cbab6303b4791a3e6713dfe2c5fda6a867f9adc)
 /*
  * NVMe over Fabrics RDMA target.
  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
  * version 2, as published by the Free Software Foundation.
  *

--- 125 unchanged lines hidden ---

 static DEFINE_MUTEX(device_list_mutex);

 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
 static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
 static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
 static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
+static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
+		struct nvmet_rdma_rsp *r);
+static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
+		struct nvmet_rdma_rsp *r);

 static const struct nvmet_fabrics_ops nvmet_rdma_ops;

 static int num_pages(int len)
 {
 	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
 }

--- 27 unchanged lines hidden ---

 	spin_lock_irqsave(&queue->rsps_lock, flags);
 	rsp = list_first_entry_or_null(&queue->free_rsps,
 			struct nvmet_rdma_rsp, free_list);
 	if (likely(rsp))
 		list_del(&rsp->free_list);
 	spin_unlock_irqrestore(&queue->rsps_lock, flags);

 	if (unlikely(!rsp)) {
-		rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+		int ret;
+
+		rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
 		if (unlikely(!rsp))
 			return NULL;
+		ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
+		if (unlikely(ret)) {
+			kfree(rsp);
+			return NULL;
+		}
+
 		rsp->allocated = true;
 	}

 	return rsp;
 }

 static inline void
 nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
 {
 	unsigned long flags;

-	if (rsp->allocated) {
+	if (unlikely(rsp->allocated)) {
+		nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
 		kfree(rsp);
 		return;
 	}

 	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
 	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
 	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
 }

--- 417 unchanged lines hidden ---

 }

 static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
 {
 	struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
 	u64 off = le64_to_cpu(sgl->addr);
 	u32 len = le32_to_cpu(sgl->length);

-	if (!nvme_is_write(rsp->req.cmd))
-		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+	if (!nvme_is_write(rsp->req.cmd)) {
+		rsp->req.error_loc =
+			offsetof(struct nvme_common_command, opcode);
+		return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+	}

 	if (off + len > rsp->queue->dev->inline_data_size) {
 		pr_err("invalid inline data offset!\n");
 		return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
 	}

 	/* no data command? */
 	if (!len)

--- 48 unchanged lines hidden ---

 	switch (sgl->type >> 4) {
 	case NVME_SGL_FMT_DATA_DESC:
 		switch (sgl->type & 0xf) {
 		case NVME_SGL_FMT_OFFSET:
 			return nvmet_rdma_map_sgl_inline(rsp);
 		default:
 			pr_err("invalid SGL subtype: %#x\n", sgl->type);
+			rsp->req.error_loc =
+				offsetof(struct nvme_common_command, dptr);
 			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		}
 	case NVME_KEY_SGL_FMT_DATA_DESC:
 		switch (sgl->type & 0xf) {
 		case NVME_SGL_FMT_ADDRESS | NVME_SGL_FMT_INVALIDATE:
 			return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
 		case NVME_SGL_FMT_ADDRESS:
 			return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
 		default:
 			pr_err("invalid SGL subtype: %#x\n", sgl->type);
+			rsp->req.error_loc =
+				offsetof(struct nvme_common_command, dptr);
 			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
 		}
 	default:
 		pr_err("invalid SGL type: %#x\n", sgl->type);
+		rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
 		return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
 	}
 }

 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
 {
 	struct nvmet_rdma_queue *queue = rsp->queue;

--- 958 unchanged lines hidden ---
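
The nvmet_rdma_get_rsp()/nvmet_rdma_put_rsp() hunks above change the fallback path: when the pre-allocated response pool is empty, a response is now zero-allocated on demand and fully set up via nvmet_rdma_alloc_rsp(), flagged as "allocated", and later torn down with nvmet_rdma_free_rsp() before being freed instead of recycled. Below is a minimal user-space sketch of that pool-with-fallback pattern, assuming made-up names (struct resp, struct resp_pool, pool_get, pool_put) and a pthread mutex in place of the kernel spinlock; it is an illustration, not the nvmet code.

/*
 * Sketch: take from a pre-allocated free list; if empty, allocate a one-off
 * object and mark it; on put, free marked objects instead of recycling them.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct resp {
	struct resp *next;
	bool allocated;		/* true if it came from calloc(), not the pool */
};

struct resp_pool {
	pthread_mutex_t lock;
	struct resp *free_list;
};

static struct resp *pool_get(struct resp_pool *p)
{
	struct resp *r;

	pthread_mutex_lock(&p->lock);
	r = p->free_list;
	if (r)
		p->free_list = r->next;
	pthread_mutex_unlock(&p->lock);

	if (!r) {
		/* Pool exhausted: fall back to a one-off allocation. */
		r = calloc(1, sizeof(*r));
		if (!r)
			return NULL;
		r->allocated = true;
	}
	return r;
}

static void pool_put(struct resp_pool *p, struct resp *r)
{
	if (r->allocated) {
		/* One-off responses are freed, never recycled. */
		free(r);
		return;
	}
	pthread_mutex_lock(&p->lock);
	r->next = p->free_list;
	p->free_list = r;
	pthread_mutex_unlock(&p->lock);
}

int main(void)
{
	struct resp_pool p = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct resp *r = pool_get(&p);	/* empty pool -> one-off allocation */

	pool_put(&p, r);		/* freed here, since r->allocated is set */
	return 0;
}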
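
The error_loc assignments added in the SGL-mapping hunks record the byte offset of the command field that failed validation, using offsetof() on the command structure, so the failure location can be reported in an error-log entry. A small illustrative sketch of that reporting style; struct cmd and validate_cmd() are hypothetical stand-ins, not part of the nvmet code.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical command layout; stands in for struct nvme_common_command. */
struct cmd {
	uint8_t  opcode;
	uint8_t  flags;
	uint16_t command_id;
	uint32_t nsid;
	uint8_t  dptr[16];	/* stand-in for the SGL/PRP data pointer */
};

/*
 * Returns 0 on success; on failure, *err_loc holds the byte offset of the
 * rejected field, which an error-log entry can report back to the host.
 */
static int validate_cmd(const struct cmd *c, uint16_t *err_loc)
{
	if (c->opcode == 0xff) {
		*err_loc = offsetof(struct cmd, opcode);
		return -1;
	}
	if (c->dptr[0] == 0xff) {
		*err_loc = offsetof(struct cmd, dptr);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct cmd c = { .opcode = 0xff };
	uint16_t loc = 0;

	if (validate_cmd(&c, &loc))
		printf("rejected field at byte offset %u\n", (unsigned)loc);
	return 0;
}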