/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

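/*
 * Completion handler for memory registration work requests. Registration
 * WRs are posted unsignaled (send_flags == 0 below), so this only runs
 * for error completions; just report them.
 */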
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
{
	iser_err_comp(wc, "memreg");
}

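/*
 * Take a free fastreg descriptor off the connection pool. No empty-list
 * check is done: the pool is sized for the connection's maximum number
 * of in-flight commands, so a free descriptor is assumed to be available.
 */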
static struct iser_fr_desc *iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	struct iser_fr_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&fr_pool->lock, flags);
	desc = list_first_entry(&fr_pool->list,
				struct iser_fr_desc, list);
	list_del(&desc->list);
	spin_unlock_irqrestore(&fr_pool->lock, flags);

	return desc;
}

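/* Return a fastreg descriptor to the connection pool's free list. */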
static void iser_reg_desc_put_fr(struct ib_conn *ib_conn,
				 struct iser_fr_desc *desc)
{
	struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
	unsigned long flags;

	spin_lock_irqsave(&fr_pool->lock, flags);
	list_add(&desc->list, &fr_pool->list);
	spin_unlock_irqrestore(&fr_pool->lock, flags);
}

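/**
 * iser_dma_map_task_data - DMA-map a task's data (and protection) buffers
 * @iser_task: iser task holding the scatterlists
 * @iser_dir: iser data direction
 * @dma_dir: DMA direction to map for
 *
 * Returns 0 on success, or -EINVAL if mapping the data or protection
 * scatterlist fails. Nothing is left mapped on failure.
 */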
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct iser_data_buf *data = &iser_task->data[iser_dir];
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->iser_conn->ib_conn.device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
	if (unlikely(data->dma_nents == 0)) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pdata = &iser_task->prot[iser_dir];

		pdata->dma_nents = ib_dma_map_sg(dev, pdata->sg, pdata->size, dma_dir);
		if (unlikely(pdata->dma_nents == 0)) {
			iser_err("protection dma_map_sg failed!!!\n");
			goto out_unmap;
		}
	}

	return 0;

out_unmap:
	ib_dma_unmap_sg(dev, data->sg, data->size, dma_dir);
	return -EINVAL;
}

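/**
 * iser_dma_unmap_task_data - unmap a task's data (and protection) buffers
 * @iser_task: iser task holding the mapped scatterlists
 * @iser_dir: iser data direction
 * @dma_dir: DMA direction the buffers were mapped for
 */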
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      enum iser_data_dir iser_dir,
			      enum dma_data_direction dma_dir)
{
	struct iser_data_buf *data = &iser_task->data[iser_dir];
	struct ib_device *dev;

	dev = iser_task->iser_conn->ib_conn.device->ib_device;
	ib_dma_unmap_sg(dev, data->sg, data->size, dma_dir);

	if (scsi_prot_sg_count(iser_task->sc)) {
		struct iser_data_buf *pdata = &iser_task->prot[iser_dir];

		ib_dma_unmap_sg(dev, pdata->sg, pdata->size, dma_dir);
	}
}

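/*
 * "Register" a single-entry scatterlist without a memory region, using
 * the device's local DMA lkey and, if the PD exposes one, its unsafe
 * global rkey.
 */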
static int iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
			struct iser_mem_reg *reg)
{
	struct scatterlist *sg = mem->sg;

	reg->sge.lkey = device->pd->local_dma_lkey;
	/*
	 * FIXME: rework the registration code path to differentiate
	 * rkey/lkey use cases
	 */

	if (device->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
		reg->rkey = device->pd->unsafe_global_rkey;
	else
		reg->rkey = 0;
	reg->sge.addr = sg_dma_address(&sg[0]);
	reg->sge.length = sg_dma_len(&sg[0]);

	iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
		 " length=0x%x\n", reg->sge.lkey, reg->rkey,
		 reg->sge.addr, reg->sge.length);

	return 0;
}

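/**
 * iser_unreg_mem_fastreg - release a task's fastreg descriptor
 * @iser_task: iser task that owns the registration
 * @cmd_dir: iser data direction of the registration
 *
 * Returns the descriptor to the connection pool, checking the signature
 * MR first if it is still in signature-protected state (see below).
 */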
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
			    enum iser_data_dir cmd_dir)
{
	struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
	struct iser_fr_desc *desc;
	struct ib_mr_status mr_status;

	desc = reg->desc;
	if (!desc)
		return;

	/*
	 * The signature MR cannot be invalidated and reused without checking.
	 * libiscsi calls the check_protection transport handler only when a
	 * SCSI-Response is received, so the signature MR goes unchecked if
	 * the task completes for some other reason, such as a timeout or
	 * error handling. That's why we must check the signature MR here
	 * before putting it back in the free pool.
	 */
	if (unlikely(desc->sig_protected)) {
		desc->sig_protected = false;
		ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS,
				   &mr_status);
	}
	iser_reg_desc_put_fr(&iser_task->iser_conn->ib_conn, reg->desc);
	reg->desc = NULL;
}

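/* Fill in a T10-DIF signature domain from the command's protection info. */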
static void iser_set_dif_domain(struct scsi_cmnd *sc,
				struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.pi_interval = scsi_prot_interval(sc);
	domain->sig.dif.ref_tag = t10_pi_ref_tag(scsi_cmd_to_rq(sc));
	/*
	 * At the moment we hard-code these values; in the future
	 * they should be taken from sc.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
		domain->sig.dif.ref_remap = true;
}

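/*
 * Translate the command's protection operation into signature attributes
 * for the memory (host) and wire domains: INSERT/STRIP operations leave
 * one domain unprotected, PASS operations protect both.
 */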
static int iser_set_sig_attrs(struct scsi_cmnd *sc,
			      struct ib_sig_attrs *sig_attrs)
{
	switch (scsi_get_prot_op(sc)) {
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		iser_set_dif_domain(sc, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						 IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		iser_set_dif_domain(sc, &sig_attrs->wire);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		iser_set_dif_domain(sc, &sig_attrs->mem);
		sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
						 IB_T10DIF_CSUM : IB_T10DIF_CRC;
		break;
	default:
		iser_err("Unsupported PI operation %d\n",
			 scsi_get_prot_op(sc));
		return -EINVAL;
	}

	return 0;
}

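/* Build the signature check mask from the command's protection flags. */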
static inline void iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
	*mask = 0;
	if (sc->prot_flags & SCSI_PROT_REF_CHECK)
		*mask |= IB_SIG_CHECK_REFTAG;
	if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
		*mask |= IB_SIG_CHECK_GUARD;
}

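/* Prepare a local-invalidate WR for the MR's current rkey and chain it. */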
static inline void iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr,
				 struct ib_cqe *cqe, struct ib_send_wr *next_wr)
{
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->wr_cqe = cqe;
	inv_wr->ex.invalidate_rkey = mr->rkey;
	inv_wr->send_flags = 0;
	inv_wr->num_sge = 0;
	inv_wr->next = next_wr;
}

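/*
 * Build an IB_WR_REG_MR_INTEGRITY work request that registers the data
 * and protection scatterlists with the signature MR, chaining a local
 * invalidate first if the MR is still registered from a previous task.
 * The WR chain is posted later together with the command send WR.
 */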
static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *mem,
			   struct iser_data_buf *sig_mem,
			   struct iser_reg_resources *rsc,
			   struct iser_mem_reg *sig_reg)
{
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
	struct ib_mr *mr = rsc->sig_mr;
	struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
	struct ib_reg_wr *wr = &tx_desc->reg_wr;
	int ret;

	memset(sig_attrs, 0, sizeof(*sig_attrs));
	ret = iser_set_sig_attrs(iser_task->sc, sig_attrs);
	if (ret)
		goto err;

	iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);

	if (rsc->sig_mr->need_inval)
		iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);

	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

	ret = ib_map_mr_sg_pi(mr, mem->sg, mem->dma_nents, NULL,
			      sig_mem->sg, sig_mem->dma_nents, NULL, SZ_4K);
	if (unlikely(ret)) {
		iser_err("failed to map PI sg (%d)\n",
			 mem->dma_nents + sig_mem->dma_nents);
		goto err;
	}

	memset(wr, 0, sizeof(*wr));
	wr->wr.next = &tx_desc->send_wr;
	wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
	wr->wr.wr_cqe = cqe;
	wr->wr.num_sge = 0;
	wr->wr.send_flags = 0;
	wr->mr = mr;
	wr->key = mr->rkey;
	wr->access = IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE;
	rsc->sig_mr->need_inval = true;

	sig_reg->sge.lkey = mr->lkey;
	sig_reg->rkey = mr->rkey;
	sig_reg->sge.addr = mr->iova;
	sig_reg->sge.length = mr->length;

	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n",
		 sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
		 sig_reg->sge.length);
err:
	return ret;
}

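/*
 * Build an IB_WR_REG_MR work request that maps the data scatterlist onto
 * the descriptor's MR, chaining a local invalidate first if the MR is
 * still registered from a previous task.
 */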
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
			    struct iser_data_buf *mem,
			    struct iser_reg_resources *rsc,
			    struct iser_mem_reg *reg)
{
	struct iser_tx_desc *tx_desc = &iser_task->desc;
	struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
	struct ib_mr *mr = rsc->mr;
	struct ib_reg_wr *wr = &tx_desc->reg_wr;
	int n;

	if (rsc->mr->need_inval)
		iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);

	ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

	n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SZ_4K);
	if (unlikely(n != mem->dma_nents)) {
		iser_err("failed to map sg (%d/%d)\n",
			 n, mem->dma_nents);
		return n < 0 ? n : -EINVAL;
	}

	wr->wr.next = &tx_desc->send_wr;
	wr->wr.opcode = IB_WR_REG_MR;
	wr->wr.wr_cqe = cqe;
	wr->wr.send_flags = 0;
	wr->wr.num_sge = 0;
	wr->mr = mr;
	wr->key = mr->rkey;
	wr->access = IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_WRITE |
		     IB_ACCESS_REMOTE_READ;

	rsc->mr->need_inval = true;

	reg->sge.lkey = mr->lkey;
	reg->rkey = mr->rkey;
	reg->sge.addr = mr->iova;
	reg->sge.length = mr->length;

	iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=0x%x\n",
		 reg->sge.lkey, reg->rkey, reg->sge.addr, reg->sge.length);

	return 0;
}

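/**
 * iser_reg_mem_fastreg - register a task's data for RDMA
 * @task: iser task carrying the data buffers
 * @dir: iser data direction to register
 * @all_imm: true if the payload is sent entirely as immediate data
 *
 * A single-entry scatterlist with no protection information is handled
 * with the local DMA lkey (unless the always_register module parameter
 * forces full registration); everything else takes a fastreg descriptor
 * from the pool, to be released later via iser_unreg_mem_fastreg().
 */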
int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
			 enum iser_data_dir dir,
			 bool all_imm)
{
	struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct iser_data_buf *mem = &task->data[dir];
	struct iser_mem_reg *reg = &task->rdma_reg[dir];
	struct iser_fr_desc *desc;
	bool use_dma_key;
	int err;

	use_dma_key = mem->dma_nents == 1 && (all_imm || !iser_always_reg) &&
		      scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL;
	if (use_dma_key)
		return iser_reg_dma(device, mem, reg);

	desc = iser_reg_desc_get_fr(ib_conn);
	if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) {
		err = iser_fast_reg_mr(task, mem, &desc->rsc, reg);
		if (unlikely(err))
			goto err_reg;
	} else {
		err = iser_reg_sig_mr(task, mem, &task->prot[dir],
				      &desc->rsc, reg);
		if (unlikely(err))
			goto err_reg;

		desc->sig_protected = true;
	}

	reg->desc = desc;

	return 0;

err_reg:
	iser_reg_desc_put_fr(ib_conn, desc);

	return err;
}