/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */

/**
 * iser_start_rdma_unaligned_sg - allocate a contiguous bounce buffer for a
 * task whose scatterlist is not aligned for RDMA, copy the payload into it
 * for write commands, and DMA-map it as a single-entry SG list.
 */
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
					struct iser_data_buf *data,
					struct iser_data_buf *data_copy,
					enum iser_data_dir cmd_dir)
{
	struct ib_device *dev = iser_task->ib_conn->device->ib_device;
	struct scatterlist *sgl = (struct scatterlist *)data->buf;
	struct scatterlist *sg;
	char *mem = NULL;
	unsigned long cmd_data_len = 0;
	int dma_nents, i;

	for_each_sg(sgl, sg, data->size, i)
		cmd_data_len += ib_sg_dma_len(dev, sg);

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		mem = (void *)__get_free_pages(GFP_ATOMIC,
		      ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		mem = kmalloc(cmd_data_len, GFP_ATOMIC);

	if (mem == NULL) {
		iser_err("Failed to allocate mem size %d %d for copying sglist\n",
			 data->size, (int)cmd_data_len);
		return -ENOMEM;
	}

	if (cmd_dir == ISER_DIR_OUT) {
		/* copy the unaligned sg into the buffer which is used for RDMA */
		int i;
		char *p, *from;

		sgl = (struct scatterlist *)data->buf;
		p = mem;
		for_each_sg(sgl, sg, data->size, i) {
			from = kmap_atomic(sg_page(sg));
			memcpy(p,
			       from + sg->offset,
			       sg->length);
			kunmap_atomic(from);
			p += sg->length;
		}
	}

	sg_init_one(&data_copy->sg_single, mem, cmd_data_len);
	data_copy->buf = &data_copy->sg_single;
	data_copy->size = 1;
	data_copy->copy_buf = mem;

	dma_nents = ib_dma_map_sg(dev, &data_copy->sg_single, 1,
				  (cmd_dir == ISER_DIR_OUT) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
	BUG_ON(dma_nents == 0);

	data_copy->dma_nents = dma_nents;
	data_copy->data_len = cmd_data_len;

	return 0;
}
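/*
 * Note: for payloads above ISER_KMALLOC_THRESHOLD the bounce buffer comes
 * from __get_free_pages(); the page order derived from
 * roundup_pow_of_two(cmd_data_len) above must match the order used when
 * the buffer is released in iser_finalize_rdma_unaligned_sg() below.
 */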
/**
 * iser_finalize_rdma_unaligned_sg - unmap the bounce buffer and, for read
 * commands, copy the received data back to the task's unaligned scatterlist
 * before freeing the buffer.
 */

void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
				     struct iser_data_buf *data,
				     struct iser_data_buf *data_copy,
				     enum iser_data_dir cmd_dir)
{
	struct ib_device *dev;
	unsigned long cmd_data_len;

	dev = iser_task->ib_conn->device->ib_device;

	ib_dma_unmap_sg(dev, &data_copy->sg_single, 1,
			(cmd_dir == ISER_DIR_OUT) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (cmd_dir == ISER_DIR_IN) {
		char *mem;
		struct scatterlist *sgl, *sg;
		unsigned char *p, *to;
		unsigned int sg_size;
		int i;

		/* copy back read RDMA to unaligned sg */
		mem = data_copy->copy_buf;

		sgl = (struct scatterlist *)data->buf;
		sg_size = data->size;

		p = mem;
		for_each_sg(sgl, sg, sg_size, i) {
			to = kmap_atomic(sg_page(sg));
			memcpy(to + sg->offset,
			       p,
			       sg->length);
			kunmap_atomic(to);
			p += sg->length;
		}
	}

	cmd_data_len = data->data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		free_pages((unsigned long)data_copy->copy_buf,
			   ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		kfree(data_copy->copy_buf);

	data_copy->copy_buf = NULL;
}

#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
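/*
 * A note on the IS_4K_ALIGNED() test above (assuming the conventional
 * definition MASK_4K = ~(SIZE_4K - 1) in iscsi_iser.h): "addr & ~MASK_4K"
 * extracts the low 12 bits of an address, which are zero exactly when the
 * address sits on a 4K boundary, while "addr & MASK_4K", used further
 * down, rounds an address down to its containing 4K page.
 */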
/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (may be
 * less than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements. Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages. The code also supports the weird case
 * where several fragments of the same page are present in the SG as
 * consecutive elements, and handles a single-entry SG as well.
 */

static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct ib_device *ibdev, u64 *pages,
			       int *offset, int *data_size)
{
	struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
	u64 start_addr, end_addr, page, chunk_start = 0;
	unsigned long total_sz = 0;
	unsigned int dma_len;
	int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;

	/* compute the offset of first element */
	*offset = (u64) sgl[0].offset & ~MASK_4K;

	new_chunk = 1;
	cur_page = 0;
	for_each_sg(sgl, sg, data->dma_nents, i) {
		start_addr = ib_sg_dma_address(ibdev, sg);
		if (new_chunk)
			chunk_start = start_addr;
		dma_len = ib_sg_dma_len(ibdev, sg);
		end_addr = start_addr + dma_len;
		total_sz += dma_len;

		/* collect page fragments until aligned or end of SG list */
		if (!IS_4K_ALIGNED(end_addr) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		/* address of the first page in the contiguous chunk;
		   masking relevant for the very first SG entry,
		   which might be unaligned */
		page = chunk_start & MASK_4K;
		do {
			pages[cur_page++] = page;
			page += SIZE_4K;
		} while (page < end_addr);
	}

	*data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n",
		 *data_size, cur_page);
	return cur_page;
}
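/*
 * Worked example for the translation above (hypothetical addresses): for
 * a mapped SG list of three entries {0x10200/len 0x600, 0x10800/len 0x800,
 * 0x11000/len 0x1000}, the first entry ends mid-page, so it is folded
 * together with the second entry into a single chunk; the loop emits the
 * page addresses 0x10000 and 0x11000, with *offset = 0x200 and
 * *data_size = 0x1e00.
 */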
/**
 * iser_data_buf_aligned_len - Tries to determine the maximal sub-list of a
 * scatter-gather list of memory buffers which is correctly aligned for RDMA,
 * and returns the number of entries which are aligned correctly. Supports
 * the case where consecutive SG elements are actually fragments of the same
 * physical page.
 */
static int iser_data_buf_aligned_len(struct iser_data_buf *data,
				     struct ib_device *ibdev)
{
	struct scatterlist *sgl, *sg, *next_sg = NULL;
	u64 start_addr, end_addr;
	int i, ret_len, start_check = 0;

	if (data->dma_nents == 1)
		return 1;

	sgl = (struct scatterlist *)data->buf;
	start_addr = ib_sg_dma_address(ibdev, sgl);

	for_each_sg(sgl, sg, data->dma_nents, i) {
		if (start_check && !IS_4K_ALIGNED(start_addr))
			break;

		next_sg = sg_next(sg);
		if (!next_sg)
			break;

		end_addr = start_addr + ib_sg_dma_len(ibdev, sg);
		start_addr = ib_sg_dma_address(ibdev, next_sg);

		if (end_addr == start_addr) {
			start_check = 0;
			continue;
		} else
			start_check = 1;

		if (!IS_4K_ALIGNED(end_addr))
			break;
	}
	ret_len = (next_sg) ? i : i+1;
	iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
		 ret_len, data->dma_nents, data);
	return ret_len;
}

static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sgl = (struct scatterlist *)data->buf;
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, data->dma_nents, i)
		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
			 sg_page(sg), sg->offset,
			 sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec length %d data size %d\n",
		 page_vec->length, page_vec->data_size);
	for (i = 0; i < page_vec->length; i++)
		iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
}

static void iser_page_vec_build(struct iser_data_buf *data,
				struct iser_page_vec *page_vec,
				struct ib_device *ibdev)
{
	int page_vec_len = 0;

	page_vec->length = 0;
	page_vec->offset = 0;

	iser_dbg("Translating sg sz: %d\n", data->dma_nents);
	page_vec_len = iser_sg_to_page_vec(data, ibdev, page_vec->pages,
					   &page_vec->offset,
					   &page_vec->data_size);
	iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);

	page_vec->length = page_vec_len;

	if (page_vec_len * SIZE_4K < page_vec->data_size) {
		iser_err("page_vec too short to hold this SG\n");
		iser_data_buf_dump(data, ibdev);
		iser_dump_page_vec(page_vec);
		BUG();
	}
}

int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_task->dir[iser_dir] = 1;
	dev = iser_task->ib_conn->device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
			      struct iser_data_buf *data)
{
	struct ib_device *dev;

	dev = iser_task->ib_conn->device->ib_device;
	ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
}

static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
			      struct ib_device *ibdev,
			      struct iser_data_buf *mem,
			      struct iser_data_buf *mem_copy,
			      enum iser_data_dir cmd_dir,
			      int aligned_len)
{
	struct iscsi_conn *iscsi_conn = iser_task->ib_conn->iscsi_conn;

	iscsi_conn->fmr_unalign_cnt++;
	iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
		  aligned_len, mem->size);

	if (iser_debug_level > 0)
		iser_data_buf_dump(mem, ibdev);

	/* unmap the command data before accessing it */
	iser_dma_unmap_task_data(iser_task, mem);

	/* allocate copy buf, if we are writing, copy the
	 * unaligned scatterlist, dma map the copy */
	if (iser_start_rdma_unaligned_sg(iser_task, mem, mem_copy, cmd_dir) != 0)
		return -ENOMEM;

	return 0;
}
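/*
 * Two registration schemes follow: iser_reg_rdma_mem_fmr() maps the page
 * vector through the connection's FMR pool, while iser_reg_rdma_mem_fastreg()
 * (further below) posts fast registration work requests, optionally wrapped
 * by a signature MR when T10-PI is in use. Both schemes fall back to a
 * bounce buffer via fall_to_bounce_buf() when the SG list is not 4K-aligned.
 */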
/**
 * iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
 * using FMR (if possible) obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
			  enum iser_data_dir cmd_dir)
{
	struct iser_conn *ib_conn = iser_task->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_regd_buf *regd_buf;
	int aligned_len;
	int err;
	int i;
	struct scatterlist *sg;

	regd_buf = &iser_task->rdma_regd[cmd_dir];

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		err = fall_to_bounce_buf(iser_task, ibdev, mem,
					 &iser_task->data_copy[cmd_dir],
					 cmd_dir, aligned_len);
		if (err) {
			iser_err("failed to allocate bounce buffer\n");
			return err;
		}
		mem = &iser_task->data_copy[cmd_dir];
	}

	/* if there is a single dma entry, FMR is not needed */
	if (mem->dma_nents == 1) {
		sg = (struct scatterlist *)mem->buf;

		regd_buf->reg.lkey = device->mr->lkey;
		regd_buf->reg.rkey = device->mr->rkey;
		regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
		regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
		regd_buf->reg.is_mr = 0;

		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
			 "va: 0x%08lX sz: %ld\n",
			 (unsigned int)regd_buf->reg.lkey,
			 (unsigned int)regd_buf->reg.rkey,
			 (unsigned long)regd_buf->reg.va,
			 (unsigned long)regd_buf->reg.len);
	} else { /* use FMR for multiple dma entries */
		iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev);
		err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec,
					&regd_buf->reg);
		if (err && err != -EAGAIN) {
			iser_data_buf_dump(mem, ibdev);
			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
				 mem->dma_nents,
				 ntoh24(iser_task->desc.iscsi_header.dlength));
			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
				 ib_conn->fmr.page_vec->data_size,
				 ib_conn->fmr.page_vec->length,
				 ib_conn->fmr.page_vec->offset);
			for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
				iser_err("page_vec[%d] = 0x%llx\n", i,
					 (unsigned long long)ib_conn->fmr.page_vec->pages[i]);
		}
		if (err)
			return err;
	}
	return 0;
}
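/*
 * T10-PI (DIF) support: iser_set_sig_attrs() below tells the HCA what
 * protection information exists in the memory domain and what should exist
 * on the wire. For WRITE_INSERT/READ_STRIP the guard data exists only on
 * the wire, for READ_INSERT/WRITE_STRIP only in memory, and for
 * READ_PASS/WRITE_PASS it exists in both domains.
 */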
static inline enum ib_t10_dif_type
scsi2ib_prot_type(unsigned char prot_type)
{
	switch (prot_type) {
	case SCSI_PROT_DIF_TYPE0:
		return IB_T10DIF_NONE;
	case SCSI_PROT_DIF_TYPE1:
		return IB_T10DIF_TYPE1;
	case SCSI_PROT_DIF_TYPE2:
		return IB_T10DIF_TYPE2;
	case SCSI_PROT_DIF_TYPE3:
		return IB_T10DIF_TYPE3;
	default:
		return IB_T10DIF_NONE;
	}
}

static int
iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
{
	unsigned char scsi_ptype = scsi_get_prot_type(sc);

	sig_attrs->mem.sig_type = IB_SIG_TYPE_T10_DIF;
	sig_attrs->wire.sig_type = IB_SIG_TYPE_T10_DIF;
	sig_attrs->mem.sig.dif.pi_interval = sc->device->sector_size;
	sig_attrs->wire.sig.dif.pi_interval = sc->device->sector_size;

	switch (scsi_get_prot_op(sc)) {
	case SCSI_PROT_WRITE_INSERT:
	case SCSI_PROT_READ_STRIP:
		sig_attrs->mem.sig.dif.type = IB_T10DIF_NONE;
		sig_attrs->wire.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		sig_attrs->wire.sig.dif.ref_tag = scsi_get_lba(sc) &
						  0xffffffff;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		sig_attrs->mem.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
		sig_attrs->mem.sig.dif.ref_tag = scsi_get_lba(sc) &
						 0xffffffff;
		sig_attrs->wire.sig.dif.type = IB_T10DIF_NONE;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		sig_attrs->mem.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
		sig_attrs->mem.sig.dif.bg_type = IB_T10DIF_CRC;
		sig_attrs->mem.sig.dif.ref_tag = scsi_get_lba(sc) &
						 0xffffffff;
		sig_attrs->wire.sig.dif.type = scsi2ib_prot_type(scsi_ptype);
		sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
		sig_attrs->wire.sig.dif.ref_tag = scsi_get_lba(sc) &
						  0xffffffff;
		break;
	default:
		iser_err("Unsupported PI operation %d\n",
			 scsi_get_prot_op(sc));
		return -EINVAL;
	}
	return 0;
}

static int
iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
	switch (scsi_get_prot_type(sc)) {
	case SCSI_PROT_DIF_TYPE0:
		*mask = 0x0;
		break;
	case SCSI_PROT_DIF_TYPE1:
	case SCSI_PROT_DIF_TYPE2:
		*mask = ISER_CHECK_GUARD | ISER_CHECK_REFTAG;
		break;
	case SCSI_PROT_DIF_TYPE3:
		*mask = ISER_CHECK_GUARD;
		break;
	default:
		iser_err("Unsupported protection type %d\n",
			 scsi_get_prot_type(sc));
		return -EINVAL;
	}

	return 0;
}

static int
iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
		struct fast_reg_descriptor *desc, struct ib_sge *data_sge,
		struct ib_sge *prot_sge, struct ib_sge *sig_sge)
{
	struct iser_conn *ib_conn = iser_task->ib_conn;
	struct iser_pi_context *pi_ctx = desc->pi_ctx;
	struct ib_send_wr sig_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	struct ib_sig_attrs sig_attrs;
	int ret;
	u32 key;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = iser_set_sig_attrs(iser_task->sc, &sig_attrs);
	if (ret)
		goto err;

	ret = iser_set_prot_checks(iser_task->sc, &sig_attrs.check_mask);
	if (ret)
		goto err;

	if (!(desc->reg_indicators & ISER_SIG_KEY_VALID)) {
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
		inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.sg_list = data_sge;
	sig_wr.num_sge = 1;
	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
	if (scsi_prot_sg_count(iser_task->sc))
		sig_wr.wr.sig_handover.prot = prot_sge;
	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE |
					      IB_ACCESS_REMOTE_READ |
					      IB_ACCESS_REMOTE_WRITE;

	if (!wr)
		wr = &sig_wr;
	else
		wr->next = &sig_wr;

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		iser_err("reg_sig_mr failed, ret:%d\n", ret);
		goto err;
	}
	desc->reg_indicators &= ~ISER_SIG_KEY_VALID;

	sig_sge->lkey = pi_ctx->sig_mr->lkey;
	sig_sge->addr = 0;
	sig_sge->length = data_sge->length + prot_sge->length;
	if (scsi_get_prot_op(iser_task->sc) == SCSI_PROT_WRITE_INSERT ||
	    scsi_get_prot_op(iser_task->sc) == SCSI_PROT_READ_STRIP) {
		sig_sge->length += (data_sge->length /
				    iser_task->sc->device->sector_size) * 8;
	}

	iser_dbg("sig_sge: addr: 0x%llx length: %u lkey: 0x%x\n",
		 sig_sge->addr, sig_sge->length,
		 sig_sge->lkey);
err:
	return ret;
}
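/*
 * iser_reg_sig_mr() above and iser_fast_reg_mr() below share a pattern:
 * when the MR key is no longer valid, an IB_WR_LOCAL_INV work request is
 * chained in front of the registration work request, and the low-order
 * 8 "key" bits of the rkey are bumped so that a stale rkey from an
 * earlier registration of the same MR cannot be reused.
 */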
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
			    struct iser_regd_buf *regd_buf,
			    struct iser_data_buf *mem,
			    enum iser_reg_indicator ind,
			    struct ib_sge *sge)
{
	struct fast_reg_descriptor *desc = regd_buf->reg.mem_h;
	struct iser_conn *ib_conn = iser_task->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr fastreg_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	u8 key;
	int ret, offset, size, plen;

	/* if there is a single dma entry, dma mr suffices */
	if (mem->dma_nents == 1) {
		struct scatterlist *sg = (struct scatterlist *)mem->buf;

		sge->lkey = device->mr->lkey;
		sge->addr = ib_sg_dma_address(ibdev, &sg[0]);
		sge->length = ib_sg_dma_len(ibdev, &sg[0]);

		iser_dbg("Single DMA entry: lkey=0x%x, addr=0x%llx, length=0x%x\n",
			 sge->lkey, sge->addr, sge->length);
		return 0;
	}

	if (ind == ISER_DATA_KEY_VALID) {
		mr = desc->data_mr;
		frpl = desc->data_frpl;
	} else {
		mr = desc->pi_ctx->prot_mr;
		frpl = desc->pi_ctx->prot_frpl;
	}

	plen = iser_sg_to_page_vec(mem, device->ib_device, frpl->page_list,
				   &offset, &size);
	if (plen * SIZE_4K < size) {
		iser_err("fast reg page_list too short to hold this SG\n");
		return -EINVAL;
	}

	if (!(desc->reg_indicators & ind)) {
		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.wr_id = ISER_FASTREG_LI_WRID;
		inv_wr.opcode = IB_WR_LOCAL_INV;
		inv_wr.ex.invalidate_rkey = mr->rkey;
		wr = &inv_wr;
		/* Bump the key */
		key = (u8)(mr->rkey & 0x000000FF);
		ib_update_fast_reg_key(mr, ++key);
	}

	/* Prepare FASTREG WR */
	memset(&fastreg_wr, 0, sizeof(fastreg_wr));
	fastreg_wr.wr_id = ISER_FASTREG_LI_WRID;
	fastreg_wr.opcode = IB_WR_FAST_REG_MR;
	fastreg_wr.wr.fast_reg.iova_start = frpl->page_list[0] + offset;
	fastreg_wr.wr.fast_reg.page_list = frpl;
	fastreg_wr.wr.fast_reg.page_list_len = plen;
	fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
	fastreg_wr.wr.fast_reg.length = size;
	fastreg_wr.wr.fast_reg.rkey = mr->rkey;
	fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
					       IB_ACCESS_REMOTE_WRITE |
					       IB_ACCESS_REMOTE_READ);

	if (!wr)
		wr = &fastreg_wr;
	else
		wr->next = &fastreg_wr;

	ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
	if (ret) {
		iser_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	desc->reg_indicators &= ~ind;

	sge->lkey = mr->lkey;
	sge->addr = frpl->page_list[0] + offset;
	sge->length = size;

	return ret;
}
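/*
 * Registration decision summary for the fastreg path: a single mapped DMA
 * entry is covered by the device's global DMA MR and needs no fast
 * registration; longer lists consume a descriptor from the connection's
 * fastreg pool, and commands carrying T10-PI additionally wrap the data
 * (and, when present, protection) registrations in a signature MR.
 */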
/**
 * iser_reg_rdma_mem_fastreg - Registers memory intended for RDMA,
 * using Fast Registration WR (if possible) obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
			      enum iser_data_dir cmd_dir)
{
	struct iser_conn *ib_conn = iser_task->ib_conn;
	struct iser_device *device = ib_conn->device;
	struct ib_device *ibdev = device->ib_device;
	struct iser_data_buf *mem = &iser_task->data[cmd_dir];
	struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
	struct fast_reg_descriptor *desc = NULL;
	struct ib_sge data_sge;
	int err, aligned_len;
	unsigned long flags;

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		err = fall_to_bounce_buf(iser_task, ibdev, mem,
					 &iser_task->data_copy[cmd_dir],
					 cmd_dir, aligned_len);
		if (err) {
			iser_err("failed to allocate bounce buffer\n");
			return err;
		}
		mem = &iser_task->data_copy[cmd_dir];
	}

	if (mem->dma_nents != 1 ||
	    scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
		spin_lock_irqsave(&ib_conn->lock, flags);
		desc = list_first_entry(&ib_conn->fastreg.pool,
					struct fast_reg_descriptor, list);
		list_del(&desc->list);
		spin_unlock_irqrestore(&ib_conn->lock, flags);
		regd_buf->reg.mem_h = desc;
	}

	err = iser_fast_reg_mr(iser_task, regd_buf, mem,
			       ISER_DATA_KEY_VALID, &data_sge);
	if (err)
		goto err_reg;

	if (scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
		struct ib_sge prot_sge, sig_sge;

		memset(&prot_sge, 0, sizeof(prot_sge));
		if (scsi_prot_sg_count(iser_task->sc)) {
			mem = &iser_task->prot[cmd_dir];
			aligned_len = iser_data_buf_aligned_len(mem, ibdev);
			if (aligned_len != mem->dma_nents) {
				err = fall_to_bounce_buf(iser_task, ibdev, mem,
							 &iser_task->prot_copy[cmd_dir],
							 cmd_dir, aligned_len);
				if (err) {
					iser_err("failed to allocate bounce buffer\n");
					return err;
				}
				mem = &iser_task->prot_copy[cmd_dir];
			}

			err = iser_fast_reg_mr(iser_task, regd_buf, mem,
					       ISER_PROT_KEY_VALID, &prot_sge);
			if (err)
				goto err_reg;
		}

		err = iser_reg_sig_mr(iser_task, desc, &data_sge,
				      &prot_sge, &sig_sge);
		if (err) {
			iser_err("Failed to register signature mr\n");
			return err;
		}
		desc->reg_indicators |= ISER_FASTREG_PROTECTED;

		regd_buf->reg.lkey = sig_sge.lkey;
		regd_buf->reg.rkey = desc->pi_ctx->sig_mr->rkey;
		regd_buf->reg.va = sig_sge.addr;
		regd_buf->reg.len = sig_sge.length;
		regd_buf->reg.is_mr = 1;
	} else {
		if (desc) {
			regd_buf->reg.rkey = desc->data_mr->rkey;
			regd_buf->reg.is_mr = 1;
		} else {
			regd_buf->reg.rkey = device->mr->rkey;
			regd_buf->reg.is_mr = 0;
		}

		regd_buf->reg.lkey = data_sge.lkey;
		regd_buf->reg.va = data_sge.addr;
		regd_buf->reg.len = data_sge.length;
	}

	return 0;
err_reg:
	if (desc) {
		spin_lock_irqsave(&ib_conn->lock, flags);
		list_add_tail(&desc->list, &ib_conn->fastreg.pool);
		spin_unlock_irqrestore(&ib_conn->lock, flags);
	}

	return err;
}