/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: iser_memory.c 6964 2006-05-07 11:11:43Z ogerlitz $
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include "iscsi_iser.h"

#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */

/**
 * iser_regd_buff_release - Decrements the reference count for the
 * registered buffer and releases it.
 *
 * returns 0 if released, 1 if deferred
 */
int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
{
	struct ib_device *dev;

	if ((atomic_read(&regd_buf->ref_count) == 0) ||
	    atomic_dec_and_test(&regd_buf->ref_count)) {
		/* if we used the dma mr, unreg is just NOP */
		if (regd_buf->reg.is_fmr)
			iser_unreg_mem(&regd_buf->reg);

		if (regd_buf->dma_addr) {
			dev = regd_buf->device->ib_device;
			ib_dma_unmap_single(dev,
					    regd_buf->dma_addr,
					    regd_buf->data_size,
					    regd_buf->direction);
		}
		/* else this regd buf is associated with a task which we
		 * dma_unmap_single/sg later */
		return 0;
	} else {
		iser_dbg("Release deferred, regd.buff: 0x%p\n", regd_buf);
		return 1;
	}
}

/**
 * iser_reg_single - fills registered buffer descriptor with
 * registration information
 */
void iser_reg_single(struct iser_device *device,
		     struct iser_regd_buf *regd_buf,
		     enum dma_data_direction direction)
{
	u64 dma_addr;

	dma_addr = ib_dma_map_single(device->ib_device,
				     regd_buf->virt_addr,
				     regd_buf->data_size, direction);
	BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));

	regd_buf->reg.lkey = device->mr->lkey;
	regd_buf->reg.len  = regd_buf->data_size;
	regd_buf->reg.va   = dma_addr;
	regd_buf->reg.is_fmr = 0;

	regd_buf->dma_addr  = dma_addr;
	regd_buf->direction = direction;
}
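
/*
 * Note on the copy ("bounce") buffer sizing used below (an illustrative
 * worked example, not taken from the original sources): commands larger
 * than ISER_KMALLOC_THRESHOLD (128K) are allocated with __get_free_pages()
 * at order ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT.
 * Assuming 4K pages (PAGE_SHIFT == 12), a 192K (0x30000) command rounds
 * up to 256K (2^18), giving order 18 - 12 = 6, i.e. 64 pages = 256K,
 * which is enough to hold the flattened scatterlist.
 */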
/**
 * iser_start_rdma_unaligned_sg - copies an unaligned SG into a contiguous
 * bounce buffer and DMA maps that buffer for the RDMA operation.
 */
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
					enum iser_data_dir cmd_dir)
{
	int dma_nents;
	struct ib_device *dev;
	char *mem = NULL;
	struct iser_data_buf *data = &iser_ctask->data[cmd_dir];
	unsigned long cmd_data_len = data->data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		mem = (void *)__get_free_pages(GFP_NOIO,
		      ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		mem = kmalloc(cmd_data_len, GFP_NOIO);

	if (mem == NULL) {
		iser_err("Failed to allocate mem size %d %d for copying sglist\n",
			 data->size, (int)cmd_data_len);
		return -ENOMEM;
	}

	if (cmd_dir == ISER_DIR_OUT) {
		/* copy the unaligned sg into the buffer which is used for RDMA */
		struct scatterlist *sgl = (struct scatterlist *)data->buf;
		struct scatterlist *sg;
		int i;
		char *p, *from;

		p = mem;
		for_each_sg(sgl, sg, data->size, i) {
			from = kmap_atomic(sg_page(sg), KM_USER0);
			memcpy(p,
			       from + sg->offset,
			       sg->length);
			kunmap_atomic(from, KM_USER0);
			p += sg->length;
		}
	}

	sg_init_one(&iser_ctask->data_copy[cmd_dir].sg_single, mem, cmd_data_len);
	iser_ctask->data_copy[cmd_dir].buf  =
		&iser_ctask->data_copy[cmd_dir].sg_single;
	iser_ctask->data_copy[cmd_dir].size = 1;

	iser_ctask->data_copy[cmd_dir].copy_buf = mem;

	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
	dma_nents = ib_dma_map_sg(dev,
				  &iser_ctask->data_copy[cmd_dir].sg_single,
				  1,
				  (cmd_dir == ISER_DIR_OUT) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE);
	BUG_ON(dma_nents == 0);

	iser_ctask->data_copy[cmd_dir].dma_nents = dma_nents;
	return 0;
}

/**
 * iser_finalize_rdma_unaligned_sg - unmaps the bounce buffer, copies it back
 * to the original SG for reads, and frees it.
 */
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
				     enum iser_data_dir cmd_dir)
{
	struct ib_device *dev;
	struct iser_data_buf *mem_copy;
	unsigned long cmd_data_len;

	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;
	mem_copy = &iser_ctask->data_copy[cmd_dir];

	ib_dma_unmap_sg(dev, &mem_copy->sg_single, 1,
			(cmd_dir == ISER_DIR_OUT) ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE);

	if (cmd_dir == ISER_DIR_IN) {
		char *mem;
		struct scatterlist *sgl, *sg;
		unsigned char *p, *to;
		unsigned int sg_size;
		int i;

		/* copy back read RDMA to unaligned sg */
		mem = mem_copy->copy_buf;

		sgl = (struct scatterlist *)iser_ctask->data[ISER_DIR_IN].buf;
		sg_size = iser_ctask->data[ISER_DIR_IN].size;

		p = mem;
		for_each_sg(sgl, sg, sg_size, i) {
			to = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
			memcpy(to + sg->offset,
			       p,
			       sg->length);
			kunmap_atomic(to, KM_SOFTIRQ0);
			p += sg->length;
		}
	}

	cmd_data_len = iser_ctask->data[cmd_dir].data_len;

	if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
		free_pages((unsigned long)mem_copy->copy_buf,
			   ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
	else
		kfree(mem_copy->copy_buf);

	mem_copy->copy_buf = NULL;
}
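
/*
 * Illustrative example of the page-vec translation performed by
 * iser_sg_to_page_vec() below (made-up dma addresses, 4K pages):
 * a mapped SG with dma_nents == 2, element 0 at 0x10600 with length
 * 0x0a00 (ending 4K aligned at 0x11000) and element 1 at 0x11000 with
 * length 0x1800, yields offset = 0x600, data_size = 0x2200 and
 * pages[] = { 0x10000, 0x11000, 0x12000 }, i.e. a length-3 page vec.
 */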
/**
 * iser_sg_to_page_vec - Translates scatterlist entries to physical addresses
 * and returns the length of the resulting physical address array (may be less
 * than the original due to possible compaction).
 *
 * We build a "page vec" under the assumption that the SG meets the RDMA
 * alignment requirements.  Other than the first and last SG elements, all
 * the "internal" elements can be compacted into a list whose elements are
 * dma addresses of physical pages.  The code also supports the weird case
 * where --few fragments of the same page-- are present in the SG as
 * consecutive elements.  Also, it handles one entry SG.
 */
static int iser_sg_to_page_vec(struct iser_data_buf *data,
			       struct iser_page_vec *page_vec,
			       struct ib_device *ibdev)
{
	struct scatterlist *sgl = (struct scatterlist *)data->buf;
	struct scatterlist *sg;
	u64 first_addr, last_addr, page;
	int end_aligned;
	unsigned int cur_page = 0;
	unsigned long total_sz = 0;
	int i;

	/* compute the offset of first element */
	page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;

	for_each_sg(sgl, sg, data->dma_nents, i) {
		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);

		total_sz += dma_len;

		first_addr = ib_sg_dma_address(ibdev, sg);
		last_addr  = first_addr + dma_len;

		end_aligned = !(last_addr & ~MASK_4K);

		/* continue to collect page fragments till aligned or SG ends */
		while (!end_aligned && (i + 1 < data->dma_nents)) {
			sg = sg_next(sg);
			i++;
			dma_len = ib_sg_dma_len(ibdev, sg);
			total_sz += dma_len;
			last_addr = ib_sg_dma_address(ibdev, sg) + dma_len;
			end_aligned = !(last_addr & ~MASK_4K);
		}

		/* handle the 1st page in the 1st DMA element */
		if (cur_page == 0) {
			page = first_addr & MASK_4K;
			page_vec->pages[cur_page] = page;
			cur_page++;
			page += SIZE_4K;
		} else
			page = first_addr;

		for (; page < last_addr; page += SIZE_4K) {
			page_vec->pages[cur_page] = page;
			cur_page++;
		}

	}
	page_vec->data_size = total_sz;
	iser_dbg("page_vec->data_size:%d cur_page %d\n",
		 page_vec->data_size, cur_page);
	return cur_page;
}

#define IS_4K_ALIGNED(addr)	((((unsigned long)addr) & ~MASK_4K) == 0)
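
/*
 * Example of an alignment violation detected by iser_data_buf_aligned_len()
 * below (made-up dma addresses, 4K pages): with dma_nents == 3, if element 1
 * ends at 0x30800 (not 4K aligned) and element 2 does not start exactly there
 * (so the two are not fragments of the same page), the walk stops at that
 * point and fewer than dma_nents entries are reported as aligned, which sends
 * iser_reg_rdma_mem() down the copy (bounce buffer) path.
 */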
/**
 * iser_data_buf_aligned_len - Tries to determine the maximal correctly aligned
 * for RDMA sub-list of a scatter-gather list of memory buffers, and returns
 * the number of entries which are aligned correctly.  Supports the case where
 * consecutive SG elements are actually fragments of the same physical page.
 */
static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
					      struct ib_device *ibdev)
{
	struct scatterlist *sgl, *sg;
	u64 end_addr, next_addr;
	int i, cnt;
	unsigned int ret_len = 0;

	sgl = (struct scatterlist *)data->buf;

	cnt = 0;
	for_each_sg(sgl, sg, data->dma_nents, i) {
		/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
		   "offset: %ld sz: %ld\n", i,
		   (unsigned long)sg_phys(sg),
		   (unsigned long)sg->offset,
		   (unsigned long)sg->length); */
		end_addr = ib_sg_dma_address(ibdev, sg) +
			   ib_sg_dma_len(ibdev, sg);
		/* iser_dbg("Checking sg iobuf end address "
		   "0x%08lX\n", end_addr); */
		if (i + 1 < data->dma_nents) {
			next_addr = ib_sg_dma_address(ibdev, sg_next(sg));
			/* are i, i+1 fragments of the same page? */
			if (end_addr == next_addr) {
				cnt++;
				continue;
			} else if (!IS_4K_ALIGNED(end_addr)) {
				ret_len = cnt + 1;
				break;
			}
		}
		cnt++;
	}
	if (i == data->dma_nents)
		ret_len = cnt;	/* loop ended */
	iser_dbg("Found %d aligned entries out of %d in sg:0x%p\n",
		 ret_len, data->dma_nents, data);
	return ret_len;
}

static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sgl = (struct scatterlist *)data->buf;
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, data->dma_nents, i)
		iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
			 sg_page(sg), sg->offset,
			 sg->length, ib_sg_dma_len(ibdev, sg));
}

static void iser_dump_page_vec(struct iser_page_vec *page_vec)
{
	int i;

	iser_err("page vec length %d data size %d\n",
		 page_vec->length, page_vec->data_size);
	for (i = 0; i < page_vec->length; i++)
		iser_err("%d %lx\n", i, (unsigned long)page_vec->pages[i]);
}

static void iser_page_vec_build(struct iser_data_buf *data,
				struct iser_page_vec *page_vec,
				struct ib_device *ibdev)
{
	int page_vec_len = 0;

	page_vec->length = 0;
	page_vec->offset = 0;

	iser_dbg("Translating sg sz: %d\n", data->dma_nents);
	page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
	iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);

	page_vec->length = page_vec_len;

	if (page_vec_len * SIZE_4K < page_vec->data_size) {
		iser_err("page_vec too short to hold this SG\n");
		iser_data_buf_dump(data, ibdev);
		iser_dump_page_vec(page_vec);
		BUG();
	}
}

int iser_dma_map_task_data(struct iscsi_iser_cmd_task *iser_ctask,
			   struct iser_data_buf *data,
			   enum iser_data_dir iser_dir,
			   enum dma_data_direction dma_dir)
{
	struct ib_device *dev;

	iser_ctask->dir[iser_dir] = 1;
	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;

	data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
	if (data->dma_nents == 0) {
		iser_err("dma_map_sg failed!!!\n");
		return -EINVAL;
	}
	return 0;
}

void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
{
	struct ib_device *dev;
	struct iser_data_buf *data;

	dev = iser_ctask->iser_conn->ib_conn->device->ib_device;

	if (iser_ctask->dir[ISER_DIR_IN]) {
		data = &iser_ctask->data[ISER_DIR_IN];
		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
	}

	if (iser_ctask->dir[ISER_DIR_OUT]) {
		data = &iser_ctask->data[ISER_DIR_OUT];
		ib_dma_unmap_sg(dev, data->buf, data->size, DMA_TO_DEVICE);
	}
}
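
/*
 * A sketch of how the mapping/registration helpers above are meant to be
 * combined (illustrative only; the real callers live in the command
 * preparation and cleanup paths of the initiator code):
 *
 *	err = iser_dma_map_task_data(iser_ctask,
 *				     &iser_ctask->data[ISER_DIR_OUT],
 *				     ISER_DIR_OUT, DMA_TO_DEVICE);
 *	if (!err)
 *		err = iser_reg_rdma_mem(iser_ctask, ISER_DIR_OUT);
 *	...
 *	iser_dma_unmap_task_data(iser_ctask);
 */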
/**
 * iser_reg_rdma_mem - Registers memory intended for RDMA,
 * obtaining rkey and va
 *
 * returns 0 on success, errno code on failure
 */
int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
		      enum iser_data_dir cmd_dir)
{
	struct iser_conn     *ib_conn = iser_ctask->iser_conn->ib_conn;
	struct iser_device   *device  = ib_conn->device;
	struct ib_device     *ibdev   = device->ib_device;
	struct iser_data_buf *mem     = &iser_ctask->data[cmd_dir];
	struct iser_regd_buf *regd_buf;
	int aligned_len;
	int err;
	int i;
	struct scatterlist *sg;

	regd_buf = &iser_ctask->rdma_regd[cmd_dir];

	aligned_len = iser_data_buf_aligned_len(mem, ibdev);
	if (aligned_len != mem->dma_nents) {
		iser_err("rdma alignment violation %d/%d aligned\n",
			 aligned_len, mem->size);
		iser_data_buf_dump(mem, ibdev);

		/* unmap the command data before accessing it */
		iser_dma_unmap_task_data(iser_ctask);

		/* allocate copy buf, if we are writing, copy the
		 * unaligned scatterlist, dma map the copy */
		if (iser_start_rdma_unaligned_sg(iser_ctask, cmd_dir) != 0)
			return -ENOMEM;
		mem = &iser_ctask->data_copy[cmd_dir];
	}

	/* if there is a single dma entry, FMR is not needed */
	if (mem->dma_nents == 1) {
		sg = (struct scatterlist *)mem->buf;

		regd_buf->reg.lkey = device->mr->lkey;
		regd_buf->reg.rkey = device->mr->rkey;
		regd_buf->reg.len  = ib_sg_dma_len(ibdev, &sg[0]);
		regd_buf->reg.va   = ib_sg_dma_address(ibdev, &sg[0]);
		regd_buf->reg.is_fmr = 0;

		iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
			 "va: 0x%08lX sz: %ld]\n",
			 (unsigned int)regd_buf->reg.lkey,
			 (unsigned int)regd_buf->reg.rkey,
			 (unsigned long)regd_buf->reg.va,
			 (unsigned long)regd_buf->reg.len);
	} else { /* use FMR for multiple dma entries */
		iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
		err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
		if (err) {
			iser_data_buf_dump(mem, ibdev);
			iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
				 mem->dma_nents,
				 ntoh24(iser_ctask->desc.iscsi_header.dlength));
			iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
				 ib_conn->page_vec->data_size,
				 ib_conn->page_vec->length,
				 ib_conn->page_vec->offset);
			for (i = 0; i < ib_conn->page_vec->length; i++)
				iser_err("page_vec[%d] = 0x%llx\n", i,
					 (unsigned long long)ib_conn->page_vec->pages[i]);
			return err;
		}
	}

	/* take a reference on this regd buf such that it will not be released *
	 * (eg in send dto completion) before we get the scsi response         */
	atomic_inc(&regd_buf->ref_count);
	return 0;
}
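
/*
 * The reference taken at the end of iser_reg_rdma_mem() is meant to be
 * dropped via iser_regd_buff_release() once the SCSI response has been
 * processed (a sketch of the pairing, not the literal caller code):
 *
 *	iser_regd_buff_release(&iser_ctask->rdma_regd[cmd_dir]);
 *
 * When the count reaches zero, the FMR registration (if one was used) is
 * unregistered and any single-mapped dma address is unmapped.
 */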