/*******************************************************************************
 * Filename:  target_core_rd.c
 *
 * This file contains the Storage Engine <-> Ramdisk transport
 * specific functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_rd.h"

static struct se_subsystem_api rd_dr_template;
static struct se_subsystem_api rd_mcp_template;

/* #define DEBUG_RAMDISK_MCP */
/* #define DEBUG_RAMDISK_DR */

/* rd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 * Allocate and initialize a struct rd_host for this HBA and set the
 * queue depth limits advertised to the target core.
 */
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct rd_host *rd_host;

	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
	if (!(rd_host)) {
		printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
		return -ENOMEM;
	}

	rd_host->rd_host_id = host_id;

	atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
	atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
	hba->hba_ptr = (void *) rd_host;

	printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
	printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
		" Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
		rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
		RD_MAX_SECTORS);

	return 0;
}

static void rd_detach_hba(struct se_hba *hba)
{
	struct rd_host *rd_host = hba->hba_ptr;

	printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);

	kfree(rd_host);
	hba->hba_ptr = NULL;
}

/* rd_release_device_space():
 *
 * Free the backing pages and scatterlist tables of a ramdisk device.
 */
static void rd_release_device_space(struct rd_dev *rd_dev)
{
	u32 i, j, page_count = 0, sg_per_table;
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
		return;

	sg_table = rd_dev->sg_table_array;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg = sg_table[i].sg_table;
		sg_per_table = sg_table[i].rd_sg_count;

		for (j = 0; j < sg_per_table; j++) {
			pg = sg_page(&sg[j]);
			if ((pg)) {
				__free_page(pg);
				page_count++;
			}
		}

		kfree(sg);
	}

	printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);

	kfree(sg_table);
	rd_dev->sg_table_array = NULL;
	rd_dev->sg_table_count = 0;
}

/* rd_build_device_space():
 *
 * Allocate the scatterlist tables and backing pages that make up the
 * ramdisk's storage.  On failure the caller releases any partial
 * allocation via rd_release_device_space().
 */
static int rd_build_device_space(struct rd_dev *rd_dev)
{
	u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
	u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
				sizeof(struct scatterlist));
	struct rd_dev_sg_table *sg_table;
	struct page *pg;
	struct scatterlist *sg;

	if (rd_dev->rd_page_count <= 0) {
		printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
			rd_dev->rd_page_count);
		return -1;
	}
	total_sg_needed = rd_dev->rd_page_count;

	sg_tables = (total_sg_needed / max_sg_per_table) + 1;

	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
	if (!(sg_table)) {
		printk(KERN_ERR "Unable to allocate memory for Ramdisk"
			" scatterlist tables\n");
		return -1;
	}

	rd_dev->sg_table_array = sg_table;
	rd_dev->sg_table_count = sg_tables;

	while (total_sg_needed) {
		sg_per_table = (total_sg_needed > max_sg_per_table) ?
			max_sg_per_table : total_sg_needed;

		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
				GFP_KERNEL);
		if (!(sg)) {
			printk(KERN_ERR "Unable to allocate scatterlist array"
				" for struct rd_dev\n");
			return -1;
		}

		sg_init_table((struct scatterlist *)&sg[0], sg_per_table);

		sg_table[i].sg_table = sg;
		sg_table[i].rd_sg_count = sg_per_table;
		sg_table[i].page_start_offset = page_offset;
		sg_table[i++].page_end_offset = (page_offset + sg_per_table)
						- 1;

		for (j = 0; j < sg_per_table; j++) {
			pg = alloc_pages(GFP_KERNEL, 0);
			if (!(pg)) {
				printk(KERN_ERR "Unable to allocate scatterlist"
					" pages for struct rd_dev_sg_table\n");
				return -1;
			}
			sg_assign_page(&sg[j], pg);
			sg[j].length = PAGE_SIZE;
		}

		page_offset += sg_per_table;
		total_sg_needed -= sg_per_table;
	}

	printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
		rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count);

	return 0;
}

static void *rd_allocate_virtdevice(
	struct se_hba *hba,
	const char *name,
	int rd_direct)
{
	struct rd_dev *rd_dev;
	struct rd_host *rd_host = hba->hba_ptr;

	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
	if (!(rd_dev)) {
		printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
		return NULL;
	}

	rd_dev->rd_host = rd_host;
	rd_dev->rd_direct = rd_direct;

	return rd_dev;
}

static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	return rd_allocate_virtdevice(hba, name, 1);
}

static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	return rd_allocate_virtdevice(hba, name, 0);
}

/* rd_create_virtdevice():
 *
 * Build the ramdisk backing store and register the new struct se_device
 * with the target core.
 */
static struct se_device *rd_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p,
	int rd_direct)
{
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct rd_dev *rd_dev = p;
	struct rd_host *rd_host = hba->hba_ptr;
	int dev_flags = 0;
	char prod[16], rev[4];

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	if (rd_build_device_space(rd_dev) < 0)
		goto fail;

	snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
	snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
						RD_MCP_VERSION);

	dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
	dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
	dev_limits.limits.max_sectors = RD_MAX_SECTORS;
	dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;

	dev = transport_add_device_to_core_hba(hba,
			(rd_dev->rd_direct) ? &rd_dr_template :
			&rd_mcp_template, se_dev, dev_flags, (void *)rd_dev,
			&dev_limits, prod, rev);
	if (!(dev))
		goto fail;

	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
	rd_dev->rd_queue_depth = dev->queue_depth;

	printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
		" %u pages in %u tables, %lu total bytes\n",
		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
		rd_dev->sg_table_count,
		(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));

	return dev;

fail:
	rd_release_device_space(rd_dev);
	return NULL;
}

static struct se_device *rd_DIRECT_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	return rd_create_virtdevice(hba, se_dev, p, 1);
}

static struct se_device *rd_MEMCPY_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	return rd_create_virtdevice(hba, se_dev, p, 0);
}

/* rd_free_device(): (Part of se_subsystem_api_t template)
 *
 * Release the ramdisk backing store and the struct rd_dev itself.
 */
static void rd_free_device(void *p)
{
	struct rd_dev *rd_dev = p;

	rd_release_device_space(rd_dev);
	kfree(rd_dev);
}

static inline struct rd_request *RD_REQ(struct se_task *task)
{
	return container_of(task, struct rd_request, rd_task);
}

static struct se_task *
rd_alloc_task(struct se_cmd *cmd)
{
	struct rd_request *rd_req;

	rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
	if (!rd_req) {
		printk(KERN_ERR "Unable to allocate struct rd_request\n");
		return NULL;
	}
	rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;

	return &rd_req->rd_task;
}

/* rd_get_sg_table():
 *
 * Return the scatterlist table that contains the given ramdisk page.
 */
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
	u32 i;
	struct rd_dev_sg_table *sg_table;

	for (i = 0; i < rd_dev->sg_table_count; i++) {
		sg_table = &rd_dev->sg_table_array[i];
		if ((sg_table->page_start_offset <= page) &&
		    (sg_table->page_end_offset >= page))
			return sg_table;
	}

	printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
		page);

	return NULL;
}

/* rd_MEMCPY_read():
 *
 * Copy data from the ramdisk's internal scatterlist pages into the
 * task's task_sg destination scatterlist.
 */
static int rd_MEMCPY_read(struct rd_request *req)
{
	struct se_task *task = &req->rd_task;
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct scatterlist *sg_d, *sg_s;
	void *dst, *src;
	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
	u32 length, page_end = 0, table_sg_end;
	u32 rd_offset = req->rd_offset;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!(table))
		return -1;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_d = task->task_sg;
	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_MCP
	printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
		" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
		req->rd_page, req->rd_offset);
#endif
	src_offset = rd_offset;

	while (req->rd_size) {
		if ((sg_d[i].length - dst_offset) <
		    (sg_s[j].length - src_offset)) {
			length = (sg_d[i].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
				" offset: %u sg_s[%d].length: %u\n", i,
				&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
				sg_s[j].length);
			printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
				" src_offset: %u\n", length, dst_offset,
				src_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			dst = sg_virt(&sg_d[i++]) + dst_offset;
			if (!dst)
				BUG();

			src = sg_virt(&sg_s[j]) + src_offset;
			if (!src)
				BUG();

			dst_offset = 0;
			src_offset = length;
			page_end = 0;
		} else {
			length = (sg_s[j].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
				" offset: %u sg_s[%d].length: %u\n", i,
				&sg_d[i], sg_d[i].length, sg_d[i].offset,
				j, sg_s[j].length);
			printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
				" src_offset: %u\n", length, dst_offset,
				src_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			dst = sg_virt(&sg_d[i]) + dst_offset;
			if (!dst)
				BUG();

			if (sg_d[i].length == length) {
				i++;
				dst_offset = 0;
			} else
				dst_offset = length;

			src = sg_virt(&sg_s[j++]) + src_offset;
			if (!src)
				BUG();

			src_offset = 0;
			page_end = 1;
		}

		memcpy(dst, src, length);

#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
			" i: %u, j: %u\n", req->rd_page,
			(req->rd_size - length), length, i, j);
#endif
		req->rd_size -= length;
		if (!(req->rd_size))
			return 0;

		if (!page_end)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "getting new page table for page: %u\n",
			req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!(table))
			return -1;

		sg_s = &table->sg_table[j = 0];
	}

	return 0;
}

/* rd_MEMCPY_write():
 *
 * Copy data from the task's task_sg source scatterlist into the
 * ramdisk's internal scatterlist pages.
 */
static int rd_MEMCPY_write(struct rd_request *req)
{
	struct se_task *task = &req->rd_task;
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct scatterlist *sg_d, *sg_s;
	void *dst, *src;
	u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
	u32 length, page_end = 0, table_sg_end;
	u32 rd_offset = req->rd_offset;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!(table))
		return -1;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
	sg_s = task->task_sg;
#ifdef DEBUG_RAMDISK_MCP
	printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
		" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
		req->rd_page, req->rd_offset);
#endif
	dst_offset = rd_offset;

	while (req->rd_size) {
		if ((sg_s[i].length - src_offset) <
		    (sg_d[j].length - dst_offset)) {
			length = (sg_s[i].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
				" offset: %d sg_d[%d].length: %u\n", i,
				&sg_s[i], sg_s[i].length, sg_s[i].offset,
				j, sg_d[j].length);
			printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
				" dst_offset: %u\n", length, src_offset,
				dst_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			src = sg_virt(&sg_s[i++]) + src_offset;
			if (!src)
				BUG();

			dst = sg_virt(&sg_d[j]) + dst_offset;
			if (!dst)
				BUG();

			src_offset = 0;
			dst_offset = length;
			page_end = 0;
		} else {
			length = (sg_d[j].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
				" offset: %d sg_d[%d].length: %u\n", i,
				&sg_s[i], sg_s[i].length, sg_s[i].offset,
				j, sg_d[j].length);
			printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
				" dst_offset: %u\n", length, src_offset,
				dst_offset);
#endif
			if (length > req->rd_size)
				length = req->rd_size;

			src = sg_virt(&sg_s[i]) + src_offset;
			if (!src)
				BUG();

			if (sg_s[i].length == length) {
				i++;
				src_offset = 0;
			} else
				src_offset = length;

			dst = sg_virt(&sg_d[j++]) + dst_offset;
			if (!dst)
				BUG();

			dst_offset = 0;
			page_end = 1;
		}

		memcpy(dst, src, length);

#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
			" i: %u, j: %u\n", req->rd_page,
			(req->rd_size - length), length, i, j);
#endif
		req->rd_size -= length;
		if (!(req->rd_size))
			return 0;

		if (!page_end)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_MCP
		printk(KERN_INFO "getting new page table for page: %u\n",
			req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!(table))
			return -1;

		sg_d = &table->sg_table[j = 0];
	}

	return 0;
}

/* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
 *
 * Translate the task's LBA into a ramdisk page and offset, then perform
 * the memcpy in the requested data direction and complete the task.
 */
static int rd_MEMCPY_do_task(struct se_task *task)
{
	struct se_device *dev = task->se_dev;
	struct rd_request *req = RD_REQ(task);
	unsigned long long lba;
	int ret;

	req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
	lba = task->task_lba;
	req->rd_offset = (do_div(lba,
			  (PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
			   DEV_ATTRIB(dev)->block_size;
	req->rd_size = task->task_size;

	if (task->task_data_direction == DMA_FROM_DEVICE)
		ret = rd_MEMCPY_read(req);
	else
		ret = rd_MEMCPY_write(req);

	if (ret != 0)
		return ret;

	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

/* rd_DIRECT_with_offset():
 *
 * Map ramdisk pages directly into struct se_mem segments when the
 * request does not start on a page boundary.
 */
static int rd_DIRECT_with_offset(
	struct se_task *task,
	struct list_head *se_mem_list,
	u32 *se_mem_cnt,
	u32 *task_offset)
{
	struct rd_request *req = RD_REQ(task);
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct se_mem *se_mem;
	struct scatterlist *sg_s;
	u32 j = 0, set_offset = 1;
	u32 get_next_table = 0, offset_length, table_sg_end;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!(table))
		return -1;

	table_sg_end = (table->page_end_offset - req->rd_page);
	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
		(task->task_data_direction == DMA_TO_DEVICE) ?
			"Write" : "Read",
		task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
#endif
	while (req->rd_size) {
		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
		if (!(se_mem)) {
			printk(KERN_ERR "Unable to allocate struct se_mem\n");
			return -1;
		}
		INIT_LIST_HEAD(&se_mem->se_list);

		if (set_offset) {
			offset_length = sg_s[j].length - req->rd_offset;
			if (offset_length > req->rd_size)
				offset_length = req->rd_size;

			se_mem->se_page = sg_page(&sg_s[j++]);
			se_mem->se_off = req->rd_offset;
			se_mem->se_len = offset_length;

			set_offset = 0;
			get_next_table = (j > table_sg_end);
			goto check_eot;
		}

		offset_length = (req->rd_size < req->rd_offset) ?
			req->rd_size : req->rd_offset;

		se_mem->se_page = sg_page(&sg_s[j]);
		se_mem->se_len = offset_length;

		set_offset = 1;

check_eot:
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
			" se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
			req->rd_page, req->rd_size, offset_length, j, se_mem,
			se_mem->se_page, se_mem->se_off, se_mem->se_len);
#endif
		list_add_tail(&se_mem->se_list, se_mem_list);
		(*se_mem_cnt)++;

		req->rd_size -= offset_length;
		if (!(req->rd_size))
			goto out;

		if (!set_offset && !get_next_table)
			continue;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
			printk(KERN_INFO "page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "getting new page table for page: %u\n",
			req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!(table))
			return -1;

		sg_s = &table->sg_table[j = 0];
	}

out:
	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
		*se_mem_cnt);
#endif
	return 0;
}

/* rd_DIRECT_without_offset():
 *
 * Map ramdisk pages directly into struct se_mem segments when the
 * request starts on a page boundary.
 */
static int rd_DIRECT_without_offset(
	struct se_task *task,
	struct list_head *se_mem_list,
	u32 *se_mem_cnt,
	u32 *task_offset)
{
	struct rd_request *req = RD_REQ(task);
	struct rd_dev *dev = req->rd_dev;
	struct rd_dev_sg_table *table;
	struct se_mem *se_mem;
	struct scatterlist *sg_s;
	u32 length, j = 0;

	table = rd_get_sg_table(dev, req->rd_page);
	if (!(table))
		return -1;

	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
		(task->task_data_direction == DMA_TO_DEVICE) ?
			"Write" : "Read",
		task->task_lba, req->rd_size, req->rd_page);
#endif
	while (req->rd_size) {
		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
		if (!(se_mem)) {
			printk(KERN_ERR "Unable to allocate struct se_mem\n");
			return -1;
		}
		INIT_LIST_HEAD(&se_mem->se_list);

		length = (req->rd_size < sg_s[j].length) ?
			req->rd_size : sg_s[j].length;

		se_mem->se_page = sg_page(&sg_s[j++]);
		se_mem->se_len = length;

#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
			" se_page: %p se_off: %u se_len: %u\n", req->rd_page,
			req->rd_size, j, se_mem, se_mem->se_page,
			se_mem->se_off, se_mem->se_len);
#endif
		list_add_tail(&se_mem->se_list, se_mem_list);
		(*se_mem_cnt)++;

		req->rd_size -= length;
		if (!(req->rd_size))
			goto out;

		if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
			printk("page: %u in same page table\n",
				req->rd_page);
#endif
			continue;
		}
#ifdef DEBUG_RAMDISK_DR
		printk(KERN_INFO "getting new page table for page: %u\n",
			req->rd_page);
#endif
		table = rd_get_sg_table(dev, req->rd_page);
		if (!(table))
			return -1;

		sg_s = &table->sg_table[j = 0];
	}

out:
	T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
		*se_mem_cnt);
#endif
	return 0;
}

/* rd_DIRECT_do_se_mem_map():
 *
 * Compute the starting ramdisk page and offset for the task, then map
 * the ramdisk's pages into struct se_mem segments for the fabric.
 */
static int rd_DIRECT_do_se_mem_map(
	struct se_task *task,
	struct list_head *se_mem_list,
	void *in_mem,
	struct se_mem *in_se_mem,
	struct se_mem **out_se_mem,
	u32 *se_mem_cnt,
	u32 *task_offset_in)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct rd_request *req = RD_REQ(task);
	u32 task_offset = *task_offset_in;
	unsigned long long lba;
	int ret;

	req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
			PAGE_SIZE);
	lba = task->task_lba;
	req->rd_offset = (do_div(lba,
			  (PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
			   DEV_ATTRIB(task->se_dev)->block_size;
	req->rd_size = task->task_size;

	if (req->rd_offset)
		ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
				task_offset_in);
	else
		ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
				task_offset_in);

	if (ret < 0)
		return ret;

	if (CMD_TFO(cmd)->task_sg_chaining == 0)
		return 0;
	/*
	 * Currently prevent writers from multiple HW fabrics doing
	 * pci_map_sg() to RD_DR's internal scatterlist memory.
	 */
	if (cmd->data_direction == DMA_TO_DEVICE) {
		printk(KERN_ERR "DMA_TO_DEVICE not supported for"
				" RAMDISK_DR with task_sg_chaining=1\n");
		return -1;
	}
	/*
	 * Special case if task_sg_chaining is enabled: set up
	 * struct se_task->task_sg[], as it will be used by
	 * transport_do_task_sg_chain() for creating chained SGLs
	 * across multiple struct se_task->task_sg[].
	 */
	if (!(transport_calc_sg_num(task,
			list_entry(T_TASK(cmd)->t_mem_list->next,
				   struct se_mem, se_list),
			task_offset)))
		return -1;

	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
			list_entry(T_TASK(cmd)->t_mem_list->next,
				   struct se_mem, se_list),
			out_se_mem, se_mem_cnt, task_offset_in);
}

/* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
 *
 * Complete the task immediately; the data has already been mapped to
 * the ramdisk's pages in rd_DIRECT_do_se_mem_map().
 */
static int rd_DIRECT_do_task(struct se_task *task)
{
	/*
	 * At this point the locally allocated RD tables have been mapped
	 * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
	 */
	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

/* rd_free_task(): (Part of se_subsystem_api_t template)
 *
 * Release the struct rd_request allocated in rd_alloc_task().
 */
static void rd_free_task(struct se_task *task)
{
	kfree(RD_REQ(task));
}

enum {
	Opt_rd_pages, Opt_err
};

static match_table_t tokens = {
	{Opt_rd_pages, "rd_pages=%d"},
	{Opt_err, NULL}
};

static ssize_t rd_set_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page,
	ssize_t count)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_rd_pages:
			match_int(args, &arg);
			rd_dev->rd_page_count = arg;
			printk(KERN_INFO "RAMDISK: Referencing Page"
				" Count: %u\n", rd_dev->rd_page_count);
			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;

	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
		printk(KERN_INFO "Missing rd_pages= parameter\n");
		return -1;
	}

	return 0;
}

static ssize_t rd_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
	ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n",
			rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
			"rd_direct" : "rd_mcp");
	bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
			" SG_table_count: %u\n", rd_dev->rd_page_count,
			PAGE_SIZE, rd_dev->sg_table_count);
	return bl;
}

/* rd_get_cdb(): (Part of se_subsystem_api_t template)
 *
 * Return the SCSI CDB stored in the struct rd_request.
 */
static unsigned char *rd_get_cdb(struct se_task *task)
{
	struct rd_request *req = RD_REQ(task);

	return req->rd_scsi_cdb;
}

static u32 rd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

static u32 rd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

static sector_t rd_get_blocks(struct se_device *dev)
{
	struct rd_dev *rd_dev = dev->dev_ptr;
	unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
			DEV_ATTRIB(dev)->block_size) - 1;

	return blocks_long;
}

static struct se_subsystem_api rd_dr_template = {
	.name			= "rd_dr",
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.allocate_virtdevice	= rd_DIRECT_allocate_virtdevice,
	.create_virtdevice	= rd_DIRECT_create_virtdevice,
	.free_device		= rd_free_device,
	.alloc_task		= rd_alloc_task,
	.do_task		= rd_DIRECT_do_task,
	.free_task		= rd_free_task,
	.check_configfs_dev_params = rd_check_configfs_dev_params,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_cdb		= rd_get_cdb,
	.get_device_rev		= rd_get_device_rev,
	.get_device_type	= rd_get_device_type,
	.get_blocks		= rd_get_blocks,
	.do_se_mem_map		= rd_DIRECT_do_se_mem_map,
};

static struct se_subsystem_api rd_mcp_template = {
	.name			= "rd_mcp",
	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
	.attach_hba		= rd_attach_hba,
	.detach_hba		= rd_detach_hba,
	.allocate_virtdevice	= rd_MEMCPY_allocate_virtdevice,
	.create_virtdevice	= rd_MEMCPY_create_virtdevice,
	.free_device		= rd_free_device,
	.alloc_task		= rd_alloc_task,
	.do_task		= rd_MEMCPY_do_task,
	.free_task		= rd_free_task,
	.check_configfs_dev_params = rd_check_configfs_dev_params,
	.set_configfs_dev_params = rd_set_configfs_dev_params,
	.show_configfs_dev_params = rd_show_configfs_dev_params,
	.get_cdb		= rd_get_cdb,
	.get_device_rev		= rd_get_device_rev,
	.get_device_type	= rd_get_device_type,
	.get_blocks		= rd_get_blocks,
};

int __init rd_module_init(void)
{
	int ret;

	ret = transport_subsystem_register(&rd_dr_template);
	if (ret < 0)
		return ret;

	ret = transport_subsystem_register(&rd_mcp_template);
	if (ret < 0) {
		transport_subsystem_release(&rd_dr_template);
		return ret;
	}

	return 0;
}

void rd_module_exit(void)
{
	transport_subsystem_release(&rd_dr_template);
	transport_subsystem_release(&rd_mcp_template);
}
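
/*
 * Illustrative usage sketch (an assumption, not part of the upstream driver):
 * an rd_mcp backstore is typically created through the target core configfs
 * interface, where the "rd_pages=" token parsed by
 * rd_set_configfs_dev_params() selects the number of backing pages, e.g.:
 *
 *   mkdir -p /sys/kernel/config/target/core/rd_mcp_0/ramdisk0
 *   echo "rd_pages=32768" > /sys/kernel/config/target/core/rd_mcp_0/ramdisk0/control
 *   echo 1 > /sys/kernel/config/target/core/rd_mcp_0/ramdisk0/enable
 *
 * With a 4096-byte PAGE_SIZE, rd_pages=32768 yields a 128 MiB ramdisk, as
 * reported by rd_get_blocks() and rd_show_configfs_dev_params(); the exact
 * configfs paths above are assumed from the generic target core layout.
 */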