/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * Copyright (c) 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>

#include "target_core_file.h"

#if 1
#define DEBUG_FD_CACHE(x...) printk(x)
#else
#define DEBUG_FD_CACHE(x...)
#endif

#if 1
#define DEBUG_FD_FUA(x...) printk(x)
#else
#define DEBUG_FD_FUA(x...)
#endif
static struct se_subsystem_api fileio_template;

/*	fd_attach_hba(): (Part of se_subsystem_api_t template)
 *
 *
 */
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct fd_host *fd_host;

	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!(fd_host)) {
		printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
		return -1;
	}

	fd_host->fd_host_id = host_id;

	atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH);
	atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH);
	hba->hba_ptr = (void *) fd_host;

	printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_MOD_VERSION);
	printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
		" Target Core with TCQ Depth: %d MaxSectors: %u\n",
		hba->hba_id, fd_host->fd_host_id,
		atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS);

	return 0;
}

static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *fd_host = hba->hba_ptr;

	printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, fd_host->fd_host_id);

	kfree(fd_host);
	hba->hba_ptr = NULL;
}

static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
{
	struct fd_dev *fd_dev;
	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;

	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
	if (!(fd_dev)) {
		printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
		return NULL;
	}

	fd_dev->fd_host = fd_host;

	printk(KERN_INFO "FILEIO: Allocated fd_dev for %s\n", name);

	return fd_dev;
}

/*	fd_create_virtdevice(): (Part of se_subsystem_api_t template)
 *
 *
 */
static struct se_device *fd_create_virtdevice(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	void *p)
{
	char *dev_p = NULL;
	struct se_device *dev;
	struct se_dev_limits dev_limits;
	struct queue_limits *limits;
	struct fd_dev *fd_dev = (struct fd_dev *) p;
	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
	mm_segment_t old_fs;
	struct file *file;
	struct inode *inode = NULL;
	int dev_flags = 0, flags;

	memset(&dev_limits, 0, sizeof(struct se_dev_limits));

	old_fs = get_fs();
	set_fs(get_ds());
	dev_p = getname(fd_dev->fd_dev_name);
	set_fs(old_fs);

	if (IS_ERR(dev_p)) {
		printk(KERN_ERR "getname(%s) failed: %ld\n",
			fd_dev->fd_dev_name, PTR_ERR(dev_p));
		goto fail;
	}
#if 0
	if (di->no_create_file)
		flags = O_RDWR | O_LARGEFILE;
	else
		flags = O_RDWR | O_CREAT | O_LARGEFILE;
#else
	flags = O_RDWR | O_CREAT | O_LARGEFILE;
#endif
/*	flags |= O_DIRECT; */
	/*
	 * If fd_buffered_io=1 has not been set explicitly (the default),
	 * use O_SYNC to force FILEIO writes to disk.
	 */
	if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
		flags |= O_SYNC;

	file = filp_open(dev_p, flags, 0600);

	if (IS_ERR(file) || !file || !file->f_dentry) {
		printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
		goto fail;
	}
	fd_dev->fd_file = file;
	/*
	 * If using a block backend with this struct file, we extract
	 * fd_dev->fd_[block,dev]_size from struct block_device.
	 *
	 * Otherwise, we use the passed fd_dev_size= from configfs
	 */
	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct request_queue *q;
		/*
		 * Setup the local scope queue_limits from struct request_queue->limits
		 * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
		 */
		q = bdev_get_queue(inode->i_bdev);
		limits = &dev_limits.limits;
		limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
		limits->max_hw_sectors = queue_max_hw_sectors(q);
		limits->max_sectors = queue_max_sectors(q);
		/*
		 * Determine the number of bytes from i_size_read() minus
		 * one (1) logical sector from underlying struct block_device
		 */
		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
		fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
				       fd_dev->fd_block_size);

		printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
			" block_device blocks: %llu logical_block_size: %d\n",
			fd_dev->fd_dev_size,
			div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
			fd_dev->fd_block_size);
	} else {
		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
			printk(KERN_ERR "FILEIO: Missing fd_dev_size="
				" parameter, and no backing struct"
				" block_device\n");
			goto fail;
		}

		limits = &dev_limits.limits;
		limits->logical_block_size = FD_BLOCKSIZE;
		limits->max_hw_sectors = FD_MAX_SECTORS;
		limits->max_sectors = FD_MAX_SECTORS;
		fd_dev->fd_block_size = FD_BLOCKSIZE;
	}

	dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
	dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;

	dev = transport_add_device_to_core_hba(hba, &fileio_template,
				se_dev, dev_flags, (void *)fd_dev,
				&dev_limits, "FILEIO", FD_VERSION);
	if (!(dev))
		goto fail;

	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
	fd_dev->fd_queue_depth = dev->queue_depth;

	printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
			fd_dev->fd_dev_name, fd_dev->fd_dev_size);

	putname(dev_p);
	return dev;
fail:
	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	/*
	 * dev_p may be an ERR_PTR() if getname() failed above, so only
	 * release it when it references a valid name.
	 */
	if (!IS_ERR_OR_NULL(dev_p))
		putname(dev_p);
	return NULL;
}
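/*
 * Worked example for the size derivation above (illustrative values only,
 * not taken from the code): with a backing struct block_device reporting a
 * 512 byte logical block size and i_size_read() returning 1073741824 bytes,
 * fd_dev_size becomes 1073741824 - 512 = 1073741312, i.e. the device
 * advertises one logical block less than the raw size.  A plain file backend
 * skips this path entirely and must supply fd_dev_size= through configfs.
 */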
/*	fd_free_device(): (Part of se_subsystem_api_t template)
 *
 *
 */
static void fd_free_device(void *p)
{
	struct fd_dev *fd_dev = (struct fd_dev *) p;

	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}

	kfree(fd_dev);
}

static inline struct fd_request *FILE_REQ(struct se_task *task)
{
	return container_of(task, struct fd_request, fd_task);
}

static struct se_task *
fd_alloc_task(struct se_cmd *cmd)
{
	struct fd_request *fd_req;

	fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
	if (!(fd_req)) {
		printk(KERN_ERR "Unable to allocate struct fd_request\n");
		return NULL;
	}

	fd_req->fd_dev = SE_DEV(cmd)->dev_ptr;

	return &fd_req->fd_task;
}

static int fd_do_readv(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);
	struct file *fd = req->fd_dev->fd_file;
	struct scatterlist *sg = task->task_sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
	int ret = 0, i;

	iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
	if (!(iov)) {
		printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
		return -1;
	}

	/* Map each scatterlist entry to a kernel virtual address */
	for (i = 0; i < task->task_sg_num; i++) {
		iov[i].iov_len = sg[i].length;
		iov[i].iov_base = sg_virt(&sg[i]);
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
	set_fs(old_fs);

	kfree(iov);
	/*
	 * Return zeros and GOOD status even if the READ did not return
	 * the expected virt_size for struct file w/o a backing struct
	 * block_device.
	 */
	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
		if (ret < 0 || ret != task->task_size) {
			printk(KERN_ERR "vfs_readv() returned %d,"
				" expecting %d for S_ISBLK\n", ret,
				(int)task->task_size);
			return -1;
		}
	} else {
		if (ret < 0) {
			printk(KERN_ERR "vfs_readv() returned %d for non"
				" S_ISBLK\n", ret);
			return -1;
		}
	}

	return 1;
}

static int fd_do_writev(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);
	struct file *fd = req->fd_dev->fd_file;
	struct scatterlist *sg = task->task_sg;
	struct iovec *iov;
	mm_segment_t old_fs;
	loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
	int ret = 0, i;

	iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
	if (!(iov)) {
		printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
		return -1;
	}

	for (i = 0; i < task->task_sg_num; i++) {
		iov[i].iov_len = sg[i].length;
		iov[i].iov_base = sg_virt(&sg[i]);
	}

	old_fs = get_fs();
	set_fs(get_ds());
	ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
	set_fs(old_fs);

	kfree(iov);

	if (ret < 0 || ret != task->task_size) {
		printk(KERN_ERR "vfs_writev() returned %d\n", ret);
		return -1;
	}

	return 1;
}
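/*
 * Worked example of the LBA to byte offset math used by fd_do_readv() and
 * fd_do_writev() above (illustrative values only): with a 512 byte logical
 * block size, task_lba 2048 yields pos = 2048 * 512 = 1048576, and a
 * task_size of 4096 bytes covers the half-open byte range
 * [1048576, 1052672).  The same computation drives the fsync ranges in the
 * SYNCHRONIZE_CACHE and FUA emulation below.
 */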
static void fd_emulate_sync_cache(struct se_task *task)
{
	struct se_cmd *cmd = TASK_CMD(task);
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = dev->dev_ptr;
	int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		transport_complete_sync_cache(cmd, 1);

	/*
	 * Determine if we will be flushing the entire device.
	 */
	if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
		start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size;
		if (cmd->data_length)
			end = start + cmd->data_length;
		else
			end = LLONG_MAX;
	}

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);

	if (!immed)
		transport_complete_sync_cache(cmd, ret == 0);
}

/*
 * Tell TCM Core that we are capable of WriteCache emulation for
 * an underlying struct se_device.
 */
static int fd_emulated_write_cache(struct se_device *dev)
{
	return 1;
}

static int fd_emulated_dpo(struct se_device *dev)
{
	return 0;
}

/*
 * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
 * for TYPE_DISK.
 */
static int fd_emulated_fua_write(struct se_device *dev)
{
	return 1;
}

static int fd_emulated_fua_read(struct se_device *dev)
{
	return 0;
}

/*
 * WRITE Force Unit Access (FUA) emulation on a per struct se_task
 * LBA range basis.
 */
static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = dev->dev_ptr;
	loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size;
	loff_t end = start + task->task_size;
	int ret;

	DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
			task->task_lba, task->task_size);

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
}

static int fd_do_task(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	int ret = 0;

	/*
	 * Call vectorized fileio functions to map struct scatterlist
	 * physical memory addresses to struct iovec virtual memory.
	 */
	if (task->task_data_direction == DMA_FROM_DEVICE) {
		ret = fd_do_readv(task);
	} else {
		ret = fd_do_writev(task);

		if (ret > 0 &&
		    DEV_ATTRIB(dev)->emulate_write_cache > 0 &&
		    DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
		    T_TASK(cmd)->t_tasks_fua) {
			/*
			 * We might need to be a bit smarter here
			 * and return some sense data to let the initiator
			 * know the FUA WRITE cache sync failed..?
			 */
			fd_emulate_write_fua(cmd, task);
		}
	}

	if (ret < 0)
		return ret;
	if (ret) {
		task->task_scsi_status = GOOD;
		transport_complete_task(task, 1);
	}
	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

/*	fd_free_task(): (Part of se_subsystem_api_t template)
 *
 *
 */
static void fd_free_task(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);

	kfree(req);
}
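/*
 * FILEIO device parameters arrive as a single comma-separated string
 * written to the device's configfs control attribute.  A sketch of the
 * flow, assuming the standard target configfs layout under
 * /sys/kernel/config/target/core (paths and sizes illustrative only):
 *
 *   mkdir /sys/kernel/config/target/core/fileio_0
 *   mkdir /sys/kernel/config/target/core/fileio_0/disk0
 *   echo "fd_dev_name=/tmp/disk0.img,fd_dev_size=4194304" > \
 *       /sys/kernel/config/target/core/fileio_0/disk0/control
 *   echo 1 > /sys/kernel/config/target/core/fileio_0/disk0/enable
 *
 * fd_set_configfs_dev_params() below tokenizes that control string.
 */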
enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};

static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
	{Opt_err, NULL}
};

static ssize_t fd_set_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	const char *page, ssize_t count)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_fd_dev_name:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				goto out;
			}
			snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
					"%s", arg_p);
			kfree(arg_p);
			printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
					fd_dev->fd_dev_name);
			fd_dev->fbd_flags |= FBDF_HAS_PATH;
			break;
		case Opt_fd_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				goto out;
			}
			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
			kfree(arg_p);
			if (ret < 0) {
				printk(KERN_ERR "strict_strtoull() failed for"
						" fd_dev_size=\n");
				goto out;
			}
			printk(KERN_INFO "FILEIO: Referencing Size: %llu"
					" bytes\n", fd_dev->fd_dev_size);
			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
			break;
		case Opt_fd_buffered_io:
			match_int(args, &arg);
			if (arg != 1) {
				printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
				ret = -EINVAL;
				goto out;
			}

			printk(KERN_INFO "FILEIO: Using buffered I/O"
				" operations for struct fd_dev\n");

			fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
	struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;

	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
		printk(KERN_ERR "Missing fd_dev_name=\n");
		return -1;
	}

	return 0;
}

static ssize_t fd_show_configfs_dev_params(
	struct se_hba *hba,
	struct se_subsystem_dev *se_dev,
	char *b)
{
	struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
	ssize_t bl = 0;

	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
	bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
		(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
		"Buffered" : "Synchronous");
	return bl;
}
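/*
 * Example of the summary emitted by fd_show_configfs_dev_params() when the
 * device's configfs info attribute is read back (values illustrative only):
 *
 *   TCM FILEIO ID: 0 File: /tmp/disk0.img Size: 4194304 Mode: Synchronous
 */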
/*	fd_get_cdb(): (Part of se_subsystem_api_t template)
 *
 *
 */
static unsigned char *fd_get_cdb(struct se_task *task)
{
	struct fd_request *req = FILE_REQ(task);

	return req->fd_scsi_cdb;
}

/*	fd_get_device_rev(): (Part of se_subsystem_api_t template)
 *
 *
 */
static u32 fd_get_device_rev(struct se_device *dev)
{
	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}

/*	fd_get_device_type(): (Part of se_subsystem_api_t template)
 *
 *
 */
static u32 fd_get_device_type(struct se_device *dev)
{
	return TYPE_DISK;
}

static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = dev->dev_ptr;
	unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
			DEV_ATTRIB(dev)->block_size);

	return blocks_long;
}

static struct se_subsystem_api fileio_template = {
	.name			= "fileio",
	.owner			= THIS_MODULE,
	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV,
	.attach_hba		= fd_attach_hba,
	.detach_hba		= fd_detach_hba,
	.allocate_virtdevice	= fd_allocate_virtdevice,
	.create_virtdevice	= fd_create_virtdevice,
	.free_device		= fd_free_device,
	.dpo_emulated		= fd_emulated_dpo,
	.fua_write_emulated	= fd_emulated_fua_write,
	.fua_read_emulated	= fd_emulated_fua_read,
	.write_cache_emulated	= fd_emulated_write_cache,
	.alloc_task		= fd_alloc_task,
	.do_task		= fd_do_task,
	.do_sync_cache		= fd_emulate_sync_cache,
	.free_task		= fd_free_task,
	.check_configfs_dev_params = fd_check_configfs_dev_params,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_cdb		= fd_get_cdb,
	.get_device_rev		= fd_get_device_rev,
	.get_device_type	= fd_get_device_type,
	.get_blocks		= fd_get_blocks,
};

static int __init fileio_module_init(void)
{
	return transport_subsystem_register(&fileio_template);
}

static void __exit fileio_module_exit(void)
{
	transport_subsystem_release(&fileio_template);
}

MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);