1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2013 Shaohua Li <shli@kernel.org> 4 * Copyright (C) 2014 Red Hat, Inc. 5 * Copyright (C) 2015 Arrikto, Inc. 6 * Copyright (C) 2017 Chinamobile, Inc. 7 */ 8 9 #include <linux/spinlock.h> 10 #include <linux/module.h> 11 #include <linux/kernel.h> 12 #include <linux/timer.h> 13 #include <linux/parser.h> 14 #include <linux/vmalloc.h> 15 #include <linux/uio_driver.h> 16 #include <linux/xarray.h> 17 #include <linux/stringify.h> 18 #include <linux/bitops.h> 19 #include <linux/highmem.h> 20 #include <linux/configfs.h> 21 #include <linux/mutex.h> 22 #include <linux/workqueue.h> 23 #include <net/genetlink.h> 24 #include <scsi/scsi_common.h> 25 #include <scsi/scsi_proto.h> 26 #include <target/target_core_base.h> 27 #include <target/target_core_fabric.h> 28 #include <target/target_core_backend.h> 29 30 #include <linux/target_core_user.h> 31 32 /** 33 * DOC: Userspace I/O 34 * Userspace I/O 35 * ------------- 36 * 37 * Define a shared-memory interface for LIO to pass SCSI commands and 38 * data to userspace for processing. This is to allow backends that 39 * are too complex for in-kernel support to be possible. 40 * 41 * It uses the UIO framework to do a lot of the device-creation and 42 * introspection work for us. 43 * 44 * See the .h file for how the ring is laid out. Note that while the 45 * command ring is defined, the particulars of the data area are 46 * not. Offset values in the command entry point to other locations 47 * internal to the mmap-ed area. There is separate space outside the 48 * command ring for data buffers. This leaves maximum flexibility for 49 * moving buffer allocations, or even page flipping or other 50 * allocation techniques, without altering the command ring layout. 51 * 52 * SECURITY: 53 * The user process must be assumed to be malicious. There's no way to 54 * prevent it breaking the command ring protocol if it wants, but in 55 * order to prevent other issues we must only ever read *data* from 56 * the shared memory area, not offsets or sizes. This applies to 57 * command ring entries as well as the mailbox. Extra code needed for 58 * this may have a 'UAM' comment. 59 */ 60 61 #define TCMU_TIME_OUT (30 * MSEC_PER_SEC) 62 63 /* For mailbox plus cmd ring, the size is fixed 8MB */ 64 #define MB_CMDR_SIZE (8 * 1024 * 1024) 65 /* Offset of cmd ring is size of mailbox */ 66 #define CMDR_OFF sizeof(struct tcmu_mailbox) 67 #define CMDR_SIZE (MB_CMDR_SIZE - CMDR_OFF) 68 69 /* 70 * For data area, the default block size is PAGE_SIZE and 71 * the default total size is 256K * PAGE_SIZE. 72 */ 73 #define DATA_PAGES_PER_BLK 1 74 #define DATA_BLOCK_SIZE (DATA_PAGES_PER_BLK * PAGE_SIZE) 75 #define DATA_AREA_PAGES_DEF (256 * 1024) 76 77 #define TCMU_MBS_TO_PAGES(_mbs) ((size_t)_mbs << (20 - PAGE_SHIFT)) 78 #define TCMU_PAGES_TO_MBS(_pages) (_pages >> (20 - PAGE_SHIFT)) 79 80 /* 81 * Default number of global data blocks(512K * PAGE_SIZE) 82 * when the unmap thread will be started. 
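 *
 * (Illustrative arithmetic only, assuming 4 KiB pages: TCMU_MBS_TO_PAGES(2048)
 * = 2048 << (20 - 12) = 524288 = 512K pages, so the default below corresponds
 * to roughly 2 GiB of data-area pages shared across all tcmu devices.)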
83 */ 84 #define TCMU_GLOBAL_MAX_PAGES_DEF (512 * 1024) 85 86 static u8 tcmu_kern_cmd_reply_supported; 87 static u8 tcmu_netlink_blocked; 88 89 static struct device *tcmu_root_device; 90 91 struct tcmu_hba { 92 u32 host_id; 93 }; 94 95 #define TCMU_CONFIG_LEN 256 96 97 static DEFINE_MUTEX(tcmu_nl_cmd_mutex); 98 static LIST_HEAD(tcmu_nl_cmd_list); 99 100 struct tcmu_dev; 101 102 struct tcmu_nl_cmd { 103 /* wake up thread waiting for reply */ 104 struct completion complete; 105 struct list_head nl_list; 106 struct tcmu_dev *udev; 107 int cmd; 108 int status; 109 }; 110 111 struct tcmu_dev { 112 struct list_head node; 113 struct kref kref; 114 115 struct se_device se_dev; 116 struct se_dev_plug se_plug; 117 118 char *name; 119 struct se_hba *hba; 120 121 #define TCMU_DEV_BIT_OPEN 0 122 #define TCMU_DEV_BIT_BROKEN 1 123 #define TCMU_DEV_BIT_BLOCKED 2 124 #define TCMU_DEV_BIT_TMR_NOTIFY 3 125 #define TCM_DEV_BIT_PLUGGED 4 126 unsigned long flags; 127 128 struct uio_info uio_info; 129 130 struct inode *inode; 131 132 uint64_t dev_size; 133 134 struct tcmu_mailbox *mb_addr; 135 void *cmdr; 136 u32 cmdr_size; 137 u32 cmdr_last_cleaned; 138 /* Offset of data area from start of mb */ 139 /* Must add data_off and mb_addr to get the address */ 140 size_t data_off; 141 int data_area_mb; 142 uint32_t max_blocks; 143 size_t mmap_pages; 144 145 struct mutex cmdr_lock; 146 struct list_head qfull_queue; 147 struct list_head tmr_queue; 148 149 uint32_t dbi_max; 150 uint32_t dbi_thresh; 151 unsigned long *data_bitmap; 152 struct xarray data_pages; 153 154 struct xarray commands; 155 156 struct timer_list cmd_timer; 157 unsigned int cmd_time_out; 158 struct list_head inflight_queue; 159 160 struct timer_list qfull_timer; 161 int qfull_time_out; 162 163 struct list_head timedout_entry; 164 165 struct tcmu_nl_cmd curr_nl_cmd; 166 167 char dev_config[TCMU_CONFIG_LEN]; 168 169 int nl_reply_supported; 170 }; 171 172 #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev) 173 174 struct tcmu_cmd { 175 struct se_cmd *se_cmd; 176 struct tcmu_dev *tcmu_dev; 177 struct list_head queue_entry; 178 179 uint16_t cmd_id; 180 181 /* Can't use se_cmd when cleaning up expired cmds, because if 182 cmd has been completed then accessing se_cmd is off limits */ 183 uint32_t dbi_cnt; 184 uint32_t dbi_bidi_cnt; 185 uint32_t dbi_cur; 186 uint32_t *dbi; 187 188 uint32_t data_len_bidi; 189 190 unsigned long deadline; 191 192 #define TCMU_CMD_BIT_EXPIRED 0 193 unsigned long flags; 194 }; 195 196 struct tcmu_tmr { 197 struct list_head queue_entry; 198 199 uint8_t tmr_type; 200 uint32_t tmr_cmd_cnt; 201 int16_t tmr_cmd_ids[]; 202 }; 203 204 /* 205 * To avoid dead lock the mutex lock order should always be: 206 * 207 * mutex_lock(&root_udev_mutex); 208 * ... 209 * mutex_lock(&tcmu_dev->cmdr_lock); 210 * mutex_unlock(&tcmu_dev->cmdr_lock); 211 * ... 
212 * mutex_unlock(&root_udev_mutex); 213 */ 214 static DEFINE_MUTEX(root_udev_mutex); 215 static LIST_HEAD(root_udev); 216 217 static DEFINE_SPINLOCK(timed_out_udevs_lock); 218 static LIST_HEAD(timed_out_udevs); 219 220 static struct kmem_cache *tcmu_cmd_cache; 221 222 static atomic_t global_page_count = ATOMIC_INIT(0); 223 static struct delayed_work tcmu_unmap_work; 224 static int tcmu_global_max_pages = TCMU_GLOBAL_MAX_PAGES_DEF; 225 226 static int tcmu_set_global_max_data_area(const char *str, 227 const struct kernel_param *kp) 228 { 229 int ret, max_area_mb; 230 231 ret = kstrtoint(str, 10, &max_area_mb); 232 if (ret) 233 return -EINVAL; 234 235 if (max_area_mb <= 0) { 236 pr_err("global_max_data_area must be larger than 0.\n"); 237 return -EINVAL; 238 } 239 240 tcmu_global_max_pages = TCMU_MBS_TO_PAGES(max_area_mb); 241 if (atomic_read(&global_page_count) > tcmu_global_max_pages) 242 schedule_delayed_work(&tcmu_unmap_work, 0); 243 else 244 cancel_delayed_work_sync(&tcmu_unmap_work); 245 246 return 0; 247 } 248 249 static int tcmu_get_global_max_data_area(char *buffer, 250 const struct kernel_param *kp) 251 { 252 return sprintf(buffer, "%d\n", TCMU_PAGES_TO_MBS(tcmu_global_max_pages)); 253 } 254 255 static const struct kernel_param_ops tcmu_global_max_data_area_op = { 256 .set = tcmu_set_global_max_data_area, 257 .get = tcmu_get_global_max_data_area, 258 }; 259 260 module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL, 261 S_IWUSR | S_IRUGO); 262 MODULE_PARM_DESC(global_max_data_area_mb, 263 "Max MBs allowed to be allocated to all the tcmu device's " 264 "data areas."); 265 266 static int tcmu_get_block_netlink(char *buffer, 267 const struct kernel_param *kp) 268 { 269 return sprintf(buffer, "%s\n", tcmu_netlink_blocked ? 270 "blocked" : "unblocked"); 271 } 272 273 static int tcmu_set_block_netlink(const char *str, 274 const struct kernel_param *kp) 275 { 276 int ret; 277 u8 val; 278 279 ret = kstrtou8(str, 0, &val); 280 if (ret < 0) 281 return ret; 282 283 if (val > 1) { 284 pr_err("Invalid block netlink value %u\n", val); 285 return -EINVAL; 286 } 287 288 tcmu_netlink_blocked = val; 289 return 0; 290 } 291 292 static const struct kernel_param_ops tcmu_block_netlink_op = { 293 .set = tcmu_set_block_netlink, 294 .get = tcmu_get_block_netlink, 295 }; 296 297 module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO); 298 MODULE_PARM_DESC(block_netlink, "Block new netlink commands."); 299 300 static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd) 301 { 302 struct tcmu_dev *udev = nl_cmd->udev; 303 304 if (!tcmu_netlink_blocked) { 305 pr_err("Could not reset device's netlink interface. 
Netlink is not blocked.\n"); 306 return -EBUSY; 307 } 308 309 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { 310 pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name); 311 nl_cmd->status = -EINTR; 312 list_del(&nl_cmd->nl_list); 313 complete(&nl_cmd->complete); 314 } 315 return 0; 316 } 317 318 static int tcmu_set_reset_netlink(const char *str, 319 const struct kernel_param *kp) 320 { 321 struct tcmu_nl_cmd *nl_cmd, *tmp_cmd; 322 int ret; 323 u8 val; 324 325 ret = kstrtou8(str, 0, &val); 326 if (ret < 0) 327 return ret; 328 329 if (val != 1) { 330 pr_err("Invalid reset netlink value %u\n", val); 331 return -EINVAL; 332 } 333 334 mutex_lock(&tcmu_nl_cmd_mutex); 335 list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) { 336 ret = tcmu_fail_netlink_cmd(nl_cmd); 337 if (ret) 338 break; 339 } 340 mutex_unlock(&tcmu_nl_cmd_mutex); 341 342 return ret; 343 } 344 345 static const struct kernel_param_ops tcmu_reset_netlink_op = { 346 .set = tcmu_set_reset_netlink, 347 }; 348 349 module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR); 350 MODULE_PARM_DESC(reset_netlink, "Reset netlink commands."); 351 352 /* multicast group */ 353 enum tcmu_multicast_groups { 354 TCMU_MCGRP_CONFIG, 355 }; 356 357 static const struct genl_multicast_group tcmu_mcgrps[] = { 358 [TCMU_MCGRP_CONFIG] = { .name = "config", }, 359 }; 360 361 static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = { 362 [TCMU_ATTR_DEVICE] = { .type = NLA_STRING }, 363 [TCMU_ATTR_MINOR] = { .type = NLA_U32 }, 364 [TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 }, 365 [TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 }, 366 [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 }, 367 }; 368 369 static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd) 370 { 371 struct tcmu_dev *udev = NULL; 372 struct tcmu_nl_cmd *nl_cmd; 373 int dev_id, rc, ret = 0; 374 375 if (!info->attrs[TCMU_ATTR_CMD_STATUS] || 376 !info->attrs[TCMU_ATTR_DEVICE_ID]) { 377 printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n"); 378 return -EINVAL; 379 } 380 381 dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]); 382 rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]); 383 384 mutex_lock(&tcmu_nl_cmd_mutex); 385 list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) { 386 if (nl_cmd->udev->se_dev.dev_index == dev_id) { 387 udev = nl_cmd->udev; 388 break; 389 } 390 } 391 392 if (!udev) { 393 pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n", 394 completed_cmd, rc, dev_id); 395 ret = -ENODEV; 396 goto unlock; 397 } 398 list_del(&nl_cmd->nl_list); 399 400 pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n", 401 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc, 402 nl_cmd->status); 403 404 if (nl_cmd->cmd != completed_cmd) { 405 pr_err("Mismatched commands on %s (Expecting reply for %d. 
Current %d).\n", 406 udev->name, completed_cmd, nl_cmd->cmd); 407 ret = -EINVAL; 408 goto unlock; 409 } 410 411 nl_cmd->status = rc; 412 complete(&nl_cmd->complete); 413 unlock: 414 mutex_unlock(&tcmu_nl_cmd_mutex); 415 return ret; 416 } 417 418 static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info) 419 { 420 return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE); 421 } 422 423 static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info) 424 { 425 return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE); 426 } 427 428 static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb, 429 struct genl_info *info) 430 { 431 return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE); 432 } 433 434 static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info) 435 { 436 if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) { 437 tcmu_kern_cmd_reply_supported = 438 nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]); 439 printk(KERN_INFO "tcmu daemon: command reply support %u.\n", 440 tcmu_kern_cmd_reply_supported); 441 } 442 443 return 0; 444 } 445 446 static const struct genl_small_ops tcmu_genl_ops[] = { 447 { 448 .cmd = TCMU_CMD_SET_FEATURES, 449 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 450 .flags = GENL_ADMIN_PERM, 451 .doit = tcmu_genl_set_features, 452 }, 453 { 454 .cmd = TCMU_CMD_ADDED_DEVICE_DONE, 455 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 456 .flags = GENL_ADMIN_PERM, 457 .doit = tcmu_genl_add_dev_done, 458 }, 459 { 460 .cmd = TCMU_CMD_REMOVED_DEVICE_DONE, 461 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 462 .flags = GENL_ADMIN_PERM, 463 .doit = tcmu_genl_rm_dev_done, 464 }, 465 { 466 .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE, 467 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 468 .flags = GENL_ADMIN_PERM, 469 .doit = tcmu_genl_reconfig_dev_done, 470 }, 471 }; 472 473 /* Our generic netlink family */ 474 static struct genl_family tcmu_genl_family __ro_after_init = { 475 .module = THIS_MODULE, 476 .hdrsize = 0, 477 .name = "TCM-USER", 478 .version = 2, 479 .maxattr = TCMU_ATTR_MAX, 480 .policy = tcmu_attr_policy, 481 .mcgrps = tcmu_mcgrps, 482 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps), 483 .netnsok = true, 484 .small_ops = tcmu_genl_ops, 485 .n_small_ops = ARRAY_SIZE(tcmu_genl_ops), 486 }; 487 488 #define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index)) 489 #define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0) 490 #define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index)) 491 #define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++]) 492 493 static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len) 494 { 495 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 496 uint32_t i; 497 498 for (i = 0; i < len; i++) 499 clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap); 500 } 501 502 static inline int tcmu_get_empty_block(struct tcmu_dev *udev, 503 struct tcmu_cmd *tcmu_cmd, 504 int prev_dbi, int length, int *iov_cnt) 505 { 506 XA_STATE(xas, &udev->data_pages, 0); 507 struct page *page; 508 int i, cnt, dbi; 509 int page_cnt = DIV_ROUND_UP(length, PAGE_SIZE); 510 511 dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh); 512 if (dbi == udev->dbi_thresh) 513 return -1; 514 515 /* Count the number of already allocated pages */ 516 xas_set(&xas, dbi * DATA_PAGES_PER_BLK); 517 for (cnt = 0; xas_next(&xas) && cnt < page_cnt;) 518 cnt++; 519 520 for (i = cnt; i < page_cnt; i++) { 521 /* try to get new page from the mm */ 522 
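		/*
		 * (Added note: GFP_NOIO is used because this allocation can
		 * happen while servicing I/O, so reclaim must not recurse
		 * back into the I/O path.)
		 */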
page = alloc_page(GFP_NOIO); 523 if (!page) 524 break; 525 526 if (xa_store(&udev->data_pages, dbi * DATA_PAGES_PER_BLK + i, 527 page, GFP_NOIO)) { 528 __free_page(page); 529 break; 530 } 531 } 532 if (atomic_add_return(i - cnt, &global_page_count) > 533 tcmu_global_max_pages) 534 schedule_delayed_work(&tcmu_unmap_work, 0); 535 536 if (i && dbi > udev->dbi_max) 537 udev->dbi_max = dbi; 538 539 set_bit(dbi, udev->data_bitmap); 540 tcmu_cmd_set_dbi(tcmu_cmd, dbi); 541 542 if (dbi != prev_dbi + 1) 543 *iov_cnt += 1; 544 545 return i == page_cnt ? dbi : -1; 546 } 547 548 static int tcmu_get_empty_blocks(struct tcmu_dev *udev, 549 struct tcmu_cmd *tcmu_cmd, int length) 550 { 551 /* start value of dbi + 1 must not be a valid dbi */ 552 int dbi = -2; 553 int blk_len, iov_cnt = 0; 554 555 for (; length > 0; length -= DATA_BLOCK_SIZE) { 556 blk_len = min_t(int, length, DATA_BLOCK_SIZE); 557 dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_len, &iov_cnt); 558 if (dbi < 0) 559 return -1; 560 } 561 return iov_cnt; 562 } 563 564 static inline struct page * 565 tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dpi) 566 { 567 return xa_load(&udev->data_pages, dpi); 568 } 569 570 static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd) 571 { 572 kfree(tcmu_cmd->dbi); 573 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); 574 } 575 576 static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd) 577 { 578 int i, len; 579 struct se_cmd *se_cmd = cmd->se_cmd; 580 581 cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE); 582 583 if (se_cmd->se_cmd_flags & SCF_BIDI) { 584 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents)); 585 for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++) 586 len += se_cmd->t_bidi_data_sg[i].length; 587 cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, DATA_BLOCK_SIZE); 588 cmd->dbi_cnt += cmd->dbi_bidi_cnt; 589 cmd->data_len_bidi = len; 590 } 591 } 592 593 static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd, 594 struct iovec **iov, int prev_dbi, int len) 595 { 596 /* Get the next dbi */ 597 int dbi = tcmu_cmd_get_dbi(cmd); 598 599 /* Do not add more than DATA_BLOCK_SIZE to iov */ 600 if (len > DATA_BLOCK_SIZE) 601 len = DATA_BLOCK_SIZE; 602 603 /* 604 * The following code will gather and map the blocks to the same iovec 605 * when the blocks are all next to each other. 
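	 *
	 * (Illustrative example: consecutive dbis 3, 4, 5 keep growing a single
	 * iovec to 3 * DATA_BLOCK_SIZE, while a jump to dbi 7 starts a new
	 * iovec whose iov_base is the offset data_off + 7 * DATA_BLOCK_SIZE.)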
606 */ 607 if (dbi != prev_dbi + 1) { 608 /* dbi is not next to previous dbi, so start new iov */ 609 if (prev_dbi >= 0) 610 (*iov)++; 611 /* write offset relative to mb_addr */ 612 (*iov)->iov_base = (void __user *) 613 (udev->data_off + dbi * DATA_BLOCK_SIZE); 614 } 615 (*iov)->iov_len += len; 616 617 return dbi; 618 } 619 620 static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd, 621 struct iovec **iov, int data_length) 622 { 623 /* start value of dbi + 1 must not be a valid dbi */ 624 int dbi = -2; 625 626 /* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */ 627 for (; data_length > 0; data_length -= DATA_BLOCK_SIZE) 628 dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length); 629 } 630 631 static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) 632 { 633 struct se_device *se_dev = se_cmd->se_dev; 634 struct tcmu_dev *udev = TCMU_DEV(se_dev); 635 struct tcmu_cmd *tcmu_cmd; 636 637 tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO); 638 if (!tcmu_cmd) 639 return NULL; 640 641 INIT_LIST_HEAD(&tcmu_cmd->queue_entry); 642 tcmu_cmd->se_cmd = se_cmd; 643 tcmu_cmd->tcmu_dev = udev; 644 645 tcmu_cmd_set_block_cnts(tcmu_cmd); 646 tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t), 647 GFP_NOIO); 648 if (!tcmu_cmd->dbi) { 649 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); 650 return NULL; 651 } 652 653 return tcmu_cmd; 654 } 655 656 static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) 657 { 658 unsigned long offset = offset_in_page(vaddr); 659 void *start = vaddr - offset; 660 661 size = round_up(size+offset, PAGE_SIZE); 662 663 while (size) { 664 flush_dcache_page(vmalloc_to_page(start)); 665 start += PAGE_SIZE; 666 size -= PAGE_SIZE; 667 } 668 } 669 670 /* 671 * Some ring helper functions. We don't assume size is a power of 2 so 672 * we can't use circ_buf.h. 
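 *
 * (Worked example: with size = 16, head = 2, tail = 10 the ring has wrapped,
 * so spc_used() = 16 + (2 - 10) = 8 and spc_free() = 16 - 8 - 1 = 7; one
 * byte is always kept unused so a full ring can be told apart from an
 * empty one.)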
673 */ 674 static inline size_t spc_used(size_t head, size_t tail, size_t size) 675 { 676 int diff = head - tail; 677 678 if (diff >= 0) 679 return diff; 680 else 681 return size + diff; 682 } 683 684 static inline size_t spc_free(size_t head, size_t tail, size_t size) 685 { 686 /* Keep 1 byte unused or we can't tell full from empty */ 687 return (size - spc_used(head, tail, size) - 1); 688 } 689 690 static inline size_t head_to_end(size_t head, size_t size) 691 { 692 return size - head; 693 } 694 695 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size) 696 697 #define TCMU_SG_TO_DATA_AREA 1 698 #define TCMU_DATA_AREA_TO_SG 2 699 700 static inline void tcmu_copy_data(struct tcmu_dev *udev, 701 struct tcmu_cmd *tcmu_cmd, uint32_t direction, 702 struct scatterlist *sg, unsigned int sg_nents, 703 struct iovec **iov, size_t data_len) 704 { 705 XA_STATE(xas, &udev->data_pages, 0); 706 /* start value of dbi + 1 must not be a valid dbi */ 707 int dbi = -2; 708 size_t page_remaining, cp_len; 709 int page_cnt, page_inx; 710 struct sg_mapping_iter sg_iter; 711 unsigned int sg_flags; 712 struct page *page; 713 void *data_page_start, *data_addr; 714 715 if (direction == TCMU_SG_TO_DATA_AREA) 716 sg_flags = SG_MITER_ATOMIC | SG_MITER_FROM_SG; 717 else 718 sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG; 719 sg_miter_start(&sg_iter, sg, sg_nents, sg_flags); 720 721 while (data_len) { 722 if (direction == TCMU_SG_TO_DATA_AREA) 723 dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi, 724 data_len); 725 else 726 dbi = tcmu_cmd_get_dbi(tcmu_cmd); 727 728 page_cnt = DIV_ROUND_UP(data_len, PAGE_SIZE); 729 if (page_cnt > DATA_PAGES_PER_BLK) 730 page_cnt = DATA_PAGES_PER_BLK; 731 732 xas_set(&xas, dbi * DATA_PAGES_PER_BLK); 733 for (page_inx = 0; page_inx < page_cnt && data_len; page_inx++) { 734 page = xas_next(&xas); 735 736 if (direction == TCMU_DATA_AREA_TO_SG) 737 flush_dcache_page(page); 738 data_page_start = kmap_atomic(page); 739 page_remaining = PAGE_SIZE; 740 741 while (page_remaining && data_len) { 742 if (!sg_miter_next(&sg_iter)) { 743 /* set length to 0 to abort outer loop */ 744 data_len = 0; 745 pr_debug("%s: aborting data copy due to exhausted sg_list\n", 746 __func__); 747 break; 748 } 749 cp_len = min3(sg_iter.length, page_remaining, 750 data_len); 751 752 data_addr = data_page_start + 753 PAGE_SIZE - page_remaining; 754 if (direction == TCMU_SG_TO_DATA_AREA) 755 memcpy(data_addr, sg_iter.addr, cp_len); 756 else 757 memcpy(sg_iter.addr, data_addr, cp_len); 758 759 data_len -= cp_len; 760 page_remaining -= cp_len; 761 sg_iter.consumed = cp_len; 762 } 763 sg_miter_stop(&sg_iter); 764 765 kunmap_atomic(data_page_start); 766 if (direction == TCMU_SG_TO_DATA_AREA) 767 flush_dcache_page(page); 768 } 769 } 770 } 771 772 static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd, 773 struct iovec **iov) 774 { 775 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 776 777 tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg, 778 se_cmd->t_data_nents, iov, se_cmd->data_length); 779 } 780 781 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd, 782 bool bidi, uint32_t read_len) 783 { 784 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 785 struct scatterlist *data_sg; 786 unsigned int data_nents; 787 788 if (!bidi) { 789 data_sg = se_cmd->t_data_sg; 790 data_nents = se_cmd->t_data_nents; 791 } else { 792 /* 793 * For bidi case, the first count blocks are for Data-Out 794 * buffer blocks, and before gathering the Data-In 
buffer 795 * the Data-Out buffer blocks should be skipped. 796 */ 797 tcmu_cmd_set_dbi_cur(tcmu_cmd, 798 tcmu_cmd->dbi_cnt - tcmu_cmd->dbi_bidi_cnt); 799 800 data_sg = se_cmd->t_bidi_data_sg; 801 data_nents = se_cmd->t_bidi_data_nents; 802 } 803 804 tcmu_copy_data(udev, tcmu_cmd, TCMU_DATA_AREA_TO_SG, data_sg, 805 data_nents, NULL, read_len); 806 } 807 808 static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh) 809 { 810 return thresh - bitmap_weight(bitmap, thresh); 811 } 812 813 /* 814 * We can't queue a command until we have space available on the cmd ring. 815 * 816 * Called with ring lock held. 817 */ 818 static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size) 819 { 820 struct tcmu_mailbox *mb = udev->mb_addr; 821 size_t space, cmd_needed; 822 u32 cmd_head; 823 824 tcmu_flush_dcache_range(mb, sizeof(*mb)); 825 826 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 827 828 /* 829 * If cmd end-of-ring space is too small then we need space for a NOP plus 830 * original cmd - cmds are internally contiguous. 831 */ 832 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size) 833 cmd_needed = cmd_size; 834 else 835 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size); 836 837 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size); 838 if (space < cmd_needed) { 839 pr_debug("no cmd space: %u %u %u\n", cmd_head, 840 udev->cmdr_last_cleaned, udev->cmdr_size); 841 return false; 842 } 843 return true; 844 } 845 846 /* 847 * We have to allocate data buffers before we can queue a command. 848 * Returns -1 on error (not enough space) or number of needed iovs on success 849 * 850 * Called with ring lock held. 851 */ 852 static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd, 853 int *iov_bidi_cnt) 854 { 855 int space, iov_cnt = 0, ret = 0; 856 857 if (!cmd->dbi_cnt) 858 goto wr_iov_cnts; 859 860 /* try to check and get the data blocks as needed */ 861 space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh); 862 if (space < cmd->dbi_cnt) { 863 unsigned long blocks_left = 864 (udev->max_blocks - udev->dbi_thresh) + space; 865 866 if (blocks_left < cmd->dbi_cnt) { 867 pr_debug("no data space: only %lu available, but ask for %lu\n", 868 blocks_left * DATA_BLOCK_SIZE, 869 cmd->dbi_cnt * DATA_BLOCK_SIZE); 870 return -1; 871 } 872 873 udev->dbi_thresh += cmd->dbi_cnt; 874 if (udev->dbi_thresh > udev->max_blocks) 875 udev->dbi_thresh = udev->max_blocks; 876 } 877 878 iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length); 879 if (iov_cnt < 0) 880 return -1; 881 882 if (cmd->dbi_bidi_cnt) { 883 ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi); 884 if (ret < 0) 885 return -1; 886 } 887 wr_iov_cnts: 888 *iov_bidi_cnt = ret; 889 return iov_cnt + ret; 890 } 891 892 static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt) 893 { 894 return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]), 895 sizeof(struct tcmu_cmd_entry)); 896 } 897 898 static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd, 899 size_t base_command_size) 900 { 901 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 902 size_t command_size; 903 904 command_size = base_command_size + 905 round_up(scsi_command_size(se_cmd->t_task_cdb), 906 TCMU_OP_ALIGN_SIZE); 907 908 WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1)); 909 910 return command_size; 911 } 912 913 static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo, 914 struct timer_list *timer) 915 { 916 if (!tmo) 917 return; 918 919 tcmu_cmd->deadline = 
round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); 920 if (!timer_pending(timer)) 921 mod_timer(timer, tcmu_cmd->deadline); 922 923 pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd, 924 tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC); 925 } 926 927 static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd) 928 { 929 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 930 unsigned int tmo; 931 932 /* 933 * For backwards compat if qfull_time_out is not set use 934 * cmd_time_out and if that's not set use the default time out. 935 */ 936 if (!udev->qfull_time_out) 937 return -ETIMEDOUT; 938 else if (udev->qfull_time_out > 0) 939 tmo = udev->qfull_time_out; 940 else if (udev->cmd_time_out) 941 tmo = udev->cmd_time_out; 942 else 943 tmo = TCMU_TIME_OUT; 944 945 tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); 946 947 list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); 948 pr_debug("adding cmd %p on dev %s to ring space wait queue\n", 949 tcmu_cmd, udev->name); 950 return 0; 951 } 952 953 static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size) 954 { 955 struct tcmu_cmd_entry_hdr *hdr; 956 struct tcmu_mailbox *mb = udev->mb_addr; 957 uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 958 959 /* Insert a PAD if end-of-ring space is too small */ 960 if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) { 961 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); 962 963 hdr = udev->cmdr + cmd_head; 964 tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD); 965 tcmu_hdr_set_len(&hdr->len_op, pad_size); 966 hdr->cmd_id = 0; /* not used for PAD */ 967 hdr->kflags = 0; 968 hdr->uflags = 0; 969 tcmu_flush_dcache_range(hdr, sizeof(*hdr)); 970 971 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); 972 tcmu_flush_dcache_range(mb, sizeof(*mb)); 973 974 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 975 WARN_ON(cmd_head != 0); 976 } 977 978 return cmd_head; 979 } 980 981 static void tcmu_unplug_device(struct se_dev_plug *se_plug) 982 { 983 struct se_device *se_dev = se_plug->se_dev; 984 struct tcmu_dev *udev = TCMU_DEV(se_dev); 985 986 clear_bit(TCM_DEV_BIT_PLUGGED, &udev->flags); 987 uio_event_notify(&udev->uio_info); 988 } 989 990 static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev) 991 { 992 struct tcmu_dev *udev = TCMU_DEV(se_dev); 993 994 if (!test_and_set_bit(TCM_DEV_BIT_PLUGGED, &udev->flags)) 995 return &udev->se_plug; 996 997 return NULL; 998 } 999 1000 /** 1001 * queue_cmd_ring - queue cmd to ring or internally 1002 * @tcmu_cmd: cmd to queue 1003 * @scsi_err: TCM error code if failure (-1) returned. 1004 * 1005 * Returns: 1006 * -1 we cannot queue internally or to the ring. 1007 * 0 success 1008 * 1 internally queued to wait for ring memory to free. 
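 *
 * Called with cmdr_lock held, like the other ring helpers above (a note
 * inferred from the callers in this file).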
1009 */ 1010 static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) 1011 { 1012 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 1013 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 1014 size_t base_command_size, command_size; 1015 struct tcmu_mailbox *mb = udev->mb_addr; 1016 struct tcmu_cmd_entry *entry; 1017 struct iovec *iov; 1018 int iov_cnt, iov_bidi_cnt; 1019 uint32_t cmd_id, cmd_head; 1020 uint64_t cdb_off; 1021 /* size of data buffer needed */ 1022 size_t data_length = (size_t)tcmu_cmd->dbi_cnt * DATA_BLOCK_SIZE; 1023 1024 *scsi_err = TCM_NO_SENSE; 1025 1026 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) { 1027 *scsi_err = TCM_LUN_BUSY; 1028 return -1; 1029 } 1030 1031 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { 1032 *scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1033 return -1; 1034 } 1035 1036 if (!list_empty(&udev->qfull_queue)) 1037 goto queue; 1038 1039 if (data_length > udev->max_blocks * DATA_BLOCK_SIZE) { 1040 pr_warn("TCMU: Request of size %zu is too big for %zu data area\n", 1041 data_length, udev->max_blocks * DATA_BLOCK_SIZE); 1042 *scsi_err = TCM_INVALID_CDB_FIELD; 1043 return -1; 1044 } 1045 1046 iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt); 1047 if (iov_cnt < 0) 1048 goto free_and_queue; 1049 1050 /* 1051 * Must be a certain minimum size for response sense info, but 1052 * also may be larger if the iov array is large. 1053 */ 1054 base_command_size = tcmu_cmd_get_base_cmd_size(iov_cnt); 1055 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); 1056 1057 if (command_size > (udev->cmdr_size / 2)) { 1058 pr_warn("TCMU: Request of size %zu is too big for %u cmd ring\n", 1059 command_size, udev->cmdr_size); 1060 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur); 1061 *scsi_err = TCM_INVALID_CDB_FIELD; 1062 return -1; 1063 } 1064 1065 if (!is_ring_space_avail(udev, command_size)) 1066 /* 1067 * Don't leave commands partially setup because the unmap 1068 * thread might need the blocks to make forward progress. 1069 */ 1070 goto free_and_queue; 1071 1072 if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff), 1073 GFP_NOWAIT) < 0) { 1074 pr_err("tcmu: Could not allocate cmd id.\n"); 1075 1076 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); 1077 *scsi_err = TCM_OUT_OF_RESOURCES; 1078 return -1; 1079 } 1080 tcmu_cmd->cmd_id = cmd_id; 1081 1082 pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id, 1083 tcmu_cmd, udev->name); 1084 1085 cmd_head = ring_insert_padding(udev, command_size); 1086 1087 entry = udev->cmdr + cmd_head; 1088 memset(entry, 0, command_size); 1089 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); 1090 1091 /* prepare iov list and copy data to data area if necessary */ 1092 tcmu_cmd_reset_dbi_cur(tcmu_cmd); 1093 iov = &entry->req.iov[0]; 1094 1095 if (se_cmd->data_direction == DMA_TO_DEVICE || 1096 se_cmd->se_cmd_flags & SCF_BIDI) 1097 scatter_data_area(udev, tcmu_cmd, &iov); 1098 else 1099 tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length); 1100 1101 entry->req.iov_cnt = iov_cnt - iov_bidi_cnt; 1102 1103 /* Handle BIDI commands */ 1104 if (se_cmd->se_cmd_flags & SCF_BIDI) { 1105 iov++; 1106 tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi); 1107 entry->req.iov_bidi_cnt = iov_bidi_cnt; 1108 } 1109 1110 tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer); 1111 1112 entry->hdr.cmd_id = tcmu_cmd->cmd_id; 1113 1114 tcmu_hdr_set_len(&entry->hdr.len_op, command_size); 1115 1116 /* All offsets relative to mb_addr, not start of entry! 
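	 * (So userspace locates the CDB at mb_addr + entry->req.cdb_off, and
	 * the iov_base values filled in above are likewise offsets from
	 * mb_addr rather than pointers.)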
*/ 1117 cdb_off = CMDR_OFF + cmd_head + base_command_size; 1118 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); 1119 entry->req.cdb_off = cdb_off; 1120 tcmu_flush_dcache_range(entry, command_size); 1121 1122 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); 1123 tcmu_flush_dcache_range(mb, sizeof(*mb)); 1124 1125 list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue); 1126 1127 if (!test_bit(TCM_DEV_BIT_PLUGGED, &udev->flags)) 1128 uio_event_notify(&udev->uio_info); 1129 1130 return 0; 1131 1132 free_and_queue: 1133 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur); 1134 tcmu_cmd_reset_dbi_cur(tcmu_cmd); 1135 1136 queue: 1137 if (add_to_qfull_queue(tcmu_cmd)) { 1138 *scsi_err = TCM_OUT_OF_RESOURCES; 1139 return -1; 1140 } 1141 1142 return 1; 1143 } 1144 1145 /** 1146 * queue_tmr_ring - queue tmr info to ring or internally 1147 * @udev: related tcmu_dev 1148 * @tmr: tcmu_tmr containing tmr info to queue 1149 * 1150 * Returns: 1151 * 0 success 1152 * 1 internally queued to wait for ring memory to free. 1153 */ 1154 static int 1155 queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr) 1156 { 1157 struct tcmu_tmr_entry *entry; 1158 int cmd_size; 1159 int id_list_sz; 1160 struct tcmu_mailbox *mb = udev->mb_addr; 1161 uint32_t cmd_head; 1162 1163 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) 1164 goto out_free; 1165 1166 id_list_sz = sizeof(tmr->tmr_cmd_ids[0]) * tmr->tmr_cmd_cnt; 1167 cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE); 1168 1169 if (!list_empty(&udev->tmr_queue) || 1170 !is_ring_space_avail(udev, cmd_size)) { 1171 list_add_tail(&tmr->queue_entry, &udev->tmr_queue); 1172 pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n", 1173 tmr, udev->name); 1174 return 1; 1175 } 1176 1177 cmd_head = ring_insert_padding(udev, cmd_size); 1178 1179 entry = udev->cmdr + cmd_head; 1180 memset(entry, 0, cmd_size); 1181 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR); 1182 tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size); 1183 entry->tmr_type = tmr->tmr_type; 1184 entry->cmd_cnt = tmr->tmr_cmd_cnt; 1185 memcpy(&entry->cmd_ids[0], &tmr->tmr_cmd_ids[0], id_list_sz); 1186 tcmu_flush_dcache_range(entry, cmd_size); 1187 1188 UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size); 1189 tcmu_flush_dcache_range(mb, sizeof(*mb)); 1190 1191 uio_event_notify(&udev->uio_info); 1192 1193 out_free: 1194 kfree(tmr); 1195 1196 return 0; 1197 } 1198 1199 static sense_reason_t 1200 tcmu_queue_cmd(struct se_cmd *se_cmd) 1201 { 1202 struct se_device *se_dev = se_cmd->se_dev; 1203 struct tcmu_dev *udev = TCMU_DEV(se_dev); 1204 struct tcmu_cmd *tcmu_cmd; 1205 sense_reason_t scsi_ret = TCM_CHECK_CONDITION_ABORT_CMD; 1206 int ret = -1; 1207 1208 tcmu_cmd = tcmu_alloc_cmd(se_cmd); 1209 if (!tcmu_cmd) 1210 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1211 1212 mutex_lock(&udev->cmdr_lock); 1213 if (!(se_cmd->transport_state & CMD_T_ABORTED)) 1214 ret = queue_cmd_ring(tcmu_cmd, &scsi_ret); 1215 if (ret < 0) 1216 tcmu_free_cmd(tcmu_cmd); 1217 else 1218 se_cmd->priv = tcmu_cmd; 1219 mutex_unlock(&udev->cmdr_lock); 1220 return scsi_ret; 1221 } 1222 1223 static void tcmu_set_next_deadline(struct list_head *queue, 1224 struct timer_list *timer) 1225 { 1226 struct tcmu_cmd *cmd; 1227 1228 if (!list_empty(queue)) { 1229 cmd = list_first_entry(queue, struct tcmu_cmd, queue_entry); 1230 mod_timer(timer, cmd->deadline); 1231 } else 1232 del_timer(timer); 1233 } 1234 1235 static int 1236 tcmu_tmr_type(enum tcm_tmreq_table tmf) 1237 { 1238 
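	/*
	 * Map LIO's enum tcm_tmreq_table onto the TCMU_TMR_* values exposed
	 * to userspace; anything unrecognized is reported as TCMU_TMR_UNKNOWN.
	 */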
switch (tmf) { 1239 case TMR_ABORT_TASK: return TCMU_TMR_ABORT_TASK; 1240 case TMR_ABORT_TASK_SET: return TCMU_TMR_ABORT_TASK_SET; 1241 case TMR_CLEAR_ACA: return TCMU_TMR_CLEAR_ACA; 1242 case TMR_CLEAR_TASK_SET: return TCMU_TMR_CLEAR_TASK_SET; 1243 case TMR_LUN_RESET: return TCMU_TMR_LUN_RESET; 1244 case TMR_TARGET_WARM_RESET: return TCMU_TMR_TARGET_WARM_RESET; 1245 case TMR_TARGET_COLD_RESET: return TCMU_TMR_TARGET_COLD_RESET; 1246 case TMR_LUN_RESET_PRO: return TCMU_TMR_LUN_RESET_PRO; 1247 default: return TCMU_TMR_UNKNOWN; 1248 } 1249 } 1250 1251 static void 1252 tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf, 1253 struct list_head *cmd_list) 1254 { 1255 int i = 0, cmd_cnt = 0; 1256 bool unqueued = false; 1257 uint16_t *cmd_ids = NULL; 1258 struct tcmu_cmd *cmd; 1259 struct se_cmd *se_cmd; 1260 struct tcmu_tmr *tmr; 1261 struct tcmu_dev *udev = TCMU_DEV(se_dev); 1262 1263 mutex_lock(&udev->cmdr_lock); 1264 1265 /* First we check for aborted commands in qfull_queue */ 1266 list_for_each_entry(se_cmd, cmd_list, state_list) { 1267 i++; 1268 if (!se_cmd->priv) 1269 continue; 1270 cmd = se_cmd->priv; 1271 /* Commands on qfull queue have no id yet */ 1272 if (cmd->cmd_id) { 1273 cmd_cnt++; 1274 continue; 1275 } 1276 pr_debug("Removing aborted command %p from queue on dev %s.\n", 1277 cmd, udev->name); 1278 1279 list_del_init(&cmd->queue_entry); 1280 tcmu_free_cmd(cmd); 1281 se_cmd->priv = NULL; 1282 target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED); 1283 unqueued = true; 1284 } 1285 if (unqueued) 1286 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); 1287 1288 if (!test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags)) 1289 goto unlock; 1290 1291 pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n", 1292 tcmu_tmr_type(tmf), udev->name, i, cmd_cnt); 1293 1294 tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_NOIO); 1295 if (!tmr) 1296 goto unlock; 1297 1298 tmr->tmr_type = tcmu_tmr_type(tmf); 1299 tmr->tmr_cmd_cnt = cmd_cnt; 1300 1301 if (cmd_cnt != 0) { 1302 cmd_cnt = 0; 1303 list_for_each_entry(se_cmd, cmd_list, state_list) { 1304 if (!se_cmd->priv) 1305 continue; 1306 cmd = se_cmd->priv; 1307 if (cmd->cmd_id) 1308 tmr->tmr_cmd_ids[cmd_cnt++] = cmd->cmd_id; 1309 } 1310 } 1311 1312 queue_tmr_ring(udev, tmr); 1313 1314 unlock: 1315 mutex_unlock(&udev->cmdr_lock); 1316 } 1317 1318 static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry) 1319 { 1320 struct se_cmd *se_cmd = cmd->se_cmd; 1321 struct tcmu_dev *udev = cmd->tcmu_dev; 1322 bool read_len_valid = false; 1323 uint32_t read_len; 1324 1325 /* 1326 * cmd has been completed already from timeout, just reclaim 1327 * data area space and free cmd 1328 */ 1329 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 1330 WARN_ON_ONCE(se_cmd); 1331 goto out; 1332 } 1333 1334 list_del_init(&cmd->queue_entry); 1335 1336 tcmu_cmd_reset_dbi_cur(cmd); 1337 1338 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { 1339 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n", 1340 cmd->se_cmd); 1341 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION; 1342 goto done; 1343 } 1344 1345 read_len = se_cmd->data_length; 1346 if (se_cmd->data_direction == DMA_FROM_DEVICE && 1347 (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) { 1348 read_len_valid = true; 1349 if (entry->rsp.read_len < read_len) 1350 read_len = entry->rsp.read_len; 1351 } 1352 1353 if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { 1354 transport_copy_sense_to_cmd(se_cmd, 
entry->rsp.sense_buffer); 1355 if (!read_len_valid ) 1356 goto done; 1357 else 1358 se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL; 1359 } 1360 if (se_cmd->se_cmd_flags & SCF_BIDI) { 1361 /* Get Data-In buffer before clean up */ 1362 gather_data_area(udev, cmd, true, read_len); 1363 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { 1364 gather_data_area(udev, cmd, false, read_len); 1365 } else if (se_cmd->data_direction == DMA_TO_DEVICE) { 1366 /* TODO: */ 1367 } else if (se_cmd->data_direction != DMA_NONE) { 1368 pr_warn("TCMU: data direction was %d!\n", 1369 se_cmd->data_direction); 1370 } 1371 1372 done: 1373 se_cmd->priv = NULL; 1374 if (read_len_valid) { 1375 pr_debug("read_len = %d\n", read_len); 1376 target_complete_cmd_with_length(cmd->se_cmd, 1377 entry->rsp.scsi_status, read_len); 1378 } else 1379 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); 1380 1381 out: 1382 tcmu_cmd_free_data(cmd, cmd->dbi_cnt); 1383 tcmu_free_cmd(cmd); 1384 } 1385 1386 static int tcmu_run_tmr_queue(struct tcmu_dev *udev) 1387 { 1388 struct tcmu_tmr *tmr, *tmp; 1389 LIST_HEAD(tmrs); 1390 1391 if (list_empty(&udev->tmr_queue)) 1392 return 1; 1393 1394 pr_debug("running %s's tmr queue\n", udev->name); 1395 1396 list_splice_init(&udev->tmr_queue, &tmrs); 1397 1398 list_for_each_entry_safe(tmr, tmp, &tmrs, queue_entry) { 1399 list_del_init(&tmr->queue_entry); 1400 1401 pr_debug("removing tmr %p on dev %s from queue\n", 1402 tmr, udev->name); 1403 1404 if (queue_tmr_ring(udev, tmr)) { 1405 pr_debug("ran out of space during tmr queue run\n"); 1406 /* 1407 * tmr was requeued, so just put all tmrs back in 1408 * the queue 1409 */ 1410 list_splice_tail(&tmrs, &udev->tmr_queue); 1411 return 0; 1412 } 1413 } 1414 1415 return 1; 1416 } 1417 1418 static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) 1419 { 1420 struct tcmu_mailbox *mb; 1421 struct tcmu_cmd *cmd; 1422 bool free_space = false; 1423 1424 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { 1425 pr_err("ring broken, not handling completions\n"); 1426 return 0; 1427 } 1428 1429 mb = udev->mb_addr; 1430 tcmu_flush_dcache_range(mb, sizeof(*mb)); 1431 1432 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { 1433 1434 struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned; 1435 1436 /* 1437 * Flush max. up to end of cmd ring since current entry might 1438 * be a padding that is shorter than sizeof(*entry) 1439 */ 1440 size_t ring_left = head_to_end(udev->cmdr_last_cleaned, 1441 udev->cmdr_size); 1442 tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ? 
1443 ring_left : sizeof(*entry)); 1444 1445 free_space = true; 1446 1447 if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD || 1448 tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_TMR) { 1449 UPDATE_HEAD(udev->cmdr_last_cleaned, 1450 tcmu_hdr_get_len(entry->hdr.len_op), 1451 udev->cmdr_size); 1452 continue; 1453 } 1454 WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); 1455 1456 cmd = xa_erase(&udev->commands, entry->hdr.cmd_id); 1457 if (!cmd) { 1458 pr_err("cmd_id %u not found, ring is broken\n", 1459 entry->hdr.cmd_id); 1460 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); 1461 break; 1462 } 1463 1464 tcmu_handle_completion(cmd, entry); 1465 1466 UPDATE_HEAD(udev->cmdr_last_cleaned, 1467 tcmu_hdr_get_len(entry->hdr.len_op), 1468 udev->cmdr_size); 1469 } 1470 if (free_space) 1471 free_space = tcmu_run_tmr_queue(udev); 1472 1473 if (atomic_read(&global_page_count) > tcmu_global_max_pages && 1474 xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) { 1475 /* 1476 * Allocated blocks exceeded global block limit, currently no 1477 * more pending or waiting commands so try to reclaim blocks. 1478 */ 1479 schedule_delayed_work(&tcmu_unmap_work, 0); 1480 } 1481 if (udev->cmd_time_out) 1482 tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); 1483 1484 return free_space; 1485 } 1486 1487 static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd) 1488 { 1489 struct se_cmd *se_cmd; 1490 1491 if (!time_after_eq(jiffies, cmd->deadline)) 1492 return; 1493 1494 set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); 1495 list_del_init(&cmd->queue_entry); 1496 se_cmd = cmd->se_cmd; 1497 se_cmd->priv = NULL; 1498 cmd->se_cmd = NULL; 1499 1500 pr_debug("Timing out inflight cmd %u on dev %s.\n", 1501 cmd->cmd_id, cmd->tcmu_dev->name); 1502 1503 target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION); 1504 } 1505 1506 static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd) 1507 { 1508 struct se_cmd *se_cmd; 1509 1510 if (!time_after_eq(jiffies, cmd->deadline)) 1511 return; 1512 1513 pr_debug("Timing out queued cmd %p on dev %s.\n", 1514 cmd, cmd->tcmu_dev->name); 1515 1516 list_del_init(&cmd->queue_entry); 1517 se_cmd = cmd->se_cmd; 1518 tcmu_free_cmd(cmd); 1519 1520 se_cmd->priv = NULL; 1521 target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL); 1522 } 1523 1524 static void tcmu_device_timedout(struct tcmu_dev *udev) 1525 { 1526 spin_lock(&timed_out_udevs_lock); 1527 if (list_empty(&udev->timedout_entry)) 1528 list_add_tail(&udev->timedout_entry, &timed_out_udevs); 1529 spin_unlock(&timed_out_udevs_lock); 1530 1531 schedule_delayed_work(&tcmu_unmap_work, 0); 1532 } 1533 1534 static void tcmu_cmd_timedout(struct timer_list *t) 1535 { 1536 struct tcmu_dev *udev = from_timer(udev, t, cmd_timer); 1537 1538 pr_debug("%s cmd timeout has expired\n", udev->name); 1539 tcmu_device_timedout(udev); 1540 } 1541 1542 static void tcmu_qfull_timedout(struct timer_list *t) 1543 { 1544 struct tcmu_dev *udev = from_timer(udev, t, qfull_timer); 1545 1546 pr_debug("%s qfull timeout has expired\n", udev->name); 1547 tcmu_device_timedout(udev); 1548 } 1549 1550 static int tcmu_attach_hba(struct se_hba *hba, u32 host_id) 1551 { 1552 struct tcmu_hba *tcmu_hba; 1553 1554 tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL); 1555 if (!tcmu_hba) 1556 return -ENOMEM; 1557 1558 tcmu_hba->host_id = host_id; 1559 hba->hba_ptr = tcmu_hba; 1560 1561 return 0; 1562 } 1563 1564 static void tcmu_detach_hba(struct se_hba *hba) 1565 { 1566 kfree(hba->hba_ptr); 1567 hba->hba_ptr = NULL; 1568 } 1569 1570 static struct 
se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) 1571 { 1572 struct tcmu_dev *udev; 1573 1574 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); 1575 if (!udev) 1576 return NULL; 1577 kref_init(&udev->kref); 1578 1579 udev->name = kstrdup(name, GFP_KERNEL); 1580 if (!udev->name) { 1581 kfree(udev); 1582 return NULL; 1583 } 1584 1585 udev->hba = hba; 1586 udev->cmd_time_out = TCMU_TIME_OUT; 1587 udev->qfull_time_out = -1; 1588 1589 udev->max_blocks = DATA_AREA_PAGES_DEF / DATA_PAGES_PER_BLK; 1590 udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF); 1591 mutex_init(&udev->cmdr_lock); 1592 1593 INIT_LIST_HEAD(&udev->node); 1594 INIT_LIST_HEAD(&udev->timedout_entry); 1595 INIT_LIST_HEAD(&udev->qfull_queue); 1596 INIT_LIST_HEAD(&udev->tmr_queue); 1597 INIT_LIST_HEAD(&udev->inflight_queue); 1598 xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1); 1599 1600 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); 1601 timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0); 1602 1603 xa_init(&udev->data_pages); 1604 1605 return &udev->se_dev; 1606 } 1607 1608 static void tcmu_dev_call_rcu(struct rcu_head *p) 1609 { 1610 struct se_device *dev = container_of(p, struct se_device, rcu_head); 1611 struct tcmu_dev *udev = TCMU_DEV(dev); 1612 1613 kfree(udev->uio_info.name); 1614 kfree(udev->name); 1615 kfree(udev); 1616 } 1617 1618 static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd) 1619 { 1620 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 1621 kmem_cache_free(tcmu_cmd_cache, cmd); 1622 return 0; 1623 } 1624 return -EINVAL; 1625 } 1626 1627 static u32 tcmu_blocks_release(struct xarray *blocks, unsigned long first, 1628 unsigned long last) 1629 { 1630 XA_STATE(xas, blocks, first * DATA_PAGES_PER_BLK); 1631 struct page *page; 1632 u32 pages_freed = 0; 1633 1634 xas_lock(&xas); 1635 xas_for_each(&xas, page, (last + 1) * DATA_PAGES_PER_BLK - 1) { 1636 xas_store(&xas, NULL); 1637 __free_page(page); 1638 pages_freed++; 1639 } 1640 xas_unlock(&xas); 1641 1642 atomic_sub(pages_freed, &global_page_count); 1643 1644 return pages_freed; 1645 } 1646 1647 static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev) 1648 { 1649 struct tcmu_tmr *tmr, *tmp; 1650 1651 list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) { 1652 list_del_init(&tmr->queue_entry); 1653 kfree(tmr); 1654 } 1655 } 1656 1657 static void tcmu_dev_kref_release(struct kref *kref) 1658 { 1659 struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); 1660 struct se_device *dev = &udev->se_dev; 1661 struct tcmu_cmd *cmd; 1662 bool all_expired = true; 1663 unsigned long i; 1664 1665 vfree(udev->mb_addr); 1666 udev->mb_addr = NULL; 1667 1668 spin_lock_bh(&timed_out_udevs_lock); 1669 if (!list_empty(&udev->timedout_entry)) 1670 list_del(&udev->timedout_entry); 1671 spin_unlock_bh(&timed_out_udevs_lock); 1672 1673 /* Upper layer should drain all requests before calling this */ 1674 mutex_lock(&udev->cmdr_lock); 1675 xa_for_each(&udev->commands, i, cmd) { 1676 if (tcmu_check_and_free_pending_cmd(cmd) != 0) 1677 all_expired = false; 1678 } 1679 /* There can be left over TMR cmds. Remove them. 
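	 * (These are TMRs that could not be placed on the ring earlier and
	 * were never handed to userspace, so freeing them here is sufficient.)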
*/ 1680 tcmu_remove_all_queued_tmr(udev); 1681 if (!list_empty(&udev->qfull_queue)) 1682 all_expired = false; 1683 xa_destroy(&udev->commands); 1684 WARN_ON(!all_expired); 1685 1686 tcmu_blocks_release(&udev->data_pages, 0, udev->dbi_max); 1687 bitmap_free(udev->data_bitmap); 1688 mutex_unlock(&udev->cmdr_lock); 1689 1690 pr_debug("dev_kref_release\n"); 1691 1692 call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); 1693 } 1694 1695 static void run_qfull_queue(struct tcmu_dev *udev, bool fail) 1696 { 1697 struct tcmu_cmd *tcmu_cmd, *tmp_cmd; 1698 LIST_HEAD(cmds); 1699 sense_reason_t scsi_ret; 1700 int ret; 1701 1702 if (list_empty(&udev->qfull_queue)) 1703 return; 1704 1705 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); 1706 1707 list_splice_init(&udev->qfull_queue, &cmds); 1708 1709 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) { 1710 list_del_init(&tcmu_cmd->queue_entry); 1711 1712 pr_debug("removing cmd %p on dev %s from queue\n", 1713 tcmu_cmd, udev->name); 1714 1715 if (fail) { 1716 /* 1717 * We were not able to even start the command, so 1718 * fail with busy to allow a retry in case runner 1719 * was only temporarily down. If the device is being 1720 * removed then LIO core will do the right thing and 1721 * fail the retry. 1722 */ 1723 tcmu_cmd->se_cmd->priv = NULL; 1724 target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY); 1725 tcmu_free_cmd(tcmu_cmd); 1726 continue; 1727 } 1728 1729 ret = queue_cmd_ring(tcmu_cmd, &scsi_ret); 1730 if (ret < 0) { 1731 pr_debug("cmd %p on dev %s failed with %u\n", 1732 tcmu_cmd, udev->name, scsi_ret); 1733 /* 1734 * Ignore scsi_ret for now. target_complete_cmd 1735 * drops it. 1736 */ 1737 tcmu_cmd->se_cmd->priv = NULL; 1738 target_complete_cmd(tcmu_cmd->se_cmd, 1739 SAM_STAT_CHECK_CONDITION); 1740 tcmu_free_cmd(tcmu_cmd); 1741 } else if (ret > 0) { 1742 pr_debug("ran out of space during cmdr queue run\n"); 1743 /* 1744 * cmd was requeued, so just put all cmds back in 1745 * the queue 1746 */ 1747 list_splice_tail(&cmds, &udev->qfull_queue); 1748 break; 1749 } 1750 } 1751 1752 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); 1753 } 1754 1755 static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) 1756 { 1757 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1758 1759 mutex_lock(&udev->cmdr_lock); 1760 if (tcmu_handle_completions(udev)) 1761 run_qfull_queue(udev, false); 1762 mutex_unlock(&udev->cmdr_lock); 1763 1764 return 0; 1765 } 1766 1767 /* 1768 * mmap code from uio.c. Copied here because we want to hook mmap() 1769 * and this stuff must come along. 1770 */ 1771 static int tcmu_find_mem_index(struct vm_area_struct *vma) 1772 { 1773 struct tcmu_dev *udev = vma->vm_private_data; 1774 struct uio_info *info = &udev->uio_info; 1775 1776 if (vma->vm_pgoff < MAX_UIO_MAPS) { 1777 if (info->mem[vma->vm_pgoff].size == 0) 1778 return -1; 1779 return (int)vma->vm_pgoff; 1780 } 1781 return -1; 1782 } 1783 1784 static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi) 1785 { 1786 struct page *page; 1787 1788 mutex_lock(&udev->cmdr_lock); 1789 page = tcmu_get_block_page(udev, dpi); 1790 if (likely(page)) { 1791 mutex_unlock(&udev->cmdr_lock); 1792 return page; 1793 } 1794 1795 /* 1796 * Userspace messed up and passed in a address not in the 1797 * data iov passed to it. 
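	 * Returning NULL makes tcmu_vma_fault() below answer the access
	 * with VM_FAULT_SIGBUS.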
1798 */ 1799 pr_err("Invalid addr to data page mapping (dpi %u) on device %s\n", 1800 dpi, udev->name); 1801 mutex_unlock(&udev->cmdr_lock); 1802 1803 return NULL; 1804 } 1805 1806 static void tcmu_vma_open(struct vm_area_struct *vma) 1807 { 1808 struct tcmu_dev *udev = vma->vm_private_data; 1809 1810 pr_debug("vma_open\n"); 1811 1812 kref_get(&udev->kref); 1813 } 1814 1815 static void tcmu_vma_close(struct vm_area_struct *vma) 1816 { 1817 struct tcmu_dev *udev = vma->vm_private_data; 1818 1819 pr_debug("vma_close\n"); 1820 1821 /* release ref from tcmu_vma_open */ 1822 kref_put(&udev->kref, tcmu_dev_kref_release); 1823 } 1824 1825 static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf) 1826 { 1827 struct tcmu_dev *udev = vmf->vma->vm_private_data; 1828 struct uio_info *info = &udev->uio_info; 1829 struct page *page; 1830 unsigned long offset; 1831 void *addr; 1832 1833 int mi = tcmu_find_mem_index(vmf->vma); 1834 if (mi < 0) 1835 return VM_FAULT_SIGBUS; 1836 1837 /* 1838 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE 1839 * to use mem[N]. 1840 */ 1841 offset = (vmf->pgoff - mi) << PAGE_SHIFT; 1842 1843 if (offset < udev->data_off) { 1844 /* For the vmalloc()ed cmd area pages */ 1845 addr = (void *)(unsigned long)info->mem[mi].addr + offset; 1846 page = vmalloc_to_page(addr); 1847 } else { 1848 uint32_t dpi; 1849 1850 /* For the dynamically growing data area pages */ 1851 dpi = (offset - udev->data_off) / PAGE_SIZE; 1852 page = tcmu_try_get_data_page(udev, dpi); 1853 if (!page) 1854 return VM_FAULT_SIGBUS; 1855 } 1856 1857 get_page(page); 1858 vmf->page = page; 1859 return 0; 1860 } 1861 1862 static const struct vm_operations_struct tcmu_vm_ops = { 1863 .open = tcmu_vma_open, 1864 .close = tcmu_vma_close, 1865 .fault = tcmu_vma_fault, 1866 }; 1867 1868 static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) 1869 { 1870 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1871 1872 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 1873 vma->vm_ops = &tcmu_vm_ops; 1874 1875 vma->vm_private_data = udev; 1876 1877 /* Ensure the mmap is exactly the right size */ 1878 if (vma_pages(vma) != udev->mmap_pages) 1879 return -EINVAL; 1880 1881 tcmu_vma_open(vma); 1882 1883 return 0; 1884 } 1885 1886 static int tcmu_open(struct uio_info *info, struct inode *inode) 1887 { 1888 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1889 1890 /* O_EXCL not supported for char devs, so fake it? */ 1891 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags)) 1892 return -EBUSY; 1893 1894 udev->inode = inode; 1895 1896 pr_debug("open\n"); 1897 1898 return 0; 1899 } 1900 1901 static int tcmu_release(struct uio_info *info, struct inode *inode) 1902 { 1903 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1904 1905 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); 1906 1907 pr_debug("close\n"); 1908 1909 return 0; 1910 } 1911 1912 static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) 1913 { 1914 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 1915 1916 if (!tcmu_kern_cmd_reply_supported) 1917 return 0; 1918 1919 if (udev->nl_reply_supported <= 0) 1920 return 0; 1921 1922 mutex_lock(&tcmu_nl_cmd_mutex); 1923 1924 if (tcmu_netlink_blocked) { 1925 mutex_unlock(&tcmu_nl_cmd_mutex); 1926 pr_warn("Failing nl cmd %d on %s. 
Interface is blocked.\n", cmd, 1927 udev->name); 1928 return -EAGAIN; 1929 } 1930 1931 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { 1932 mutex_unlock(&tcmu_nl_cmd_mutex); 1933 pr_warn("netlink cmd %d already executing on %s\n", 1934 nl_cmd->cmd, udev->name); 1935 return -EBUSY; 1936 } 1937 1938 memset(nl_cmd, 0, sizeof(*nl_cmd)); 1939 nl_cmd->cmd = cmd; 1940 nl_cmd->udev = udev; 1941 init_completion(&nl_cmd->complete); 1942 INIT_LIST_HEAD(&nl_cmd->nl_list); 1943 1944 list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list); 1945 1946 mutex_unlock(&tcmu_nl_cmd_mutex); 1947 return 0; 1948 } 1949 1950 static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev) 1951 { 1952 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 1953 1954 if (!tcmu_kern_cmd_reply_supported) 1955 return; 1956 1957 if (udev->nl_reply_supported <= 0) 1958 return; 1959 1960 mutex_lock(&tcmu_nl_cmd_mutex); 1961 1962 list_del(&nl_cmd->nl_list); 1963 memset(nl_cmd, 0, sizeof(*nl_cmd)); 1964 1965 mutex_unlock(&tcmu_nl_cmd_mutex); 1966 } 1967 1968 static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) 1969 { 1970 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 1971 int ret; 1972 1973 if (!tcmu_kern_cmd_reply_supported) 1974 return 0; 1975 1976 if (udev->nl_reply_supported <= 0) 1977 return 0; 1978 1979 pr_debug("sleeping for nl reply\n"); 1980 wait_for_completion(&nl_cmd->complete); 1981 1982 mutex_lock(&tcmu_nl_cmd_mutex); 1983 nl_cmd->cmd = TCMU_CMD_UNSPEC; 1984 ret = nl_cmd->status; 1985 mutex_unlock(&tcmu_nl_cmd_mutex); 1986 1987 return ret; 1988 } 1989 1990 static int tcmu_netlink_event_init(struct tcmu_dev *udev, 1991 enum tcmu_genl_cmd cmd, 1992 struct sk_buff **buf, void **hdr) 1993 { 1994 struct sk_buff *skb; 1995 void *msg_header; 1996 int ret = -ENOMEM; 1997 1998 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1999 if (!skb) 2000 return ret; 2001 2002 msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd); 2003 if (!msg_header) 2004 goto free_skb; 2005 2006 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name); 2007 if (ret < 0) 2008 goto free_skb; 2009 2010 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor); 2011 if (ret < 0) 2012 goto free_skb; 2013 2014 ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index); 2015 if (ret < 0) 2016 goto free_skb; 2017 2018 *buf = skb; 2019 *hdr = msg_header; 2020 return ret; 2021 2022 free_skb: 2023 nlmsg_free(skb); 2024 return ret; 2025 } 2026 2027 static int tcmu_netlink_event_send(struct tcmu_dev *udev, 2028 enum tcmu_genl_cmd cmd, 2029 struct sk_buff *skb, void *msg_header) 2030 { 2031 int ret; 2032 2033 genlmsg_end(skb, msg_header); 2034 2035 ret = tcmu_init_genl_cmd_reply(udev, cmd); 2036 if (ret) { 2037 nlmsg_free(skb); 2038 return ret; 2039 } 2040 2041 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0, 2042 TCMU_MCGRP_CONFIG, GFP_KERNEL); 2043 2044 /* Wait during an add as the listener may not be up yet */ 2045 if (ret == 0 || 2046 (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE)) 2047 return tcmu_wait_genl_cmd_reply(udev); 2048 else 2049 tcmu_destroy_genl_cmd_reply(udev); 2050 2051 return ret; 2052 } 2053 2054 static int tcmu_send_dev_add_event(struct tcmu_dev *udev) 2055 { 2056 struct sk_buff *skb = NULL; 2057 void *msg_header = NULL; 2058 int ret = 0; 2059 2060 ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb, 2061 &msg_header); 2062 if (ret < 0) 2063 return ret; 2064 return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb, 2065 msg_header); 2066 } 2067 2068 static int 
tcmu_send_dev_remove_event(struct tcmu_dev *udev)
2069 {
2070 	struct sk_buff *skb = NULL;
2071 	void *msg_header = NULL;
2072 	int ret = 0;
2073 
2074 	ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
2075 				      &skb, &msg_header);
2076 	if (ret < 0)
2077 		return ret;
2078 	return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
2079 				       skb, msg_header);
2080 }
2081 
2082 static int tcmu_update_uio_info(struct tcmu_dev *udev)
2083 {
2084 	struct tcmu_hba *hba = udev->hba->hba_ptr;
2085 	struct uio_info *info;
2086 	char *str;
2087 
2088 	info = &udev->uio_info;
2089 
2090 	if (udev->dev_config[0])
2091 		str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
2092 				udev->name, udev->dev_config);
2093 	else
2094 		str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
2095 				udev->name);
2096 	if (!str)
2097 		return -ENOMEM;
2098 
2099 	/* If the old string exists, free it */
2100 	kfree(info->name);
2101 	info->name = str;
2102 
2103 	return 0;
2104 }
2105 
2106 static int tcmu_configure_device(struct se_device *dev)
2107 {
2108 	struct tcmu_dev *udev = TCMU_DEV(dev);
2109 	struct uio_info *info;
2110 	struct tcmu_mailbox *mb;
2111 	size_t data_size;
2112 	int ret = 0;
2113 
2114 	ret = tcmu_update_uio_info(udev);
2115 	if (ret)
2116 		return ret;
2117 
2118 	info = &udev->uio_info;
2119 
2120 	mutex_lock(&udev->cmdr_lock);
2121 	udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
2122 	mutex_unlock(&udev->cmdr_lock);
2123 	if (!udev->data_bitmap) {
2124 		ret = -ENOMEM;
2125 		goto err_bitmap_alloc;
2126 	}
2127 
2128 	mb = vzalloc(MB_CMDR_SIZE);
2129 	if (!mb) {
2130 		ret = -ENOMEM;
2131 		goto err_vzalloc;
2132 	}
2133 
2134 	/* mailbox fits in first part of CMDR space */
2135 	udev->mb_addr = mb;
2136 	udev->cmdr = (void *)mb + CMDR_OFF;
2137 	udev->cmdr_size = CMDR_SIZE;
2138 	udev->data_off = MB_CMDR_SIZE;
2139 	data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT;
2140 	udev->mmap_pages = (data_size + MB_CMDR_SIZE) >> PAGE_SHIFT;
2141 	udev->dbi_thresh = 0; /* Default in Idle state */
2142 
2143 	/* Initialise the mailbox of the ring buffer */
2144 	mb->version = TCMU_MAILBOX_VERSION;
2145 	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
2146 		    TCMU_MAILBOX_FLAG_CAP_READ_LEN |
2147 		    TCMU_MAILBOX_FLAG_CAP_TMR;
2148 	mb->cmdr_off = CMDR_OFF;
2149 	mb->cmdr_size = udev->cmdr_size;
2150 
2151 	WARN_ON(!PAGE_ALIGNED(udev->data_off));
2152 	WARN_ON(data_size % PAGE_SIZE);
2153 
2154 	info->version = __stringify(TCMU_MAILBOX_VERSION);
2155 
2156 	info->mem[0].name = "tcm-user command & data buffer";
2157 	info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
2158 	info->mem[0].size = data_size + MB_CMDR_SIZE;
2159 	info->mem[0].memtype = UIO_MEM_NONE;
2160 
2161 	info->irqcontrol = tcmu_irqcontrol;
2162 	info->irq = UIO_IRQ_CUSTOM;
2163 
2164 	info->mmap = tcmu_mmap;
2165 	info->open = tcmu_open;
2166 	info->release = tcmu_release;
2167 
2168 	ret = uio_register_device(tcmu_root_device, info);
2169 	if (ret)
2170 		goto err_register;
2171 
2172 	/* User can set hw_block_size before enabling the device */
2173 	if (dev->dev_attrib.hw_block_size == 0)
2174 		dev->dev_attrib.hw_block_size = 512;
2175 	/* Other attributes can be configured in userspace */
2176 	if (!dev->dev_attrib.hw_max_sectors)
2177 		dev->dev_attrib.hw_max_sectors = 128;
2178 	if (!dev->dev_attrib.emulate_write_cache)
2179 		dev->dev_attrib.emulate_write_cache = 0;
2180 	dev->dev_attrib.hw_queue_depth = 128;
2181 
2182 	/* If user didn't explicitly disable netlink reply support, use
2183 	 * module scope setting.
2184 */ 2185 if (udev->nl_reply_supported >= 0) 2186 udev->nl_reply_supported = tcmu_kern_cmd_reply_supported; 2187 2188 /* 2189 * Get a ref incase userspace does a close on the uio device before 2190 * LIO has initiated tcmu_free_device. 2191 */ 2192 kref_get(&udev->kref); 2193 2194 ret = tcmu_send_dev_add_event(udev); 2195 if (ret) 2196 goto err_netlink; 2197 2198 mutex_lock(&root_udev_mutex); 2199 list_add(&udev->node, &root_udev); 2200 mutex_unlock(&root_udev_mutex); 2201 2202 return 0; 2203 2204 err_netlink: 2205 kref_put(&udev->kref, tcmu_dev_kref_release); 2206 uio_unregister_device(&udev->uio_info); 2207 err_register: 2208 vfree(udev->mb_addr); 2209 udev->mb_addr = NULL; 2210 err_vzalloc: 2211 bitmap_free(udev->data_bitmap); 2212 udev->data_bitmap = NULL; 2213 err_bitmap_alloc: 2214 kfree(info->name); 2215 info->name = NULL; 2216 2217 return ret; 2218 } 2219 2220 static void tcmu_free_device(struct se_device *dev) 2221 { 2222 struct tcmu_dev *udev = TCMU_DEV(dev); 2223 2224 /* release ref from init */ 2225 kref_put(&udev->kref, tcmu_dev_kref_release); 2226 } 2227 2228 static void tcmu_destroy_device(struct se_device *dev) 2229 { 2230 struct tcmu_dev *udev = TCMU_DEV(dev); 2231 2232 del_timer_sync(&udev->cmd_timer); 2233 del_timer_sync(&udev->qfull_timer); 2234 2235 mutex_lock(&root_udev_mutex); 2236 list_del(&udev->node); 2237 mutex_unlock(&root_udev_mutex); 2238 2239 tcmu_send_dev_remove_event(udev); 2240 2241 uio_unregister_device(&udev->uio_info); 2242 2243 /* release ref from configure */ 2244 kref_put(&udev->kref, tcmu_dev_kref_release); 2245 } 2246 2247 static void tcmu_unblock_dev(struct tcmu_dev *udev) 2248 { 2249 mutex_lock(&udev->cmdr_lock); 2250 clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags); 2251 mutex_unlock(&udev->cmdr_lock); 2252 } 2253 2254 static void tcmu_block_dev(struct tcmu_dev *udev) 2255 { 2256 mutex_lock(&udev->cmdr_lock); 2257 2258 if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) 2259 goto unlock; 2260 2261 /* complete IO that has executed successfully */ 2262 tcmu_handle_completions(udev); 2263 /* fail IO waiting to be queued */ 2264 run_qfull_queue(udev, true); 2265 2266 unlock: 2267 mutex_unlock(&udev->cmdr_lock); 2268 } 2269 2270 static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) 2271 { 2272 struct tcmu_mailbox *mb; 2273 struct tcmu_cmd *cmd; 2274 unsigned long i; 2275 2276 mutex_lock(&udev->cmdr_lock); 2277 2278 xa_for_each(&udev->commands, i, cmd) { 2279 pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", 2280 cmd->cmd_id, udev->name, 2281 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)); 2282 2283 xa_erase(&udev->commands, i); 2284 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 2285 WARN_ON(!cmd->se_cmd); 2286 list_del_init(&cmd->queue_entry); 2287 cmd->se_cmd->priv = NULL; 2288 if (err_level == 1) { 2289 /* 2290 * Userspace was not able to start the 2291 * command or it is retryable. 
2292 				 */
2293 				target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY);
2294 			} else {
2295 				/* hard failure */
2296 				target_complete_cmd(cmd->se_cmd,
2297 						    SAM_STAT_CHECK_CONDITION);
2298 			}
2299 		}
2300 		tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
2301 		tcmu_free_cmd(cmd);
2302 	}
2303 
2304 	mb = udev->mb_addr;
2305 	tcmu_flush_dcache_range(mb, sizeof(*mb));
2306 	pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned,
2307 		 mb->cmd_head, mb->cmd_tail);
2308 
2309 	udev->cmdr_last_cleaned = 0;
2310 	mb->cmd_tail = 0;
2311 	mb->cmd_head = 0;
2312 	tcmu_flush_dcache_range(mb, sizeof(*mb));
2313 	clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
2314 
2315 	del_timer(&udev->cmd_timer);
2316 
2317 	/*
2318 	 * ring is empty and qfull queue never contains aborted commands.
2319 	 * So TMRs in tmr queue do not contain relevant cmd_ids.
2320 	 * After a ring reset userspace should do a fresh start, so
2321 	 * even LUN RESET message is no longer relevant.
2322 	 * Therefore remove all TMRs from the tmr queue.
2323 	 */
2324 	tcmu_remove_all_queued_tmr(udev);
2325 
2326 	run_qfull_queue(udev, false);
2327 
2328 	mutex_unlock(&udev->cmdr_lock);
2329 }
2330 
2331 enum {
2332 	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
2333 	Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err,
2334 };
2335 
2336 static match_table_t tokens = {
2337 	{Opt_dev_config, "dev_config=%s"},
2338 	{Opt_dev_size, "dev_size=%s"},
2339 	{Opt_hw_block_size, "hw_block_size=%d"},
2340 	{Opt_hw_max_sectors, "hw_max_sectors=%d"},
2341 	{Opt_nl_reply_supported, "nl_reply_supported=%d"},
2342 	{Opt_max_data_area_mb, "max_data_area_mb=%d"},
2343 	{Opt_err, NULL}
2344 };
2345 
2346 static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
2347 {
2348 	int val, ret;
2349 
2350 	ret = match_int(arg, &val);
2351 	if (ret < 0) {
2352 		pr_err("match_int() failed for dev attrib. Error %d.\n",
2353 		       ret);
2354 		return ret;
2355 	}
2356 
2357 	if (val <= 0) {
2358 		pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
2359 		       val);
2360 		return -EINVAL;
2361 	}
2362 	*dev_attrib = val;
2363 	return 0;
2364 }
2365 
2366 static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
2367 {
2368 	int val, ret;
2369 
2370 	ret = match_int(arg, &val);
2371 	if (ret < 0) {
2372 		pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
2373 		       ret);
2374 		return ret;
2375 	}
2376 	if (val <= 0) {
2377 		pr_err("Invalid max_data_area %d.\n", val);
2378 		return -EINVAL;
2379 	}
2380 	if (val > TCMU_PAGES_TO_MBS(tcmu_global_max_pages)) {
2381 		pr_err("%d is too large. 
Adjusting max_data_area_mb to global limit of %u\n", 2382 val, TCMU_PAGES_TO_MBS(tcmu_global_max_pages)); 2383 val = TCMU_PAGES_TO_MBS(tcmu_global_max_pages); 2384 } 2385 if (TCMU_MBS_TO_PAGES(val) < DATA_PAGES_PER_BLK) { 2386 pr_err("Invalid max_data_area %d (%zu pages): smaller than data_pages_per_blk (%d pages).\n", 2387 val, TCMU_MBS_TO_PAGES(val), DATA_PAGES_PER_BLK); 2388 return -EINVAL; 2389 } 2390 2391 mutex_lock(&udev->cmdr_lock); 2392 if (udev->data_bitmap) { 2393 pr_err("Cannot set max_data_area_mb after it has been enabled.\n"); 2394 ret = -EINVAL; 2395 goto unlock; 2396 } 2397 2398 udev->data_area_mb = val; 2399 udev->max_blocks = TCMU_MBS_TO_PAGES(val) / DATA_PAGES_PER_BLK; 2400 2401 unlock: 2402 mutex_unlock(&udev->cmdr_lock); 2403 return ret; 2404 } 2405 2406 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, 2407 const char *page, ssize_t count) 2408 { 2409 struct tcmu_dev *udev = TCMU_DEV(dev); 2410 char *orig, *ptr, *opts; 2411 substring_t args[MAX_OPT_ARGS]; 2412 int ret = 0, token; 2413 2414 opts = kstrdup(page, GFP_KERNEL); 2415 if (!opts) 2416 return -ENOMEM; 2417 2418 orig = opts; 2419 2420 while ((ptr = strsep(&opts, ",\n")) != NULL) { 2421 if (!*ptr) 2422 continue; 2423 2424 token = match_token(ptr, tokens, args); 2425 switch (token) { 2426 case Opt_dev_config: 2427 if (match_strlcpy(udev->dev_config, &args[0], 2428 TCMU_CONFIG_LEN) == 0) { 2429 ret = -EINVAL; 2430 break; 2431 } 2432 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config); 2433 break; 2434 case Opt_dev_size: 2435 ret = match_u64(&args[0], &udev->dev_size); 2436 if (ret < 0) 2437 pr_err("match_u64() failed for dev_size=. Error %d.\n", 2438 ret); 2439 break; 2440 case Opt_hw_block_size: 2441 ret = tcmu_set_dev_attrib(&args[0], 2442 &(dev->dev_attrib.hw_block_size)); 2443 break; 2444 case Opt_hw_max_sectors: 2445 ret = tcmu_set_dev_attrib(&args[0], 2446 &(dev->dev_attrib.hw_max_sectors)); 2447 break; 2448 case Opt_nl_reply_supported: 2449 ret = match_int(&args[0], &udev->nl_reply_supported); 2450 if (ret < 0) 2451 pr_err("match_int() failed for nl_reply_supported=. Error %d.\n", 2452 ret); 2453 break; 2454 case Opt_max_data_area_mb: 2455 ret = tcmu_set_max_blocks_param(udev, &args[0]); 2456 break; 2457 default: 2458 break; 2459 } 2460 2461 if (ret) 2462 break; 2463 } 2464 2465 kfree(orig); 2466 return (!ret) ? count : ret; 2467 } 2468 2469 static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) 2470 { 2471 struct tcmu_dev *udev = TCMU_DEV(dev); 2472 ssize_t bl = 0; 2473 2474 bl = sprintf(b + bl, "Config: %s ", 2475 udev->dev_config[0] ? 
udev->dev_config : "NULL"); 2476 bl += sprintf(b + bl, "Size: %llu ", udev->dev_size); 2477 bl += sprintf(b + bl, "MaxDataAreaMB: %u\n", udev->data_area_mb); 2478 2479 return bl; 2480 } 2481 2482 static sector_t tcmu_get_blocks(struct se_device *dev) 2483 { 2484 struct tcmu_dev *udev = TCMU_DEV(dev); 2485 2486 return div_u64(udev->dev_size - dev->dev_attrib.block_size, 2487 dev->dev_attrib.block_size); 2488 } 2489 2490 static sense_reason_t 2491 tcmu_parse_cdb(struct se_cmd *cmd) 2492 { 2493 return passthrough_parse_cdb(cmd, tcmu_queue_cmd); 2494 } 2495 2496 static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page) 2497 { 2498 struct se_dev_attrib *da = container_of(to_config_group(item), 2499 struct se_dev_attrib, da_group); 2500 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2501 2502 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); 2503 } 2504 2505 static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page, 2506 size_t count) 2507 { 2508 struct se_dev_attrib *da = container_of(to_config_group(item), 2509 struct se_dev_attrib, da_group); 2510 struct tcmu_dev *udev = container_of(da->da_dev, 2511 struct tcmu_dev, se_dev); 2512 u32 val; 2513 int ret; 2514 2515 if (da->da_dev->export_count) { 2516 pr_err("Unable to set tcmu cmd_time_out while exports exist\n"); 2517 return -EINVAL; 2518 } 2519 2520 ret = kstrtou32(page, 0, &val); 2521 if (ret < 0) 2522 return ret; 2523 2524 udev->cmd_time_out = val * MSEC_PER_SEC; 2525 return count; 2526 } 2527 CONFIGFS_ATTR(tcmu_, cmd_time_out); 2528 2529 static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page) 2530 { 2531 struct se_dev_attrib *da = container_of(to_config_group(item), 2532 struct se_dev_attrib, da_group); 2533 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2534 2535 return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ? 
2536 udev->qfull_time_out : 2537 udev->qfull_time_out / MSEC_PER_SEC); 2538 } 2539 2540 static ssize_t tcmu_qfull_time_out_store(struct config_item *item, 2541 const char *page, size_t count) 2542 { 2543 struct se_dev_attrib *da = container_of(to_config_group(item), 2544 struct se_dev_attrib, da_group); 2545 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2546 s32 val; 2547 int ret; 2548 2549 ret = kstrtos32(page, 0, &val); 2550 if (ret < 0) 2551 return ret; 2552 2553 if (val >= 0) { 2554 udev->qfull_time_out = val * MSEC_PER_SEC; 2555 } else if (val == -1) { 2556 udev->qfull_time_out = val; 2557 } else { 2558 printk(KERN_ERR "Invalid qfull timeout value %d\n", val); 2559 return -EINVAL; 2560 } 2561 return count; 2562 } 2563 CONFIGFS_ATTR(tcmu_, qfull_time_out); 2564 2565 static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page) 2566 { 2567 struct se_dev_attrib *da = container_of(to_config_group(item), 2568 struct se_dev_attrib, da_group); 2569 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2570 2571 return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb); 2572 } 2573 CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb); 2574 2575 static ssize_t tcmu_dev_config_show(struct config_item *item, char *page) 2576 { 2577 struct se_dev_attrib *da = container_of(to_config_group(item), 2578 struct se_dev_attrib, da_group); 2579 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2580 2581 return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config); 2582 } 2583 2584 static int tcmu_send_dev_config_event(struct tcmu_dev *udev, 2585 const char *reconfig_data) 2586 { 2587 struct sk_buff *skb = NULL; 2588 void *msg_header = NULL; 2589 int ret = 0; 2590 2591 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, 2592 &skb, &msg_header); 2593 if (ret < 0) 2594 return ret; 2595 ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data); 2596 if (ret < 0) { 2597 nlmsg_free(skb); 2598 return ret; 2599 } 2600 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, 2601 skb, msg_header); 2602 } 2603 2604 2605 static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page, 2606 size_t count) 2607 { 2608 struct se_dev_attrib *da = container_of(to_config_group(item), 2609 struct se_dev_attrib, da_group); 2610 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2611 int ret, len; 2612 2613 len = strlen(page); 2614 if (!len || len > TCMU_CONFIG_LEN - 1) 2615 return -EINVAL; 2616 2617 /* Check if device has been configured before */ 2618 if (target_dev_configured(&udev->se_dev)) { 2619 ret = tcmu_send_dev_config_event(udev, page); 2620 if (ret) { 2621 pr_err("Unable to reconfigure device\n"); 2622 return ret; 2623 } 2624 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); 2625 2626 ret = tcmu_update_uio_info(udev); 2627 if (ret) 2628 return ret; 2629 return count; 2630 } 2631 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); 2632 2633 return count; 2634 } 2635 CONFIGFS_ATTR(tcmu_, dev_config); 2636 2637 static ssize_t tcmu_dev_size_show(struct config_item *item, char *page) 2638 { 2639 struct se_dev_attrib *da = container_of(to_config_group(item), 2640 struct se_dev_attrib, da_group); 2641 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2642 2643 return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size); 2644 } 2645 2646 static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size) 2647 { 2648 struct sk_buff *skb = NULL; 2649 void *msg_header = NULL; 2650 int ret = 0; 2651 2652 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, 2653 &skb, &msg_header); 2654 if (ret < 0) 
2655 return ret; 2656 ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE, 2657 size, TCMU_ATTR_PAD); 2658 if (ret < 0) { 2659 nlmsg_free(skb); 2660 return ret; 2661 } 2662 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, 2663 skb, msg_header); 2664 } 2665 2666 static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page, 2667 size_t count) 2668 { 2669 struct se_dev_attrib *da = container_of(to_config_group(item), 2670 struct se_dev_attrib, da_group); 2671 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2672 u64 val; 2673 int ret; 2674 2675 ret = kstrtou64(page, 0, &val); 2676 if (ret < 0) 2677 return ret; 2678 2679 /* Check if device has been configured before */ 2680 if (target_dev_configured(&udev->se_dev)) { 2681 ret = tcmu_send_dev_size_event(udev, val); 2682 if (ret) { 2683 pr_err("Unable to reconfigure device\n"); 2684 return ret; 2685 } 2686 } 2687 udev->dev_size = val; 2688 return count; 2689 } 2690 CONFIGFS_ATTR(tcmu_, dev_size); 2691 2692 static ssize_t tcmu_nl_reply_supported_show(struct config_item *item, 2693 char *page) 2694 { 2695 struct se_dev_attrib *da = container_of(to_config_group(item), 2696 struct se_dev_attrib, da_group); 2697 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2698 2699 return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported); 2700 } 2701 2702 static ssize_t tcmu_nl_reply_supported_store(struct config_item *item, 2703 const char *page, size_t count) 2704 { 2705 struct se_dev_attrib *da = container_of(to_config_group(item), 2706 struct se_dev_attrib, da_group); 2707 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2708 s8 val; 2709 int ret; 2710 2711 ret = kstrtos8(page, 0, &val); 2712 if (ret < 0) 2713 return ret; 2714 2715 udev->nl_reply_supported = val; 2716 return count; 2717 } 2718 CONFIGFS_ATTR(tcmu_, nl_reply_supported); 2719 2720 static ssize_t tcmu_emulate_write_cache_show(struct config_item *item, 2721 char *page) 2722 { 2723 struct se_dev_attrib *da = container_of(to_config_group(item), 2724 struct se_dev_attrib, da_group); 2725 2726 return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache); 2727 } 2728 2729 static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val) 2730 { 2731 struct sk_buff *skb = NULL; 2732 void *msg_header = NULL; 2733 int ret = 0; 2734 2735 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, 2736 &skb, &msg_header); 2737 if (ret < 0) 2738 return ret; 2739 ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val); 2740 if (ret < 0) { 2741 nlmsg_free(skb); 2742 return ret; 2743 } 2744 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, 2745 skb, msg_header); 2746 } 2747 2748 static ssize_t tcmu_emulate_write_cache_store(struct config_item *item, 2749 const char *page, size_t count) 2750 { 2751 struct se_dev_attrib *da = container_of(to_config_group(item), 2752 struct se_dev_attrib, da_group); 2753 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2754 u8 val; 2755 int ret; 2756 2757 ret = kstrtou8(page, 0, &val); 2758 if (ret < 0) 2759 return ret; 2760 2761 /* Check if device has been configured before */ 2762 if (target_dev_configured(&udev->se_dev)) { 2763 ret = tcmu_send_emulate_write_cache(udev, val); 2764 if (ret) { 2765 pr_err("Unable to reconfigure device\n"); 2766 return ret; 2767 } 2768 } 2769 2770 da->emulate_write_cache = val; 2771 return count; 2772 } 2773 CONFIGFS_ATTR(tcmu_, emulate_write_cache); 2774 2775 static ssize_t tcmu_tmr_notification_show(struct config_item *item, char *page) 2776 { 2777 struct se_dev_attrib *da = 
container_of(to_config_group(item), 2778 struct se_dev_attrib, da_group); 2779 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2780 2781 return snprintf(page, PAGE_SIZE, "%i\n", 2782 test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags)); 2783 } 2784 2785 static ssize_t tcmu_tmr_notification_store(struct config_item *item, 2786 const char *page, size_t count) 2787 { 2788 struct se_dev_attrib *da = container_of(to_config_group(item), 2789 struct se_dev_attrib, da_group); 2790 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2791 u8 val; 2792 int ret; 2793 2794 ret = kstrtou8(page, 0, &val); 2795 if (ret < 0) 2796 return ret; 2797 if (val > 1) 2798 return -EINVAL; 2799 2800 if (val) 2801 set_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags); 2802 else 2803 clear_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags); 2804 return count; 2805 } 2806 CONFIGFS_ATTR(tcmu_, tmr_notification); 2807 2808 static ssize_t tcmu_block_dev_show(struct config_item *item, char *page) 2809 { 2810 struct se_device *se_dev = container_of(to_config_group(item), 2811 struct se_device, 2812 dev_action_group); 2813 struct tcmu_dev *udev = TCMU_DEV(se_dev); 2814 2815 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) 2816 return snprintf(page, PAGE_SIZE, "%s\n", "blocked"); 2817 else 2818 return snprintf(page, PAGE_SIZE, "%s\n", "unblocked"); 2819 } 2820 2821 static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page, 2822 size_t count) 2823 { 2824 struct se_device *se_dev = container_of(to_config_group(item), 2825 struct se_device, 2826 dev_action_group); 2827 struct tcmu_dev *udev = TCMU_DEV(se_dev); 2828 u8 val; 2829 int ret; 2830 2831 if (!target_dev_configured(&udev->se_dev)) { 2832 pr_err("Device is not configured.\n"); 2833 return -EINVAL; 2834 } 2835 2836 ret = kstrtou8(page, 0, &val); 2837 if (ret < 0) 2838 return ret; 2839 2840 if (val > 1) { 2841 pr_err("Invalid block value %d\n", val); 2842 return -EINVAL; 2843 } 2844 2845 if (!val) 2846 tcmu_unblock_dev(udev); 2847 else 2848 tcmu_block_dev(udev); 2849 return count; 2850 } 2851 CONFIGFS_ATTR(tcmu_, block_dev); 2852 2853 static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page, 2854 size_t count) 2855 { 2856 struct se_device *se_dev = container_of(to_config_group(item), 2857 struct se_device, 2858 dev_action_group); 2859 struct tcmu_dev *udev = TCMU_DEV(se_dev); 2860 u8 val; 2861 int ret; 2862 2863 if (!target_dev_configured(&udev->se_dev)) { 2864 pr_err("Device is not configured.\n"); 2865 return -EINVAL; 2866 } 2867 2868 ret = kstrtou8(page, 0, &val); 2869 if (ret < 0) 2870 return ret; 2871 2872 if (val != 1 && val != 2) { 2873 pr_err("Invalid reset ring value %d\n", val); 2874 return -EINVAL; 2875 } 2876 2877 tcmu_reset_ring(udev, val); 2878 return count; 2879 } 2880 CONFIGFS_ATTR_WO(tcmu_, reset_ring); 2881 2882 static struct configfs_attribute *tcmu_attrib_attrs[] = { 2883 &tcmu_attr_cmd_time_out, 2884 &tcmu_attr_qfull_time_out, 2885 &tcmu_attr_max_data_area_mb, 2886 &tcmu_attr_dev_config, 2887 &tcmu_attr_dev_size, 2888 &tcmu_attr_emulate_write_cache, 2889 &tcmu_attr_tmr_notification, 2890 &tcmu_attr_nl_reply_supported, 2891 NULL, 2892 }; 2893 2894 static struct configfs_attribute **tcmu_attrs; 2895 2896 static struct configfs_attribute *tcmu_action_attrs[] = { 2897 &tcmu_attr_block_dev, 2898 &tcmu_attr_reset_ring, 2899 NULL, 2900 }; 2901 2902 static struct target_backend_ops tcmu_ops = { 2903 .name = "user", 2904 .owner = THIS_MODULE, 2905 .transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH, 2906 .transport_flags_changeable = 
				       TRANSPORT_FLAG_PASSTHROUGH_PGR |
2907 				       TRANSPORT_FLAG_PASSTHROUGH_ALUA,
2908 	.attach_hba = tcmu_attach_hba,
2909 	.detach_hba = tcmu_detach_hba,
2910 	.alloc_device = tcmu_alloc_device,
2911 	.configure_device = tcmu_configure_device,
2912 	.destroy_device = tcmu_destroy_device,
2913 	.free_device = tcmu_free_device,
2914 	.unplug_device = tcmu_unplug_device,
2915 	.plug_device = tcmu_plug_device,
2916 	.parse_cdb = tcmu_parse_cdb,
2917 	.tmr_notify = tcmu_tmr_notify,
2918 	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
2919 	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
2920 	.get_device_type = sbc_get_device_type,
2921 	.get_blocks = tcmu_get_blocks,
2922 	.tb_dev_action_attrs = tcmu_action_attrs,
2923 };
2924 
2925 static void find_free_blocks(void)
2926 {
2927 	struct tcmu_dev *udev;
2928 	loff_t off;
2929 	u32 pages_freed, total_pages_freed = 0;
2930 	u32 start, end, block, total_blocks_freed = 0;
2931 
2932 	if (atomic_read(&global_page_count) <= tcmu_global_max_pages)
2933 		return;
2934 
2935 	mutex_lock(&root_udev_mutex);
2936 	list_for_each_entry(udev, &root_udev, node) {
2937 		mutex_lock(&udev->cmdr_lock);
2938 
2939 		if (!target_dev_configured(&udev->se_dev)) {
2940 			mutex_unlock(&udev->cmdr_lock);
2941 			continue;
2942 		}
2943 
2944 		/* Try to complete the finished commands first */
2945 		if (tcmu_handle_completions(udev))
2946 			run_qfull_queue(udev, false);
2947 
2948 		/* Skip udevs that are idle */
2949 		if (!udev->dbi_thresh) {
2950 			mutex_unlock(&udev->cmdr_lock);
2951 			continue;
2952 		}
2953 
2954 		end = udev->dbi_max + 1;
2955 		block = find_last_bit(udev->data_bitmap, end);
2956 		if (block == udev->dbi_max) {
2957 			/*
2958 			 * The last bit is dbi_max, so it is not possible
2959 			 * to reclaim any blocks.
2960 			 */
2961 			mutex_unlock(&udev->cmdr_lock);
2962 			continue;
2963 		} else if (block == end) {
2964 			/* The current udev will go to the idle state */
2965 			udev->dbi_thresh = start = 0;
2966 			udev->dbi_max = 0;
2967 		} else {
2968 			udev->dbi_thresh = start = block + 1;
2969 			udev->dbi_max = block;
2970 		}
2971 
2972 		/* Unmap the data area from offset 'off' to its end */
2973 		off = udev->data_off + start * DATA_BLOCK_SIZE;
2974 		unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
2975 
2976 		/* Release the block pages */
2977 		pages_freed = tcmu_blocks_release(&udev->data_pages, start, end - 1);
2978 		mutex_unlock(&udev->cmdr_lock);
2979 
2980 		total_pages_freed += pages_freed;
2981 		total_blocks_freed += end - start;
2982 		pr_debug("Freed %u pages (total %u) from %u blocks (total %u) from %s.\n",
2983 			 pages_freed, total_pages_freed, end - start,
2984 			 total_blocks_freed, udev->name);
2985 	}
2986 	mutex_unlock(&root_udev_mutex);
2987 
2988 	if (atomic_read(&global_page_count) > tcmu_global_max_pages)
2989 		schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
2990 }
2991 
2992 static void check_timedout_devices(void)
2993 {
2994 	struct tcmu_dev *udev, *tmp_dev;
2995 	struct tcmu_cmd *cmd, *tmp_cmd;
2996 	LIST_HEAD(devs);
2997 
2998 	spin_lock_bh(&timed_out_udevs_lock);
2999 	list_splice_init(&timed_out_udevs, &devs);
3000 
3001 	list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
3002 		list_del_init(&udev->timedout_entry);
3003 		spin_unlock_bh(&timed_out_udevs_lock);
3004 
3005 		mutex_lock(&udev->cmdr_lock);
3006 
3007 		/*
3008 		 * If cmd_time_out is disabled but qfull is set, the deadline
3009 		 * will only reflect the qfull timeout. Ignore it.
3010 */ 3011 if (udev->cmd_time_out) { 3012 list_for_each_entry_safe(cmd, tmp_cmd, 3013 &udev->inflight_queue, 3014 queue_entry) { 3015 tcmu_check_expired_ring_cmd(cmd); 3016 } 3017 tcmu_set_next_deadline(&udev->inflight_queue, 3018 &udev->cmd_timer); 3019 } 3020 list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue, 3021 queue_entry) { 3022 tcmu_check_expired_queue_cmd(cmd); 3023 } 3024 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); 3025 3026 mutex_unlock(&udev->cmdr_lock); 3027 3028 spin_lock_bh(&timed_out_udevs_lock); 3029 } 3030 3031 spin_unlock_bh(&timed_out_udevs_lock); 3032 } 3033 3034 static void tcmu_unmap_work_fn(struct work_struct *work) 3035 { 3036 check_timedout_devices(); 3037 find_free_blocks(); 3038 } 3039 3040 static int __init tcmu_module_init(void) 3041 { 3042 int ret, i, k, len = 0; 3043 3044 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); 3045 3046 INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn); 3047 3048 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache", 3049 sizeof(struct tcmu_cmd), 3050 __alignof__(struct tcmu_cmd), 3051 0, NULL); 3052 if (!tcmu_cmd_cache) 3053 return -ENOMEM; 3054 3055 tcmu_root_device = root_device_register("tcm_user"); 3056 if (IS_ERR(tcmu_root_device)) { 3057 ret = PTR_ERR(tcmu_root_device); 3058 goto out_free_cache; 3059 } 3060 3061 ret = genl_register_family(&tcmu_genl_family); 3062 if (ret < 0) { 3063 goto out_unreg_device; 3064 } 3065 3066 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) 3067 len += sizeof(struct configfs_attribute *); 3068 for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++) 3069 len += sizeof(struct configfs_attribute *); 3070 for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) 3071 len += sizeof(struct configfs_attribute *); 3072 len += sizeof(struct configfs_attribute *); 3073 3074 tcmu_attrs = kzalloc(len, GFP_KERNEL); 3075 if (!tcmu_attrs) { 3076 ret = -ENOMEM; 3077 goto out_unreg_genl; 3078 } 3079 3080 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) 3081 tcmu_attrs[i] = passthrough_attrib_attrs[i]; 3082 for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++) 3083 tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k]; 3084 for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) 3085 tcmu_attrs[i++] = tcmu_attrib_attrs[k]; 3086 tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs; 3087 3088 ret = transport_backend_register(&tcmu_ops); 3089 if (ret) 3090 goto out_attrs; 3091 3092 return 0; 3093 3094 out_attrs: 3095 kfree(tcmu_attrs); 3096 out_unreg_genl: 3097 genl_unregister_family(&tcmu_genl_family); 3098 out_unreg_device: 3099 root_device_unregister(tcmu_root_device); 3100 out_free_cache: 3101 kmem_cache_destroy(tcmu_cmd_cache); 3102 3103 return ret; 3104 } 3105 3106 static void __exit tcmu_module_exit(void) 3107 { 3108 cancel_delayed_work_sync(&tcmu_unmap_work); 3109 target_backend_unregister(&tcmu_ops); 3110 kfree(tcmu_attrs); 3111 genl_unregister_family(&tcmu_genl_family); 3112 root_device_unregister(tcmu_root_device); 3113 kmem_cache_destroy(tcmu_cmd_cache); 3114 } 3115 3116 MODULE_DESCRIPTION("TCM USER subsystem plugin"); 3117 MODULE_AUTHOR("Shaohua Li <shli@kernel.org>"); 3118 MODULE_AUTHOR("Andy Grover <agrover@redhat.com>"); 3119 MODULE_LICENSE("GPL"); 3120 3121 module_init(tcmu_module_init); 3122 module_exit(tcmu_module_exit); 3123