1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2013 Shaohua Li <shli@kernel.org> 4 * Copyright (C) 2014 Red Hat, Inc. 5 * Copyright (C) 2015 Arrikto, Inc. 6 * Copyright (C) 2017 Chinamobile, Inc. 7 */ 8 9 #include <linux/spinlock.h> 10 #include <linux/module.h> 11 #include <linux/idr.h> 12 #include <linux/kernel.h> 13 #include <linux/timer.h> 14 #include <linux/parser.h> 15 #include <linux/vmalloc.h> 16 #include <linux/uio_driver.h> 17 #include <linux/radix-tree.h> 18 #include <linux/stringify.h> 19 #include <linux/bitops.h> 20 #include <linux/highmem.h> 21 #include <linux/configfs.h> 22 #include <linux/mutex.h> 23 #include <linux/workqueue.h> 24 #include <net/genetlink.h> 25 #include <scsi/scsi_common.h> 26 #include <scsi/scsi_proto.h> 27 #include <target/target_core_base.h> 28 #include <target/target_core_fabric.h> 29 #include <target/target_core_backend.h> 30 31 #include <linux/target_core_user.h> 32 33 /** 34 * DOC: Userspace I/O 35 * Userspace I/O 36 * ------------- 37 * 38 * Define a shared-memory interface for LIO to pass SCSI commands and 39 * data to userspace for processing. This is to allow backends that 40 * are too complex for in-kernel support to be possible. 41 * 42 * It uses the UIO framework to do a lot of the device-creation and 43 * introspection work for us. 44 * 45 * See the .h file for how the ring is laid out. Note that while the 46 * command ring is defined, the particulars of the data area are 47 * not. Offset values in the command entry point to other locations 48 * internal to the mmap-ed area. There is separate space outside the 49 * command ring for data buffers. This leaves maximum flexibility for 50 * moving buffer allocations, or even page flipping or other 51 * allocation techniques, without altering the command ring layout. 52 * 53 * SECURITY: 54 * The user process must be assumed to be malicious. There's no way to 55 * prevent it breaking the command ring protocol if it wants, but in 56 * order to prevent other issues we must only ever read *data* from 57 * the shared memory area, not offsets or sizes. This applies to 58 * command ring entries as well as the mailbox. Extra code needed for 59 * this may have a 'UAM' comment. 60 */ 61 62 #define TCMU_TIME_OUT (30 * MSEC_PER_SEC) 63 64 /* For cmd area, the size is fixed 8MB */ 65 #define CMDR_SIZE (8 * 1024 * 1024) 66 67 /* 68 * For data area, the block size is PAGE_SIZE and 69 * the total size is 256K * PAGE_SIZE. 70 */ 71 #define DATA_BLOCK_SIZE PAGE_SIZE 72 #define DATA_BLOCK_SHIFT PAGE_SHIFT 73 #define DATA_BLOCK_BITS_DEF (256 * 1024) 74 75 #define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT)) 76 #define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT)) 77 78 /* 79 * Default number of global data blocks(512K * PAGE_SIZE) 80 * when the unmap thread will be started. 
81 */ 82 #define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024) 83 84 static u8 tcmu_kern_cmd_reply_supported; 85 static u8 tcmu_netlink_blocked; 86 87 static struct device *tcmu_root_device; 88 89 struct tcmu_hba { 90 u32 host_id; 91 }; 92 93 #define TCMU_CONFIG_LEN 256 94 95 static DEFINE_MUTEX(tcmu_nl_cmd_mutex); 96 static LIST_HEAD(tcmu_nl_cmd_list); 97 98 struct tcmu_dev; 99 100 struct tcmu_nl_cmd { 101 /* wake up thread waiting for reply */ 102 struct completion complete; 103 struct list_head nl_list; 104 struct tcmu_dev *udev; 105 int cmd; 106 int status; 107 }; 108 109 struct tcmu_dev { 110 struct list_head node; 111 struct kref kref; 112 113 struct se_device se_dev; 114 struct se_dev_plug se_plug; 115 116 char *name; 117 struct se_hba *hba; 118 119 #define TCMU_DEV_BIT_OPEN 0 120 #define TCMU_DEV_BIT_BROKEN 1 121 #define TCMU_DEV_BIT_BLOCKED 2 122 #define TCMU_DEV_BIT_TMR_NOTIFY 3 123 #define TCM_DEV_BIT_PLUGGED 4 124 unsigned long flags; 125 126 struct uio_info uio_info; 127 128 struct inode *inode; 129 130 struct tcmu_mailbox *mb_addr; 131 uint64_t dev_size; 132 u32 cmdr_size; 133 u32 cmdr_last_cleaned; 134 /* Offset of data area from start of mb */ 135 /* Must add data_off and mb_addr to get the address */ 136 size_t data_off; 137 size_t data_size; 138 uint32_t max_blocks; 139 size_t ring_size; 140 141 struct mutex cmdr_lock; 142 struct list_head qfull_queue; 143 struct list_head tmr_queue; 144 145 uint32_t dbi_max; 146 uint32_t dbi_thresh; 147 unsigned long *data_bitmap; 148 struct radix_tree_root data_blocks; 149 150 struct idr commands; 151 152 struct timer_list cmd_timer; 153 unsigned int cmd_time_out; 154 struct list_head inflight_queue; 155 156 struct timer_list qfull_timer; 157 int qfull_time_out; 158 159 struct list_head timedout_entry; 160 161 struct tcmu_nl_cmd curr_nl_cmd; 162 163 char dev_config[TCMU_CONFIG_LEN]; 164 165 int nl_reply_supported; 166 }; 167 168 #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev) 169 170 #define CMDR_OFF sizeof(struct tcmu_mailbox) 171 172 struct tcmu_cmd { 173 struct se_cmd *se_cmd; 174 struct tcmu_dev *tcmu_dev; 175 struct list_head queue_entry; 176 177 uint16_t cmd_id; 178 179 /* Can't use se_cmd when cleaning up expired cmds, because if 180 cmd has been completed then accessing se_cmd is off limits */ 181 uint32_t dbi_cnt; 182 uint32_t dbi_bidi_cnt; 183 uint32_t dbi_cur; 184 uint32_t *dbi; 185 186 uint32_t data_len_bidi; 187 188 unsigned long deadline; 189 190 #define TCMU_CMD_BIT_EXPIRED 0 191 unsigned long flags; 192 }; 193 194 struct tcmu_tmr { 195 struct list_head queue_entry; 196 197 uint8_t tmr_type; 198 uint32_t tmr_cmd_cnt; 199 int16_t tmr_cmd_ids[]; 200 }; 201 202 /* 203 * To avoid dead lock the mutex lock order should always be: 204 * 205 * mutex_lock(&root_udev_mutex); 206 * ... 207 * mutex_lock(&tcmu_dev->cmdr_lock); 208 * mutex_unlock(&tcmu_dev->cmdr_lock); 209 * ... 
210 * mutex_unlock(&root_udev_mutex); 211 */ 212 static DEFINE_MUTEX(root_udev_mutex); 213 static LIST_HEAD(root_udev); 214 215 static DEFINE_SPINLOCK(timed_out_udevs_lock); 216 static LIST_HEAD(timed_out_udevs); 217 218 static struct kmem_cache *tcmu_cmd_cache; 219 220 static atomic_t global_db_count = ATOMIC_INIT(0); 221 static struct delayed_work tcmu_unmap_work; 222 static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF; 223 224 static int tcmu_set_global_max_data_area(const char *str, 225 const struct kernel_param *kp) 226 { 227 int ret, max_area_mb; 228 229 ret = kstrtoint(str, 10, &max_area_mb); 230 if (ret) 231 return -EINVAL; 232 233 if (max_area_mb <= 0) { 234 pr_err("global_max_data_area must be larger than 0.\n"); 235 return -EINVAL; 236 } 237 238 tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb); 239 if (atomic_read(&global_db_count) > tcmu_global_max_blocks) 240 schedule_delayed_work(&tcmu_unmap_work, 0); 241 else 242 cancel_delayed_work_sync(&tcmu_unmap_work); 243 244 return 0; 245 } 246 247 static int tcmu_get_global_max_data_area(char *buffer, 248 const struct kernel_param *kp) 249 { 250 return sprintf(buffer, "%d\n", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks)); 251 } 252 253 static const struct kernel_param_ops tcmu_global_max_data_area_op = { 254 .set = tcmu_set_global_max_data_area, 255 .get = tcmu_get_global_max_data_area, 256 }; 257 258 module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL, 259 S_IWUSR | S_IRUGO); 260 MODULE_PARM_DESC(global_max_data_area_mb, 261 "Max MBs allowed to be allocated to all the tcmu device's " 262 "data areas."); 263 264 static int tcmu_get_block_netlink(char *buffer, 265 const struct kernel_param *kp) 266 { 267 return sprintf(buffer, "%s\n", tcmu_netlink_blocked ? 268 "blocked" : "unblocked"); 269 } 270 271 static int tcmu_set_block_netlink(const char *str, 272 const struct kernel_param *kp) 273 { 274 int ret; 275 u8 val; 276 277 ret = kstrtou8(str, 0, &val); 278 if (ret < 0) 279 return ret; 280 281 if (val > 1) { 282 pr_err("Invalid block netlink value %u\n", val); 283 return -EINVAL; 284 } 285 286 tcmu_netlink_blocked = val; 287 return 0; 288 } 289 290 static const struct kernel_param_ops tcmu_block_netlink_op = { 291 .set = tcmu_set_block_netlink, 292 .get = tcmu_get_block_netlink, 293 }; 294 295 module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO); 296 MODULE_PARM_DESC(block_netlink, "Block new netlink commands."); 297 298 static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd) 299 { 300 struct tcmu_dev *udev = nl_cmd->udev; 301 302 if (!tcmu_netlink_blocked) { 303 pr_err("Could not reset device's netlink interface. 
Netlink is not blocked.\n"); 304 return -EBUSY; 305 } 306 307 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { 308 pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name); 309 nl_cmd->status = -EINTR; 310 list_del(&nl_cmd->nl_list); 311 complete(&nl_cmd->complete); 312 } 313 return 0; 314 } 315 316 static int tcmu_set_reset_netlink(const char *str, 317 const struct kernel_param *kp) 318 { 319 struct tcmu_nl_cmd *nl_cmd, *tmp_cmd; 320 int ret; 321 u8 val; 322 323 ret = kstrtou8(str, 0, &val); 324 if (ret < 0) 325 return ret; 326 327 if (val != 1) { 328 pr_err("Invalid reset netlink value %u\n", val); 329 return -EINVAL; 330 } 331 332 mutex_lock(&tcmu_nl_cmd_mutex); 333 list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) { 334 ret = tcmu_fail_netlink_cmd(nl_cmd); 335 if (ret) 336 break; 337 } 338 mutex_unlock(&tcmu_nl_cmd_mutex); 339 340 return ret; 341 } 342 343 static const struct kernel_param_ops tcmu_reset_netlink_op = { 344 .set = tcmu_set_reset_netlink, 345 }; 346 347 module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR); 348 MODULE_PARM_DESC(reset_netlink, "Reset netlink commands."); 349 350 /* multicast group */ 351 enum tcmu_multicast_groups { 352 TCMU_MCGRP_CONFIG, 353 }; 354 355 static const struct genl_multicast_group tcmu_mcgrps[] = { 356 [TCMU_MCGRP_CONFIG] = { .name = "config", }, 357 }; 358 359 static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = { 360 [TCMU_ATTR_DEVICE] = { .type = NLA_STRING }, 361 [TCMU_ATTR_MINOR] = { .type = NLA_U32 }, 362 [TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 }, 363 [TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 }, 364 [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 }, 365 }; 366 367 static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd) 368 { 369 struct tcmu_dev *udev = NULL; 370 struct tcmu_nl_cmd *nl_cmd; 371 int dev_id, rc, ret = 0; 372 373 if (!info->attrs[TCMU_ATTR_CMD_STATUS] || 374 !info->attrs[TCMU_ATTR_DEVICE_ID]) { 375 printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n"); 376 return -EINVAL; 377 } 378 379 dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]); 380 rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]); 381 382 mutex_lock(&tcmu_nl_cmd_mutex); 383 list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) { 384 if (nl_cmd->udev->se_dev.dev_index == dev_id) { 385 udev = nl_cmd->udev; 386 break; 387 } 388 } 389 390 if (!udev) { 391 pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n", 392 completed_cmd, rc, dev_id); 393 ret = -ENODEV; 394 goto unlock; 395 } 396 list_del(&nl_cmd->nl_list); 397 398 pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n", 399 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc, 400 nl_cmd->status); 401 402 if (nl_cmd->cmd != completed_cmd) { 403 pr_err("Mismatched commands on %s (Expecting reply for %d. 
Current %d).\n", 404 udev->name, completed_cmd, nl_cmd->cmd); 405 ret = -EINVAL; 406 goto unlock; 407 } 408 409 nl_cmd->status = rc; 410 complete(&nl_cmd->complete); 411 unlock: 412 mutex_unlock(&tcmu_nl_cmd_mutex); 413 return ret; 414 } 415 416 static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info) 417 { 418 return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE); 419 } 420 421 static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info) 422 { 423 return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE); 424 } 425 426 static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb, 427 struct genl_info *info) 428 { 429 return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE); 430 } 431 432 static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info) 433 { 434 if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) { 435 tcmu_kern_cmd_reply_supported = 436 nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]); 437 printk(KERN_INFO "tcmu daemon: command reply support %u.\n", 438 tcmu_kern_cmd_reply_supported); 439 } 440 441 return 0; 442 } 443 444 static const struct genl_small_ops tcmu_genl_ops[] = { 445 { 446 .cmd = TCMU_CMD_SET_FEATURES, 447 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 448 .flags = GENL_ADMIN_PERM, 449 .doit = tcmu_genl_set_features, 450 }, 451 { 452 .cmd = TCMU_CMD_ADDED_DEVICE_DONE, 453 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 454 .flags = GENL_ADMIN_PERM, 455 .doit = tcmu_genl_add_dev_done, 456 }, 457 { 458 .cmd = TCMU_CMD_REMOVED_DEVICE_DONE, 459 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 460 .flags = GENL_ADMIN_PERM, 461 .doit = tcmu_genl_rm_dev_done, 462 }, 463 { 464 .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE, 465 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 466 .flags = GENL_ADMIN_PERM, 467 .doit = tcmu_genl_reconfig_dev_done, 468 }, 469 }; 470 471 /* Our generic netlink family */ 472 static struct genl_family tcmu_genl_family __ro_after_init = { 473 .module = THIS_MODULE, 474 .hdrsize = 0, 475 .name = "TCM-USER", 476 .version = 2, 477 .maxattr = TCMU_ATTR_MAX, 478 .policy = tcmu_attr_policy, 479 .mcgrps = tcmu_mcgrps, 480 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps), 481 .netnsok = true, 482 .small_ops = tcmu_genl_ops, 483 .n_small_ops = ARRAY_SIZE(tcmu_genl_ops), 484 }; 485 486 #define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index)) 487 #define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0) 488 #define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index)) 489 #define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++]) 490 491 static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len) 492 { 493 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 494 uint32_t i; 495 496 for (i = 0; i < len; i++) 497 clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap); 498 } 499 500 static inline int tcmu_get_empty_block(struct tcmu_dev *udev, 501 struct tcmu_cmd *tcmu_cmd, 502 int prev_dbi, int *iov_cnt) 503 { 504 struct page *page; 505 int ret, dbi; 506 507 dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh); 508 if (dbi == udev->dbi_thresh) 509 return -1; 510 511 page = radix_tree_lookup(&udev->data_blocks, dbi); 512 if (!page) { 513 if (atomic_add_return(1, &global_db_count) > 514 tcmu_global_max_blocks) 515 schedule_delayed_work(&tcmu_unmap_work, 0); 516 517 /* try to get new page from the mm */ 518 page = alloc_page(GFP_NOIO); 519 if (!page) 520 goto err_alloc; 521 522 ret = 
radix_tree_insert(&udev->data_blocks, dbi, page); 523 if (ret) 524 goto err_insert; 525 } 526 527 if (dbi > udev->dbi_max) 528 udev->dbi_max = dbi; 529 530 set_bit(dbi, udev->data_bitmap); 531 tcmu_cmd_set_dbi(tcmu_cmd, dbi); 532 533 if (dbi != prev_dbi + 1) 534 *iov_cnt += 1; 535 536 return dbi; 537 err_insert: 538 __free_page(page); 539 err_alloc: 540 atomic_dec(&global_db_count); 541 return -1; 542 } 543 544 static int tcmu_get_empty_blocks(struct tcmu_dev *udev, 545 struct tcmu_cmd *tcmu_cmd, int dbi_cnt) 546 { 547 /* start value of dbi + 1 must not be a valid dbi */ 548 int dbi = -2; 549 int i, iov_cnt = 0; 550 551 for (i = 0; i < dbi_cnt; i++) { 552 dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, &iov_cnt); 553 if (dbi < 0) 554 return -1; 555 } 556 return iov_cnt; 557 } 558 559 static inline struct page * 560 tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi) 561 { 562 return radix_tree_lookup(&udev->data_blocks, dbi); 563 } 564 565 static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd) 566 { 567 kfree(tcmu_cmd->dbi); 568 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); 569 } 570 571 static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd) 572 { 573 int i, len; 574 struct se_cmd *se_cmd = cmd->se_cmd; 575 576 cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE); 577 578 if (se_cmd->se_cmd_flags & SCF_BIDI) { 579 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents)); 580 for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++) 581 len += se_cmd->t_bidi_data_sg[i].length; 582 cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, DATA_BLOCK_SIZE); 583 cmd->dbi_cnt += cmd->dbi_bidi_cnt; 584 cmd->data_len_bidi = len; 585 } 586 } 587 588 static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd, 589 struct iovec **iov, int prev_dbi, int len) 590 { 591 /* Get the next dbi */ 592 int dbi = tcmu_cmd_get_dbi(cmd); 593 594 /* Do not add more than DATA_BLOCK_SIZE to iov */ 595 if (len > DATA_BLOCK_SIZE) 596 len = DATA_BLOCK_SIZE; 597 598 /* 599 * The following code will gather and map the blocks to the same iovec 600 * when the blocks are all next to each other. 
601 */ 602 if (dbi != prev_dbi + 1) { 603 /* dbi is not next to previous dbi, so start new iov */ 604 if (prev_dbi >= 0) 605 (*iov)++; 606 /* write offset relative to mb_addr */ 607 (*iov)->iov_base = (void __user *) 608 (udev->data_off + dbi * DATA_BLOCK_SIZE); 609 } 610 (*iov)->iov_len += len; 611 612 return dbi; 613 } 614 615 static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd, 616 struct iovec **iov, int data_length) 617 { 618 /* start value of dbi + 1 must not be a valid dbi */ 619 int dbi = -2; 620 621 /* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */ 622 for (; data_length > 0; data_length -= DATA_BLOCK_SIZE) 623 dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length); 624 } 625 626 static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) 627 { 628 struct se_device *se_dev = se_cmd->se_dev; 629 struct tcmu_dev *udev = TCMU_DEV(se_dev); 630 struct tcmu_cmd *tcmu_cmd; 631 632 tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO); 633 if (!tcmu_cmd) 634 return NULL; 635 636 INIT_LIST_HEAD(&tcmu_cmd->queue_entry); 637 tcmu_cmd->se_cmd = se_cmd; 638 tcmu_cmd->tcmu_dev = udev; 639 640 tcmu_cmd_set_block_cnts(tcmu_cmd); 641 tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t), 642 GFP_NOIO); 643 if (!tcmu_cmd->dbi) { 644 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); 645 return NULL; 646 } 647 648 return tcmu_cmd; 649 } 650 651 static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) 652 { 653 unsigned long offset = offset_in_page(vaddr); 654 void *start = vaddr - offset; 655 656 size = round_up(size+offset, PAGE_SIZE); 657 658 while (size) { 659 flush_dcache_page(vmalloc_to_page(start)); 660 start += PAGE_SIZE; 661 size -= PAGE_SIZE; 662 } 663 } 664 665 /* 666 * Some ring helper functions. We don't assume size is a power of 2 so 667 * we can't use circ_buf.h. 
668 */ 669 static inline size_t spc_used(size_t head, size_t tail, size_t size) 670 { 671 int diff = head - tail; 672 673 if (diff >= 0) 674 return diff; 675 else 676 return size + diff; 677 } 678 679 static inline size_t spc_free(size_t head, size_t tail, size_t size) 680 { 681 /* Keep 1 byte unused or we can't tell full from empty */ 682 return (size - spc_used(head, tail, size) - 1); 683 } 684 685 static inline size_t head_to_end(size_t head, size_t size) 686 { 687 return size - head; 688 } 689 690 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size) 691 692 #define TCMU_SG_TO_DATA_AREA 1 693 #define TCMU_DATA_AREA_TO_SG 2 694 695 static inline void tcmu_copy_data(struct tcmu_dev *udev, 696 struct tcmu_cmd *tcmu_cmd, uint32_t direction, 697 struct scatterlist *sg, unsigned int sg_nents, 698 struct iovec **iov, size_t data_len) 699 { 700 /* start value of dbi + 1 must not be a valid dbi */ 701 int dbi = -2; 702 size_t block_remaining, cp_len; 703 struct sg_mapping_iter sg_iter; 704 unsigned int sg_flags; 705 struct page *page; 706 void *data_page_start, *data_addr; 707 708 if (direction == TCMU_SG_TO_DATA_AREA) 709 sg_flags = SG_MITER_ATOMIC | SG_MITER_FROM_SG; 710 else 711 sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG; 712 sg_miter_start(&sg_iter, sg, sg_nents, sg_flags); 713 714 while (data_len) { 715 if (direction == TCMU_SG_TO_DATA_AREA) 716 dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi, 717 data_len); 718 else 719 dbi = tcmu_cmd_get_dbi(tcmu_cmd); 720 page = tcmu_get_block_page(udev, dbi); 721 if (direction == TCMU_DATA_AREA_TO_SG) 722 flush_dcache_page(page); 723 data_page_start = kmap_atomic(page); 724 block_remaining = DATA_BLOCK_SIZE; 725 726 while (block_remaining && data_len) { 727 if (!sg_miter_next(&sg_iter)) { 728 /* set length to 0 to abort outer loop */ 729 data_len = 0; 730 pr_debug("tcmu_move_data: aborting data copy due to exhausted sg_list\n"); 731 break; 732 } 733 cp_len = min3(sg_iter.length, block_remaining, data_len); 734 735 data_addr = data_page_start + 736 DATA_BLOCK_SIZE - block_remaining; 737 if (direction == TCMU_SG_TO_DATA_AREA) 738 memcpy(data_addr, sg_iter.addr, cp_len); 739 else 740 memcpy(sg_iter.addr, data_addr, cp_len); 741 742 data_len -= cp_len; 743 block_remaining -= cp_len; 744 sg_iter.consumed = cp_len; 745 } 746 sg_miter_stop(&sg_iter); 747 748 kunmap_atomic(data_page_start); 749 if (direction == TCMU_SG_TO_DATA_AREA) 750 flush_dcache_page(page); 751 } 752 } 753 754 static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd, 755 struct iovec **iov) 756 { 757 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 758 759 tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg, 760 se_cmd->t_data_nents, iov, se_cmd->data_length); 761 } 762 763 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd, 764 bool bidi, uint32_t read_len) 765 { 766 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 767 struct scatterlist *data_sg; 768 unsigned int data_nents; 769 770 if (!bidi) { 771 data_sg = se_cmd->t_data_sg; 772 data_nents = se_cmd->t_data_nents; 773 } else { 774 /* 775 * For bidi case, the first count blocks are for Data-Out 776 * buffer blocks, and before gathering the Data-In buffer 777 * the Data-Out buffer blocks should be skipped. 
778 */ 779 tcmu_cmd_set_dbi_cur(tcmu_cmd, 780 tcmu_cmd->dbi_cnt - tcmu_cmd->dbi_bidi_cnt); 781 782 data_sg = se_cmd->t_bidi_data_sg; 783 data_nents = se_cmd->t_bidi_data_nents; 784 } 785 786 tcmu_copy_data(udev, tcmu_cmd, TCMU_DATA_AREA_TO_SG, data_sg, 787 data_nents, NULL, read_len); 788 } 789 790 static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh) 791 { 792 return thresh - bitmap_weight(bitmap, thresh); 793 } 794 795 /* 796 * We can't queue a command until we have space available on the cmd ring. 797 * 798 * Called with ring lock held. 799 */ 800 static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size) 801 { 802 struct tcmu_mailbox *mb = udev->mb_addr; 803 size_t space, cmd_needed; 804 u32 cmd_head; 805 806 tcmu_flush_dcache_range(mb, sizeof(*mb)); 807 808 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 809 810 /* 811 * If cmd end-of-ring space is too small then we need space for a NOP plus 812 * original cmd - cmds are internally contiguous. 813 */ 814 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size) 815 cmd_needed = cmd_size; 816 else 817 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size); 818 819 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size); 820 if (space < cmd_needed) { 821 pr_debug("no cmd space: %u %u %u\n", cmd_head, 822 udev->cmdr_last_cleaned, udev->cmdr_size); 823 return false; 824 } 825 return true; 826 } 827 828 /* 829 * We have to allocate data buffers before we can queue a command. 830 * Returns -1 on error (not enough space) or number of needed iovs on success 831 * 832 * Called with ring lock held. 833 */ 834 static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd, 835 int *iov_bidi_cnt) 836 { 837 int space, iov_cnt = 0, ret = 0; 838 839 if (!cmd->dbi_cnt) 840 goto wr_iov_cnts; 841 842 /* try to check and get the data blocks as needed */ 843 space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh); 844 if (space < cmd->dbi_cnt) { 845 unsigned long blocks_left = 846 (udev->max_blocks - udev->dbi_thresh) + space; 847 848 if (blocks_left < cmd->dbi_cnt) { 849 pr_debug("no data space: only %lu available, but ask for %lu\n", 850 blocks_left * DATA_BLOCK_SIZE, 851 cmd->dbi_cnt * DATA_BLOCK_SIZE); 852 return -1; 853 } 854 855 udev->dbi_thresh += cmd->dbi_cnt; 856 if (udev->dbi_thresh > udev->max_blocks) 857 udev->dbi_thresh = udev->max_blocks; 858 } 859 860 iov_cnt = tcmu_get_empty_blocks(udev, cmd, 861 cmd->dbi_cnt - cmd->dbi_bidi_cnt); 862 if (iov_cnt < 0) 863 return -1; 864 865 if (cmd->dbi_bidi_cnt) { 866 ret = tcmu_get_empty_blocks(udev, cmd, cmd->dbi_bidi_cnt); 867 if (ret < 0) 868 return -1; 869 } 870 wr_iov_cnts: 871 *iov_bidi_cnt = ret; 872 return iov_cnt + ret; 873 } 874 875 static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt) 876 { 877 return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]), 878 sizeof(struct tcmu_cmd_entry)); 879 } 880 881 static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd, 882 size_t base_command_size) 883 { 884 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 885 size_t command_size; 886 887 command_size = base_command_size + 888 round_up(scsi_command_size(se_cmd->t_task_cdb), 889 TCMU_OP_ALIGN_SIZE); 890 891 WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1)); 892 893 return command_size; 894 } 895 896 static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo, 897 struct timer_list *timer) 898 { 899 if (!tmo) 900 return; 901 902 tcmu_cmd->deadline = round_jiffies_up(jiffies + 
msecs_to_jiffies(tmo)); 903 if (!timer_pending(timer)) 904 mod_timer(timer, tcmu_cmd->deadline); 905 906 pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd, 907 tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC); 908 } 909 910 static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd) 911 { 912 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 913 unsigned int tmo; 914 915 /* 916 * For backwards compat if qfull_time_out is not set use 917 * cmd_time_out and if that's not set use the default time out. 918 */ 919 if (!udev->qfull_time_out) 920 return -ETIMEDOUT; 921 else if (udev->qfull_time_out > 0) 922 tmo = udev->qfull_time_out; 923 else if (udev->cmd_time_out) 924 tmo = udev->cmd_time_out; 925 else 926 tmo = TCMU_TIME_OUT; 927 928 tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); 929 930 list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); 931 pr_debug("adding cmd %p on dev %s to ring space wait queue\n", 932 tcmu_cmd, udev->name); 933 return 0; 934 } 935 936 static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size) 937 { 938 struct tcmu_cmd_entry_hdr *hdr; 939 struct tcmu_mailbox *mb = udev->mb_addr; 940 uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 941 942 /* Insert a PAD if end-of-ring space is too small */ 943 if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) { 944 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); 945 946 hdr = (void *) mb + CMDR_OFF + cmd_head; 947 tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD); 948 tcmu_hdr_set_len(&hdr->len_op, pad_size); 949 hdr->cmd_id = 0; /* not used for PAD */ 950 hdr->kflags = 0; 951 hdr->uflags = 0; 952 tcmu_flush_dcache_range(hdr, sizeof(*hdr)); 953 954 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); 955 tcmu_flush_dcache_range(mb, sizeof(*mb)); 956 957 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 958 WARN_ON(cmd_head != 0); 959 } 960 961 return cmd_head; 962 } 963 964 static void tcmu_unplug_device(struct se_dev_plug *se_plug) 965 { 966 struct se_device *se_dev = se_plug->se_dev; 967 struct tcmu_dev *udev = TCMU_DEV(se_dev); 968 969 clear_bit(TCM_DEV_BIT_PLUGGED, &udev->flags); 970 uio_event_notify(&udev->uio_info); 971 } 972 973 static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev) 974 { 975 struct tcmu_dev *udev = TCMU_DEV(se_dev); 976 977 if (!test_and_set_bit(TCM_DEV_BIT_PLUGGED, &udev->flags)) 978 return &udev->se_plug; 979 980 return NULL; 981 } 982 983 /** 984 * queue_cmd_ring - queue cmd to ring or internally 985 * @tcmu_cmd: cmd to queue 986 * @scsi_err: TCM error code if failure (-1) returned. 987 * 988 * Returns: 989 * -1 we cannot queue internally or to the ring. 990 * 0 success 991 * 1 internally queued to wait for ring memory to free. 
992 */ 993 static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) 994 { 995 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 996 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 997 size_t base_command_size, command_size; 998 struct tcmu_mailbox *mb = udev->mb_addr; 999 struct tcmu_cmd_entry *entry; 1000 struct iovec *iov; 1001 int iov_cnt, iov_bidi_cnt, cmd_id; 1002 uint32_t cmd_head; 1003 uint64_t cdb_off; 1004 /* size of data buffer needed */ 1005 size_t data_length = (size_t)tcmu_cmd->dbi_cnt * DATA_BLOCK_SIZE; 1006 1007 *scsi_err = TCM_NO_SENSE; 1008 1009 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) { 1010 *scsi_err = TCM_LUN_BUSY; 1011 return -1; 1012 } 1013 1014 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { 1015 *scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1016 return -1; 1017 } 1018 1019 if (!list_empty(&udev->qfull_queue)) 1020 goto queue; 1021 1022 if (data_length > udev->data_size) { 1023 pr_warn("TCMU: Request of size %zu is too big for %zu data area\n", 1024 data_length, udev->data_size); 1025 *scsi_err = TCM_INVALID_CDB_FIELD; 1026 return -1; 1027 } 1028 1029 iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt); 1030 if (iov_cnt < 0) 1031 goto free_and_queue; 1032 1033 /* 1034 * Must be a certain minimum size for response sense info, but 1035 * also may be larger if the iov array is large. 1036 */ 1037 base_command_size = tcmu_cmd_get_base_cmd_size(iov_cnt); 1038 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); 1039 1040 if (command_size > (udev->cmdr_size / 2)) { 1041 pr_warn("TCMU: Request of size %zu is too big for %u cmd ring\n", 1042 command_size, udev->cmdr_size); 1043 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur); 1044 *scsi_err = TCM_INVALID_CDB_FIELD; 1045 return -1; 1046 } 1047 1048 if (!is_ring_space_avail(udev, command_size)) 1049 /* 1050 * Don't leave commands partially setup because the unmap 1051 * thread might need the blocks to make forward progress. 1052 */ 1053 goto free_and_queue; 1054 1055 cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT); 1056 if (cmd_id < 0) { 1057 pr_err("tcmu: Could not allocate cmd id.\n"); 1058 1059 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); 1060 *scsi_err = TCM_OUT_OF_RESOURCES; 1061 return -1; 1062 } 1063 tcmu_cmd->cmd_id = cmd_id; 1064 1065 pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id, 1066 tcmu_cmd, udev->name); 1067 1068 cmd_head = ring_insert_padding(udev, command_size); 1069 1070 entry = (void *) mb + CMDR_OFF + cmd_head; 1071 memset(entry, 0, command_size); 1072 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); 1073 1074 /* prepare iov list and copy data to data area if necessary */ 1075 tcmu_cmd_reset_dbi_cur(tcmu_cmd); 1076 iov = &entry->req.iov[0]; 1077 1078 if (se_cmd->data_direction == DMA_TO_DEVICE || 1079 se_cmd->se_cmd_flags & SCF_BIDI) 1080 scatter_data_area(udev, tcmu_cmd, &iov); 1081 else 1082 tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length); 1083 1084 entry->req.iov_cnt = iov_cnt - iov_bidi_cnt; 1085 1086 /* Handle BIDI commands */ 1087 if (se_cmd->se_cmd_flags & SCF_BIDI) { 1088 iov++; 1089 tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi); 1090 entry->req.iov_bidi_cnt = iov_bidi_cnt; 1091 } 1092 1093 tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer); 1094 1095 entry->hdr.cmd_id = tcmu_cmd->cmd_id; 1096 1097 tcmu_hdr_set_len(&entry->hdr.len_op, command_size); 1098 1099 /* All offsets relative to mb_addr, not start of entry! 
*/ 1100 cdb_off = CMDR_OFF + cmd_head + base_command_size; 1101 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); 1102 entry->req.cdb_off = cdb_off; 1103 tcmu_flush_dcache_range(entry, command_size); 1104 1105 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); 1106 tcmu_flush_dcache_range(mb, sizeof(*mb)); 1107 1108 list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue); 1109 1110 if (!test_bit(TCM_DEV_BIT_PLUGGED, &udev->flags)) 1111 uio_event_notify(&udev->uio_info); 1112 1113 return 0; 1114 1115 free_and_queue: 1116 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur); 1117 tcmu_cmd_reset_dbi_cur(tcmu_cmd); 1118 1119 queue: 1120 if (add_to_qfull_queue(tcmu_cmd)) { 1121 *scsi_err = TCM_OUT_OF_RESOURCES; 1122 return -1; 1123 } 1124 1125 return 1; 1126 } 1127 1128 /** 1129 * queue_tmr_ring - queue tmr info to ring or internally 1130 * @udev: related tcmu_dev 1131 * @tmr: tcmu_tmr containing tmr info to queue 1132 * 1133 * Returns: 1134 * 0 success 1135 * 1 internally queued to wait for ring memory to free. 1136 */ 1137 static int 1138 queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr) 1139 { 1140 struct tcmu_tmr_entry *entry; 1141 int cmd_size; 1142 int id_list_sz; 1143 struct tcmu_mailbox *mb = udev->mb_addr; 1144 uint32_t cmd_head; 1145 1146 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) 1147 goto out_free; 1148 1149 id_list_sz = sizeof(tmr->tmr_cmd_ids[0]) * tmr->tmr_cmd_cnt; 1150 cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE); 1151 1152 if (!list_empty(&udev->tmr_queue) || 1153 !is_ring_space_avail(udev, cmd_size)) { 1154 list_add_tail(&tmr->queue_entry, &udev->tmr_queue); 1155 pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n", 1156 tmr, udev->name); 1157 return 1; 1158 } 1159 1160 cmd_head = ring_insert_padding(udev, cmd_size); 1161 1162 entry = (void *)mb + CMDR_OFF + cmd_head; 1163 memset(entry, 0, cmd_size); 1164 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR); 1165 tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size); 1166 entry->tmr_type = tmr->tmr_type; 1167 entry->cmd_cnt = tmr->tmr_cmd_cnt; 1168 memcpy(&entry->cmd_ids[0], &tmr->tmr_cmd_ids[0], id_list_sz); 1169 tcmu_flush_dcache_range(entry, cmd_size); 1170 1171 UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size); 1172 tcmu_flush_dcache_range(mb, sizeof(*mb)); 1173 1174 uio_event_notify(&udev->uio_info); 1175 1176 out_free: 1177 kfree(tmr); 1178 1179 return 0; 1180 } 1181 1182 static sense_reason_t 1183 tcmu_queue_cmd(struct se_cmd *se_cmd) 1184 { 1185 struct se_device *se_dev = se_cmd->se_dev; 1186 struct tcmu_dev *udev = TCMU_DEV(se_dev); 1187 struct tcmu_cmd *tcmu_cmd; 1188 sense_reason_t scsi_ret = TCM_CHECK_CONDITION_ABORT_CMD; 1189 int ret = -1; 1190 1191 tcmu_cmd = tcmu_alloc_cmd(se_cmd); 1192 if (!tcmu_cmd) 1193 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1194 1195 mutex_lock(&udev->cmdr_lock); 1196 if (!(se_cmd->transport_state & CMD_T_ABORTED)) 1197 ret = queue_cmd_ring(tcmu_cmd, &scsi_ret); 1198 if (ret < 0) 1199 tcmu_free_cmd(tcmu_cmd); 1200 else 1201 se_cmd->priv = tcmu_cmd; 1202 mutex_unlock(&udev->cmdr_lock); 1203 return scsi_ret; 1204 } 1205 1206 static void tcmu_set_next_deadline(struct list_head *queue, 1207 struct timer_list *timer) 1208 { 1209 struct tcmu_cmd *cmd; 1210 1211 if (!list_empty(queue)) { 1212 cmd = list_first_entry(queue, struct tcmu_cmd, queue_entry); 1213 mod_timer(timer, cmd->deadline); 1214 } else 1215 del_timer(timer); 1216 } 1217 1218 static int 1219 tcmu_tmr_type(enum tcm_tmreq_table tmf) 1220 
{ 1221 switch (tmf) { 1222 case TMR_ABORT_TASK: return TCMU_TMR_ABORT_TASK; 1223 case TMR_ABORT_TASK_SET: return TCMU_TMR_ABORT_TASK_SET; 1224 case TMR_CLEAR_ACA: return TCMU_TMR_CLEAR_ACA; 1225 case TMR_CLEAR_TASK_SET: return TCMU_TMR_CLEAR_TASK_SET; 1226 case TMR_LUN_RESET: return TCMU_TMR_LUN_RESET; 1227 case TMR_TARGET_WARM_RESET: return TCMU_TMR_TARGET_WARM_RESET; 1228 case TMR_TARGET_COLD_RESET: return TCMU_TMR_TARGET_COLD_RESET; 1229 case TMR_LUN_RESET_PRO: return TCMU_TMR_LUN_RESET_PRO; 1230 default: return TCMU_TMR_UNKNOWN; 1231 } 1232 } 1233 1234 static void 1235 tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf, 1236 struct list_head *cmd_list) 1237 { 1238 int i = 0, cmd_cnt = 0; 1239 bool unqueued = false; 1240 uint16_t *cmd_ids = NULL; 1241 struct tcmu_cmd *cmd; 1242 struct se_cmd *se_cmd; 1243 struct tcmu_tmr *tmr; 1244 struct tcmu_dev *udev = TCMU_DEV(se_dev); 1245 1246 mutex_lock(&udev->cmdr_lock); 1247 1248 /* First we check for aborted commands in qfull_queue */ 1249 list_for_each_entry(se_cmd, cmd_list, state_list) { 1250 i++; 1251 if (!se_cmd->priv) 1252 continue; 1253 cmd = se_cmd->priv; 1254 /* Commands on qfull queue have no id yet */ 1255 if (cmd->cmd_id) { 1256 cmd_cnt++; 1257 continue; 1258 } 1259 pr_debug("Removing aborted command %p from queue on dev %s.\n", 1260 cmd, udev->name); 1261 1262 list_del_init(&cmd->queue_entry); 1263 tcmu_free_cmd(cmd); 1264 se_cmd->priv = NULL; 1265 target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED); 1266 unqueued = true; 1267 } 1268 if (unqueued) 1269 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); 1270 1271 if (!test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags)) 1272 goto unlock; 1273 1274 pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n", 1275 tcmu_tmr_type(tmf), udev->name, i, cmd_cnt); 1276 1277 tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_KERNEL); 1278 if (!tmr) 1279 goto unlock; 1280 1281 tmr->tmr_type = tcmu_tmr_type(tmf); 1282 tmr->tmr_cmd_cnt = cmd_cnt; 1283 1284 if (cmd_cnt != 0) { 1285 cmd_cnt = 0; 1286 list_for_each_entry(se_cmd, cmd_list, state_list) { 1287 if (!se_cmd->priv) 1288 continue; 1289 cmd = se_cmd->priv; 1290 if (cmd->cmd_id) 1291 tmr->tmr_cmd_ids[cmd_cnt++] = cmd->cmd_id; 1292 } 1293 } 1294 1295 queue_tmr_ring(udev, tmr); 1296 1297 unlock: 1298 mutex_unlock(&udev->cmdr_lock); 1299 } 1300 1301 static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry) 1302 { 1303 struct se_cmd *se_cmd = cmd->se_cmd; 1304 struct tcmu_dev *udev = cmd->tcmu_dev; 1305 bool read_len_valid = false; 1306 uint32_t read_len; 1307 1308 /* 1309 * cmd has been completed already from timeout, just reclaim 1310 * data area space and free cmd 1311 */ 1312 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 1313 WARN_ON_ONCE(se_cmd); 1314 goto out; 1315 } 1316 1317 list_del_init(&cmd->queue_entry); 1318 1319 tcmu_cmd_reset_dbi_cur(cmd); 1320 1321 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { 1322 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n", 1323 cmd->se_cmd); 1324 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION; 1325 goto done; 1326 } 1327 1328 read_len = se_cmd->data_length; 1329 if (se_cmd->data_direction == DMA_FROM_DEVICE && 1330 (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) { 1331 read_len_valid = true; 1332 if (entry->rsp.read_len < read_len) 1333 read_len = entry->rsp.read_len; 1334 } 1335 1336 if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { 1337 transport_copy_sense_to_cmd(se_cmd, 
entry->rsp.sense_buffer); 1338 if (!read_len_valid ) 1339 goto done; 1340 else 1341 se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL; 1342 } 1343 if (se_cmd->se_cmd_flags & SCF_BIDI) { 1344 /* Get Data-In buffer before clean up */ 1345 gather_data_area(udev, cmd, true, read_len); 1346 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { 1347 gather_data_area(udev, cmd, false, read_len); 1348 } else if (se_cmd->data_direction == DMA_TO_DEVICE) { 1349 /* TODO: */ 1350 } else if (se_cmd->data_direction != DMA_NONE) { 1351 pr_warn("TCMU: data direction was %d!\n", 1352 se_cmd->data_direction); 1353 } 1354 1355 done: 1356 se_cmd->priv = NULL; 1357 if (read_len_valid) { 1358 pr_debug("read_len = %d\n", read_len); 1359 target_complete_cmd_with_length(cmd->se_cmd, 1360 entry->rsp.scsi_status, read_len); 1361 } else 1362 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); 1363 1364 out: 1365 tcmu_cmd_free_data(cmd, cmd->dbi_cnt); 1366 tcmu_free_cmd(cmd); 1367 } 1368 1369 static int tcmu_run_tmr_queue(struct tcmu_dev *udev) 1370 { 1371 struct tcmu_tmr *tmr, *tmp; 1372 LIST_HEAD(tmrs); 1373 1374 if (list_empty(&udev->tmr_queue)) 1375 return 1; 1376 1377 pr_debug("running %s's tmr queue\n", udev->name); 1378 1379 list_splice_init(&udev->tmr_queue, &tmrs); 1380 1381 list_for_each_entry_safe(tmr, tmp, &tmrs, queue_entry) { 1382 list_del_init(&tmr->queue_entry); 1383 1384 pr_debug("removing tmr %p on dev %s from queue\n", 1385 tmr, udev->name); 1386 1387 if (queue_tmr_ring(udev, tmr)) { 1388 pr_debug("ran out of space during tmr queue run\n"); 1389 /* 1390 * tmr was requeued, so just put all tmrs back in 1391 * the queue 1392 */ 1393 list_splice_tail(&tmrs, &udev->tmr_queue); 1394 return 0; 1395 } 1396 } 1397 1398 return 1; 1399 } 1400 1401 static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) 1402 { 1403 struct tcmu_mailbox *mb; 1404 struct tcmu_cmd *cmd; 1405 bool free_space = false; 1406 1407 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { 1408 pr_err("ring broken, not handling completions\n"); 1409 return 0; 1410 } 1411 1412 mb = udev->mb_addr; 1413 tcmu_flush_dcache_range(mb, sizeof(*mb)); 1414 1415 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { 1416 1417 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; 1418 1419 /* 1420 * Flush max. up to end of cmd ring since current entry might 1421 * be a padding that is shorter than sizeof(*entry) 1422 */ 1423 size_t ring_left = head_to_end(udev->cmdr_last_cleaned, 1424 udev->cmdr_size); 1425 tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ? 
1426 ring_left : sizeof(*entry)); 1427 1428 free_space = true; 1429 1430 if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD || 1431 tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_TMR) { 1432 UPDATE_HEAD(udev->cmdr_last_cleaned, 1433 tcmu_hdr_get_len(entry->hdr.len_op), 1434 udev->cmdr_size); 1435 continue; 1436 } 1437 WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); 1438 1439 cmd = idr_remove(&udev->commands, entry->hdr.cmd_id); 1440 if (!cmd) { 1441 pr_err("cmd_id %u not found, ring is broken\n", 1442 entry->hdr.cmd_id); 1443 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); 1444 break; 1445 } 1446 1447 tcmu_handle_completion(cmd, entry); 1448 1449 UPDATE_HEAD(udev->cmdr_last_cleaned, 1450 tcmu_hdr_get_len(entry->hdr.len_op), 1451 udev->cmdr_size); 1452 } 1453 if (free_space) 1454 free_space = tcmu_run_tmr_queue(udev); 1455 1456 if (atomic_read(&global_db_count) > tcmu_global_max_blocks && 1457 idr_is_empty(&udev->commands) && list_empty(&udev->qfull_queue)) { 1458 /* 1459 * Allocated blocks exceeded global block limit, currently no 1460 * more pending or waiting commands so try to reclaim blocks. 1461 */ 1462 schedule_delayed_work(&tcmu_unmap_work, 0); 1463 } 1464 if (udev->cmd_time_out) 1465 tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); 1466 1467 return free_space; 1468 } 1469 1470 static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd) 1471 { 1472 struct se_cmd *se_cmd; 1473 1474 if (!time_after_eq(jiffies, cmd->deadline)) 1475 return; 1476 1477 set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); 1478 list_del_init(&cmd->queue_entry); 1479 se_cmd = cmd->se_cmd; 1480 se_cmd->priv = NULL; 1481 cmd->se_cmd = NULL; 1482 1483 pr_debug("Timing out inflight cmd %u on dev %s.\n", 1484 cmd->cmd_id, cmd->tcmu_dev->name); 1485 1486 target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION); 1487 } 1488 1489 static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd) 1490 { 1491 struct se_cmd *se_cmd; 1492 1493 if (!time_after_eq(jiffies, cmd->deadline)) 1494 return; 1495 1496 pr_debug("Timing out queued cmd %p on dev %s.\n", 1497 cmd, cmd->tcmu_dev->name); 1498 1499 list_del_init(&cmd->queue_entry); 1500 se_cmd = cmd->se_cmd; 1501 tcmu_free_cmd(cmd); 1502 1503 se_cmd->priv = NULL; 1504 target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL); 1505 } 1506 1507 static void tcmu_device_timedout(struct tcmu_dev *udev) 1508 { 1509 spin_lock(&timed_out_udevs_lock); 1510 if (list_empty(&udev->timedout_entry)) 1511 list_add_tail(&udev->timedout_entry, &timed_out_udevs); 1512 spin_unlock(&timed_out_udevs_lock); 1513 1514 schedule_delayed_work(&tcmu_unmap_work, 0); 1515 } 1516 1517 static void tcmu_cmd_timedout(struct timer_list *t) 1518 { 1519 struct tcmu_dev *udev = from_timer(udev, t, cmd_timer); 1520 1521 pr_debug("%s cmd timeout has expired\n", udev->name); 1522 tcmu_device_timedout(udev); 1523 } 1524 1525 static void tcmu_qfull_timedout(struct timer_list *t) 1526 { 1527 struct tcmu_dev *udev = from_timer(udev, t, qfull_timer); 1528 1529 pr_debug("%s qfull timeout has expired\n", udev->name); 1530 tcmu_device_timedout(udev); 1531 } 1532 1533 static int tcmu_attach_hba(struct se_hba *hba, u32 host_id) 1534 { 1535 struct tcmu_hba *tcmu_hba; 1536 1537 tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL); 1538 if (!tcmu_hba) 1539 return -ENOMEM; 1540 1541 tcmu_hba->host_id = host_id; 1542 hba->hba_ptr = tcmu_hba; 1543 1544 return 0; 1545 } 1546 1547 static void tcmu_detach_hba(struct se_hba *hba) 1548 { 1549 kfree(hba->hba_ptr); 1550 hba->hba_ptr = NULL; 1551 } 1552 1553 static struct 
se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) 1554 { 1555 struct tcmu_dev *udev; 1556 1557 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); 1558 if (!udev) 1559 return NULL; 1560 kref_init(&udev->kref); 1561 1562 udev->name = kstrdup(name, GFP_KERNEL); 1563 if (!udev->name) { 1564 kfree(udev); 1565 return NULL; 1566 } 1567 1568 udev->hba = hba; 1569 udev->cmd_time_out = TCMU_TIME_OUT; 1570 udev->qfull_time_out = -1; 1571 1572 udev->max_blocks = DATA_BLOCK_BITS_DEF; 1573 mutex_init(&udev->cmdr_lock); 1574 1575 INIT_LIST_HEAD(&udev->node); 1576 INIT_LIST_HEAD(&udev->timedout_entry); 1577 INIT_LIST_HEAD(&udev->qfull_queue); 1578 INIT_LIST_HEAD(&udev->tmr_queue); 1579 INIT_LIST_HEAD(&udev->inflight_queue); 1580 idr_init(&udev->commands); 1581 1582 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); 1583 timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0); 1584 1585 INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL); 1586 1587 return &udev->se_dev; 1588 } 1589 1590 static void tcmu_dev_call_rcu(struct rcu_head *p) 1591 { 1592 struct se_device *dev = container_of(p, struct se_device, rcu_head); 1593 struct tcmu_dev *udev = TCMU_DEV(dev); 1594 1595 kfree(udev->uio_info.name); 1596 kfree(udev->name); 1597 kfree(udev); 1598 } 1599 1600 static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd) 1601 { 1602 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 1603 kmem_cache_free(tcmu_cmd_cache, cmd); 1604 return 0; 1605 } 1606 return -EINVAL; 1607 } 1608 1609 static void tcmu_blocks_release(struct radix_tree_root *blocks, 1610 int start, int end) 1611 { 1612 int i; 1613 struct page *page; 1614 1615 for (i = start; i < end; i++) { 1616 page = radix_tree_delete(blocks, i); 1617 if (page) { 1618 __free_page(page); 1619 atomic_dec(&global_db_count); 1620 } 1621 } 1622 } 1623 1624 static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev) 1625 { 1626 struct tcmu_tmr *tmr, *tmp; 1627 1628 list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) { 1629 list_del_init(&tmr->queue_entry); 1630 kfree(tmr); 1631 } 1632 } 1633 1634 static void tcmu_dev_kref_release(struct kref *kref) 1635 { 1636 struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); 1637 struct se_device *dev = &udev->se_dev; 1638 struct tcmu_cmd *cmd; 1639 bool all_expired = true; 1640 int i; 1641 1642 vfree(udev->mb_addr); 1643 udev->mb_addr = NULL; 1644 1645 spin_lock_bh(&timed_out_udevs_lock); 1646 if (!list_empty(&udev->timedout_entry)) 1647 list_del(&udev->timedout_entry); 1648 spin_unlock_bh(&timed_out_udevs_lock); 1649 1650 /* Upper layer should drain all requests before calling this */ 1651 mutex_lock(&udev->cmdr_lock); 1652 idr_for_each_entry(&udev->commands, cmd, i) { 1653 if (tcmu_check_and_free_pending_cmd(cmd) != 0) 1654 all_expired = false; 1655 } 1656 /* There can be left over TMR cmds. Remove them. 
*/ 1657 tcmu_remove_all_queued_tmr(udev); 1658 if (!list_empty(&udev->qfull_queue)) 1659 all_expired = false; 1660 idr_destroy(&udev->commands); 1661 WARN_ON(!all_expired); 1662 1663 tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1); 1664 bitmap_free(udev->data_bitmap); 1665 mutex_unlock(&udev->cmdr_lock); 1666 1667 pr_debug("dev_kref_release\n"); 1668 1669 call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); 1670 } 1671 1672 static void run_qfull_queue(struct tcmu_dev *udev, bool fail) 1673 { 1674 struct tcmu_cmd *tcmu_cmd, *tmp_cmd; 1675 LIST_HEAD(cmds); 1676 sense_reason_t scsi_ret; 1677 int ret; 1678 1679 if (list_empty(&udev->qfull_queue)) 1680 return; 1681 1682 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); 1683 1684 list_splice_init(&udev->qfull_queue, &cmds); 1685 1686 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) { 1687 list_del_init(&tcmu_cmd->queue_entry); 1688 1689 pr_debug("removing cmd %p on dev %s from queue\n", 1690 tcmu_cmd, udev->name); 1691 1692 if (fail) { 1693 /* 1694 * We were not able to even start the command, so 1695 * fail with busy to allow a retry in case runner 1696 * was only temporarily down. If the device is being 1697 * removed then LIO core will do the right thing and 1698 * fail the retry. 1699 */ 1700 tcmu_cmd->se_cmd->priv = NULL; 1701 target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY); 1702 tcmu_free_cmd(tcmu_cmd); 1703 continue; 1704 } 1705 1706 ret = queue_cmd_ring(tcmu_cmd, &scsi_ret); 1707 if (ret < 0) { 1708 pr_debug("cmd %p on dev %s failed with %u\n", 1709 tcmu_cmd, udev->name, scsi_ret); 1710 /* 1711 * Ignore scsi_ret for now. target_complete_cmd 1712 * drops it. 1713 */ 1714 tcmu_cmd->se_cmd->priv = NULL; 1715 target_complete_cmd(tcmu_cmd->se_cmd, 1716 SAM_STAT_CHECK_CONDITION); 1717 tcmu_free_cmd(tcmu_cmd); 1718 } else if (ret > 0) { 1719 pr_debug("ran out of space during cmdr queue run\n"); 1720 /* 1721 * cmd was requeued, so just put all cmds back in 1722 * the queue 1723 */ 1724 list_splice_tail(&cmds, &udev->qfull_queue); 1725 break; 1726 } 1727 } 1728 1729 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); 1730 } 1731 1732 static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) 1733 { 1734 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1735 1736 mutex_lock(&udev->cmdr_lock); 1737 if (tcmu_handle_completions(udev)) 1738 run_qfull_queue(udev, false); 1739 mutex_unlock(&udev->cmdr_lock); 1740 1741 return 0; 1742 } 1743 1744 /* 1745 * mmap code from uio.c. Copied here because we want to hook mmap() 1746 * and this stuff must come along. 1747 */ 1748 static int tcmu_find_mem_index(struct vm_area_struct *vma) 1749 { 1750 struct tcmu_dev *udev = vma->vm_private_data; 1751 struct uio_info *info = &udev->uio_info; 1752 1753 if (vma->vm_pgoff < MAX_UIO_MAPS) { 1754 if (info->mem[vma->vm_pgoff].size == 0) 1755 return -1; 1756 return (int)vma->vm_pgoff; 1757 } 1758 return -1; 1759 } 1760 1761 static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi) 1762 { 1763 struct page *page; 1764 1765 mutex_lock(&udev->cmdr_lock); 1766 page = tcmu_get_block_page(udev, dbi); 1767 if (likely(page)) { 1768 mutex_unlock(&udev->cmdr_lock); 1769 return page; 1770 } 1771 1772 /* 1773 * Userspace messed up and passed in a address not in the 1774 * data iov passed to it. 
1775 */ 1776 pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n", 1777 dbi, udev->name); 1778 page = NULL; 1779 mutex_unlock(&udev->cmdr_lock); 1780 1781 return page; 1782 } 1783 1784 static void tcmu_vma_open(struct vm_area_struct *vma) 1785 { 1786 struct tcmu_dev *udev = vma->vm_private_data; 1787 1788 pr_debug("vma_open\n"); 1789 1790 kref_get(&udev->kref); 1791 } 1792 1793 static void tcmu_vma_close(struct vm_area_struct *vma) 1794 { 1795 struct tcmu_dev *udev = vma->vm_private_data; 1796 1797 pr_debug("vma_close\n"); 1798 1799 /* release ref from tcmu_vma_open */ 1800 kref_put(&udev->kref, tcmu_dev_kref_release); 1801 } 1802 1803 static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf) 1804 { 1805 struct tcmu_dev *udev = vmf->vma->vm_private_data; 1806 struct uio_info *info = &udev->uio_info; 1807 struct page *page; 1808 unsigned long offset; 1809 void *addr; 1810 1811 int mi = tcmu_find_mem_index(vmf->vma); 1812 if (mi < 0) 1813 return VM_FAULT_SIGBUS; 1814 1815 /* 1816 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE 1817 * to use mem[N]. 1818 */ 1819 offset = (vmf->pgoff - mi) << PAGE_SHIFT; 1820 1821 if (offset < udev->data_off) { 1822 /* For the vmalloc()ed cmd area pages */ 1823 addr = (void *)(unsigned long)info->mem[mi].addr + offset; 1824 page = vmalloc_to_page(addr); 1825 } else { 1826 uint32_t dbi; 1827 1828 /* For the dynamically growing data area pages */ 1829 dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE; 1830 page = tcmu_try_get_block_page(udev, dbi); 1831 if (!page) 1832 return VM_FAULT_SIGBUS; 1833 } 1834 1835 get_page(page); 1836 vmf->page = page; 1837 return 0; 1838 } 1839 1840 static const struct vm_operations_struct tcmu_vm_ops = { 1841 .open = tcmu_vma_open, 1842 .close = tcmu_vma_close, 1843 .fault = tcmu_vma_fault, 1844 }; 1845 1846 static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) 1847 { 1848 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1849 1850 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 1851 vma->vm_ops = &tcmu_vm_ops; 1852 1853 vma->vm_private_data = udev; 1854 1855 /* Ensure the mmap is exactly the right size */ 1856 if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT)) 1857 return -EINVAL; 1858 1859 tcmu_vma_open(vma); 1860 1861 return 0; 1862 } 1863 1864 static int tcmu_open(struct uio_info *info, struct inode *inode) 1865 { 1866 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1867 1868 /* O_EXCL not supported for char devs, so fake it? */ 1869 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags)) 1870 return -EBUSY; 1871 1872 udev->inode = inode; 1873 1874 pr_debug("open\n"); 1875 1876 return 0; 1877 } 1878 1879 static int tcmu_release(struct uio_info *info, struct inode *inode) 1880 { 1881 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1882 1883 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); 1884 1885 pr_debug("close\n"); 1886 1887 return 0; 1888 } 1889 1890 static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) 1891 { 1892 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 1893 1894 if (!tcmu_kern_cmd_reply_supported) 1895 return 0; 1896 1897 if (udev->nl_reply_supported <= 0) 1898 return 0; 1899 1900 mutex_lock(&tcmu_nl_cmd_mutex); 1901 1902 if (tcmu_netlink_blocked) { 1903 mutex_unlock(&tcmu_nl_cmd_mutex); 1904 pr_warn("Failing nl cmd %d on %s. 
Interface is blocked.\n", cmd, 1905 udev->name); 1906 return -EAGAIN; 1907 } 1908 1909 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { 1910 mutex_unlock(&tcmu_nl_cmd_mutex); 1911 pr_warn("netlink cmd %d already executing on %s\n", 1912 nl_cmd->cmd, udev->name); 1913 return -EBUSY; 1914 } 1915 1916 memset(nl_cmd, 0, sizeof(*nl_cmd)); 1917 nl_cmd->cmd = cmd; 1918 nl_cmd->udev = udev; 1919 init_completion(&nl_cmd->complete); 1920 INIT_LIST_HEAD(&nl_cmd->nl_list); 1921 1922 list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list); 1923 1924 mutex_unlock(&tcmu_nl_cmd_mutex); 1925 return 0; 1926 } 1927 1928 static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev) 1929 { 1930 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 1931 1932 if (!tcmu_kern_cmd_reply_supported) 1933 return; 1934 1935 if (udev->nl_reply_supported <= 0) 1936 return; 1937 1938 mutex_lock(&tcmu_nl_cmd_mutex); 1939 1940 list_del(&nl_cmd->nl_list); 1941 memset(nl_cmd, 0, sizeof(*nl_cmd)); 1942 1943 mutex_unlock(&tcmu_nl_cmd_mutex); 1944 } 1945 1946 static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) 1947 { 1948 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 1949 int ret; 1950 1951 if (!tcmu_kern_cmd_reply_supported) 1952 return 0; 1953 1954 if (udev->nl_reply_supported <= 0) 1955 return 0; 1956 1957 pr_debug("sleeping for nl reply\n"); 1958 wait_for_completion(&nl_cmd->complete); 1959 1960 mutex_lock(&tcmu_nl_cmd_mutex); 1961 nl_cmd->cmd = TCMU_CMD_UNSPEC; 1962 ret = nl_cmd->status; 1963 mutex_unlock(&tcmu_nl_cmd_mutex); 1964 1965 return ret; 1966 } 1967 1968 static int tcmu_netlink_event_init(struct tcmu_dev *udev, 1969 enum tcmu_genl_cmd cmd, 1970 struct sk_buff **buf, void **hdr) 1971 { 1972 struct sk_buff *skb; 1973 void *msg_header; 1974 int ret = -ENOMEM; 1975 1976 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1977 if (!skb) 1978 return ret; 1979 1980 msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd); 1981 if (!msg_header) 1982 goto free_skb; 1983 1984 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name); 1985 if (ret < 0) 1986 goto free_skb; 1987 1988 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor); 1989 if (ret < 0) 1990 goto free_skb; 1991 1992 ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index); 1993 if (ret < 0) 1994 goto free_skb; 1995 1996 *buf = skb; 1997 *hdr = msg_header; 1998 return ret; 1999 2000 free_skb: 2001 nlmsg_free(skb); 2002 return ret; 2003 } 2004 2005 static int tcmu_netlink_event_send(struct tcmu_dev *udev, 2006 enum tcmu_genl_cmd cmd, 2007 struct sk_buff *skb, void *msg_header) 2008 { 2009 int ret; 2010 2011 genlmsg_end(skb, msg_header); 2012 2013 ret = tcmu_init_genl_cmd_reply(udev, cmd); 2014 if (ret) { 2015 nlmsg_free(skb); 2016 return ret; 2017 } 2018 2019 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0, 2020 TCMU_MCGRP_CONFIG, GFP_KERNEL); 2021 2022 /* Wait during an add as the listener may not be up yet */ 2023 if (ret == 0 || 2024 (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE)) 2025 return tcmu_wait_genl_cmd_reply(udev); 2026 else 2027 tcmu_destroy_genl_cmd_reply(udev); 2028 2029 return ret; 2030 } 2031 2032 static int tcmu_send_dev_add_event(struct tcmu_dev *udev) 2033 { 2034 struct sk_buff *skb = NULL; 2035 void *msg_header = NULL; 2036 int ret = 0; 2037 2038 ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb, 2039 &msg_header); 2040 if (ret < 0) 2041 return ret; 2042 return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb, 2043 msg_header); 2044 } 2045 2046 static int 
tcmu_send_dev_remove_event(struct tcmu_dev *udev) 2047 { 2048 struct sk_buff *skb = NULL; 2049 void *msg_header = NULL; 2050 int ret = 0; 2051 2052 ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE, 2053 &skb, &msg_header); 2054 if (ret < 0) 2055 return ret; 2056 return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE, 2057 skb, msg_header); 2058 } 2059 2060 static int tcmu_update_uio_info(struct tcmu_dev *udev) 2061 { 2062 struct tcmu_hba *hba = udev->hba->hba_ptr; 2063 struct uio_info *info; 2064 char *str; 2065 2066 info = &udev->uio_info; 2067 2068 if (udev->dev_config[0]) 2069 str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id, 2070 udev->name, udev->dev_config); 2071 else 2072 str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id, 2073 udev->name); 2074 if (!str) 2075 return -ENOMEM; 2076 2077 /* If the old string exists, free it */ 2078 kfree(info->name); 2079 info->name = str; 2080 2081 return 0; 2082 } 2083 2084 static int tcmu_configure_device(struct se_device *dev) 2085 { 2086 struct tcmu_dev *udev = TCMU_DEV(dev); 2087 struct uio_info *info; 2088 struct tcmu_mailbox *mb; 2089 int ret = 0; 2090 2091 ret = tcmu_update_uio_info(udev); 2092 if (ret) 2093 return ret; 2094 2095 info = &udev->uio_info; 2096 2097 mutex_lock(&udev->cmdr_lock); 2098 udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL); 2099 mutex_unlock(&udev->cmdr_lock); 2100 if (!udev->data_bitmap) { 2101 ret = -ENOMEM; 2102 goto err_bitmap_alloc; 2103 } 2104 2105 udev->mb_addr = vzalloc(CMDR_SIZE); 2106 if (!udev->mb_addr) { 2107 ret = -ENOMEM; 2108 goto err_vzalloc; 2109 } 2110 2111 /* mailbox fits in first part of CMDR space */ 2112 udev->cmdr_size = CMDR_SIZE - CMDR_OFF; 2113 udev->data_off = CMDR_SIZE; 2114 udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE; 2115 udev->dbi_thresh = 0; /* Default in Idle state */ 2116 2117 /* Initialise the mailbox of the ring buffer */ 2118 mb = udev->mb_addr; 2119 mb->version = TCMU_MAILBOX_VERSION; 2120 mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | 2121 TCMU_MAILBOX_FLAG_CAP_READ_LEN | 2122 TCMU_MAILBOX_FLAG_CAP_TMR; 2123 mb->cmdr_off = CMDR_OFF; 2124 mb->cmdr_size = udev->cmdr_size; 2125 2126 WARN_ON(!PAGE_ALIGNED(udev->data_off)); 2127 WARN_ON(udev->data_size % PAGE_SIZE); 2128 WARN_ON(udev->data_size % DATA_BLOCK_SIZE); 2129 2130 info->version = __stringify(TCMU_MAILBOX_VERSION); 2131 2132 info->mem[0].name = "tcm-user command & data buffer"; 2133 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr; 2134 info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE; 2135 info->mem[0].memtype = UIO_MEM_NONE; 2136 2137 info->irqcontrol = tcmu_irqcontrol; 2138 info->irq = UIO_IRQ_CUSTOM; 2139 2140 info->mmap = tcmu_mmap; 2141 info->open = tcmu_open; 2142 info->release = tcmu_release; 2143 2144 ret = uio_register_device(tcmu_root_device, info); 2145 if (ret) 2146 goto err_register; 2147 2148 /* User can set hw_block_size before enable the device */ 2149 if (dev->dev_attrib.hw_block_size == 0) 2150 dev->dev_attrib.hw_block_size = 512; 2151 /* Other attributes can be configured in userspace */ 2152 if (!dev->dev_attrib.hw_max_sectors) 2153 dev->dev_attrib.hw_max_sectors = 128; 2154 if (!dev->dev_attrib.emulate_write_cache) 2155 dev->dev_attrib.emulate_write_cache = 0; 2156 dev->dev_attrib.hw_queue_depth = 128; 2157 2158 /* If user didn't explicitly disable netlink reply support, use 2159 * module scope setting. 
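 * (Summarizing the checks nearby: a negative nl_reply_supported value, set
 * via the nl_reply_supported configfs attribute or dev param, keeps netlink
 * reply handling off for this device; a value >= 0 is replaced here by the
 * module-wide tcmu_kern_cmd_reply_supported setting and is then checked in
 * tcmu_init_genl_cmd_reply().)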
2160 */ 2161 if (udev->nl_reply_supported >= 0) 2162 udev->nl_reply_supported = tcmu_kern_cmd_reply_supported; 2163
2164 /* 2165 * Get a ref in case userspace does a close on the uio device before 2166 * LIO has initiated tcmu_free_device. 2167 */ 2168 kref_get(&udev->kref); 2169
2170 ret = tcmu_send_dev_add_event(udev); 2171 if (ret) 2172 goto err_netlink; 2173
2174 mutex_lock(&root_udev_mutex); 2175 list_add(&udev->node, &root_udev); 2176 mutex_unlock(&root_udev_mutex); 2177
2178 return 0; 2179
2180 err_netlink: 2181 kref_put(&udev->kref, tcmu_dev_kref_release); 2182 uio_unregister_device(&udev->uio_info); 2183 err_register: 2184 vfree(udev->mb_addr); 2185 udev->mb_addr = NULL; 2186 err_vzalloc: 2187 bitmap_free(udev->data_bitmap); 2188 udev->data_bitmap = NULL; 2189 err_bitmap_alloc: 2190 kfree(info->name); 2191 info->name = NULL; 2192
2193 return ret; 2194 } 2195
2196 static void tcmu_free_device(struct se_device *dev) 2197 { 2198 struct tcmu_dev *udev = TCMU_DEV(dev); 2199
2200 /* release ref from init */ 2201 kref_put(&udev->kref, tcmu_dev_kref_release); 2202 } 2203
2204 static void tcmu_destroy_device(struct se_device *dev) 2205 { 2206 struct tcmu_dev *udev = TCMU_DEV(dev); 2207
2208 del_timer_sync(&udev->cmd_timer); 2209 del_timer_sync(&udev->qfull_timer); 2210
2211 mutex_lock(&root_udev_mutex); 2212 list_del(&udev->node); 2213 mutex_unlock(&root_udev_mutex); 2214
2215 tcmu_send_dev_remove_event(udev); 2216
2217 uio_unregister_device(&udev->uio_info); 2218
2219 /* release ref from configure */ 2220 kref_put(&udev->kref, tcmu_dev_kref_release); 2221 } 2222
2223 static void tcmu_unblock_dev(struct tcmu_dev *udev) 2224 { 2225 mutex_lock(&udev->cmdr_lock); 2226 clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags); 2227 mutex_unlock(&udev->cmdr_lock); 2228 } 2229
2230 static void tcmu_block_dev(struct tcmu_dev *udev) 2231 { 2232 mutex_lock(&udev->cmdr_lock); 2233
2234 if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) 2235 goto unlock; 2236
2237 /* complete IO that has executed successfully */ 2238 tcmu_handle_completions(udev); 2239 /* fail IO waiting to be queued */ 2240 run_qfull_queue(udev, true); 2241
2242 unlock: 2243 mutex_unlock(&udev->cmdr_lock); 2244 } 2245
2246 static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) 2247 { 2248 struct tcmu_mailbox *mb; 2249 struct tcmu_cmd *cmd; 2250 int i; 2251
2252 mutex_lock(&udev->cmdr_lock); 2253
2254 idr_for_each_entry(&udev->commands, cmd, i) { 2255 pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", 2256 cmd->cmd_id, udev->name, 2257 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)); 2258
2259 idr_remove(&udev->commands, i); 2260 if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 2261 WARN_ON(!cmd->se_cmd); 2262 list_del_init(&cmd->queue_entry); 2263 cmd->se_cmd->priv = NULL; 2264 if (err_level == 1) { 2265 /* 2266 * Userspace was not able to start the 2267 * command or it is retryable.
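 * Return SAM_STAT_BUSY so the initiator can simply retry
 * the command. Any other err_level (2 from the reset_ring
 * action attribute) is treated as a hard failure below and
 * completes the command with CHECK CONDITION.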
2268 */ 2269 target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY); 2270 } else { 2271 /* hard failure */ 2272 target_complete_cmd(cmd->se_cmd, 2273 SAM_STAT_CHECK_CONDITION); 2274 } 2275 } 2276 tcmu_cmd_free_data(cmd, cmd->dbi_cnt); 2277 tcmu_free_cmd(cmd); 2278 } 2279
2280 mb = udev->mb_addr; 2281 tcmu_flush_dcache_range(mb, sizeof(*mb)); 2282 pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned, 2283 mb->cmd_tail, mb->cmd_head); 2284
2285 udev->cmdr_last_cleaned = 0; 2286 mb->cmd_tail = 0; 2287 mb->cmd_head = 0; 2288 tcmu_flush_dcache_range(mb, sizeof(*mb)); 2289 clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); 2290
2291 del_timer(&udev->cmd_timer); 2292
2293 /* 2294 * The ring is empty and the qfull queue never contains aborted commands. 2295 * So TMRs in tmr queue do not contain relevant cmd_ids. 2296 * After a ring reset userspace should do a fresh start, so 2297 * even a LUN RESET message is no longer relevant. 2298 * Therefore remove all TMRs from the tmr queue. 2299 */ 2300 tcmu_remove_all_queued_tmr(udev); 2301
2302 run_qfull_queue(udev, false); 2303
2304 mutex_unlock(&udev->cmdr_lock); 2305 } 2306
2307 enum { 2308 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors, 2309 Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err, 2310 }; 2311
2312 static match_table_t tokens = { 2313 {Opt_dev_config, "dev_config=%s"}, 2314 {Opt_dev_size, "dev_size=%s"}, 2315 {Opt_hw_block_size, "hw_block_size=%d"}, 2316 {Opt_hw_max_sectors, "hw_max_sectors=%d"}, 2317 {Opt_nl_reply_supported, "nl_reply_supported=%d"}, 2318 {Opt_max_data_area_mb, "max_data_area_mb=%d"}, 2319 {Opt_err, NULL} 2320 }; 2321
2322 static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib) 2323 { 2324 int val, ret; 2325
2326 ret = match_int(arg, &val); 2327 if (ret < 0) { 2328 pr_err("match_int() failed for dev attrib. Error %d.\n", 2329 ret); 2330 return ret; 2331 } 2332
2333 if (val <= 0) { 2334 pr_err("Invalid dev attrib value %d. Must be greater than zero.\n", 2335 val); 2336 return -EINVAL; 2337 } 2338 *dev_attrib = val; 2339 return 0; 2340 } 2341
2342 static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg) 2343 { 2344 int val, ret; 2345
2346 ret = match_int(arg, &val); 2347 if (ret < 0) { 2348 pr_err("match_int() failed for max_data_area_mb=. Error %d.\n", 2349 ret); 2350 return ret; 2351 } 2352
2353 if (val <= 0) { 2354 pr_err("Invalid max_data_area %d.\n", val); 2355 return -EINVAL; 2356 } 2357
2358 mutex_lock(&udev->cmdr_lock); 2359 if (udev->data_bitmap) { 2360 pr_err("Cannot set max_data_area_mb after it has been enabled.\n"); 2361 ret = -EINVAL; 2362 goto unlock; 2363 } 2364
2365 udev->max_blocks = TCMU_MBS_TO_BLOCKS(val); 2366 if (udev->max_blocks > tcmu_global_max_blocks) { 2367 pr_err("%d is too large.
Adjusting max_data_area_mb to global limit of %u\n", 2368 val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks)); 2369 udev->max_blocks = tcmu_global_max_blocks; 2370 } 2371 2372 unlock: 2373 mutex_unlock(&udev->cmdr_lock); 2374 return ret; 2375 } 2376 2377 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, 2378 const char *page, ssize_t count) 2379 { 2380 struct tcmu_dev *udev = TCMU_DEV(dev); 2381 char *orig, *ptr, *opts; 2382 substring_t args[MAX_OPT_ARGS]; 2383 int ret = 0, token; 2384 2385 opts = kstrdup(page, GFP_KERNEL); 2386 if (!opts) 2387 return -ENOMEM; 2388 2389 orig = opts; 2390 2391 while ((ptr = strsep(&opts, ",\n")) != NULL) { 2392 if (!*ptr) 2393 continue; 2394 2395 token = match_token(ptr, tokens, args); 2396 switch (token) { 2397 case Opt_dev_config: 2398 if (match_strlcpy(udev->dev_config, &args[0], 2399 TCMU_CONFIG_LEN) == 0) { 2400 ret = -EINVAL; 2401 break; 2402 } 2403 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config); 2404 break; 2405 case Opt_dev_size: 2406 ret = match_u64(&args[0], &udev->dev_size); 2407 if (ret < 0) 2408 pr_err("match_u64() failed for dev_size=. Error %d.\n", 2409 ret); 2410 break; 2411 case Opt_hw_block_size: 2412 ret = tcmu_set_dev_attrib(&args[0], 2413 &(dev->dev_attrib.hw_block_size)); 2414 break; 2415 case Opt_hw_max_sectors: 2416 ret = tcmu_set_dev_attrib(&args[0], 2417 &(dev->dev_attrib.hw_max_sectors)); 2418 break; 2419 case Opt_nl_reply_supported: 2420 ret = match_int(&args[0], &udev->nl_reply_supported); 2421 if (ret < 0) 2422 pr_err("match_int() failed for nl_reply_supported=. Error %d.\n", 2423 ret); 2424 break; 2425 case Opt_max_data_area_mb: 2426 ret = tcmu_set_max_blocks_param(udev, &args[0]); 2427 break; 2428 default: 2429 break; 2430 } 2431 2432 if (ret) 2433 break; 2434 } 2435 2436 kfree(orig); 2437 return (!ret) ? count : ret; 2438 } 2439 2440 static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) 2441 { 2442 struct tcmu_dev *udev = TCMU_DEV(dev); 2443 ssize_t bl = 0; 2444 2445 bl = sprintf(b + bl, "Config: %s ", 2446 udev->dev_config[0] ? 
udev->dev_config : "NULL"); 2447 bl += sprintf(b + bl, "Size: %llu ", udev->dev_size); 2448 bl += sprintf(b + bl, "MaxDataAreaMB: %u\n", 2449 TCMU_BLOCKS_TO_MBS(udev->max_blocks)); 2450 2451 return bl; 2452 } 2453 2454 static sector_t tcmu_get_blocks(struct se_device *dev) 2455 { 2456 struct tcmu_dev *udev = TCMU_DEV(dev); 2457 2458 return div_u64(udev->dev_size - dev->dev_attrib.block_size, 2459 dev->dev_attrib.block_size); 2460 } 2461 2462 static sense_reason_t 2463 tcmu_parse_cdb(struct se_cmd *cmd) 2464 { 2465 return passthrough_parse_cdb(cmd, tcmu_queue_cmd); 2466 } 2467 2468 static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page) 2469 { 2470 struct se_dev_attrib *da = container_of(to_config_group(item), 2471 struct se_dev_attrib, da_group); 2472 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2473 2474 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); 2475 } 2476 2477 static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page, 2478 size_t count) 2479 { 2480 struct se_dev_attrib *da = container_of(to_config_group(item), 2481 struct se_dev_attrib, da_group); 2482 struct tcmu_dev *udev = container_of(da->da_dev, 2483 struct tcmu_dev, se_dev); 2484 u32 val; 2485 int ret; 2486 2487 if (da->da_dev->export_count) { 2488 pr_err("Unable to set tcmu cmd_time_out while exports exist\n"); 2489 return -EINVAL; 2490 } 2491 2492 ret = kstrtou32(page, 0, &val); 2493 if (ret < 0) 2494 return ret; 2495 2496 udev->cmd_time_out = val * MSEC_PER_SEC; 2497 return count; 2498 } 2499 CONFIGFS_ATTR(tcmu_, cmd_time_out); 2500 2501 static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page) 2502 { 2503 struct se_dev_attrib *da = container_of(to_config_group(item), 2504 struct se_dev_attrib, da_group); 2505 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2506 2507 return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ? 
2508 udev->qfull_time_out : 2509 udev->qfull_time_out / MSEC_PER_SEC); 2510 } 2511 2512 static ssize_t tcmu_qfull_time_out_store(struct config_item *item, 2513 const char *page, size_t count) 2514 { 2515 struct se_dev_attrib *da = container_of(to_config_group(item), 2516 struct se_dev_attrib, da_group); 2517 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2518 s32 val; 2519 int ret; 2520 2521 ret = kstrtos32(page, 0, &val); 2522 if (ret < 0) 2523 return ret; 2524 2525 if (val >= 0) { 2526 udev->qfull_time_out = val * MSEC_PER_SEC; 2527 } else if (val == -1) { 2528 udev->qfull_time_out = val; 2529 } else { 2530 printk(KERN_ERR "Invalid qfull timeout value %d\n", val); 2531 return -EINVAL; 2532 } 2533 return count; 2534 } 2535 CONFIGFS_ATTR(tcmu_, qfull_time_out); 2536 2537 static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page) 2538 { 2539 struct se_dev_attrib *da = container_of(to_config_group(item), 2540 struct se_dev_attrib, da_group); 2541 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2542 2543 return snprintf(page, PAGE_SIZE, "%u\n", 2544 TCMU_BLOCKS_TO_MBS(udev->max_blocks)); 2545 } 2546 CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb); 2547 2548 static ssize_t tcmu_dev_config_show(struct config_item *item, char *page) 2549 { 2550 struct se_dev_attrib *da = container_of(to_config_group(item), 2551 struct se_dev_attrib, da_group); 2552 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2553 2554 return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config); 2555 } 2556 2557 static int tcmu_send_dev_config_event(struct tcmu_dev *udev, 2558 const char *reconfig_data) 2559 { 2560 struct sk_buff *skb = NULL; 2561 void *msg_header = NULL; 2562 int ret = 0; 2563 2564 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, 2565 &skb, &msg_header); 2566 if (ret < 0) 2567 return ret; 2568 ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data); 2569 if (ret < 0) { 2570 nlmsg_free(skb); 2571 return ret; 2572 } 2573 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, 2574 skb, msg_header); 2575 } 2576 2577 2578 static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page, 2579 size_t count) 2580 { 2581 struct se_dev_attrib *da = container_of(to_config_group(item), 2582 struct se_dev_attrib, da_group); 2583 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2584 int ret, len; 2585 2586 len = strlen(page); 2587 if (!len || len > TCMU_CONFIG_LEN - 1) 2588 return -EINVAL; 2589 2590 /* Check if device has been configured before */ 2591 if (target_dev_configured(&udev->se_dev)) { 2592 ret = tcmu_send_dev_config_event(udev, page); 2593 if (ret) { 2594 pr_err("Unable to reconfigure device\n"); 2595 return ret; 2596 } 2597 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); 2598 2599 ret = tcmu_update_uio_info(udev); 2600 if (ret) 2601 return ret; 2602 return count; 2603 } 2604 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); 2605 2606 return count; 2607 } 2608 CONFIGFS_ATTR(tcmu_, dev_config); 2609 2610 static ssize_t tcmu_dev_size_show(struct config_item *item, char *page) 2611 { 2612 struct se_dev_attrib *da = container_of(to_config_group(item), 2613 struct se_dev_attrib, da_group); 2614 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2615 2616 return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size); 2617 } 2618 2619 static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size) 2620 { 2621 struct sk_buff *skb = NULL; 2622 void *msg_header = NULL; 2623 int ret = 0; 2624 2625 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, 2626 &skb, 
&msg_header); 2627 if (ret < 0) 2628 return ret; 2629 ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE, 2630 size, TCMU_ATTR_PAD); 2631 if (ret < 0) { 2632 nlmsg_free(skb); 2633 return ret; 2634 } 2635 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, 2636 skb, msg_header); 2637 } 2638 2639 static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page, 2640 size_t count) 2641 { 2642 struct se_dev_attrib *da = container_of(to_config_group(item), 2643 struct se_dev_attrib, da_group); 2644 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2645 u64 val; 2646 int ret; 2647 2648 ret = kstrtou64(page, 0, &val); 2649 if (ret < 0) 2650 return ret; 2651 2652 /* Check if device has been configured before */ 2653 if (target_dev_configured(&udev->se_dev)) { 2654 ret = tcmu_send_dev_size_event(udev, val); 2655 if (ret) { 2656 pr_err("Unable to reconfigure device\n"); 2657 return ret; 2658 } 2659 } 2660 udev->dev_size = val; 2661 return count; 2662 } 2663 CONFIGFS_ATTR(tcmu_, dev_size); 2664 2665 static ssize_t tcmu_nl_reply_supported_show(struct config_item *item, 2666 char *page) 2667 { 2668 struct se_dev_attrib *da = container_of(to_config_group(item), 2669 struct se_dev_attrib, da_group); 2670 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2671 2672 return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported); 2673 } 2674 2675 static ssize_t tcmu_nl_reply_supported_store(struct config_item *item, 2676 const char *page, size_t count) 2677 { 2678 struct se_dev_attrib *da = container_of(to_config_group(item), 2679 struct se_dev_attrib, da_group); 2680 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2681 s8 val; 2682 int ret; 2683 2684 ret = kstrtos8(page, 0, &val); 2685 if (ret < 0) 2686 return ret; 2687 2688 udev->nl_reply_supported = val; 2689 return count; 2690 } 2691 CONFIGFS_ATTR(tcmu_, nl_reply_supported); 2692 2693 static ssize_t tcmu_emulate_write_cache_show(struct config_item *item, 2694 char *page) 2695 { 2696 struct se_dev_attrib *da = container_of(to_config_group(item), 2697 struct se_dev_attrib, da_group); 2698 2699 return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache); 2700 } 2701 2702 static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val) 2703 { 2704 struct sk_buff *skb = NULL; 2705 void *msg_header = NULL; 2706 int ret = 0; 2707 2708 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, 2709 &skb, &msg_header); 2710 if (ret < 0) 2711 return ret; 2712 ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val); 2713 if (ret < 0) { 2714 nlmsg_free(skb); 2715 return ret; 2716 } 2717 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, 2718 skb, msg_header); 2719 } 2720 2721 static ssize_t tcmu_emulate_write_cache_store(struct config_item *item, 2722 const char *page, size_t count) 2723 { 2724 struct se_dev_attrib *da = container_of(to_config_group(item), 2725 struct se_dev_attrib, da_group); 2726 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2727 u8 val; 2728 int ret; 2729 2730 ret = kstrtou8(page, 0, &val); 2731 if (ret < 0) 2732 return ret; 2733 2734 /* Check if device has been configured before */ 2735 if (target_dev_configured(&udev->se_dev)) { 2736 ret = tcmu_send_emulate_write_cache(udev, val); 2737 if (ret) { 2738 pr_err("Unable to reconfigure device\n"); 2739 return ret; 2740 } 2741 } 2742 2743 da->emulate_write_cache = val; 2744 return count; 2745 } 2746 CONFIGFS_ATTR(tcmu_, emulate_write_cache); 2747 2748 static ssize_t tcmu_tmr_notification_show(struct config_item *item, char *page) 2749 { 2750 struct se_dev_attrib 
*da = container_of(to_config_group(item), 2751 struct se_dev_attrib, da_group); 2752 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2753 2754 return snprintf(page, PAGE_SIZE, "%i\n", 2755 test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags)); 2756 } 2757 2758 static ssize_t tcmu_tmr_notification_store(struct config_item *item, 2759 const char *page, size_t count) 2760 { 2761 struct se_dev_attrib *da = container_of(to_config_group(item), 2762 struct se_dev_attrib, da_group); 2763 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2764 u8 val; 2765 int ret; 2766 2767 ret = kstrtou8(page, 0, &val); 2768 if (ret < 0) 2769 return ret; 2770 if (val > 1) 2771 return -EINVAL; 2772 2773 if (val) 2774 set_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags); 2775 else 2776 clear_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags); 2777 return count; 2778 } 2779 CONFIGFS_ATTR(tcmu_, tmr_notification); 2780 2781 static ssize_t tcmu_block_dev_show(struct config_item *item, char *page) 2782 { 2783 struct se_device *se_dev = container_of(to_config_group(item), 2784 struct se_device, 2785 dev_action_group); 2786 struct tcmu_dev *udev = TCMU_DEV(se_dev); 2787 2788 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) 2789 return snprintf(page, PAGE_SIZE, "%s\n", "blocked"); 2790 else 2791 return snprintf(page, PAGE_SIZE, "%s\n", "unblocked"); 2792 } 2793 2794 static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page, 2795 size_t count) 2796 { 2797 struct se_device *se_dev = container_of(to_config_group(item), 2798 struct se_device, 2799 dev_action_group); 2800 struct tcmu_dev *udev = TCMU_DEV(se_dev); 2801 u8 val; 2802 int ret; 2803 2804 if (!target_dev_configured(&udev->se_dev)) { 2805 pr_err("Device is not configured.\n"); 2806 return -EINVAL; 2807 } 2808 2809 ret = kstrtou8(page, 0, &val); 2810 if (ret < 0) 2811 return ret; 2812 2813 if (val > 1) { 2814 pr_err("Invalid block value %d\n", val); 2815 return -EINVAL; 2816 } 2817 2818 if (!val) 2819 tcmu_unblock_dev(udev); 2820 else 2821 tcmu_block_dev(udev); 2822 return count; 2823 } 2824 CONFIGFS_ATTR(tcmu_, block_dev); 2825 2826 static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page, 2827 size_t count) 2828 { 2829 struct se_device *se_dev = container_of(to_config_group(item), 2830 struct se_device, 2831 dev_action_group); 2832 struct tcmu_dev *udev = TCMU_DEV(se_dev); 2833 u8 val; 2834 int ret; 2835 2836 if (!target_dev_configured(&udev->se_dev)) { 2837 pr_err("Device is not configured.\n"); 2838 return -EINVAL; 2839 } 2840 2841 ret = kstrtou8(page, 0, &val); 2842 if (ret < 0) 2843 return ret; 2844 2845 if (val != 1 && val != 2) { 2846 pr_err("Invalid reset ring value %d\n", val); 2847 return -EINVAL; 2848 } 2849 2850 tcmu_reset_ring(udev, val); 2851 return count; 2852 } 2853 CONFIGFS_ATTR_WO(tcmu_, reset_ring); 2854 2855 static struct configfs_attribute *tcmu_attrib_attrs[] = { 2856 &tcmu_attr_cmd_time_out, 2857 &tcmu_attr_qfull_time_out, 2858 &tcmu_attr_max_data_area_mb, 2859 &tcmu_attr_dev_config, 2860 &tcmu_attr_dev_size, 2861 &tcmu_attr_emulate_write_cache, 2862 &tcmu_attr_tmr_notification, 2863 &tcmu_attr_nl_reply_supported, 2864 NULL, 2865 }; 2866 2867 static struct configfs_attribute **tcmu_attrs; 2868 2869 static struct configfs_attribute *tcmu_action_attrs[] = { 2870 &tcmu_attr_block_dev, 2871 &tcmu_attr_reset_ring, 2872 NULL, 2873 }; 2874 2875 static struct target_backend_ops tcmu_ops = { 2876 .name = "user", 2877 .owner = THIS_MODULE, 2878 .transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH, 2879 .transport_flags_changeable = 
TRANSPORT_FLAG_PASSTHROUGH_PGR | 2880 TRANSPORT_FLAG_PASSTHROUGH_ALUA, 2881 .attach_hba = tcmu_attach_hba, 2882 .detach_hba = tcmu_detach_hba, 2883 .alloc_device = tcmu_alloc_device, 2884 .configure_device = tcmu_configure_device, 2885 .destroy_device = tcmu_destroy_device, 2886 .free_device = tcmu_free_device, 2887 .unplug_device = tcmu_unplug_device, 2888 .plug_device = tcmu_plug_device, 2889 .parse_cdb = tcmu_parse_cdb, 2890 .tmr_notify = tcmu_tmr_notify, 2891 .set_configfs_dev_params = tcmu_set_configfs_dev_params, 2892 .show_configfs_dev_params = tcmu_show_configfs_dev_params, 2893 .get_device_type = sbc_get_device_type, 2894 .get_blocks = tcmu_get_blocks, 2895 .tb_dev_action_attrs = tcmu_action_attrs, 2896 }; 2897
2898 static void find_free_blocks(void) 2899 { 2900 struct tcmu_dev *udev; 2901 loff_t off; 2902 u32 start, end, block, total_freed = 0; 2903
2904 if (atomic_read(&global_db_count) <= tcmu_global_max_blocks) 2905 return; 2906
2907 mutex_lock(&root_udev_mutex); 2908 list_for_each_entry(udev, &root_udev, node) { 2909 mutex_lock(&udev->cmdr_lock); 2910
2911 if (!target_dev_configured(&udev->se_dev)) { 2912 mutex_unlock(&udev->cmdr_lock); 2913 continue; 2914 } 2915
2916 /* Try to complete the finished commands first */ 2917 if (tcmu_handle_completions(udev)) 2918 run_qfull_queue(udev, false); 2919
2920 /* Skip the udevs in idle */ 2921 if (!udev->dbi_thresh) { 2922 mutex_unlock(&udev->cmdr_lock); 2923 continue; 2924 } 2925
2926 end = udev->dbi_max + 1; 2927 block = find_last_bit(udev->data_bitmap, end); 2928 if (block == udev->dbi_max) { 2929 /* 2930 * The last bit is dbi_max, so it is not possible 2931 * to reclaim any blocks. 2932 */ 2933 mutex_unlock(&udev->cmdr_lock); 2934 continue; 2935 } else if (block == end) { 2936 /* The current udev will go to idle state */ 2937 udev->dbi_thresh = start = 0; 2938 udev->dbi_max = 0; 2939 } else { 2940 udev->dbi_thresh = start = block + 1; 2941 udev->dbi_max = block; 2942 } 2943
2944 /* Truncate the data area from off */ 2945 off = udev->data_off + start * DATA_BLOCK_SIZE; 2946 unmap_mapping_range(udev->inode->i_mapping, off, 0, 1); 2947
2948 /* Release the block pages */ 2949 tcmu_blocks_release(&udev->data_blocks, start, end); 2950 mutex_unlock(&udev->cmdr_lock); 2951
2952 total_freed += end - start; 2953 pr_debug("Freed %u blocks (total %u) from %s.\n", end - start, 2954 total_freed, udev->name); 2955 } 2956 mutex_unlock(&root_udev_mutex); 2957
2958 if (atomic_read(&global_db_count) > tcmu_global_max_blocks) 2959 schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000)); 2960 } 2961
2962 static void check_timedout_devices(void) 2963 { 2964 struct tcmu_dev *udev, *tmp_dev; 2965 struct tcmu_cmd *cmd, *tmp_cmd; 2966 LIST_HEAD(devs); 2967
2968 spin_lock_bh(&timed_out_udevs_lock); 2969 list_splice_init(&timed_out_udevs, &devs); 2970
2971 list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) { 2972 list_del_init(&udev->timedout_entry); 2973 spin_unlock_bh(&timed_out_udevs_lock); 2974
2975 mutex_lock(&udev->cmdr_lock); 2976
2977 /* 2978 * If cmd_time_out is disabled but qfull is set, the deadline 2979 * will only reflect the qfull timeout. Ignore it.
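 * Only scan the inflight queue for expired ring commands when a
 * per-device cmd_time_out is configured; the qfull_queue below is
 * always checked.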
2980 */ 2981 if (udev->cmd_time_out) { 2982 list_for_each_entry_safe(cmd, tmp_cmd, 2983 &udev->inflight_queue, 2984 queue_entry) { 2985 tcmu_check_expired_ring_cmd(cmd); 2986 } 2987 tcmu_set_next_deadline(&udev->inflight_queue, 2988 &udev->cmd_timer); 2989 } 2990 list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue, 2991 queue_entry) { 2992 tcmu_check_expired_queue_cmd(cmd); 2993 } 2994 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); 2995 2996 mutex_unlock(&udev->cmdr_lock); 2997 2998 spin_lock_bh(&timed_out_udevs_lock); 2999 } 3000 3001 spin_unlock_bh(&timed_out_udevs_lock); 3002 } 3003 3004 static void tcmu_unmap_work_fn(struct work_struct *work) 3005 { 3006 check_timedout_devices(); 3007 find_free_blocks(); 3008 } 3009 3010 static int __init tcmu_module_init(void) 3011 { 3012 int ret, i, k, len = 0; 3013 3014 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); 3015 3016 INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn); 3017 3018 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache", 3019 sizeof(struct tcmu_cmd), 3020 __alignof__(struct tcmu_cmd), 3021 0, NULL); 3022 if (!tcmu_cmd_cache) 3023 return -ENOMEM; 3024 3025 tcmu_root_device = root_device_register("tcm_user"); 3026 if (IS_ERR(tcmu_root_device)) { 3027 ret = PTR_ERR(tcmu_root_device); 3028 goto out_free_cache; 3029 } 3030 3031 ret = genl_register_family(&tcmu_genl_family); 3032 if (ret < 0) { 3033 goto out_unreg_device; 3034 } 3035 3036 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) 3037 len += sizeof(struct configfs_attribute *); 3038 for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++) 3039 len += sizeof(struct configfs_attribute *); 3040 for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) 3041 len += sizeof(struct configfs_attribute *); 3042 len += sizeof(struct configfs_attribute *); 3043 3044 tcmu_attrs = kzalloc(len, GFP_KERNEL); 3045 if (!tcmu_attrs) { 3046 ret = -ENOMEM; 3047 goto out_unreg_genl; 3048 } 3049 3050 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) 3051 tcmu_attrs[i] = passthrough_attrib_attrs[i]; 3052 for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++) 3053 tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k]; 3054 for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) 3055 tcmu_attrs[i++] = tcmu_attrib_attrs[k]; 3056 tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs; 3057 3058 ret = transport_backend_register(&tcmu_ops); 3059 if (ret) 3060 goto out_attrs; 3061 3062 return 0; 3063 3064 out_attrs: 3065 kfree(tcmu_attrs); 3066 out_unreg_genl: 3067 genl_unregister_family(&tcmu_genl_family); 3068 out_unreg_device: 3069 root_device_unregister(tcmu_root_device); 3070 out_free_cache: 3071 kmem_cache_destroy(tcmu_cmd_cache); 3072 3073 return ret; 3074 } 3075 3076 static void __exit tcmu_module_exit(void) 3077 { 3078 cancel_delayed_work_sync(&tcmu_unmap_work); 3079 target_backend_unregister(&tcmu_ops); 3080 kfree(tcmu_attrs); 3081 genl_unregister_family(&tcmu_genl_family); 3082 root_device_unregister(tcmu_root_device); 3083 kmem_cache_destroy(tcmu_cmd_cache); 3084 } 3085 3086 MODULE_DESCRIPTION("TCM USER subsystem plugin"); 3087 MODULE_AUTHOR("Shaohua Li <shli@kernel.org>"); 3088 MODULE_AUTHOR("Andy Grover <agrover@redhat.com>"); 3089 MODULE_LICENSE("GPL"); 3090 3091 module_init(tcmu_module_init); 3092 module_exit(tcmu_module_exit); 3093
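
/*
 * Example: a minimal, illustrative userspace sketch of the other side of the
 * interface above. It is not part of this module and omits all error
 * handling; it only shows how a handler would map the ring and drive the
 * wakeup/kick path. Real handlers (e.g. tcmu-runner) also walk the command
 * entries between mb->cmd_tail and mb->cmd_head. The device name "uio0",
 * the standard UIO sysfs/dev paths, and the struct tcmu_mailbox layout from
 * <linux/target_core_user.h> are assumptions made for the sake of the sketch.
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *	#include <linux/target_core_user.h>
 *
 *	int main(void)
 *	{
 *		unsigned long long map_len;
 *		uint32_t kick = 1;
 *		struct tcmu_mailbox *mb;
 *		FILE *f;
 *		int fd;
 *
 *		// The whole region must be mapped with exactly this length,
 *		// since tcmu_mmap() rejects any other size.
 *		f = fopen("/sys/class/uio/uio0/maps/map0/size", "r");
 *		fscanf(f, "%llx", &map_len);
 *		fclose(f);
 *
 *		fd = open("/dev/uio0", O_RDWR);
 *		// offset 0 selects mem[0], see tcmu_vma_fault() above
 *		mb = mmap(NULL, map_len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *		printf("version %u cmdr_off %u cmdr_size %u\n",
 *		       (unsigned)mb->version, mb->cmdr_off, mb->cmdr_size);
 *
 *		for (;;) {
 *			uint32_t events;
 *
 *			// block until the kernel signals new work
 *			// (UIO interrupt)
 *			read(fd, &events, sizeof(events));
 *			// ... process entries, advance mb->cmd_tail ...
 *			// kick the kernel's completion path
 *			// (info->irqcontrol, i.e. tcmu_irqcontrol())
 *			write(fd, &kick, sizeof(kick));
 *		}
 *	}
 */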