// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 * Copyright (C) 2017 Chinamobile, Inc.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/xarray.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>

/**
 * DOC: Userspace I/O
 * Userspace I/O
 * -------------
 *
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This is to allow backends that
 * are too complex for in-kernel support to be possible.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap-ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */

#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

/* For cmd area, the size is fixed 8MB */
#define CMDR_SIZE (8 * 1024 * 1024)

/*
 * For data area, the block size is PAGE_SIZE and
 * the total size is 256K * PAGE_SIZE.
 */
#define DATA_BLOCK_SIZE PAGE_SIZE
#define DATA_BLOCK_SHIFT PAGE_SHIFT
#define DATA_BLOCK_BITS_DEF (256 * 1024)

#define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
#define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))

/*
 * Default number of global data blocks (512K * PAGE_SIZE)
 * when the unmap thread will be started.
 */
#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)
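
/*
 * Illustrative sizing (assuming PAGE_SIZE is 4 KiB): each data block is then
 * one 4 KiB page, so TCMU_MBS_TO_BLOCKS(1024) == 1024 << (20 - 12) ==
 * 256 * 1024 blocks. DATA_BLOCK_BITS_DEF therefore corresponds to a 1 GiB
 * data area per device, and TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024 blocks)
 * to 2 GiB shared by all devices before the unmap work starts reclaiming
 * pages.
 */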

static u8 tcmu_kern_cmd_reply_supported;
static u8 tcmu_netlink_blocked;

static struct device *tcmu_root_device;

struct tcmu_hba {
	u32 host_id;
};

#define TCMU_CONFIG_LEN 256

static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
static LIST_HEAD(tcmu_nl_cmd_list);

struct tcmu_dev;

struct tcmu_nl_cmd {
	/* wake up thread waiting for reply */
	struct completion complete;
	struct list_head nl_list;
	struct tcmu_dev *udev;
	int cmd;
	int status;
};

struct tcmu_dev {
	struct list_head node;
	struct kref kref;

	struct se_device se_dev;
	struct se_dev_plug se_plug;

	char *name;
	struct se_hba *hba;

#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
#define TCMU_DEV_BIT_BLOCKED 2
#define TCMU_DEV_BIT_TMR_NOTIFY 3
#define TCM_DEV_BIT_PLUGGED 4
	unsigned long flags;

	struct uio_info uio_info;

	struct inode *inode;

	struct tcmu_mailbox *mb_addr;
	uint64_t dev_size;
	u32 cmdr_size;
	u32 cmdr_last_cleaned;
	/* Offset of data area from start of mb */
	/* Must add data_off and mb_addr to get the address */
	size_t data_off;
	size_t data_size;
	uint32_t max_blocks;
	size_t ring_size;

	struct mutex cmdr_lock;
	struct list_head qfull_queue;
	struct list_head tmr_queue;

	uint32_t dbi_max;
	uint32_t dbi_thresh;
	unsigned long *data_bitmap;
	struct xarray data_blocks;

	struct xarray commands;

	struct timer_list cmd_timer;
	unsigned int cmd_time_out;
	struct list_head inflight_queue;

	struct timer_list qfull_timer;
	int qfull_time_out;

	struct list_head timedout_entry;

	struct tcmu_nl_cmd curr_nl_cmd;

	char dev_config[TCMU_CONFIG_LEN];

	int nl_reply_supported;
};

#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)

#define CMDR_OFF sizeof(struct tcmu_mailbox)

struct tcmu_cmd {
	struct se_cmd *se_cmd;
	struct tcmu_dev *tcmu_dev;
	struct list_head queue_entry;

	uint16_t cmd_id;

	/* Can't use se_cmd when cleaning up expired cmds, because if
	   cmd has been completed then accessing se_cmd is off limits */
	uint32_t dbi_cnt;
	uint32_t dbi_bidi_cnt;
	uint32_t dbi_cur;
	uint32_t *dbi;

	uint32_t data_len_bidi;

	unsigned long deadline;

#define TCMU_CMD_BIT_EXPIRED 0
	unsigned long flags;
};

struct tcmu_tmr {
	struct list_head queue_entry;

	uint8_t tmr_type;
	uint32_t tmr_cmd_cnt;
	int16_t tmr_cmd_ids[];
};

/*
 * To avoid deadlock the mutex lock order should always be:
 *
 * mutex_lock(&root_udev_mutex);
 * ...
 * mutex_lock(&tcmu_dev->cmdr_lock);
 * mutex_unlock(&tcmu_dev->cmdr_lock);
 * ...
 * mutex_unlock(&root_udev_mutex);
 */
static DEFINE_MUTEX(root_udev_mutex);
static LIST_HEAD(root_udev);

static DEFINE_SPINLOCK(timed_out_udevs_lock);
static LIST_HEAD(timed_out_udevs);

static struct kmem_cache *tcmu_cmd_cache;

static atomic_t global_db_count = ATOMIC_INIT(0);
static struct delayed_work tcmu_unmap_work;
static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF;

static int tcmu_set_global_max_data_area(const char *str,
					 const struct kernel_param *kp)
{
	int ret, max_area_mb;

	ret = kstrtoint(str, 10, &max_area_mb);
	if (ret)
		return -EINVAL;

	if (max_area_mb <= 0) {
		pr_err("global_max_data_area must be larger than 0.\n");
		return -EINVAL;
	}

	tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb);
	if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
		schedule_delayed_work(&tcmu_unmap_work, 0);
	else
		cancel_delayed_work_sync(&tcmu_unmap_work);

	return 0;
}

static int tcmu_get_global_max_data_area(char *buffer,
					 const struct kernel_param *kp)
{
	return sprintf(buffer, "%d\n", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
}

static const struct kernel_param_ops tcmu_global_max_data_area_op = {
	.set = tcmu_set_global_max_data_area,
	.get = tcmu_get_global_max_data_area,
};

module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
		S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(global_max_data_area_mb,
		 "Max MBs allowed to be allocated to all the tcmu device's "
		 "data areas.");

static int tcmu_get_block_netlink(char *buffer,
				  const struct kernel_param *kp)
{
	return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
		       "blocked" : "unblocked");
}

static int tcmu_set_block_netlink(const char *str,
				  const struct kernel_param *kp)
{
	int ret;
	u8 val;

	ret = kstrtou8(str, 0, &val);
	if (ret < 0)
		return ret;

	if (val > 1) {
		pr_err("Invalid block netlink value %u\n", val);
		return -EINVAL;
	}

	tcmu_netlink_blocked = val;
	return 0;
}

static const struct kernel_param_ops tcmu_block_netlink_op = {
	.set = tcmu_set_block_netlink,
	.get = tcmu_get_block_netlink,
};

module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");
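
/*
 * Blocking and then resetting the netlink interface is intended for recovery
 * when the userspace daemon has gone away (or is stuck) while a netlink
 * command was still outstanding: write 1 to block_netlink first, then 1 to
 * reset_netlink so the waiting commands are aborted with -EINTR (see
 * tcmu_fail_netlink_cmd() below).
 */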
static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
{
	struct tcmu_dev *udev = nl_cmd->udev;

	if (!tcmu_netlink_blocked) {
		pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
		return -EBUSY;
	}

	if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
		pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
		nl_cmd->status = -EINTR;
		list_del(&nl_cmd->nl_list);
		complete(&nl_cmd->complete);
	}
	return 0;
}

static int tcmu_set_reset_netlink(const char *str,
				  const struct kernel_param *kp)
{
	struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
	int ret;
	u8 val;

	ret = kstrtou8(str, 0, &val);
	if (ret < 0)
		return ret;

	if (val != 1) {
		pr_err("Invalid reset netlink value %u\n", val);
		return -EINVAL;
	}

	mutex_lock(&tcmu_nl_cmd_mutex);
	list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
		ret = tcmu_fail_netlink_cmd(nl_cmd);
		if (ret)
			break;
	}
	mutex_unlock(&tcmu_nl_cmd_mutex);

	return ret;
}

static const struct kernel_param_ops tcmu_reset_netlink_op = {
	.set = tcmu_set_reset_netlink,
};

module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");

/* multicast group */
enum tcmu_multicast_groups {
	TCMU_MCGRP_CONFIG,
};

static const struct genl_multicast_group tcmu_mcgrps[] = {
	[TCMU_MCGRP_CONFIG] = { .name = "config", },
};

static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
	[TCMU_ATTR_DEVICE] = { .type = NLA_STRING },
	[TCMU_ATTR_MINOR] = { .type = NLA_U32 },
	[TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 },
	[TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 },
	[TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
};

static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
{
	struct tcmu_dev *udev = NULL;
	struct tcmu_nl_cmd *nl_cmd;
	int dev_id, rc, ret = 0;

	if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
	    !info->attrs[TCMU_ATTR_DEVICE_ID]) {
		printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
		return -EINVAL;
	}

	dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
	rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);

	mutex_lock(&tcmu_nl_cmd_mutex);
	list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
		if (nl_cmd->udev->se_dev.dev_index == dev_id) {
			udev = nl_cmd->udev;
			break;
		}
	}

	if (!udev) {
		pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
		       completed_cmd, rc, dev_id);
		ret = -ENODEV;
		goto unlock;
	}
	list_del(&nl_cmd->nl_list);

	pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
		 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
		 nl_cmd->status);

	if (nl_cmd->cmd != completed_cmd) {
		pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
		       udev->name, completed_cmd, nl_cmd->cmd);
		ret = -EINVAL;
		goto unlock;
	}

	nl_cmd->status = rc;
	complete(&nl_cmd->complete);
unlock:
	mutex_unlock(&tcmu_nl_cmd_mutex);
	return ret;
}

static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
}

static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
}

static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
				       struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
}

static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
		tcmu_kern_cmd_reply_supported =
			nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
		printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
		       tcmu_kern_cmd_reply_supported);
	}

	return 0;
}

static const struct genl_small_ops tcmu_genl_ops[] = {
	{
		.cmd = TCMU_CMD_SET_FEATURES,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = tcmu_genl_set_features,
	},
	{
		.cmd = TCMU_CMD_ADDED_DEVICE_DONE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = tcmu_genl_add_dev_done,
	},
	{
		.cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = tcmu_genl_rm_dev_done,
	},
	{
		.cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = tcmu_genl_reconfig_dev_done,
	},
};

/* Our generic netlink family */
static struct genl_family tcmu_genl_family __ro_after_init = {
	.module = THIS_MODULE,
	.hdrsize = 0,
	.name = "TCM-USER",
	.version = 2,
	.maxattr = TCMU_ATTR_MAX,
	.policy = tcmu_attr_policy,
	.mcgrps = tcmu_mcgrps,
	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
	.netnsok = true,
	.small_ops = tcmu_genl_ops,
	.n_small_ops = ARRAY_SIZE(tcmu_genl_ops),
};

#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
#define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
#define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
#define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])

static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	uint32_t i;

	for (i = 0; i < len; i++)
		clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
}
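
/*
 * Data-block bookkeeping: udev->data_bitmap tracks which block indices (dbi)
 * are in use, udev->data_blocks maps a dbi to its backing page, and each
 * tcmu_cmd records the dbis it owns in cmd->dbi[] so the bits can be cleared
 * again in tcmu_cmd_free_data() when the command completes or times out.
 */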
static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
				       struct tcmu_cmd *tcmu_cmd,
				       int prev_dbi, int *iov_cnt)
{
	struct page *page;
	int dbi;

	dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
	if (dbi == udev->dbi_thresh)
		return -1;

	page = xa_load(&udev->data_blocks, dbi);
	if (!page) {
		if (atomic_add_return(1, &global_db_count) >
		    tcmu_global_max_blocks)
			schedule_delayed_work(&tcmu_unmap_work, 0);

		/* try to get new page from the mm */
		page = alloc_page(GFP_NOIO);
		if (!page)
			goto err_alloc;

		if (xa_store(&udev->data_blocks, dbi, page, GFP_NOIO))
			goto err_insert;
	}

	if (dbi > udev->dbi_max)
		udev->dbi_max = dbi;

	set_bit(dbi, udev->data_bitmap);
	tcmu_cmd_set_dbi(tcmu_cmd, dbi);

	if (dbi != prev_dbi + 1)
		*iov_cnt += 1;

	return dbi;
err_insert:
	__free_page(page);
err_alloc:
	atomic_dec(&global_db_count);
	return -1;
}

static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
				 struct tcmu_cmd *tcmu_cmd, int dbi_cnt)
{
	/* start value of dbi + 1 must not be a valid dbi */
	int dbi = -2;
	int i, iov_cnt = 0;

	for (i = 0; i < dbi_cnt; i++) {
		dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, &iov_cnt);
		if (dbi < 0)
			return -1;
	}
	return iov_cnt;
}

static inline struct page *
tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
	return xa_load(&udev->data_blocks, dbi);
}

static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
	kfree(tcmu_cmd->dbi);
	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}

static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd)
{
	int i, len;
	struct se_cmd *se_cmd = cmd->se_cmd;

	cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);

	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
		for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++)
			len += se_cmd->t_bidi_data_sg[i].length;
		cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, DATA_BLOCK_SIZE);
		cmd->dbi_cnt += cmd->dbi_bidi_cnt;
		cmd->data_len_bidi = len;
	}
}

static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
			    struct iovec **iov, int prev_dbi, int len)
{
	/* Get the next dbi */
	int dbi = tcmu_cmd_get_dbi(cmd);

	/* Do not add more than DATA_BLOCK_SIZE to iov */
	if (len > DATA_BLOCK_SIZE)
		len = DATA_BLOCK_SIZE;

	/*
	 * The following code will gather and map the blocks to the same iovec
	 * when the blocks are all next to each other.
	 */
	if (dbi != prev_dbi + 1) {
		/* dbi is not next to previous dbi, so start new iov */
		if (prev_dbi >= 0)
			(*iov)++;
		/* write offset relative to mb_addr */
		(*iov)->iov_base = (void __user *)
				(udev->data_off + dbi * DATA_BLOCK_SIZE);
	}
	(*iov)->iov_len += len;

	return dbi;
}

static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
			    struct iovec **iov, int data_length)
{
	/* start value of dbi + 1 must not be a valid dbi */
	int dbi = -2;

	/* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */
	for (; data_length > 0; data_length -= DATA_BLOCK_SIZE)
		dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length);
}

static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;

	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO);
	if (!tcmu_cmd)
		return NULL;

	INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
	tcmu_cmd->se_cmd = se_cmd;
	tcmu_cmd->tcmu_dev = udev;

	tcmu_cmd_set_block_cnts(tcmu_cmd);
	tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
				GFP_NOIO);
	if (!tcmu_cmd->dbi) {
		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
		return NULL;
	}

	return tcmu_cmd;
}

static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
	unsigned long offset = offset_in_page(vaddr);
	void *start = vaddr - offset;

	size = round_up(size+offset, PAGE_SIZE);

	while (size) {
		flush_dcache_page(vmalloc_to_page(start));
		start += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
	int diff = head - tail;

	if (diff >= 0)
		return diff;
	else
		return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* Keep 1 byte unused or we can't tell full from empty */
	return (size - spc_used(head, tail, size) - 1);
}

static inline size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
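
/*
 * Worked example of the ring accounting above (illustration only): with
 * size = 8, head = 2 and tail = 6, spc_used() returns 8 + (2 - 6) = 4 and
 * spc_free() returns 8 - 4 - 1 = 3. The one byte deliberately kept unused is
 * what lets head == tail always mean "empty" rather than "full".
 */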

#define TCMU_SG_TO_DATA_AREA 1
#define TCMU_DATA_AREA_TO_SG 2

static inline void tcmu_copy_data(struct tcmu_dev *udev,
				  struct tcmu_cmd *tcmu_cmd, uint32_t direction,
				  struct scatterlist *sg, unsigned int sg_nents,
				  struct iovec **iov, size_t data_len)
{
	/* start value of dbi + 1 must not be a valid dbi */
	int dbi = -2;
	size_t block_remaining, cp_len;
	struct sg_mapping_iter sg_iter;
	unsigned int sg_flags;
	struct page *page;
	void *data_page_start, *data_addr;

	if (direction == TCMU_SG_TO_DATA_AREA)
		sg_flags = SG_MITER_ATOMIC | SG_MITER_FROM_SG;
	else
		sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
	sg_miter_start(&sg_iter, sg, sg_nents, sg_flags);

	while (data_len) {
		if (direction == TCMU_SG_TO_DATA_AREA)
			dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi,
					       data_len);
		else
			dbi = tcmu_cmd_get_dbi(tcmu_cmd);
		page = tcmu_get_block_page(udev, dbi);
		if (direction == TCMU_DATA_AREA_TO_SG)
			flush_dcache_page(page);
		data_page_start = kmap_atomic(page);
		block_remaining = DATA_BLOCK_SIZE;

		while (block_remaining && data_len) {
			if (!sg_miter_next(&sg_iter)) {
				/* set length to 0 to abort outer loop */
				data_len = 0;
				pr_debug("tcmu_move_data: aborting data copy due to exhausted sg_list\n");
				break;
			}
			cp_len = min3(sg_iter.length, block_remaining, data_len);

			data_addr = data_page_start +
				    DATA_BLOCK_SIZE - block_remaining;
			if (direction == TCMU_SG_TO_DATA_AREA)
				memcpy(data_addr, sg_iter.addr, cp_len);
			else
				memcpy(sg_iter.addr, data_addr, cp_len);

			data_len -= cp_len;
			block_remaining -= cp_len;
			sg_iter.consumed = cp_len;
		}
		sg_miter_stop(&sg_iter);

		kunmap_atomic(data_page_start);
		if (direction == TCMU_SG_TO_DATA_AREA)
			flush_dcache_page(page);
	}
}

static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
			      struct iovec **iov)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;

	tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg,
		       se_cmd->t_data_nents, iov, se_cmd->data_length);
}

static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
			     bool bidi, uint32_t read_len)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	struct scatterlist *data_sg;
	unsigned int data_nents;

	if (!bidi) {
		data_sg = se_cmd->t_data_sg;
		data_nents = se_cmd->t_data_nents;
	} else {
		/*
		 * For bidi case, the first count blocks are for Data-Out
		 * buffer blocks, and before gathering the Data-In buffer
		 * the Data-Out buffer blocks should be skipped.
		 */
		tcmu_cmd_set_dbi_cur(tcmu_cmd,
				     tcmu_cmd->dbi_cnt - tcmu_cmd->dbi_bidi_cnt);

		data_sg = se_cmd->t_bidi_data_sg;
		data_nents = se_cmd->t_bidi_data_nents;
	}

	tcmu_copy_data(udev, tcmu_cmd, TCMU_DATA_AREA_TO_SG, data_sg,
		       data_nents, NULL, read_len);
}

static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
{
	return thresh - bitmap_weight(bitmap, thresh);
}

/*
 * We can't queue a command until we have space available on the cmd ring.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size)
{
	struct tcmu_mailbox *mb = udev->mb_addr;
	size_t space, cmd_needed;
	u32 cmd_head;

	tcmu_flush_dcache_range(mb, sizeof(*mb));

	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/*
	 * If cmd end-of-ring space is too small then we need space for a NOP plus
	 * original cmd - cmds are internally contiguous.
	 */
	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
	if (space < cmd_needed) {
		pr_debug("no cmd space: %u %u %u\n", cmd_head,
			 udev->cmdr_last_cleaned, udev->cmdr_size);
		return false;
	}
	return true;
}

/*
 * We have to allocate data buffers before we can queue a command.
 * Returns -1 on error (not enough space) or number of needed iovs on success
 *
 * Called with ring lock held.
 */
static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
				 int *iov_bidi_cnt)
{
	int space, iov_cnt = 0, ret = 0;

	if (!cmd->dbi_cnt)
		goto wr_iov_cnts;

	/* try to check and get the data blocks as needed */
	space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
	if (space < cmd->dbi_cnt) {
		unsigned long blocks_left =
			(udev->max_blocks - udev->dbi_thresh) + space;

		if (blocks_left < cmd->dbi_cnt) {
			pr_debug("no data space: only %lu available, but ask for %lu\n",
				 blocks_left * DATA_BLOCK_SIZE,
				 cmd->dbi_cnt * DATA_BLOCK_SIZE);
			return -1;
		}

		udev->dbi_thresh += cmd->dbi_cnt;
		if (udev->dbi_thresh > udev->max_blocks)
			udev->dbi_thresh = udev->max_blocks;
	}

	iov_cnt = tcmu_get_empty_blocks(udev, cmd,
					cmd->dbi_cnt - cmd->dbi_bidi_cnt);
	if (iov_cnt < 0)
		return -1;

	if (cmd->dbi_bidi_cnt) {
		ret = tcmu_get_empty_blocks(udev, cmd, cmd->dbi_bidi_cnt);
		if (ret < 0)
			return -1;
	}
wr_iov_cnts:
	*iov_bidi_cnt = ret;
	return iov_cnt + ret;
}

static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
{
	return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
		   sizeof(struct tcmu_cmd_entry));
}

static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
					   size_t base_command_size)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t command_size;

	command_size = base_command_size +
		       round_up(scsi_command_size(se_cmd->t_task_cdb),
				TCMU_OP_ALIGN_SIZE);

	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));

	return command_size;
}
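
/*
 * A ring entry is therefore laid out as the tcmu_cmd_entry header (never
 * smaller than sizeof(struct tcmu_cmd_entry) so the response/sense data
 * fits, and larger if req.iov[] needs the room), followed by the CDB rounded
 * up to TCMU_OP_ALIGN_SIZE. As an illustration, a command using 4 iovecs and
 * a 10-byte CDB occupies
 * max(offsetof(entry, req.iov[4]), sizeof(entry)) + round_up(10, align).
 */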
static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
				 struct timer_list *timer)
{
	if (!tmo)
		return;

	tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
	if (!timer_pending(timer))
		mod_timer(timer, tcmu_cmd->deadline);

	pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
		 tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
}

static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	unsigned int tmo;

	/*
	 * For backwards compat if qfull_time_out is not set use
	 * cmd_time_out and if that's not set use the default time out.
	 */
	if (!udev->qfull_time_out)
		return -ETIMEDOUT;
	else if (udev->qfull_time_out > 0)
		tmo = udev->qfull_time_out;
	else if (udev->cmd_time_out)
		tmo = udev->cmd_time_out;
	else
		tmo = TCMU_TIME_OUT;

	tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);

	list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
	pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
		 tcmu_cmd, udev->name);
	return 0;
}

static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
{
	struct tcmu_cmd_entry_hdr *hdr;
	struct tcmu_mailbox *mb = udev->mb_addr;
	uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/* Insert a PAD if end-of-ring space is too small */
	if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) {
		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

		hdr = (void *) mb + CMDR_OFF + cmd_head;
		tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD);
		tcmu_hdr_set_len(&hdr->len_op, pad_size);
		hdr->cmd_id = 0; /* not used for PAD */
		hdr->kflags = 0;
		hdr->uflags = 0;
		tcmu_flush_dcache_range(hdr, sizeof(*hdr));

		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
		tcmu_flush_dcache_range(mb, sizeof(*mb));

		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
		WARN_ON(cmd_head != 0);
	}

	return cmd_head;
}

static void tcmu_unplug_device(struct se_dev_plug *se_plug)
{
	struct se_device *se_dev = se_plug->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);

	clear_bit(TCM_DEV_BIT_PLUGGED, &udev->flags);
	uio_event_notify(&udev->uio_info);
}

static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev)
{
	struct tcmu_dev *udev = TCMU_DEV(se_dev);

	if (!test_and_set_bit(TCM_DEV_BIT_PLUGGED, &udev->flags))
		return &udev->se_plug;

	return NULL;
}

/**
 * queue_cmd_ring - queue cmd to ring or internally
 * @tcmu_cmd: cmd to queue
 * @scsi_err: TCM error code if failure (-1) returned.
 *
 * Returns:
 * -1 we cannot queue internally or to the ring.
 *  0 success
 *  1 internally queued to wait for ring memory to free.
 */
static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t base_command_size, command_size;
	struct tcmu_mailbox *mb = udev->mb_addr;
	struct tcmu_cmd_entry *entry;
	struct iovec *iov;
	int iov_cnt, iov_bidi_cnt;
	uint32_t cmd_id, cmd_head;
	uint64_t cdb_off;
	/* size of data buffer needed */
	size_t data_length = (size_t)tcmu_cmd->dbi_cnt * DATA_BLOCK_SIZE;

	*scsi_err = TCM_NO_SENSE;

	if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) {
		*scsi_err = TCM_LUN_BUSY;
		return -1;
	}

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		*scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -1;
	}

	if (!list_empty(&udev->qfull_queue))
		goto queue;

	if (data_length > udev->data_size) {
		pr_warn("TCMU: Request of size %zu is too big for %zu data area\n",
			data_length, udev->data_size);
		*scsi_err = TCM_INVALID_CDB_FIELD;
		return -1;
	}

	iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt);
	if (iov_cnt < 0)
		goto free_and_queue;

	/*
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 */
	base_command_size = tcmu_cmd_get_base_cmd_size(iov_cnt);
	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

	if (command_size > (udev->cmdr_size / 2)) {
		pr_warn("TCMU: Request of size %zu is too big for %u cmd ring\n",
			command_size, udev->cmdr_size);
		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
		*scsi_err = TCM_INVALID_CDB_FIELD;
		return -1;
	}

	if (!is_ring_space_avail(udev, command_size))
		/*
		 * Don't leave commands partially setup because the unmap
		 * thread might need the blocks to make forward progress.
		 */
		goto free_and_queue;

	if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff),
		     GFP_NOWAIT) < 0) {
		pr_err("tcmu: Could not allocate cmd id.\n");

		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
		*scsi_err = TCM_OUT_OF_RESOURCES;
		return -1;
	}
	tcmu_cmd->cmd_id = cmd_id;

	pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
		 tcmu_cmd, udev->name);

	cmd_head = ring_insert_padding(udev, command_size);

	entry = (void *) mb + CMDR_OFF + cmd_head;
	memset(entry, 0, command_size);
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);

	/* prepare iov list and copy data to data area if necessary */
	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	iov = &entry->req.iov[0];

	if (se_cmd->data_direction == DMA_TO_DEVICE ||
	    se_cmd->se_cmd_flags & SCF_BIDI)
		scatter_data_area(udev, tcmu_cmd, &iov);
	else
		tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length);

	entry->req.iov_cnt = iov_cnt - iov_bidi_cnt;

	/* Handle BIDI commands */
	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		iov++;
		tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi);
		entry->req.iov_bidi_cnt = iov_bidi_cnt;
	}

	tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);

	entry->hdr.cmd_id = tcmu_cmd->cmd_id;

	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);

	/* All offsets relative to mb_addr, not start of entry! */
	cdb_off = CMDR_OFF + cmd_head + base_command_size;
	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
	entry->req.cdb_off = cdb_off;
	tcmu_flush_dcache_range(entry, command_size);

	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);

	if (!test_bit(TCM_DEV_BIT_PLUGGED, &udev->flags))
		uio_event_notify(&udev->uio_info);

	return 0;

free_and_queue:
	tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
	tcmu_cmd_reset_dbi_cur(tcmu_cmd);

queue:
	if (add_to_qfull_queue(tcmu_cmd)) {
		*scsi_err = TCM_OUT_OF_RESOURCES;
		return -1;
	}

	return 1;
}

/**
 * queue_tmr_ring - queue tmr info to ring or internally
 * @udev: related tcmu_dev
 * @tmr: tcmu_tmr containing tmr info to queue
 *
 * Returns:
 *  0 success
 *  1 internally queued to wait for ring memory to free.
 */
static int
queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr)
{
	struct tcmu_tmr_entry *entry;
	int cmd_size;
	int id_list_sz;
	struct tcmu_mailbox *mb = udev->mb_addr;
	uint32_t cmd_head;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
		goto out_free;

	id_list_sz = sizeof(tmr->tmr_cmd_ids[0]) * tmr->tmr_cmd_cnt;
	cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE);

	if (!list_empty(&udev->tmr_queue) ||
	    !is_ring_space_avail(udev, cmd_size)) {
		list_add_tail(&tmr->queue_entry, &udev->tmr_queue);
		pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n",
			 tmr, udev->name);
		return 1;
	}

	cmd_head = ring_insert_padding(udev, cmd_size);

	entry = (void *)mb + CMDR_OFF + cmd_head;
	memset(entry, 0, cmd_size);
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR);
	tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size);
	entry->tmr_type = tmr->tmr_type;
	entry->cmd_cnt = tmr->tmr_cmd_cnt;
	memcpy(&entry->cmd_ids[0], &tmr->tmr_cmd_ids[0], id_list_sz);
	tcmu_flush_dcache_range(entry, cmd_size);

	UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	uio_event_notify(&udev->uio_info);

out_free:
	kfree(tmr);

	return 0;
}

static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	sense_reason_t scsi_ret = TCM_CHECK_CONDITION_ABORT_CMD;
	int ret = -1;

	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
	if (!tcmu_cmd)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	mutex_lock(&udev->cmdr_lock);
	if (!(se_cmd->transport_state & CMD_T_ABORTED))
		ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
	if (ret < 0)
		tcmu_free_cmd(tcmu_cmd);
	else
		se_cmd->priv = tcmu_cmd;
	mutex_unlock(&udev->cmdr_lock);
	return scsi_ret;
}

static void tcmu_set_next_deadline(struct list_head *queue,
				   struct timer_list *timer)
{
	struct tcmu_cmd *cmd;

	if (!list_empty(queue)) {
		cmd = list_first_entry(queue, struct tcmu_cmd, queue_entry);
		mod_timer(timer, cmd->deadline);
	} else
		del_timer(timer);
}

static int
tcmu_tmr_type(enum tcm_tmreq_table tmf)
{
	switch (tmf) {
	case TMR_ABORT_TASK:		return TCMU_TMR_ABORT_TASK;
	case TMR_ABORT_TASK_SET:	return TCMU_TMR_ABORT_TASK_SET;
	case TMR_CLEAR_ACA:		return TCMU_TMR_CLEAR_ACA;
	case TMR_CLEAR_TASK_SET:	return TCMU_TMR_CLEAR_TASK_SET;
	case TMR_LUN_RESET:		return TCMU_TMR_LUN_RESET;
	case TMR_TARGET_WARM_RESET:	return TCMU_TMR_TARGET_WARM_RESET;
	case TMR_TARGET_COLD_RESET:	return TCMU_TMR_TARGET_COLD_RESET;
	case TMR_LUN_RESET_PRO:		return TCMU_TMR_LUN_RESET_PRO;
	default:			return TCMU_TMR_UNKNOWN;
	}
}

static void
tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
		struct list_head *cmd_list)
{
	int i = 0, cmd_cnt = 0;
	bool unqueued = false;
	uint16_t *cmd_ids = NULL;
	struct tcmu_cmd *cmd;
	struct se_cmd *se_cmd;
	struct tcmu_tmr *tmr;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);

	mutex_lock(&udev->cmdr_lock);

	/* First we check for aborted commands in qfull_queue */
	list_for_each_entry(se_cmd, cmd_list, state_list) {
		i++;
		if (!se_cmd->priv)
			continue;
		cmd = se_cmd->priv;
		/* Commands on qfull queue have no id yet */
		if (cmd->cmd_id) {
			cmd_cnt++;
			continue;
		}
		pr_debug("Removing aborted command %p from queue on dev %s.\n",
			 cmd, udev->name);

		list_del_init(&cmd->queue_entry);
		tcmu_free_cmd(cmd);
		se_cmd->priv = NULL;
		target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
		unqueued = true;
	}
	if (unqueued)
		tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);

	if (!test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags))
		goto unlock;

	pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n",
		 tcmu_tmr_type(tmf), udev->name, i, cmd_cnt);

	tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_NOIO);
	if (!tmr)
		goto unlock;

	tmr->tmr_type = tcmu_tmr_type(tmf);
	tmr->tmr_cmd_cnt = cmd_cnt;

	if (cmd_cnt != 0) {
		cmd_cnt = 0;
		list_for_each_entry(se_cmd, cmd_list, state_list) {
			if (!se_cmd->priv)
				continue;
			cmd = se_cmd->priv;
			if (cmd->cmd_id)
				tmr->tmr_cmd_ids[cmd_cnt++] = cmd->cmd_id;
		}
	}

	queue_tmr_ring(udev, tmr);

unlock:
	mutex_unlock(&udev->cmdr_lock);
}

static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	struct tcmu_dev *udev = cmd->tcmu_dev;
	bool read_len_valid = false;
	uint32_t read_len;

	/*
	 * cmd has been completed already from timeout, just reclaim
	 * data area space and free cmd
	 */
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		WARN_ON_ONCE(se_cmd);
		goto out;
	}

	list_del_init(&cmd->queue_entry);

	tcmu_cmd_reset_dbi_cur(cmd);

	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
			cmd->se_cmd);
		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
		goto done;
	}

	read_len = se_cmd->data_length;
	if (se_cmd->data_direction == DMA_FROM_DEVICE &&
	    (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
		read_len_valid = true;
		if (entry->rsp.read_len < read_len)
			read_len = entry->rsp.read_len;
	}

	if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
		transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
		if (!read_len_valid)
			goto done;
		else
			se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
	}
	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		/* Get Data-In buffer before clean up */
		gather_data_area(udev, cmd, true, read_len);
	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		gather_data_area(udev, cmd, false, read_len);
	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
		/* TODO: */
	} else if (se_cmd->data_direction != DMA_NONE) {
		pr_warn("TCMU: data direction was %d!\n",
			se_cmd->data_direction);
	}

done:
	se_cmd->priv = NULL;
	if (read_len_valid) {
		pr_debug("read_len = %d\n", read_len);
		target_complete_cmd_with_length(cmd->se_cmd,
					entry->rsp.scsi_status, read_len);
	} else
		target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);

out:
	tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
	tcmu_free_cmd(cmd);
}

static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
{
	struct tcmu_tmr *tmr, *tmp;
	LIST_HEAD(tmrs);

	if (list_empty(&udev->tmr_queue))
		return 1;

	pr_debug("running %s's tmr queue\n", udev->name);

	list_splice_init(&udev->tmr_queue, &tmrs);

	list_for_each_entry_safe(tmr, tmp, &tmrs, queue_entry) {
		list_del_init(&tmr->queue_entry);

		pr_debug("removing tmr %p on dev %s from queue\n",
			 tmr, udev->name);

		if (queue_tmr_ring(udev, tmr)) {
			pr_debug("ran out of space during tmr queue run\n");
			/*
			 * tmr was requeued, so just put all tmrs back in
			 * the queue
			 */
			list_splice_tail(&tmrs, &udev->tmr_queue);
			return 0;
		}
	}

	return 1;
}

static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
	struct tcmu_mailbox *mb;
	struct tcmu_cmd *cmd;
	bool free_space = false;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		pr_err("ring broken, not handling completions\n");
		return 0;
	}

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {

		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;

		/*
		 * Flush max. up to end of cmd ring since current entry might
		 * be a padding that is shorter than sizeof(*entry)
		 */
		size_t ring_left = head_to_end(udev->cmdr_last_cleaned,
					       udev->cmdr_size);
		tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
					ring_left : sizeof(*entry));

		free_space = true;

		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD ||
		    tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_TMR) {
			UPDATE_HEAD(udev->cmdr_last_cleaned,
				    tcmu_hdr_get_len(entry->hdr.len_op),
				    udev->cmdr_size);
			continue;
		}
		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

		cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
		if (!cmd) {
			pr_err("cmd_id %u not found, ring is broken\n",
			       entry->hdr.cmd_id);
			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
			break;
		}

		tcmu_handle_completion(cmd, entry);

		UPDATE_HEAD(udev->cmdr_last_cleaned,
			    tcmu_hdr_get_len(entry->hdr.len_op),
			    udev->cmdr_size);
	}
	if (free_space)
		free_space = tcmu_run_tmr_queue(udev);

	if (atomic_read(&global_db_count) > tcmu_global_max_blocks &&
	    xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
		/*
		 * Allocated blocks exceeded global block limit, currently no
		 * more pending or waiting commands so try to reclaim blocks.
		 */
		schedule_delayed_work(&tcmu_unmap_work, 0);
	}
	if (udev->cmd_time_out)
		tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);

	return free_space;
}

static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
{
	struct se_cmd *se_cmd;

	if (!time_after_eq(jiffies, cmd->deadline))
		return;

	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
	list_del_init(&cmd->queue_entry);
	se_cmd = cmd->se_cmd;
	se_cmd->priv = NULL;
	cmd->se_cmd = NULL;

	pr_debug("Timing out inflight cmd %u on dev %s.\n",
		 cmd->cmd_id, cmd->tcmu_dev->name);

	target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
}

static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
{
	struct se_cmd *se_cmd;

	if (!time_after_eq(jiffies, cmd->deadline))
		return;

	pr_debug("Timing out queued cmd %p on dev %s.\n",
		 cmd, cmd->tcmu_dev->name);

	list_del_init(&cmd->queue_entry);
	se_cmd = cmd->se_cmd;
	tcmu_free_cmd(cmd);

	se_cmd->priv = NULL;
	target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
}

static void tcmu_device_timedout(struct tcmu_dev *udev)
{
	spin_lock(&timed_out_udevs_lock);
	if (list_empty(&udev->timedout_entry))
		list_add_tail(&udev->timedout_entry, &timed_out_udevs);
	spin_unlock(&timed_out_udevs_lock);

	schedule_delayed_work(&tcmu_unmap_work, 0);
}

static void tcmu_cmd_timedout(struct timer_list *t)
{
	struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);

	pr_debug("%s cmd timeout has expired\n", udev->name);
	tcmu_device_timedout(udev);
}

static void tcmu_qfull_timedout(struct timer_list *t)
{
	struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);

	pr_debug("%s qfull timeout has expired\n", udev->name);
	tcmu_device_timedout(udev);
}

static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct tcmu_hba *tcmu_hba;

	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
	if (!tcmu_hba)
		return -ENOMEM;

	tcmu_hba->host_id = host_id;
	hba->hba_ptr = tcmu_hba;

	return 0;
}

static void tcmu_detach_hba(struct se_hba *hba)
{
	kfree(hba->hba_ptr);
	hba->hba_ptr = NULL;
}
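
/*
 * Device lifetime: tcmu_alloc_device() takes the initial kref,
 * tcmu_configure_device() takes an extra one for the uio/mmap path, and
 * tcmu_vma_open()/tcmu_vma_close() pin the device per mapping. The struct is
 * only freed via tcmu_dev_kref_release() -> call_rcu() once all of these
 * references have been dropped.
 */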
static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
	struct tcmu_dev *udev;

	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
	if (!udev)
		return NULL;
	kref_init(&udev->kref);

	udev->name = kstrdup(name, GFP_KERNEL);
	if (!udev->name) {
		kfree(udev);
		return NULL;
	}

	udev->hba = hba;
	udev->cmd_time_out = TCMU_TIME_OUT;
	udev->qfull_time_out = -1;

	udev->max_blocks = DATA_BLOCK_BITS_DEF;
	mutex_init(&udev->cmdr_lock);

	INIT_LIST_HEAD(&udev->node);
	INIT_LIST_HEAD(&udev->timedout_entry);
	INIT_LIST_HEAD(&udev->qfull_queue);
	INIT_LIST_HEAD(&udev->tmr_queue);
	INIT_LIST_HEAD(&udev->inflight_queue);
	xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1);

	timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
	timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);

	xa_init(&udev->data_blocks);

	return &udev->se_dev;
}

static void tcmu_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct tcmu_dev *udev = TCMU_DEV(dev);

	kfree(udev->uio_info.name);
	kfree(udev->name);
	kfree(udev);
}

static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		kmem_cache_free(tcmu_cmd_cache, cmd);
		return 0;
	}
	return -EINVAL;
}

static void tcmu_blocks_release(struct xarray *blocks, unsigned long first,
				unsigned long last)
{
	XA_STATE(xas, blocks, first);
	struct page *page;

	xas_lock(&xas);
	xas_for_each(&xas, page, last) {
		xas_store(&xas, NULL);
		__free_page(page);
		atomic_dec(&global_db_count);
	}
	xas_unlock(&xas);
}

static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
{
	struct tcmu_tmr *tmr, *tmp;

	list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) {
		list_del_init(&tmr->queue_entry);
		kfree(tmr);
	}
}

static void tcmu_dev_kref_release(struct kref *kref)
{
	struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
	struct se_device *dev = &udev->se_dev;
	struct tcmu_cmd *cmd;
	bool all_expired = true;
	unsigned long i;

	vfree(udev->mb_addr);
	udev->mb_addr = NULL;

	spin_lock_bh(&timed_out_udevs_lock);
	if (!list_empty(&udev->timedout_entry))
		list_del(&udev->timedout_entry);
	spin_unlock_bh(&timed_out_udevs_lock);

	/* Upper layer should drain all requests before calling this */
	mutex_lock(&udev->cmdr_lock);
	xa_for_each(&udev->commands, i, cmd) {
		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
			all_expired = false;
	}
	/* There can be left over TMR cmds. Remove them. */
	tcmu_remove_all_queued_tmr(udev);
	if (!list_empty(&udev->qfull_queue))
		all_expired = false;
	xa_destroy(&udev->commands);
	WARN_ON(!all_expired);

	tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
	bitmap_free(udev->data_bitmap);
	mutex_unlock(&udev->cmdr_lock);

	pr_debug("dev_kref_release\n");

	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}

static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
	struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
	LIST_HEAD(cmds);
	sense_reason_t scsi_ret;
	int ret;

	if (list_empty(&udev->qfull_queue))
		return;

	pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);

	list_splice_init(&udev->qfull_queue, &cmds);

	list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
		list_del_init(&tcmu_cmd->queue_entry);

		pr_debug("removing cmd %p on dev %s from queue\n",
			 tcmu_cmd, udev->name);

		if (fail) {
			/*
			 * We were not able to even start the command, so
			 * fail with busy to allow a retry in case runner
			 * was only temporarily down. If the device is being
			 * removed then LIO core will do the right thing and
			 * fail the retry.
			 */
			tcmu_cmd->se_cmd->priv = NULL;
			target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
			tcmu_free_cmd(tcmu_cmd);
			continue;
		}

		ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
		if (ret < 0) {
			pr_debug("cmd %p on dev %s failed with %u\n",
				 tcmu_cmd, udev->name, scsi_ret);
			/*
			 * Ignore scsi_ret for now. target_complete_cmd
			 * drops it.
			 */
			tcmu_cmd->se_cmd->priv = NULL;
			target_complete_cmd(tcmu_cmd->se_cmd,
					    SAM_STAT_CHECK_CONDITION);
			tcmu_free_cmd(tcmu_cmd);
		} else if (ret > 0) {
			pr_debug("ran out of space during cmdr queue run\n");
			/*
			 * cmd was requeued, so just put all cmds back in
			 * the queue
			 */
			list_splice_tail(&cmds, &udev->qfull_queue);
			break;
		}
	}

	tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
}

static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	mutex_lock(&udev->cmdr_lock);
	if (tcmu_handle_completions(udev))
		run_qfull_queue(udev, false);
	mutex_unlock(&udev->cmdr_lock);

	return 0;
}

/*
 * mmap code from uio.c. Copied here because we want to hook mmap()
 * and this stuff must come along.
 */
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}

static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
	struct page *page;

	mutex_lock(&udev->cmdr_lock);
	page = tcmu_get_block_page(udev, dbi);
	if (likely(page)) {
		mutex_unlock(&udev->cmdr_lock);
		return page;
	}

	/*
	 * Userspace messed up and passed in an address not in the
	 * data iov passed to it.
	 */
1773 */ 1774 pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n", 1775 dbi, udev->name); 1776 page = NULL; 1777 mutex_unlock(&udev->cmdr_lock); 1778 1779 return page; 1780 } 1781 1782 static void tcmu_vma_open(struct vm_area_struct *vma) 1783 { 1784 struct tcmu_dev *udev = vma->vm_private_data; 1785 1786 pr_debug("vma_open\n"); 1787 1788 kref_get(&udev->kref); 1789 } 1790 1791 static void tcmu_vma_close(struct vm_area_struct *vma) 1792 { 1793 struct tcmu_dev *udev = vma->vm_private_data; 1794 1795 pr_debug("vma_close\n"); 1796 1797 /* release ref from tcmu_vma_open */ 1798 kref_put(&udev->kref, tcmu_dev_kref_release); 1799 } 1800 1801 static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf) 1802 { 1803 struct tcmu_dev *udev = vmf->vma->vm_private_data; 1804 struct uio_info *info = &udev->uio_info; 1805 struct page *page; 1806 unsigned long offset; 1807 void *addr; 1808 1809 int mi = tcmu_find_mem_index(vmf->vma); 1810 if (mi < 0) 1811 return VM_FAULT_SIGBUS; 1812 1813 /* 1814 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE 1815 * to use mem[N]. 1816 */ 1817 offset = (vmf->pgoff - mi) << PAGE_SHIFT; 1818 1819 if (offset < udev->data_off) { 1820 /* For the vmalloc()ed cmd area pages */ 1821 addr = (void *)(unsigned long)info->mem[mi].addr + offset; 1822 page = vmalloc_to_page(addr); 1823 } else { 1824 uint32_t dbi; 1825 1826 /* For the dynamically growing data area pages */ 1827 dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE; 1828 page = tcmu_try_get_block_page(udev, dbi); 1829 if (!page) 1830 return VM_FAULT_SIGBUS; 1831 } 1832 1833 get_page(page); 1834 vmf->page = page; 1835 return 0; 1836 } 1837 1838 static const struct vm_operations_struct tcmu_vm_ops = { 1839 .open = tcmu_vma_open, 1840 .close = tcmu_vma_close, 1841 .fault = tcmu_vma_fault, 1842 }; 1843 1844 static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) 1845 { 1846 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1847 1848 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 1849 vma->vm_ops = &tcmu_vm_ops; 1850 1851 vma->vm_private_data = udev; 1852 1853 /* Ensure the mmap is exactly the right size */ 1854 if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT)) 1855 return -EINVAL; 1856 1857 tcmu_vma_open(vma); 1858 1859 return 0; 1860 } 1861 1862 static int tcmu_open(struct uio_info *info, struct inode *inode) 1863 { 1864 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1865 1866 /* O_EXCL not supported for char devs, so fake it? */ 1867 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags)) 1868 return -EBUSY; 1869 1870 udev->inode = inode; 1871 1872 pr_debug("open\n"); 1873 1874 return 0; 1875 } 1876 1877 static int tcmu_release(struct uio_info *info, struct inode *inode) 1878 { 1879 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1880 1881 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); 1882 1883 pr_debug("close\n"); 1884 1885 return 0; 1886 } 1887 1888 static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) 1889 { 1890 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 1891 1892 if (!tcmu_kern_cmd_reply_supported) 1893 return 0; 1894 1895 if (udev->nl_reply_supported <= 0) 1896 return 0; 1897 1898 mutex_lock(&tcmu_nl_cmd_mutex); 1899 1900 if (tcmu_netlink_blocked) { 1901 mutex_unlock(&tcmu_nl_cmd_mutex); 1902 pr_warn("Failing nl cmd %d on %s. 
			udev->name);
		return -EAGAIN;
	}

	if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
		mutex_unlock(&tcmu_nl_cmd_mutex);
		pr_warn("netlink cmd %d already executing on %s\n",
			nl_cmd->cmd, udev->name);
		return -EBUSY;
	}

	memset(nl_cmd, 0, sizeof(*nl_cmd));
	nl_cmd->cmd = cmd;
	nl_cmd->udev = udev;
	init_completion(&nl_cmd->complete);
	INIT_LIST_HEAD(&nl_cmd->nl_list);

	list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);

	mutex_unlock(&tcmu_nl_cmd_mutex);
	return 0;
}

static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev)
{
	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;

	if (!tcmu_kern_cmd_reply_supported)
		return;

	if (udev->nl_reply_supported <= 0)
		return;

	mutex_lock(&tcmu_nl_cmd_mutex);

	list_del(&nl_cmd->nl_list);
	memset(nl_cmd, 0, sizeof(*nl_cmd));

	mutex_unlock(&tcmu_nl_cmd_mutex);
}

static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
{
	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
	int ret;

	if (!tcmu_kern_cmd_reply_supported)
		return 0;

	if (udev->nl_reply_supported <= 0)
		return 0;

	pr_debug("sleeping for nl reply\n");
	wait_for_completion(&nl_cmd->complete);

	mutex_lock(&tcmu_nl_cmd_mutex);
	nl_cmd->cmd = TCMU_CMD_UNSPEC;
	ret = nl_cmd->status;
	mutex_unlock(&tcmu_nl_cmd_mutex);

	return ret;
}

static int tcmu_netlink_event_init(struct tcmu_dev *udev,
				   enum tcmu_genl_cmd cmd,
				   struct sk_buff **buf, void **hdr)
{
	struct sk_buff *skb;
	void *msg_header;
	int ret = -ENOMEM;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return ret;

	msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
	if (!msg_header)
		goto free_skb;

	ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
	if (ret < 0)
		goto free_skb;

	*buf = skb;
	*hdr = msg_header;
	return ret;

free_skb:
	nlmsg_free(skb);
	return ret;
}

static int tcmu_netlink_event_send(struct tcmu_dev *udev,
				   enum tcmu_genl_cmd cmd,
				   struct sk_buff *skb, void *msg_header)
{
	int ret;

	genlmsg_end(skb, msg_header);

	ret = tcmu_init_genl_cmd_reply(udev, cmd);
	if (ret) {
		nlmsg_free(skb);
		return ret;
	}

	ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
				      TCMU_MCGRP_CONFIG, GFP_KERNEL);

	/* Wait during an add as the listener may not be up yet */
	if (ret == 0 ||
	    (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
		return tcmu_wait_genl_cmd_reply(udev);
	else
		tcmu_destroy_genl_cmd_reply(udev);

	return ret;
}
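
/*
 * Device lifecycle events (add/remove/reconfig) are built with
 * tcmu_netlink_event_init() and multicast to the "config" group by
 * tcmu_netlink_event_send(); when the daemon has advertised reply support,
 * the kernel then blocks in tcmu_wait_genl_cmd_reply() until the matching
 * *_DONE command arrives (or the interface is reset).
 */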
tcmu_send_dev_remove_event(struct tcmu_dev *udev)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
				       skb, msg_header);
}

static int tcmu_update_uio_info(struct tcmu_dev *udev)
{
	struct tcmu_hba *hba = udev->hba->hba_ptr;
	struct uio_info *info;
	char *str;

	info = &udev->uio_info;

	if (udev->dev_config[0])
		str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
				udev->name, udev->dev_config);
	else
		str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
				udev->name);
	if (!str)
		return -ENOMEM;

	/* If the old string exists, free it */
	kfree(info->name);
	info->name = str;

	return 0;
}

static int tcmu_configure_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	struct uio_info *info;
	struct tcmu_mailbox *mb;
	int ret = 0;

	ret = tcmu_update_uio_info(udev);
	if (ret)
		return ret;

	info = &udev->uio_info;

	mutex_lock(&udev->cmdr_lock);
	udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
	mutex_unlock(&udev->cmdr_lock);
	if (!udev->data_bitmap) {
		ret = -ENOMEM;
		goto err_bitmap_alloc;
	}

	udev->mb_addr = vzalloc(CMDR_SIZE);
	if (!udev->mb_addr) {
		ret = -ENOMEM;
		goto err_vzalloc;
	}

	/* mailbox fits in first part of CMDR space */
	udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
	udev->data_off = CMDR_SIZE;
	udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE;
	udev->dbi_thresh = 0; /* Default in Idle state */

	/* Initialise the mailbox of the ring buffer */
	mb = udev->mb_addr;
	mb->version = TCMU_MAILBOX_VERSION;
	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
		    TCMU_MAILBOX_FLAG_CAP_READ_LEN |
		    TCMU_MAILBOX_FLAG_CAP_TMR;
	mb->cmdr_off = CMDR_OFF;
	mb->cmdr_size = udev->cmdr_size;

	WARN_ON(!PAGE_ALIGNED(udev->data_off));
	WARN_ON(udev->data_size % PAGE_SIZE);
	WARN_ON(udev->data_size % DATA_BLOCK_SIZE);

	info->version = __stringify(TCMU_MAILBOX_VERSION);

	info->mem[0].name = "tcm-user command & data buffer";
	info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
	info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE;
	info->mem[0].memtype = UIO_MEM_NONE;

	info->irqcontrol = tcmu_irqcontrol;
	info->irq = UIO_IRQ_CUSTOM;

	info->mmap = tcmu_mmap;
	info->open = tcmu_open;
	info->release = tcmu_release;

	ret = uio_register_device(tcmu_root_device, info);
	if (ret)
		goto err_register;

	/* User can set hw_block_size before enabling the device */
	if (dev->dev_attrib.hw_block_size == 0)
		dev->dev_attrib.hw_block_size = 512;
	/* Other attributes can be configured in userspace */
	if (!dev->dev_attrib.hw_max_sectors)
		dev->dev_attrib.hw_max_sectors = 128;
	if (!dev->dev_attrib.emulate_write_cache)
		dev->dev_attrib.emulate_write_cache = 0;
	dev->dev_attrib.hw_queue_depth = 128;

	/* If user didn't explicitly disable netlink reply support, use
	 * module scope setting.
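	 * A negative value set through configfs keeps netlink replies
	 * disabled for this device regardless of the module-level setting.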
	 */
	if (udev->nl_reply_supported >= 0)
		udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;

	/*
	 * Get a ref in case userspace does a close on the uio device before
	 * LIO has initiated tcmu_free_device.
	 */
	kref_get(&udev->kref);

	ret = tcmu_send_dev_add_event(udev);
	if (ret)
		goto err_netlink;

	mutex_lock(&root_udev_mutex);
	list_add(&udev->node, &root_udev);
	mutex_unlock(&root_udev_mutex);

	return 0;

err_netlink:
	kref_put(&udev->kref, tcmu_dev_kref_release);
	uio_unregister_device(&udev->uio_info);
err_register:
	vfree(udev->mb_addr);
	udev->mb_addr = NULL;
err_vzalloc:
	bitmap_free(udev->data_bitmap);
	udev->data_bitmap = NULL;
err_bitmap_alloc:
	kfree(info->name);
	info->name = NULL;

	return ret;
}

static void tcmu_free_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	/* release ref from init */
	kref_put(&udev->kref, tcmu_dev_kref_release);
}

static void tcmu_destroy_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	del_timer_sync(&udev->cmd_timer);
	del_timer_sync(&udev->qfull_timer);

	mutex_lock(&root_udev_mutex);
	list_del(&udev->node);
	mutex_unlock(&root_udev_mutex);

	tcmu_send_dev_remove_event(udev);

	uio_unregister_device(&udev->uio_info);

	/* release ref from configure */
	kref_put(&udev->kref, tcmu_dev_kref_release);
}

static void tcmu_unblock_dev(struct tcmu_dev *udev)
{
	mutex_lock(&udev->cmdr_lock);
	clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags);
	mutex_unlock(&udev->cmdr_lock);
}

static void tcmu_block_dev(struct tcmu_dev *udev)
{
	mutex_lock(&udev->cmdr_lock);

	if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
		goto unlock;

	/* complete IO that has executed successfully */
	tcmu_handle_completions(udev);
	/* fail IO waiting to be queued */
	run_qfull_queue(udev, true);

unlock:
	mutex_unlock(&udev->cmdr_lock);
}

static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
{
	struct tcmu_mailbox *mb;
	struct tcmu_cmd *cmd;
	unsigned long i;

	mutex_lock(&udev->cmdr_lock);

	xa_for_each(&udev->commands, i, cmd) {
		pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
			 cmd->cmd_id, udev->name,
			 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));

		xa_erase(&udev->commands, i);
		if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
			WARN_ON(!cmd->se_cmd);
			list_del_init(&cmd->queue_entry);
			cmd->se_cmd->priv = NULL;
			if (err_level == 1) {
				/*
				 * Userspace was not able to start the
				 * command or it is retryable.
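				 * (In this file err_level is the reset_ring
				 * configfs value: 1 completes with BUSY so the
				 * initiator may retry, 2 takes the else branch
				 * below and returns CHECK CONDITION.)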
2266 */ 2267 target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY); 2268 } else { 2269 /* hard failure */ 2270 target_complete_cmd(cmd->se_cmd, 2271 SAM_STAT_CHECK_CONDITION); 2272 } 2273 } 2274 tcmu_cmd_free_data(cmd, cmd->dbi_cnt); 2275 tcmu_free_cmd(cmd); 2276 } 2277 2278 mb = udev->mb_addr; 2279 tcmu_flush_dcache_range(mb, sizeof(*mb)); 2280 pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned, 2281 mb->cmd_tail, mb->cmd_head); 2282 2283 udev->cmdr_last_cleaned = 0; 2284 mb->cmd_tail = 0; 2285 mb->cmd_head = 0; 2286 tcmu_flush_dcache_range(mb, sizeof(*mb)); 2287 clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); 2288 2289 del_timer(&udev->cmd_timer); 2290 2291 /* 2292 * ring is empty and qfull queue never contains aborted commands. 2293 * So TMRs in tmr queue do not contain relevant cmd_ids. 2294 * After a ring reset userspace should do a fresh start, so 2295 * even LUN RESET message is no longer relevant. 2296 * Therefore remove all TMRs from qfull queue 2297 */ 2298 tcmu_remove_all_queued_tmr(udev); 2299 2300 run_qfull_queue(udev, false); 2301 2302 mutex_unlock(&udev->cmdr_lock); 2303 } 2304 2305 enum { 2306 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors, 2307 Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err, 2308 }; 2309 2310 static match_table_t tokens = { 2311 {Opt_dev_config, "dev_config=%s"}, 2312 {Opt_dev_size, "dev_size=%s"}, 2313 {Opt_hw_block_size, "hw_block_size=%d"}, 2314 {Opt_hw_max_sectors, "hw_max_sectors=%d"}, 2315 {Opt_nl_reply_supported, "nl_reply_supported=%d"}, 2316 {Opt_max_data_area_mb, "max_data_area_mb=%d"}, 2317 {Opt_err, NULL} 2318 }; 2319 2320 static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib) 2321 { 2322 int val, ret; 2323 2324 ret = match_int(arg, &val); 2325 if (ret < 0) { 2326 pr_err("match_int() failed for dev attrib. Error %d.\n", 2327 ret); 2328 return ret; 2329 } 2330 2331 if (val <= 0) { 2332 pr_err("Invalid dev attrib value %d. Must be greater than zero.\n", 2333 val); 2334 return -EINVAL; 2335 } 2336 *dev_attrib = val; 2337 return 0; 2338 } 2339 2340 static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg) 2341 { 2342 int val, ret; 2343 2344 ret = match_int(arg, &val); 2345 if (ret < 0) { 2346 pr_err("match_int() failed for max_data_area_mb=. Error %d.\n", 2347 ret); 2348 return ret; 2349 } 2350 2351 if (val <= 0) { 2352 pr_err("Invalid max_data_area %d.\n", val); 2353 return -EINVAL; 2354 } 2355 2356 mutex_lock(&udev->cmdr_lock); 2357 if (udev->data_bitmap) { 2358 pr_err("Cannot set max_data_area_mb after it has been enabled.\n"); 2359 ret = -EINVAL; 2360 goto unlock; 2361 } 2362 2363 udev->max_blocks = TCMU_MBS_TO_BLOCKS(val); 2364 if (udev->max_blocks > tcmu_global_max_blocks) { 2365 pr_err("%d is too large. 
Adjusting max_data_area_mb to global limit of %u\n", 2366 val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks)); 2367 udev->max_blocks = tcmu_global_max_blocks; 2368 } 2369 2370 unlock: 2371 mutex_unlock(&udev->cmdr_lock); 2372 return ret; 2373 } 2374 2375 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, 2376 const char *page, ssize_t count) 2377 { 2378 struct tcmu_dev *udev = TCMU_DEV(dev); 2379 char *orig, *ptr, *opts; 2380 substring_t args[MAX_OPT_ARGS]; 2381 int ret = 0, token; 2382 2383 opts = kstrdup(page, GFP_KERNEL); 2384 if (!opts) 2385 return -ENOMEM; 2386 2387 orig = opts; 2388 2389 while ((ptr = strsep(&opts, ",\n")) != NULL) { 2390 if (!*ptr) 2391 continue; 2392 2393 token = match_token(ptr, tokens, args); 2394 switch (token) { 2395 case Opt_dev_config: 2396 if (match_strlcpy(udev->dev_config, &args[0], 2397 TCMU_CONFIG_LEN) == 0) { 2398 ret = -EINVAL; 2399 break; 2400 } 2401 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config); 2402 break; 2403 case Opt_dev_size: 2404 ret = match_u64(&args[0], &udev->dev_size); 2405 if (ret < 0) 2406 pr_err("match_u64() failed for dev_size=. Error %d.\n", 2407 ret); 2408 break; 2409 case Opt_hw_block_size: 2410 ret = tcmu_set_dev_attrib(&args[0], 2411 &(dev->dev_attrib.hw_block_size)); 2412 break; 2413 case Opt_hw_max_sectors: 2414 ret = tcmu_set_dev_attrib(&args[0], 2415 &(dev->dev_attrib.hw_max_sectors)); 2416 break; 2417 case Opt_nl_reply_supported: 2418 ret = match_int(&args[0], &udev->nl_reply_supported); 2419 if (ret < 0) 2420 pr_err("match_int() failed for nl_reply_supported=. Error %d.\n", 2421 ret); 2422 break; 2423 case Opt_max_data_area_mb: 2424 ret = tcmu_set_max_blocks_param(udev, &args[0]); 2425 break; 2426 default: 2427 break; 2428 } 2429 2430 if (ret) 2431 break; 2432 } 2433 2434 kfree(orig); 2435 return (!ret) ? count : ret; 2436 } 2437 2438 static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) 2439 { 2440 struct tcmu_dev *udev = TCMU_DEV(dev); 2441 ssize_t bl = 0; 2442 2443 bl = sprintf(b + bl, "Config: %s ", 2444 udev->dev_config[0] ? 
udev->dev_config : "NULL"); 2445 bl += sprintf(b + bl, "Size: %llu ", udev->dev_size); 2446 bl += sprintf(b + bl, "MaxDataAreaMB: %u\n", 2447 TCMU_BLOCKS_TO_MBS(udev->max_blocks)); 2448 2449 return bl; 2450 } 2451 2452 static sector_t tcmu_get_blocks(struct se_device *dev) 2453 { 2454 struct tcmu_dev *udev = TCMU_DEV(dev); 2455 2456 return div_u64(udev->dev_size - dev->dev_attrib.block_size, 2457 dev->dev_attrib.block_size); 2458 } 2459 2460 static sense_reason_t 2461 tcmu_parse_cdb(struct se_cmd *cmd) 2462 { 2463 return passthrough_parse_cdb(cmd, tcmu_queue_cmd); 2464 } 2465 2466 static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page) 2467 { 2468 struct se_dev_attrib *da = container_of(to_config_group(item), 2469 struct se_dev_attrib, da_group); 2470 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2471 2472 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); 2473 } 2474 2475 static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page, 2476 size_t count) 2477 { 2478 struct se_dev_attrib *da = container_of(to_config_group(item), 2479 struct se_dev_attrib, da_group); 2480 struct tcmu_dev *udev = container_of(da->da_dev, 2481 struct tcmu_dev, se_dev); 2482 u32 val; 2483 int ret; 2484 2485 if (da->da_dev->export_count) { 2486 pr_err("Unable to set tcmu cmd_time_out while exports exist\n"); 2487 return -EINVAL; 2488 } 2489 2490 ret = kstrtou32(page, 0, &val); 2491 if (ret < 0) 2492 return ret; 2493 2494 udev->cmd_time_out = val * MSEC_PER_SEC; 2495 return count; 2496 } 2497 CONFIGFS_ATTR(tcmu_, cmd_time_out); 2498 2499 static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page) 2500 { 2501 struct se_dev_attrib *da = container_of(to_config_group(item), 2502 struct se_dev_attrib, da_group); 2503 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2504 2505 return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ? 
2506 udev->qfull_time_out : 2507 udev->qfull_time_out / MSEC_PER_SEC); 2508 } 2509 2510 static ssize_t tcmu_qfull_time_out_store(struct config_item *item, 2511 const char *page, size_t count) 2512 { 2513 struct se_dev_attrib *da = container_of(to_config_group(item), 2514 struct se_dev_attrib, da_group); 2515 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2516 s32 val; 2517 int ret; 2518 2519 ret = kstrtos32(page, 0, &val); 2520 if (ret < 0) 2521 return ret; 2522 2523 if (val >= 0) { 2524 udev->qfull_time_out = val * MSEC_PER_SEC; 2525 } else if (val == -1) { 2526 udev->qfull_time_out = val; 2527 } else { 2528 printk(KERN_ERR "Invalid qfull timeout value %d\n", val); 2529 return -EINVAL; 2530 } 2531 return count; 2532 } 2533 CONFIGFS_ATTR(tcmu_, qfull_time_out); 2534 2535 static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page) 2536 { 2537 struct se_dev_attrib *da = container_of(to_config_group(item), 2538 struct se_dev_attrib, da_group); 2539 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2540 2541 return snprintf(page, PAGE_SIZE, "%u\n", 2542 TCMU_BLOCKS_TO_MBS(udev->max_blocks)); 2543 } 2544 CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb); 2545 2546 static ssize_t tcmu_dev_config_show(struct config_item *item, char *page) 2547 { 2548 struct se_dev_attrib *da = container_of(to_config_group(item), 2549 struct se_dev_attrib, da_group); 2550 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2551 2552 return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config); 2553 } 2554 2555 static int tcmu_send_dev_config_event(struct tcmu_dev *udev, 2556 const char *reconfig_data) 2557 { 2558 struct sk_buff *skb = NULL; 2559 void *msg_header = NULL; 2560 int ret = 0; 2561 2562 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, 2563 &skb, &msg_header); 2564 if (ret < 0) 2565 return ret; 2566 ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data); 2567 if (ret < 0) { 2568 nlmsg_free(skb); 2569 return ret; 2570 } 2571 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, 2572 skb, msg_header); 2573 } 2574 2575 2576 static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page, 2577 size_t count) 2578 { 2579 struct se_dev_attrib *da = container_of(to_config_group(item), 2580 struct se_dev_attrib, da_group); 2581 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2582 int ret, len; 2583 2584 len = strlen(page); 2585 if (!len || len > TCMU_CONFIG_LEN - 1) 2586 return -EINVAL; 2587 2588 /* Check if device has been configured before */ 2589 if (target_dev_configured(&udev->se_dev)) { 2590 ret = tcmu_send_dev_config_event(udev, page); 2591 if (ret) { 2592 pr_err("Unable to reconfigure device\n"); 2593 return ret; 2594 } 2595 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); 2596 2597 ret = tcmu_update_uio_info(udev); 2598 if (ret) 2599 return ret; 2600 return count; 2601 } 2602 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); 2603 2604 return count; 2605 } 2606 CONFIGFS_ATTR(tcmu_, dev_config); 2607 2608 static ssize_t tcmu_dev_size_show(struct config_item *item, char *page) 2609 { 2610 struct se_dev_attrib *da = container_of(to_config_group(item), 2611 struct se_dev_attrib, da_group); 2612 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2613 2614 return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size); 2615 } 2616 2617 static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size) 2618 { 2619 struct sk_buff *skb = NULL; 2620 void *msg_header = NULL; 2621 int ret = 0; 2622 2623 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, 2624 &skb, 
&msg_header); 2625 if (ret < 0) 2626 return ret; 2627 ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE, 2628 size, TCMU_ATTR_PAD); 2629 if (ret < 0) { 2630 nlmsg_free(skb); 2631 return ret; 2632 } 2633 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, 2634 skb, msg_header); 2635 } 2636 2637 static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page, 2638 size_t count) 2639 { 2640 struct se_dev_attrib *da = container_of(to_config_group(item), 2641 struct se_dev_attrib, da_group); 2642 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2643 u64 val; 2644 int ret; 2645 2646 ret = kstrtou64(page, 0, &val); 2647 if (ret < 0) 2648 return ret; 2649 2650 /* Check if device has been configured before */ 2651 if (target_dev_configured(&udev->se_dev)) { 2652 ret = tcmu_send_dev_size_event(udev, val); 2653 if (ret) { 2654 pr_err("Unable to reconfigure device\n"); 2655 return ret; 2656 } 2657 } 2658 udev->dev_size = val; 2659 return count; 2660 } 2661 CONFIGFS_ATTR(tcmu_, dev_size); 2662 2663 static ssize_t tcmu_nl_reply_supported_show(struct config_item *item, 2664 char *page) 2665 { 2666 struct se_dev_attrib *da = container_of(to_config_group(item), 2667 struct se_dev_attrib, da_group); 2668 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2669 2670 return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported); 2671 } 2672 2673 static ssize_t tcmu_nl_reply_supported_store(struct config_item *item, 2674 const char *page, size_t count) 2675 { 2676 struct se_dev_attrib *da = container_of(to_config_group(item), 2677 struct se_dev_attrib, da_group); 2678 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2679 s8 val; 2680 int ret; 2681 2682 ret = kstrtos8(page, 0, &val); 2683 if (ret < 0) 2684 return ret; 2685 2686 udev->nl_reply_supported = val; 2687 return count; 2688 } 2689 CONFIGFS_ATTR(tcmu_, nl_reply_supported); 2690 2691 static ssize_t tcmu_emulate_write_cache_show(struct config_item *item, 2692 char *page) 2693 { 2694 struct se_dev_attrib *da = container_of(to_config_group(item), 2695 struct se_dev_attrib, da_group); 2696 2697 return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache); 2698 } 2699 2700 static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val) 2701 { 2702 struct sk_buff *skb = NULL; 2703 void *msg_header = NULL; 2704 int ret = 0; 2705 2706 ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE, 2707 &skb, &msg_header); 2708 if (ret < 0) 2709 return ret; 2710 ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val); 2711 if (ret < 0) { 2712 nlmsg_free(skb); 2713 return ret; 2714 } 2715 return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE, 2716 skb, msg_header); 2717 } 2718 2719 static ssize_t tcmu_emulate_write_cache_store(struct config_item *item, 2720 const char *page, size_t count) 2721 { 2722 struct se_dev_attrib *da = container_of(to_config_group(item), 2723 struct se_dev_attrib, da_group); 2724 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2725 u8 val; 2726 int ret; 2727 2728 ret = kstrtou8(page, 0, &val); 2729 if (ret < 0) 2730 return ret; 2731 2732 /* Check if device has been configured before */ 2733 if (target_dev_configured(&udev->se_dev)) { 2734 ret = tcmu_send_emulate_write_cache(udev, val); 2735 if (ret) { 2736 pr_err("Unable to reconfigure device\n"); 2737 return ret; 2738 } 2739 } 2740 2741 da->emulate_write_cache = val; 2742 return count; 2743 } 2744 CONFIGFS_ATTR(tcmu_, emulate_write_cache); 2745 2746 static ssize_t tcmu_tmr_notification_show(struct config_item *item, char *page) 2747 { 2748 struct se_dev_attrib 
*da = container_of(to_config_group(item), 2749 struct se_dev_attrib, da_group); 2750 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2751 2752 return snprintf(page, PAGE_SIZE, "%i\n", 2753 test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags)); 2754 } 2755 2756 static ssize_t tcmu_tmr_notification_store(struct config_item *item, 2757 const char *page, size_t count) 2758 { 2759 struct se_dev_attrib *da = container_of(to_config_group(item), 2760 struct se_dev_attrib, da_group); 2761 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 2762 u8 val; 2763 int ret; 2764 2765 ret = kstrtou8(page, 0, &val); 2766 if (ret < 0) 2767 return ret; 2768 if (val > 1) 2769 return -EINVAL; 2770 2771 if (val) 2772 set_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags); 2773 else 2774 clear_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags); 2775 return count; 2776 } 2777 CONFIGFS_ATTR(tcmu_, tmr_notification); 2778 2779 static ssize_t tcmu_block_dev_show(struct config_item *item, char *page) 2780 { 2781 struct se_device *se_dev = container_of(to_config_group(item), 2782 struct se_device, 2783 dev_action_group); 2784 struct tcmu_dev *udev = TCMU_DEV(se_dev); 2785 2786 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) 2787 return snprintf(page, PAGE_SIZE, "%s\n", "blocked"); 2788 else 2789 return snprintf(page, PAGE_SIZE, "%s\n", "unblocked"); 2790 } 2791 2792 static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page, 2793 size_t count) 2794 { 2795 struct se_device *se_dev = container_of(to_config_group(item), 2796 struct se_device, 2797 dev_action_group); 2798 struct tcmu_dev *udev = TCMU_DEV(se_dev); 2799 u8 val; 2800 int ret; 2801 2802 if (!target_dev_configured(&udev->se_dev)) { 2803 pr_err("Device is not configured.\n"); 2804 return -EINVAL; 2805 } 2806 2807 ret = kstrtou8(page, 0, &val); 2808 if (ret < 0) 2809 return ret; 2810 2811 if (val > 1) { 2812 pr_err("Invalid block value %d\n", val); 2813 return -EINVAL; 2814 } 2815 2816 if (!val) 2817 tcmu_unblock_dev(udev); 2818 else 2819 tcmu_block_dev(udev); 2820 return count; 2821 } 2822 CONFIGFS_ATTR(tcmu_, block_dev); 2823 2824 static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page, 2825 size_t count) 2826 { 2827 struct se_device *se_dev = container_of(to_config_group(item), 2828 struct se_device, 2829 dev_action_group); 2830 struct tcmu_dev *udev = TCMU_DEV(se_dev); 2831 u8 val; 2832 int ret; 2833 2834 if (!target_dev_configured(&udev->se_dev)) { 2835 pr_err("Device is not configured.\n"); 2836 return -EINVAL; 2837 } 2838 2839 ret = kstrtou8(page, 0, &val); 2840 if (ret < 0) 2841 return ret; 2842 2843 if (val != 1 && val != 2) { 2844 pr_err("Invalid reset ring value %d\n", val); 2845 return -EINVAL; 2846 } 2847 2848 tcmu_reset_ring(udev, val); 2849 return count; 2850 } 2851 CONFIGFS_ATTR_WO(tcmu_, reset_ring); 2852 2853 static struct configfs_attribute *tcmu_attrib_attrs[] = { 2854 &tcmu_attr_cmd_time_out, 2855 &tcmu_attr_qfull_time_out, 2856 &tcmu_attr_max_data_area_mb, 2857 &tcmu_attr_dev_config, 2858 &tcmu_attr_dev_size, 2859 &tcmu_attr_emulate_write_cache, 2860 &tcmu_attr_tmr_notification, 2861 &tcmu_attr_nl_reply_supported, 2862 NULL, 2863 }; 2864 2865 static struct configfs_attribute **tcmu_attrs; 2866 2867 static struct configfs_attribute *tcmu_action_attrs[] = { 2868 &tcmu_attr_block_dev, 2869 &tcmu_attr_reset_ring, 2870 NULL, 2871 }; 2872 2873 static struct target_backend_ops tcmu_ops = { 2874 .name = "user", 2875 .owner = THIS_MODULE, 2876 .transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH, 2877 .transport_flags_changeable = 
					  TRANSPORT_FLAG_PASSTHROUGH_PGR |
					  TRANSPORT_FLAG_PASSTHROUGH_ALUA,
	.attach_hba		= tcmu_attach_hba,
	.detach_hba		= tcmu_detach_hba,
	.alloc_device		= tcmu_alloc_device,
	.configure_device	= tcmu_configure_device,
	.destroy_device		= tcmu_destroy_device,
	.free_device		= tcmu_free_device,
	.unplug_device		= tcmu_unplug_device,
	.plug_device		= tcmu_plug_device,
	.parse_cdb		= tcmu_parse_cdb,
	.tmr_notify		= tcmu_tmr_notify,
	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= tcmu_get_blocks,
	.tb_dev_action_attrs	= tcmu_action_attrs,
};

static void find_free_blocks(void)
{
	struct tcmu_dev *udev;
	loff_t off;
	u32 start, end, block, total_freed = 0;

	if (atomic_read(&global_db_count) <= tcmu_global_max_blocks)
		return;

	mutex_lock(&root_udev_mutex);
	list_for_each_entry(udev, &root_udev, node) {
		mutex_lock(&udev->cmdr_lock);

		if (!target_dev_configured(&udev->se_dev)) {
			mutex_unlock(&udev->cmdr_lock);
			continue;
		}

		/* Try to complete the finished commands first */
		if (tcmu_handle_completions(udev))
			run_qfull_queue(udev, false);

		/* Skip the udevs in idle */
		if (!udev->dbi_thresh) {
			mutex_unlock(&udev->cmdr_lock);
			continue;
		}

		end = udev->dbi_max + 1;
		block = find_last_bit(udev->data_bitmap, end);
		if (block == udev->dbi_max) {
			/*
			 * The last bit is dbi_max, so it is not possible to
			 * reclaim any blocks.
			 */
			mutex_unlock(&udev->cmdr_lock);
			continue;
		} else if (block == end) {
			/* The current udev will go to idle state */
			udev->dbi_thresh = start = 0;
			udev->dbi_max = 0;
		} else {
			udev->dbi_thresh = start = block + 1;
			udev->dbi_max = block;
		}

		/* Unmap the data area from offset 'off' to the end of the area */
		off = udev->data_off + start * DATA_BLOCK_SIZE;
		unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);

		/* Release the block pages */
		tcmu_blocks_release(&udev->data_blocks, start, end - 1);
		mutex_unlock(&udev->cmdr_lock);

		total_freed += end - start;
		pr_debug("Freed %u blocks (total %u) from %s.\n", end - start,
			 total_freed, udev->name);
	}
	mutex_unlock(&root_udev_mutex);

	if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
		schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
}

static void check_timedout_devices(void)
{
	struct tcmu_dev *udev, *tmp_dev;
	struct tcmu_cmd *cmd, *tmp_cmd;
	LIST_HEAD(devs);

	spin_lock_bh(&timed_out_udevs_lock);
	list_splice_init(&timed_out_udevs, &devs);

	list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
		list_del_init(&udev->timedout_entry);
		spin_unlock_bh(&timed_out_udevs_lock);

		mutex_lock(&udev->cmdr_lock);

		/*
		 * If cmd_time_out is disabled but qfull is set, the deadline
		 * will only reflect the qfull timeout. Ignore it.
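		 * (Ring commands are therefore only expired when cmd_time_out
		 * is enabled; the qfull queue is checked unconditionally
		 * below.)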
2978 */ 2979 if (udev->cmd_time_out) { 2980 list_for_each_entry_safe(cmd, tmp_cmd, 2981 &udev->inflight_queue, 2982 queue_entry) { 2983 tcmu_check_expired_ring_cmd(cmd); 2984 } 2985 tcmu_set_next_deadline(&udev->inflight_queue, 2986 &udev->cmd_timer); 2987 } 2988 list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue, 2989 queue_entry) { 2990 tcmu_check_expired_queue_cmd(cmd); 2991 } 2992 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); 2993 2994 mutex_unlock(&udev->cmdr_lock); 2995 2996 spin_lock_bh(&timed_out_udevs_lock); 2997 } 2998 2999 spin_unlock_bh(&timed_out_udevs_lock); 3000 } 3001 3002 static void tcmu_unmap_work_fn(struct work_struct *work) 3003 { 3004 check_timedout_devices(); 3005 find_free_blocks(); 3006 } 3007 3008 static int __init tcmu_module_init(void) 3009 { 3010 int ret, i, k, len = 0; 3011 3012 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); 3013 3014 INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn); 3015 3016 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache", 3017 sizeof(struct tcmu_cmd), 3018 __alignof__(struct tcmu_cmd), 3019 0, NULL); 3020 if (!tcmu_cmd_cache) 3021 return -ENOMEM; 3022 3023 tcmu_root_device = root_device_register("tcm_user"); 3024 if (IS_ERR(tcmu_root_device)) { 3025 ret = PTR_ERR(tcmu_root_device); 3026 goto out_free_cache; 3027 } 3028 3029 ret = genl_register_family(&tcmu_genl_family); 3030 if (ret < 0) { 3031 goto out_unreg_device; 3032 } 3033 3034 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) 3035 len += sizeof(struct configfs_attribute *); 3036 for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++) 3037 len += sizeof(struct configfs_attribute *); 3038 for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) 3039 len += sizeof(struct configfs_attribute *); 3040 len += sizeof(struct configfs_attribute *); 3041 3042 tcmu_attrs = kzalloc(len, GFP_KERNEL); 3043 if (!tcmu_attrs) { 3044 ret = -ENOMEM; 3045 goto out_unreg_genl; 3046 } 3047 3048 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) 3049 tcmu_attrs[i] = passthrough_attrib_attrs[i]; 3050 for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++) 3051 tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k]; 3052 for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) 3053 tcmu_attrs[i++] = tcmu_attrib_attrs[k]; 3054 tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs; 3055 3056 ret = transport_backend_register(&tcmu_ops); 3057 if (ret) 3058 goto out_attrs; 3059 3060 return 0; 3061 3062 out_attrs: 3063 kfree(tcmu_attrs); 3064 out_unreg_genl: 3065 genl_unregister_family(&tcmu_genl_family); 3066 out_unreg_device: 3067 root_device_unregister(tcmu_root_device); 3068 out_free_cache: 3069 kmem_cache_destroy(tcmu_cmd_cache); 3070 3071 return ret; 3072 } 3073 3074 static void __exit tcmu_module_exit(void) 3075 { 3076 cancel_delayed_work_sync(&tcmu_unmap_work); 3077 target_backend_unregister(&tcmu_ops); 3078 kfree(tcmu_attrs); 3079 genl_unregister_family(&tcmu_genl_family); 3080 root_device_unregister(tcmu_root_device); 3081 kmem_cache_destroy(tcmu_cmd_cache); 3082 } 3083 3084 MODULE_DESCRIPTION("TCM USER subsystem plugin"); 3085 MODULE_AUTHOR("Shaohua Li <shli@kernel.org>"); 3086 MODULE_AUTHOR("Andy Grover <agrover@redhat.com>"); 3087 MODULE_LICENSE("GPL"); 3088 3089 module_init(tcmu_module_init); 3090 module_exit(tcmu_module_exit); 3091
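
/*
 * Illustrative userspace sketch (not part of this module): the basic event
 * loop a tcm-user handler runs against the ring defined above, following
 * Documentation/target/tcmu-design.rst. The uio node path, the map size read
 * from /sys/class/uio/uioX/maps/map0/size and handle_one_cmd() are
 * placeholders for whatever the backend actually uses.
 *
 *	#include <stdint.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/mman.h>
 *	#include <linux/target_core_user.h>
 *
 *	static void run_ring(const char *uio_path, size_t map_size)
 *	{
 *		int fd = open(uio_path, O_RDWR);
 *		void *map = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *				 MAP_SHARED, fd, 0);
 *		struct tcmu_mailbox *mb = map;
 *		uint32_t event, done = 0;
 *
 *		for (;;) {
 *			// Block until the kernel kicks the ring.
 *			read(fd, &event, sizeof(event));
 *
 *			while (mb->cmd_tail != mb->cmd_head) {
 *				struct tcmu_cmd_entry *ent = (void *)mb +
 *					mb->cmdr_off + mb->cmd_tail;
 *
 *				// PAD entries are skipped; only their length counts.
 *				if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD)
 *					handle_one_cmd(map, ent); // fill ent->rsp
 *
 *				mb->cmd_tail = (mb->cmd_tail +
 *						tcmu_hdr_get_len(ent->hdr.len_op)) %
 *					       mb->cmdr_size;
 *			}
 *			// Tell tcmu_irqcontrol() that completions are ready.
 *			write(fd, &done, sizeof(done));
 *		}
 *	}
 */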