// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 * Copyright (C) 2017 Chinamobile, Inc.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/radix-tree.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>

/**
 * DOC: Userspace I/O
 * Userspace I/O
 * -------------
 *
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This makes it possible to support
 * backends that are too complex to implement in the kernel.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap-ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it from breaking the command ring protocol if it wants, but
 * in order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */

#define TCMU_TIME_OUT	(30 * MSEC_PER_SEC)

/* For cmd area, the size is fixed at 8MB */
#define CMDR_SIZE	(8 * 1024 * 1024)

/*
 * For data area, the block size is PAGE_SIZE and
 * the total size is 256K * PAGE_SIZE.
 */
#define DATA_BLOCK_SIZE		PAGE_SIZE
#define DATA_BLOCK_SHIFT	PAGE_SHIFT
#define DATA_BLOCK_BITS_DEF	(256 * 1024)

#define TCMU_MBS_TO_BLOCKS(_mbs)	(_mbs << (20 - DATA_BLOCK_SHIFT))
#define TCMU_BLOCKS_TO_MBS(_blocks)	(_blocks >> (20 - DATA_BLOCK_SHIFT))

/*
 * Default number of global data blocks (512K * PAGE_SIZE)
 * at which the unmap thread will be started.
 */
#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)

static u8 tcmu_kern_cmd_reply_supported;
static u8 tcmu_netlink_blocked;

static struct device *tcmu_root_device;

struct tcmu_hba {
	u32 host_id;
};

#define TCMU_CONFIG_LEN 256

static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
static LIST_HEAD(tcmu_nl_cmd_list);

struct tcmu_dev;

struct tcmu_nl_cmd {
	/* wake up thread waiting for reply */
	struct completion complete;
	struct list_head nl_list;
	struct tcmu_dev *udev;
	int cmd;
	int status;
};

struct tcmu_dev {
	struct list_head node;
	struct kref kref;

	struct se_device se_dev;

	char *name;
	struct se_hba *hba;

#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
#define TCMU_DEV_BIT_BLOCKED 2
	unsigned long flags;

	struct uio_info uio_info;

	struct inode *inode;

	struct tcmu_mailbox *mb_addr;
	uint64_t dev_size;
	u32 cmdr_size;
	u32 cmdr_last_cleaned;
	/* Offset of data area from start of mb */
	/* Must add data_off and mb_addr to get the address */
	size_t data_off;
	size_t data_size;
	uint32_t max_blocks;
	size_t ring_size;

	struct mutex cmdr_lock;
	struct list_head qfull_queue;

	uint32_t dbi_max;
	uint32_t dbi_thresh;
	unsigned long *data_bitmap;
	struct radix_tree_root data_blocks;

	struct idr commands;

	struct timer_list cmd_timer;
	unsigned int cmd_time_out;
	struct list_head inflight_queue;

	struct timer_list qfull_timer;
	int qfull_time_out;

	struct list_head timedout_entry;

	struct tcmu_nl_cmd curr_nl_cmd;

	char dev_config[TCMU_CONFIG_LEN];

	int nl_reply_supported;
};

#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)

#define CMDR_OFF sizeof(struct tcmu_mailbox)

struct tcmu_cmd {
	struct se_cmd *se_cmd;
	struct tcmu_dev *tcmu_dev;
	struct list_head queue_entry;

	uint16_t cmd_id;

	/*
	 * Can't use se_cmd when cleaning up expired cmds, because if
	 * cmd has been completed then accessing se_cmd is off limits
	 */
	uint32_t dbi_cnt;
	uint32_t dbi_cur;
	uint32_t *dbi;

	unsigned long deadline;

#define TCMU_CMD_BIT_EXPIRED 0
	unsigned long flags;
};

/*
 * To avoid deadlock the mutex lock order should always be:
 *
 * mutex_lock(&root_udev_mutex);
 * ...
 * mutex_lock(&tcmu_dev->cmdr_lock);
 * mutex_unlock(&tcmu_dev->cmdr_lock);
 * ...
 * mutex_unlock(&root_udev_mutex);
 */
static DEFINE_MUTEX(root_udev_mutex);
static LIST_HEAD(root_udev);

static DEFINE_SPINLOCK(timed_out_udevs_lock);
static LIST_HEAD(timed_out_udevs);

static struct kmem_cache *tcmu_cmd_cache;

static atomic_t global_db_count = ATOMIC_INIT(0);
static struct delayed_work tcmu_unmap_work;
static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF;

static int tcmu_set_global_max_data_area(const char *str,
					 const struct kernel_param *kp)
{
	int ret, max_area_mb;

	ret = kstrtoint(str, 10, &max_area_mb);
	if (ret)
		return -EINVAL;

	if (max_area_mb <= 0) {
		pr_err("global_max_data_area must be larger than 0.\n");
		return -EINVAL;
	}

	tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb);
	if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
		schedule_delayed_work(&tcmu_unmap_work, 0);
	else
		cancel_delayed_work_sync(&tcmu_unmap_work);

	return 0;
}

static int tcmu_get_global_max_data_area(char *buffer,
					 const struct kernel_param *kp)
{
	return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
}

static const struct kernel_param_ops tcmu_global_max_data_area_op = {
	.set = tcmu_set_global_max_data_area,
	.get = tcmu_get_global_max_data_area,
};

module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
		S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(global_max_data_area_mb,
		 "Max MBs allowed to be allocated to all the tcmu devices' "
		 "data areas.");

static int tcmu_get_block_netlink(char *buffer,
				  const struct kernel_param *kp)
{
	return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
		       "blocked" : "unblocked");
}

static int tcmu_set_block_netlink(const char *str,
				  const struct kernel_param *kp)
{
	int ret;
	u8 val;

	ret = kstrtou8(str, 0, &val);
	if (ret < 0)
		return ret;

	if (val > 1) {
		pr_err("Invalid block netlink value %u\n", val);
		return -EINVAL;
	}

	tcmu_netlink_blocked = val;
	return 0;
}

static const struct kernel_param_ops tcmu_block_netlink_op = {
	.set = tcmu_set_block_netlink,
	.get = tcmu_get_block_netlink,
};

module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");

static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
{
	struct tcmu_dev *udev = nl_cmd->udev;

	if (!tcmu_netlink_blocked) {
		pr_err("Could not reset device's netlink interface. "
		       "Netlink is not blocked.\n");
		return -EBUSY;
	}

	if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
		pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
		nl_cmd->status = -EINTR;
		list_del(&nl_cmd->nl_list);
		complete(&nl_cmd->complete);
	}
	return 0;
}

static int tcmu_set_reset_netlink(const char *str,
				  const struct kernel_param *kp)
{
	struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
	int ret;
	u8 val;

	ret = kstrtou8(str, 0, &val);
	if (ret < 0)
		return ret;

	if (val != 1) {
		pr_err("Invalid reset netlink value %u\n", val);
		return -EINVAL;
	}

	mutex_lock(&tcmu_nl_cmd_mutex);
	list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
		ret = tcmu_fail_netlink_cmd(nl_cmd);
		if (ret)
			break;
	}
	mutex_unlock(&tcmu_nl_cmd_mutex);

	return ret;
}

static const struct kernel_param_ops tcmu_reset_netlink_op = {
	.set = tcmu_set_reset_netlink,
};

module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");

/* multicast group */
enum tcmu_multicast_groups {
	TCMU_MCGRP_CONFIG,
};

static const struct genl_multicast_group tcmu_mcgrps[] = {
	[TCMU_MCGRP_CONFIG] = { .name = "config", },
};

static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
	[TCMU_ATTR_DEVICE]	= { .type = NLA_STRING },
	[TCMU_ATTR_MINOR]	= { .type = NLA_U32 },
	[TCMU_ATTR_CMD_STATUS]	= { .type = NLA_S32 },
	[TCMU_ATTR_DEVICE_ID]	= { .type = NLA_U32 },
	[TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
};

static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
{
	struct tcmu_dev *udev = NULL;
	struct tcmu_nl_cmd *nl_cmd;
	int dev_id, rc, ret = 0;

	if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
	    !info->attrs[TCMU_ATTR_DEVICE_ID]) {
		printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
		return -EINVAL;
	}

	dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
	rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);

	mutex_lock(&tcmu_nl_cmd_mutex);
	list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
		if (nl_cmd->udev->se_dev.dev_index == dev_id) {
			udev = nl_cmd->udev;
			break;
		}
	}

	if (!udev) {
		pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
		       completed_cmd, rc, dev_id);
		ret = -ENODEV;
		goto unlock;
	}
	list_del(&nl_cmd->nl_list);

	pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
		 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
		 nl_cmd->status);

	if (nl_cmd->cmd != completed_cmd) {
		pr_err("Mismatched commands on %s (Expecting reply for %d. "
		       "Current %d).\n",
		       udev->name, completed_cmd, nl_cmd->cmd);
		ret = -EINVAL;
		goto unlock;
	}

	nl_cmd->status = rc;
	complete(&nl_cmd->complete);
unlock:
	mutex_unlock(&tcmu_nl_cmd_mutex);
	return ret;
}

static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
}

static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
}

static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
				       struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
}

static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
		tcmu_kern_cmd_reply_supported =
			nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
		printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
		       tcmu_kern_cmd_reply_supported);
	}

	return 0;
}

static const struct genl_ops tcmu_genl_ops[] = {
	{
		.cmd = TCMU_CMD_SET_FEATURES,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = tcmu_genl_set_features,
	},
	{
		.cmd = TCMU_CMD_ADDED_DEVICE_DONE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = tcmu_genl_add_dev_done,
	},
	{
		.cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = tcmu_genl_rm_dev_done,
	},
	{
		.cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = tcmu_genl_reconfig_dev_done,
	},
};

/* Our generic netlink family */
static struct genl_family tcmu_genl_family __ro_after_init = {
	.module = THIS_MODULE,
	.hdrsize = 0,
	.name = "TCM-USER",
	.version = 2,
	.maxattr = TCMU_ATTR_MAX,
	.policy = tcmu_attr_policy,
	.mcgrps = tcmu_mcgrps,
	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
	.netnsok = true,
	.ops = tcmu_genl_ops,
	.n_ops = ARRAY_SIZE(tcmu_genl_ops),
};

#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
#define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
#define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
#define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])

static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	uint32_t i;

	for (i = 0; i < len; i++)
		clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
}

static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
					struct tcmu_cmd *tcmu_cmd)
{
	struct page *page;
	int ret, dbi;

	dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
	if (dbi == udev->dbi_thresh)
		return false;

	page = radix_tree_lookup(&udev->data_blocks, dbi);
	if (!page) {
		if (atomic_add_return(1, &global_db_count) >
		    tcmu_global_max_blocks)
			schedule_delayed_work(&tcmu_unmap_work, 0);

		/* try to get new page from the mm */
		page = alloc_page(GFP_NOIO);
		if (!page)
			goto err_alloc;

		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
		if (ret)
			goto err_insert;
	}

	if (dbi > udev->dbi_max)
		udev->dbi_max = dbi;

	set_bit(dbi, udev->data_bitmap);
	tcmu_cmd_set_dbi(tcmu_cmd, dbi);

	return true;
err_insert:
	__free_page(page);
err_alloc:
	atomic_dec(&global_db_count);
	return false;
}

static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
				  struct tcmu_cmd *tcmu_cmd)
{
	int i;

	for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
		if (!tcmu_get_empty_block(udev, tcmu_cmd))
			return false;
	}
	return true;
}

static inline struct page *
tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
	return radix_tree_lookup(&udev->data_blocks, dbi);
}

static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
	if (tcmu_cmd->se_cmd)
		tcmu_cmd->se_cmd->priv = NULL;
	kfree(tcmu_cmd->dbi);
	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}

static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);

	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
		data_length += round_up(se_cmd->t_bidi_data_sg->length,
					DATA_BLOCK_SIZE);
	}

	return data_length;
}

static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
{
	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);

	return data_length / DATA_BLOCK_SIZE;
}

static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;

	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO);
	if (!tcmu_cmd)
		return NULL;

	INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
	tcmu_cmd->se_cmd = se_cmd;
	tcmu_cmd->tcmu_dev = udev;

	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
	tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
				GFP_NOIO);
	if (!tcmu_cmd->dbi) {
		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
		return NULL;
	}

	return tcmu_cmd;
}

static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
	unsigned long offset = offset_in_page(vaddr);
	void *start = vaddr - offset;

	size = round_up(size + offset, PAGE_SIZE);

	while (size) {
		flush_dcache_page(vmalloc_to_page(start));
		start += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
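 *
 * Worked example (purely illustrative values): with size = 8, head = 2
 * and tail = 6, spc_used() returns 8 + (2 - 6) = 4 bytes in use, and
 * spc_free() returns 8 - 4 - 1 = 3; one byte is always kept unused so
 * that a completely full ring can be told apart from an empty one.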
 */
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
	int diff = head - tail;

	if (diff >= 0)
		return diff;
	else
		return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* Keep 1 byte unused or we can't tell full from empty */
	return (size - spc_used(head, tail, size) - 1);
}

static inline size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

static inline void new_iov(struct iovec **iov, int *iov_cnt)
{
	struct iovec *iovec;

	if (*iov_cnt != 0)
		(*iov)++;
	(*iov_cnt)++;

	iovec = *iov;
	memset(iovec, 0, sizeof(struct iovec));
}

#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)

/* offset is relative to mb_addr */
static inline size_t get_block_offset_user(struct tcmu_dev *dev,
					   int dbi, int remaining)
{
	return dev->data_off + dbi * DATA_BLOCK_SIZE +
		DATA_BLOCK_SIZE - remaining;
}

static inline size_t iov_tail(struct iovec *iov)
{
	return (size_t)iov->iov_base + iov->iov_len;
}

static void scatter_data_area(struct tcmu_dev *udev,
	struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
	unsigned int data_nents, struct iovec **iov,
	int *iov_cnt, bool copy_data)
{
	int i, dbi;
	int block_remaining = 0;
	void *from, *to = NULL;
	size_t copy_bytes, to_offset, offset;
	struct scatterlist *sg;
	struct page *page;

	for_each_sg(data_sg, sg, data_nents, i) {
		int sg_remaining = sg->length;

		from = kmap_atomic(sg_page(sg)) + sg->offset;
		while (sg_remaining > 0) {
			if (block_remaining == 0) {
				if (to) {
					flush_dcache_page(page);
					kunmap_atomic(to);
				}

				block_remaining = DATA_BLOCK_SIZE;
				dbi = tcmu_cmd_get_dbi(tcmu_cmd);
				page = tcmu_get_block_page(udev, dbi);
				to = kmap_atomic(page);
			}

			/*
			 * Convert to virtual offset of the ring data area.
			 */
			to_offset = get_block_offset_user(udev, dbi,
					block_remaining);

			/*
			 * The following code will gather and map the blocks
			 * to the same iovec when the blocks are all next to
			 * each other.
			 */
			copy_bytes = min_t(size_t, sg_remaining,
					block_remaining);
			if (*iov_cnt != 0 &&
			    to_offset == iov_tail(*iov)) {
				/*
				 * Will append to the current iovec, because
				 * the current block page is next to the
				 * previous one.
				 */
				(*iov)->iov_len += copy_bytes;
			} else {
				/*
				 * Will allocate a new iovec because this is
				 * the first time here or the current block
				 * page is not next to the previous one.
				 */
				new_iov(iov, iov_cnt);
				(*iov)->iov_base = (void __user *)to_offset;
				(*iov)->iov_len = copy_bytes;
			}

			if (copy_data) {
				offset = DATA_BLOCK_SIZE - block_remaining;
				memcpy(to + offset,
				       from + sg->length - sg_remaining,
				       copy_bytes);
			}

			sg_remaining -= copy_bytes;
			block_remaining -= copy_bytes;
		}
		kunmap_atomic(from - sg->offset);
	}

	if (to) {
		flush_dcache_page(page);
		kunmap_atomic(to);
	}
}

static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
			     bool bidi, uint32_t read_len)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	int i, dbi;
	int block_remaining = 0;
	void *from = NULL, *to;
	size_t copy_bytes, offset;
	struct scatterlist *sg, *data_sg;
	struct page *page;
	unsigned int data_nents;
	uint32_t count = 0;

	if (!bidi) {
		data_sg = se_cmd->t_data_sg;
		data_nents = se_cmd->t_data_nents;
	} else {

		/*
		 * For bidi case, the first count blocks are for Data-Out
		 * buffer blocks, and before gathering the Data-In buffer
		 * the Data-Out buffer blocks should be discarded.
		 */
		count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);

		data_sg = se_cmd->t_bidi_data_sg;
		data_nents = se_cmd->t_bidi_data_nents;
	}

	tcmu_cmd_set_dbi_cur(cmd, count);

	for_each_sg(data_sg, sg, data_nents, i) {
		int sg_remaining = sg->length;

		to = kmap_atomic(sg_page(sg)) + sg->offset;
		while (sg_remaining > 0 && read_len > 0) {
			if (block_remaining == 0) {
				if (from)
					kunmap_atomic(from);

				block_remaining = DATA_BLOCK_SIZE;
				dbi = tcmu_cmd_get_dbi(cmd);
				page = tcmu_get_block_page(udev, dbi);
				from = kmap_atomic(page);
				flush_dcache_page(page);
			}
			copy_bytes = min_t(size_t, sg_remaining,
					block_remaining);
			if (read_len < copy_bytes)
				copy_bytes = read_len;
			offset = DATA_BLOCK_SIZE - block_remaining;
			memcpy(to + sg->length - sg_remaining, from + offset,
			       copy_bytes);

			sg_remaining -= copy_bytes;
			block_remaining -= copy_bytes;
			read_len -= copy_bytes;
		}
		kunmap_atomic(to - sg->offset);
		if (read_len == 0)
			break;
	}
	if (from)
		kunmap_atomic(from);
}

static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
{
	return thresh - bitmap_weight(bitmap, thresh);
}

/*
 * We can't queue a command until we have space available on the cmd ring *and*
 * space available on the data area.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
				size_t cmd_size, size_t data_needed)
{
	struct tcmu_mailbox *mb = udev->mb_addr;
	uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
				/ DATA_BLOCK_SIZE;
	size_t space, cmd_needed;
	u32 cmd_head;

	tcmu_flush_dcache_range(mb, sizeof(*mb));

	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/*
	 * If cmd end-of-ring space is too small then we need space for a PAD
	 * entry plus the original cmd - cmds are internally contiguous.
	 */
	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
	if (space < cmd_needed) {
		pr_debug("no cmd space: %u %u %u\n", cmd_head,
			 udev->cmdr_last_cleaned, udev->cmdr_size);
		return false;
	}

	/* try to check and get the data blocks as needed */
	space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
	if ((space * DATA_BLOCK_SIZE) < data_needed) {
		unsigned long blocks_left =
				(udev->max_blocks - udev->dbi_thresh) + space;

		if (blocks_left < blocks_needed) {
			pr_debug("no data space: only %lu available, but ask for %zu\n",
				 blocks_left * DATA_BLOCK_SIZE,
				 data_needed);
			return false;
		}

		udev->dbi_thresh += blocks_needed;
		if (udev->dbi_thresh > udev->max_blocks)
			udev->dbi_thresh = udev->max_blocks;
	}

	return tcmu_get_empty_blocks(udev, cmd);
}

static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
{
	return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
		   sizeof(struct tcmu_cmd_entry));
}

static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
					   size_t base_command_size)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t command_size;

	command_size = base_command_size +
		round_up(scsi_command_size(se_cmd->t_task_cdb),
			 TCMU_OP_ALIGN_SIZE);

	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));

	return command_size;
}

static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
				 struct timer_list *timer)
{
	if (!tmo)
		return;

	tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
	if (!timer_pending(timer))
		mod_timer(timer, tcmu_cmd->deadline);

	pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
		 tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
}

static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	unsigned int tmo;

	/*
	 * For backwards compat if qfull_time_out is not set use
	 * cmd_time_out and if that's not set use the default time out.
	 */
	if (!udev->qfull_time_out)
		return -ETIMEDOUT;
	else if (udev->qfull_time_out > 0)
		tmo = udev->qfull_time_out;
	else if (udev->cmd_time_out)
		tmo = udev->cmd_time_out;
	else
		tmo = TCMU_TIME_OUT;

	tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);

	list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
	pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
		 tcmu_cmd, udev->name);
	return 0;
}

/**
 * queue_cmd_ring - queue cmd to ring or internally
 * @tcmu_cmd: cmd to queue
 * @scsi_err: TCM error code if failure (-1) returned.
 *
 * Returns:
 * -1 we cannot queue internally or to the ring.
 *  0 success
 *  1 internally queued to wait for ring memory to free.
 */
static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t base_command_size, command_size;
	struct tcmu_mailbox *mb;
	struct tcmu_cmd_entry *entry;
	struct iovec *iov;
	int iov_cnt, cmd_id;
	uint32_t cmd_head;
	uint64_t cdb_off;
	bool copy_to_data_area;
	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);

	*scsi_err = TCM_NO_SENSE;

	if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) {
		*scsi_err = TCM_LUN_BUSY;
		return -1;
	}

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		*scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -1;
	}

	/*
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 *
	 * We prepare as many iovs as possible for potential uses here,
	 * because it's expensive to tell how many regions are freed in
	 * the bitmap & global data pool, as the size calculated here
	 * will only be used to do the checks.
	 *
	 * The size will be recalculated later as actually needed to save
	 * cmd area memory.
	 */
	base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

	if (!list_empty(&udev->qfull_queue))
		goto queue;

	mb = udev->mb_addr;
	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
	if ((command_size > (udev->cmdr_size / 2)) ||
	    data_length > udev->data_size) {
		pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
			"cmd ring/data area\n", command_size, data_length,
			udev->cmdr_size, udev->data_size);
		*scsi_err = TCM_INVALID_CDB_FIELD;
		return -1;
	}

	if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
		/*
		 * Don't leave commands partially setup because the unmap
		 * thread might need the blocks to make forward progress.
		 */
		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
		tcmu_cmd_reset_dbi_cur(tcmu_cmd);
		goto queue;
	}

	/* Insert a PAD if end-of-ring space is too small */
	if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

		entry = (void *) mb + CMDR_OFF + cmd_head;
		tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD);
		tcmu_hdr_set_len(&entry->hdr.len_op, pad_size);
		entry->hdr.cmd_id = 0; /* not used for PAD */
		entry->hdr.kflags = 0;
		entry->hdr.uflags = 0;
		tcmu_flush_dcache_range(entry, sizeof(entry->hdr));

		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
		tcmu_flush_dcache_range(mb, sizeof(*mb));

		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
		WARN_ON(cmd_head != 0);
	}

	entry = (void *) mb + CMDR_OFF + cmd_head;
	memset(entry, 0, command_size);
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);

	/* Handle allocating space from the data area */
	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	iov = &entry->req.iov[0];
	iov_cnt = 0;
	copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
		|| se_cmd->se_cmd_flags & SCF_BIDI);
	scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
			  se_cmd->t_data_nents, &iov, &iov_cnt,
			  copy_to_data_area);
	entry->req.iov_cnt = iov_cnt;

	/* Handle BIDI commands */
	iov_cnt = 0;
	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		iov++;
		scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg,
				  se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
				  false);
	}
	entry->req.iov_bidi_cnt = iov_cnt;

	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
	if (cmd_id < 0) {
		pr_err("tcmu: Could not allocate cmd id.\n");

		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
		*scsi_err = TCM_OUT_OF_RESOURCES;
		return -1;
	}
	tcmu_cmd->cmd_id = cmd_id;

	pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
		 tcmu_cmd, udev->name);

	tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);

	entry->hdr.cmd_id = tcmu_cmd->cmd_id;

	/*
	 * Recalculate the command's base size and size according
	 * to the actual needs
	 */
	base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
						       entry->req.iov_bidi_cnt);
	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);

	/* All offsets relative to mb_addr, not start of entry! */
	cdb_off = CMDR_OFF + cmd_head + base_command_size;
	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
	entry->req.cdb_off = cdb_off;
	tcmu_flush_dcache_range(entry, command_size);

	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);

	/* TODO: only if FLUSH and FUA?
	 */
	uio_event_notify(&udev->uio_info);

	return 0;

queue:
	if (add_to_qfull_queue(tcmu_cmd)) {
		*scsi_err = TCM_OUT_OF_RESOURCES;
		return -1;
	}

	return 1;
}

static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	sense_reason_t scsi_ret = TCM_CHECK_CONDITION_ABORT_CMD;
	int ret = -1;

	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
	if (!tcmu_cmd)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	mutex_lock(&udev->cmdr_lock);
	se_cmd->priv = tcmu_cmd;
	if (!(se_cmd->transport_state & CMD_T_ABORTED))
		ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
	if (ret < 0)
		tcmu_free_cmd(tcmu_cmd);
	mutex_unlock(&udev->cmdr_lock);
	return scsi_ret;
}

static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	struct tcmu_dev *udev = cmd->tcmu_dev;
	bool read_len_valid = false;
	uint32_t read_len;

	/*
	 * cmd has been completed already from timeout, just reclaim
	 * data area space and free cmd
	 */
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		WARN_ON_ONCE(se_cmd);
		goto out;
	}

	list_del_init(&cmd->queue_entry);

	tcmu_cmd_reset_dbi_cur(cmd);

	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
			cmd->se_cmd);
		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
		goto done;
	}

	read_len = se_cmd->data_length;
	if (se_cmd->data_direction == DMA_FROM_DEVICE &&
	    (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
		read_len_valid = true;
		if (entry->rsp.read_len < read_len)
			read_len = entry->rsp.read_len;
	}

	if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
		transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
		if (!read_len_valid)
			goto done;
		else
			se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
	}
	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		/* Get Data-In buffer before clean up */
		gather_data_area(udev, cmd, true, read_len);
	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		gather_data_area(udev, cmd, false, read_len);
	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
		/* TODO: */
	} else if (se_cmd->data_direction != DMA_NONE) {
		pr_warn("TCMU: data direction was %d!\n",
			se_cmd->data_direction);
	}

done:
	if (read_len_valid) {
		pr_debug("read_len = %d\n", read_len);
		target_complete_cmd_with_length(cmd->se_cmd,
					entry->rsp.scsi_status, read_len);
	} else
		target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);

out:
	tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
	tcmu_free_cmd(cmd);
}

static void tcmu_set_next_deadline(struct list_head *queue,
				   struct timer_list *timer)
{
	struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
	unsigned long deadline = 0;

	list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
		if (!time_after(jiffies, tcmu_cmd->deadline)) {
			deadline = tcmu_cmd->deadline;
			break;
		}
	}

	if (deadline)
		mod_timer(timer, deadline);
	else
		del_timer(timer);
}

static unsigned int
tcmu_handle_completions(struct tcmu_dev *udev)
{
	struct tcmu_mailbox *mb;
	struct tcmu_cmd *cmd;
	int handled = 0;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		pr_err("ring broken, not handling completions\n");
		return 0;
	}

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {

		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;

		/*
		 * Flush max. up to end of cmd ring since current entry might
		 * be a padding that is shorter than sizeof(*entry)
		 */
		size_t ring_left = head_to_end(udev->cmdr_last_cleaned,
					       udev->cmdr_size);
		tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
					ring_left : sizeof(*entry));

		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
			UPDATE_HEAD(udev->cmdr_last_cleaned,
				    tcmu_hdr_get_len(entry->hdr.len_op),
				    udev->cmdr_size);
			continue;
		}
		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

		cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
		if (!cmd) {
			pr_err("cmd_id %u not found, ring is broken\n",
			       entry->hdr.cmd_id);
			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
			break;
		}

		tcmu_handle_completion(cmd, entry);

		UPDATE_HEAD(udev->cmdr_last_cleaned,
			    tcmu_hdr_get_len(entry->hdr.len_op),
			    udev->cmdr_size);

		handled++;
	}

	if (mb->cmd_tail == mb->cmd_head) {
		/* no more pending commands */
		del_timer(&udev->cmd_timer);

		if (list_empty(&udev->qfull_queue)) {
			/*
			 * no more pending or waiting commands so try to
			 * reclaim blocks if needed.
			 */
			if (atomic_read(&global_db_count) >
			    tcmu_global_max_blocks)
				schedule_delayed_work(&tcmu_unmap_work, 0);
		}
	} else if (udev->cmd_time_out) {
		tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
	}

	return handled;
}

static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
{
	struct se_cmd *se_cmd;

	if (!time_after(jiffies, cmd->deadline))
		return;

	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
	list_del_init(&cmd->queue_entry);
	se_cmd = cmd->se_cmd;
	se_cmd->priv = NULL;
	cmd->se_cmd = NULL;

	pr_debug("Timing out inflight cmd %u on dev %s.\n",
		 cmd->cmd_id, cmd->tcmu_dev->name);

	target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
}

static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
{
	struct se_cmd *se_cmd;

	if (!time_after(jiffies, cmd->deadline))
		return;

	pr_debug("Timing out queued cmd %p on dev %s.\n",
		 cmd, cmd->tcmu_dev->name);

	list_del_init(&cmd->queue_entry);
	se_cmd = cmd->se_cmd;
	tcmu_free_cmd(cmd);

	target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
}

static void tcmu_device_timedout(struct tcmu_dev *udev)
{
	spin_lock(&timed_out_udevs_lock);
	if (list_empty(&udev->timedout_entry))
		list_add_tail(&udev->timedout_entry, &timed_out_udevs);
	spin_unlock(&timed_out_udevs_lock);

	schedule_delayed_work(&tcmu_unmap_work, 0);
}

static void tcmu_cmd_timedout(struct timer_list *t)
{
	struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);

	pr_debug("%s cmd timeout has expired\n", udev->name);
	tcmu_device_timedout(udev);
}

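/*
 * Note: like tcmu_cmd_timedout() above, the qfull timer callback below
 * only marks the device as timed out (via tcmu_device_timedout()) and
 * kicks the delayed unmap work; the expired commands themselves are
 * reaped later, outside of timer context, via
 * tcmu_check_expired_ring_cmd()/tcmu_check_expired_queue_cmd().
 */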
static void tcmu_qfull_timedout(struct timer_list *t)
{
	struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);

	pr_debug("%s qfull timeout has expired\n", udev->name);
	tcmu_device_timedout(udev);
}

static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct tcmu_hba *tcmu_hba;

	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
	if (!tcmu_hba)
		return -ENOMEM;

	tcmu_hba->host_id = host_id;
	hba->hba_ptr = tcmu_hba;

	return 0;
}

static void tcmu_detach_hba(struct se_hba *hba)
{
	kfree(hba->hba_ptr);
	hba->hba_ptr = NULL;
}

static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
	struct tcmu_dev *udev;

	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
	if (!udev)
		return NULL;
	kref_init(&udev->kref);

	udev->name = kstrdup(name, GFP_KERNEL);
	if (!udev->name) {
		kfree(udev);
		return NULL;
	}

	udev->hba = hba;
	udev->cmd_time_out = TCMU_TIME_OUT;
	udev->qfull_time_out = -1;

	udev->max_blocks = DATA_BLOCK_BITS_DEF;
	mutex_init(&udev->cmdr_lock);

	INIT_LIST_HEAD(&udev->node);
	INIT_LIST_HEAD(&udev->timedout_entry);
	INIT_LIST_HEAD(&udev->qfull_queue);
	INIT_LIST_HEAD(&udev->inflight_queue);
	idr_init(&udev->commands);

	timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
	timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);

	INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);

	return &udev->se_dev;
}

static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
	struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
	LIST_HEAD(cmds);
	sense_reason_t scsi_ret;
	int ret;

	if (list_empty(&udev->qfull_queue))
		return;

	pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);

	list_splice_init(&udev->qfull_queue, &cmds);

	list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
		list_del_init(&tcmu_cmd->queue_entry);

		pr_debug("removing cmd %p on dev %s from queue\n",
			 tcmu_cmd, udev->name);

		if (fail) {
			/*
			 * We were not able to even start the command, so
			 * fail with busy to allow a retry in case runner
			 * was only temporarily down. If the device is being
			 * removed then LIO core will do the right thing and
			 * fail the retry.
			 */
			target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
			tcmu_free_cmd(tcmu_cmd);
			continue;
		}

		ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
		if (ret < 0) {
			pr_debug("cmd %p on dev %s failed with %u\n",
				 tcmu_cmd, udev->name, scsi_ret);
			/*
			 * Ignore scsi_ret for now. target_complete_cmd
			 * drops it.
1439 */ 1440 target_complete_cmd(tcmu_cmd->se_cmd, 1441 SAM_STAT_CHECK_CONDITION); 1442 tcmu_free_cmd(tcmu_cmd); 1443 } else if (ret > 0) { 1444 pr_debug("ran out of space during cmdr queue run\n"); 1445 /* 1446 * cmd was requeued, so just put all cmds back in 1447 * the queue 1448 */ 1449 list_splice_tail(&cmds, &udev->qfull_queue); 1450 break; 1451 } 1452 } 1453 1454 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); 1455 } 1456 1457 static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) 1458 { 1459 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1460 1461 mutex_lock(&udev->cmdr_lock); 1462 tcmu_handle_completions(udev); 1463 run_qfull_queue(udev, false); 1464 mutex_unlock(&udev->cmdr_lock); 1465 1466 return 0; 1467 } 1468 1469 /* 1470 * mmap code from uio.c. Copied here because we want to hook mmap() 1471 * and this stuff must come along. 1472 */ 1473 static int tcmu_find_mem_index(struct vm_area_struct *vma) 1474 { 1475 struct tcmu_dev *udev = vma->vm_private_data; 1476 struct uio_info *info = &udev->uio_info; 1477 1478 if (vma->vm_pgoff < MAX_UIO_MAPS) { 1479 if (info->mem[vma->vm_pgoff].size == 0) 1480 return -1; 1481 return (int)vma->vm_pgoff; 1482 } 1483 return -1; 1484 } 1485 1486 static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi) 1487 { 1488 struct page *page; 1489 1490 mutex_lock(&udev->cmdr_lock); 1491 page = tcmu_get_block_page(udev, dbi); 1492 if (likely(page)) { 1493 mutex_unlock(&udev->cmdr_lock); 1494 return page; 1495 } 1496 1497 /* 1498 * Userspace messed up and passed in a address not in the 1499 * data iov passed to it. 1500 */ 1501 pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n", 1502 dbi, udev->name); 1503 page = NULL; 1504 mutex_unlock(&udev->cmdr_lock); 1505 1506 return page; 1507 } 1508 1509 static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf) 1510 { 1511 struct tcmu_dev *udev = vmf->vma->vm_private_data; 1512 struct uio_info *info = &udev->uio_info; 1513 struct page *page; 1514 unsigned long offset; 1515 void *addr; 1516 1517 int mi = tcmu_find_mem_index(vmf->vma); 1518 if (mi < 0) 1519 return VM_FAULT_SIGBUS; 1520 1521 /* 1522 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE 1523 * to use mem[N]. 
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	if (offset < udev->data_off) {
		/* For the vmalloc()ed cmd area pages */
		addr = (void *)(unsigned long)info->mem[mi].addr + offset;
		page = vmalloc_to_page(addr);
	} else {
		uint32_t dbi;

		/* For the dynamically growing data area pages */
		dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
		page = tcmu_try_get_block_page(udev, dbi);
		if (!page)
			return VM_FAULT_SIGBUS;
	}

	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct tcmu_vm_ops = {
	.fault = tcmu_vma_fault,
};

static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &tcmu_vm_ops;

	vma->vm_private_data = udev;

	/* Ensure the mmap is exactly the right size */
	if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT))
		return -EINVAL;

	return 0;
}

static int tcmu_open(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	/* O_EXCL not supported for char devs, so fake it? */
	if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
		return -EBUSY;

	udev->inode = inode;
	kref_get(&udev->kref);

	pr_debug("open\n");

	return 0;
}

static void tcmu_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct tcmu_dev *udev = TCMU_DEV(dev);

	kfree(udev->uio_info.name);
	kfree(udev->name);
	kfree(udev);
}

static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		kmem_cache_free(tcmu_cmd_cache, cmd);
		return 0;
	}
	return -EINVAL;
}

static void tcmu_blocks_release(struct radix_tree_root *blocks,
				int start, int end)
{
	int i;
	struct page *page;

	for (i = start; i < end; i++) {
		page = radix_tree_delete(blocks, i);
		if (page) {
			__free_page(page);
			atomic_dec(&global_db_count);
		}
	}
}

static void tcmu_dev_kref_release(struct kref *kref)
{
	struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
	struct se_device *dev = &udev->se_dev;
	struct tcmu_cmd *cmd;
	bool all_expired = true;
	int i;

	vfree(udev->mb_addr);
	udev->mb_addr = NULL;

	spin_lock_bh(&timed_out_udevs_lock);
	if (!list_empty(&udev->timedout_entry))
		list_del(&udev->timedout_entry);
	spin_unlock_bh(&timed_out_udevs_lock);

	/* Upper layer should drain all requests before calling this */
	mutex_lock(&udev->cmdr_lock);
	idr_for_each_entry(&udev->commands, cmd, i) {
		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
			all_expired = false;
	}
	if (!list_empty(&udev->qfull_queue))
		all_expired = false;
	idr_destroy(&udev->commands);
	WARN_ON(!all_expired);

	tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
	bitmap_free(udev->data_bitmap);
	mutex_unlock(&udev->cmdr_lock);

	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}

static int tcmu_release(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev
		= container_of(info, struct tcmu_dev, uio_info);

	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);

	pr_debug("close\n");
	/* release ref from open */
	kref_put(&udev->kref, tcmu_dev_kref_release);
	return 0;
}

static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
{
	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;

	if (!tcmu_kern_cmd_reply_supported)
		return 0;

	if (udev->nl_reply_supported <= 0)
		return 0;

	mutex_lock(&tcmu_nl_cmd_mutex);

	if (tcmu_netlink_blocked) {
		mutex_unlock(&tcmu_nl_cmd_mutex);
		pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
			udev->name);
		return -EAGAIN;
	}

	if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
		mutex_unlock(&tcmu_nl_cmd_mutex);
		pr_warn("netlink cmd %d already executing on %s\n",
			nl_cmd->cmd, udev->name);
		return -EBUSY;
	}

	memset(nl_cmd, 0, sizeof(*nl_cmd));
	nl_cmd->cmd = cmd;
	nl_cmd->udev = udev;
	init_completion(&nl_cmd->complete);
	INIT_LIST_HEAD(&nl_cmd->nl_list);

	list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);

	mutex_unlock(&tcmu_nl_cmd_mutex);
	return 0;
}

static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev)
{
	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;

	if (!tcmu_kern_cmd_reply_supported)
		return;

	if (udev->nl_reply_supported <= 0)
		return;

	mutex_lock(&tcmu_nl_cmd_mutex);

	list_del(&nl_cmd->nl_list);
	memset(nl_cmd, 0, sizeof(*nl_cmd));

	mutex_unlock(&tcmu_nl_cmd_mutex);
}

static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
{
	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
	int ret;

	if (!tcmu_kern_cmd_reply_supported)
		return 0;

	if (udev->nl_reply_supported <= 0)
		return 0;

	pr_debug("sleeping for nl reply\n");
	wait_for_completion(&nl_cmd->complete);

	mutex_lock(&tcmu_nl_cmd_mutex);
	nl_cmd->cmd = TCMU_CMD_UNSPEC;
	ret = nl_cmd->status;
	mutex_unlock(&tcmu_nl_cmd_mutex);

	return ret;
}

static int tcmu_netlink_event_init(struct tcmu_dev *udev,
				   enum tcmu_genl_cmd cmd,
				   struct sk_buff **buf, void **hdr)
{
	struct sk_buff *skb;
	void *msg_header;
	int ret = -ENOMEM;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return ret;

	msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
	if (!msg_header)
		goto free_skb;

	ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
	if (ret < 0)
		goto free_skb;

	*buf = skb;
	*hdr = msg_header;
	return ret;

free_skb:
	nlmsg_free(skb);
	return ret;
}

static int tcmu_netlink_event_send(struct tcmu_dev *udev,
				   enum tcmu_genl_cmd cmd,
				   struct sk_buff *skb, void *msg_header)
{
	int ret;

	genlmsg_end(skb, msg_header);

	ret = tcmu_init_genl_cmd_reply(udev, cmd);
	if (ret) {
		nlmsg_free(skb);
		return ret;
	}

	ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
				      TCMU_MCGRP_CONFIG, GFP_KERNEL);

	/* Wait during an add as the listener may not be up yet */
	if (ret == 0 ||
	   (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
		return tcmu_wait_genl_cmd_reply(udev);
	else
		tcmu_destroy_genl_cmd_reply(udev);

	return ret;
}

static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
				      &msg_header);
	if (ret < 0)
		return ret;
	return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
				       msg_header);
}

static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
				       skb, msg_header);
}

static int tcmu_update_uio_info(struct tcmu_dev *udev)
{
	struct tcmu_hba *hba = udev->hba->hba_ptr;
	struct uio_info *info;
	char *str;

	info = &udev->uio_info;

	if (udev->dev_config[0])
		str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
				udev->name, udev->dev_config);
	else
		str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
				udev->name);
	if (!str)
		return -ENOMEM;

	/* If the old string exists, free it */
	kfree(info->name);
	info->name = str;

	return 0;
}

static int tcmu_configure_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	struct uio_info *info;
	struct tcmu_mailbox *mb;
	int ret = 0;

	ret = tcmu_update_uio_info(udev);
	if (ret)
		return ret;

	info = &udev->uio_info;

	mutex_lock(&udev->cmdr_lock);
	udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
	mutex_unlock(&udev->cmdr_lock);
	if (!udev->data_bitmap) {
		ret = -ENOMEM;
		goto err_bitmap_alloc;
	}

	udev->mb_addr = vzalloc(CMDR_SIZE);
	if (!udev->mb_addr) {
		ret = -ENOMEM;
		goto err_vzalloc;
	}

	/* mailbox fits in first part of CMDR space */
	udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
	udev->data_off = CMDR_SIZE;
	udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE;
	udev->dbi_thresh = 0; /* Default in Idle state */

	/* Initialise the mailbox of the ring buffer */
	mb = udev->mb_addr;
	mb->version = TCMU_MAILBOX_VERSION;
	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN;
	mb->cmdr_off = CMDR_OFF;
	mb->cmdr_size = udev->cmdr_size;

	WARN_ON(!PAGE_ALIGNED(udev->data_off));
	WARN_ON(udev->data_size % PAGE_SIZE);
	WARN_ON(udev->data_size % DATA_BLOCK_SIZE);

	info->version = __stringify(TCMU_MAILBOX_VERSION);

	info->mem[0].name = "tcm-user command & data buffer";
	info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
	info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE;
	info->mem[0].memtype = UIO_MEM_NONE;

	info->irqcontrol = tcmu_irqcontrol;
	info->irq = UIO_IRQ_CUSTOM;

	info->mmap = tcmu_mmap;
	info->open = tcmu_open;
	info->release = tcmu_release;

	ret = uio_register_device(tcmu_root_device, info);
	if (ret)
		goto err_register;

	/* User can set hw_block_size before enabling the device */
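	/*
	 * (hw_block_size is one of the dev_params tokens handled by
	 * tcmu_set_configfs_dev_params() below, so a handler can pass
	 * e.g. "hw_block_size=4096" before enabling the device.)
	 */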
	if (dev->dev_attrib.hw_block_size == 0)
		dev->dev_attrib.hw_block_size = 512;
	/* Other attributes can be configured in userspace */
	if (!dev->dev_attrib.hw_max_sectors)
		dev->dev_attrib.hw_max_sectors = 128;
	if (!dev->dev_attrib.emulate_write_cache)
		dev->dev_attrib.emulate_write_cache = 0;
	dev->dev_attrib.hw_queue_depth = 128;

	/* If user didn't explicitly disable netlink reply support, use
	 * module scope setting.
	 */
	if (udev->nl_reply_supported >= 0)
		udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;

	/*
	 * Get a ref in case userspace does a close on the uio device before
	 * LIO has initiated tcmu_free_device.
	 */
	kref_get(&udev->kref);

	ret = tcmu_send_dev_add_event(udev);
	if (ret)
		goto err_netlink;

	mutex_lock(&root_udev_mutex);
	list_add(&udev->node, &root_udev);
	mutex_unlock(&root_udev_mutex);

	return 0;

err_netlink:
	kref_put(&udev->kref, tcmu_dev_kref_release);
	uio_unregister_device(&udev->uio_info);
err_register:
	vfree(udev->mb_addr);
	udev->mb_addr = NULL;
err_vzalloc:
	bitmap_free(udev->data_bitmap);
	udev->data_bitmap = NULL;
err_bitmap_alloc:
	kfree(info->name);
	info->name = NULL;

	return ret;
}

static void tcmu_free_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	/* release ref from init */
	kref_put(&udev->kref, tcmu_dev_kref_release);
}

static void tcmu_destroy_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	del_timer_sync(&udev->cmd_timer);
	del_timer_sync(&udev->qfull_timer);

	mutex_lock(&root_udev_mutex);
	list_del(&udev->node);
	mutex_unlock(&root_udev_mutex);

	tcmu_send_dev_remove_event(udev);

	uio_unregister_device(&udev->uio_info);

	/* release ref from configure */
	kref_put(&udev->kref, tcmu_dev_kref_release);
}

static void tcmu_unblock_dev(struct tcmu_dev *udev)
{
	mutex_lock(&udev->cmdr_lock);
	clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags);
	mutex_unlock(&udev->cmdr_lock);
}

static void tcmu_block_dev(struct tcmu_dev *udev)
{
	mutex_lock(&udev->cmdr_lock);

	if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
		goto unlock;

	/* complete IO that has executed successfully */
	tcmu_handle_completions(udev);
	/* fail IO waiting to be queued */
	run_qfull_queue(udev, true);

unlock:
	mutex_unlock(&udev->cmdr_lock);
}

static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
{
	struct tcmu_mailbox *mb;
	struct tcmu_cmd *cmd;
	int i;

	mutex_lock(&udev->cmdr_lock);

	idr_for_each_entry(&udev->commands, cmd, i) {
		pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
			 cmd->cmd_id, udev->name,
			 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));

		idr_remove(&udev->commands, i);
		if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
			WARN_ON(!cmd->se_cmd);
			list_del_init(&cmd->queue_entry);
			if (err_level == 1) {
				/*
				 * Userspace was not able to start the
				 * command or it is retryable.
static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
{
	struct tcmu_mailbox *mb;
	struct tcmu_cmd *cmd;
	int i;

	mutex_lock(&udev->cmdr_lock);

	idr_for_each_entry(&udev->commands, cmd, i) {
		pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
			 cmd->cmd_id, udev->name,
			 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));

		idr_remove(&udev->commands, i);
		if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
			WARN_ON(!cmd->se_cmd);
			list_del_init(&cmd->queue_entry);
			if (err_level == 1) {
				/*
				 * Userspace was not able to start the
				 * command or it is retryable.
				 */
				target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY);
			} else {
				/* hard failure */
				target_complete_cmd(cmd->se_cmd,
						    SAM_STAT_CHECK_CONDITION);
			}
		}
		tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
		tcmu_free_cmd(cmd);
	}

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));
	pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned,
		 mb->cmd_tail, mb->cmd_head);

	udev->cmdr_last_cleaned = 0;
	mb->cmd_tail = 0;
	mb->cmd_head = 0;
	tcmu_flush_dcache_range(mb, sizeof(*mb));
	clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);

	del_timer(&udev->cmd_timer);

	run_qfull_queue(udev, false);

	mutex_unlock(&udev->cmdr_lock);
}

enum {
	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
	Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err,
};

static match_table_t tokens = {
	{Opt_dev_config, "dev_config=%s"},
	{Opt_dev_size, "dev_size=%s"},
	{Opt_hw_block_size, "hw_block_size=%d"},
	{Opt_hw_max_sectors, "hw_max_sectors=%d"},
	{Opt_nl_reply_supported, "nl_reply_supported=%d"},
	{Opt_max_data_area_mb, "max_data_area_mb=%d"},
	{Opt_err, NULL}
};

static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
{
	int val, ret;

	ret = match_int(arg, &val);
	if (ret < 0) {
		pr_err("match_int() failed for dev attrib. Error %d.\n",
		       ret);
		return ret;
	}

	if (val <= 0) {
		pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
		       val);
		return -EINVAL;
	}
	*dev_attrib = val;
	return 0;
}

static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
{
	int val, ret;

	ret = match_int(arg, &val);
	if (ret < 0) {
		pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
		       ret);
		return ret;
	}

	if (val <= 0) {
		pr_err("Invalid max_data_area %d.\n", val);
		return -EINVAL;
	}

	mutex_lock(&udev->cmdr_lock);
	if (udev->data_bitmap) {
		pr_err("Cannot set max_data_area_mb after it has been enabled.\n");
		ret = -EINVAL;
		goto unlock;
	}

	udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
	if (udev->max_blocks > tcmu_global_max_blocks) {
		pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
		       val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
		udev->max_blocks = tcmu_global_max_blocks;
	}

unlock:
	mutex_unlock(&udev->cmdr_lock);
	return ret;
}

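/*
 * The configfs control string parsed below is a comma- or
 * newline-separated list of the tokens in the match table above.
 * Illustrative example only (the values are made up):
 *
 *	dev_config=file/backing.img,dev_size=1073741824,max_data_area_mb=64
 */
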
static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
					    const char *page, ssize_t count)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_dev_config:
			if (match_strlcpy(udev->dev_config, &args[0],
					  TCMU_CONFIG_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
			break;
		case Opt_dev_size:
			ret = match_u64(&args[0], &udev->dev_size);
			if (ret < 0)
				pr_err("match_u64() failed for dev_size=. Error %d.\n",
				       ret);
			break;
		case Opt_hw_block_size:
			ret = tcmu_set_dev_attrib(&args[0],
					&(dev->dev_attrib.hw_block_size));
			break;
		case Opt_hw_max_sectors:
			ret = tcmu_set_dev_attrib(&args[0],
					&(dev->dev_attrib.hw_max_sectors));
			break;
		case Opt_nl_reply_supported:
			ret = match_int(&args[0], &udev->nl_reply_supported);
			if (ret < 0)
				pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
				       ret);
			break;
		case Opt_max_data_area_mb:
			ret = tcmu_set_max_blocks_param(udev, &args[0]);
			break;
		default:
			break;
		}

		if (ret)
			break;
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "Config: %s ",
		     udev->dev_config[0] ? udev->dev_config : "NULL");
	bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
	bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
		      TCMU_BLOCKS_TO_MBS(udev->max_blocks));

	return bl;
}

static sector_t tcmu_get_blocks(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	return div_u64(udev->dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}

static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
	return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
}

static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
}

static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
				       size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = container_of(da->da_dev,
					struct tcmu_dev, se_dev);
	u32 val;
	int ret;

	if (da->da_dev->export_count) {
		pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
		return -EINVAL;
	}

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	udev->cmd_time_out = val * MSEC_PER_SEC;
	return count;
}
CONFIGFS_ATTR(tcmu_, cmd_time_out);

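/*
 * Note on units: cmd_time_out and qfull_time_out are exposed through
 * configfs in seconds but stored internally in milliseconds (see the
 * MSEC_PER_SEC scaling above and below).  qfull_time_out additionally
 * accepts -1, which is stored unscaled and shown as-is.
 */
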
static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
			udev->qfull_time_out :
			udev->qfull_time_out / MSEC_PER_SEC);
}

static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
					 const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	s32 val;
	int ret;

	ret = kstrtos32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val >= 0) {
		udev->qfull_time_out = val * MSEC_PER_SEC;
	} else if (val == -1) {
		udev->qfull_time_out = val;
	} else {
		printk(KERN_ERR "Invalid qfull timeout value %d\n", val);
		return -EINVAL;
	}
	return count;
}
CONFIGFS_ATTR(tcmu_, qfull_time_out);

static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%u\n",
			TCMU_BLOCKS_TO_MBS(udev->max_blocks));
}
CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);

static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
}

static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
				      const char *reconfig_data)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
	if (ret < 0) {
		nlmsg_free(skb);
		return ret;
	}
	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
				       skb, msg_header);
}

static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
				     size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	int ret, len;

	len = strlen(page);
	if (!len || len > TCMU_CONFIG_LEN - 1)
		return -EINVAL;

	/* Check if device has been configured before */
	if (target_dev_configured(&udev->se_dev)) {
		ret = tcmu_send_dev_config_event(udev, page);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
		strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);

		ret = tcmu_update_uio_info(udev);
		if (ret)
			return ret;
		return count;
	}
	strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);

	return count;
}
CONFIGFS_ATTR(tcmu_, dev_config);

static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
}

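/*
 * The reconfigure helpers (tcmu_send_dev_config_event() above,
 * tcmu_send_dev_size_event() and tcmu_send_emulate_write_cache() below)
 * each emit a TCMU_CMD_RECONFIG_DEVICE netlink event carrying only the
 * attribute that changed (TCMU_ATTR_DEV_CFG, TCMU_ATTR_DEV_SIZE or
 * TCMU_ATTR_WRITECACHE), so a listening handler can apply just the new
 * value.
 */
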
static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
				size, TCMU_ATTR_PAD);
	if (ret < 0) {
		nlmsg_free(skb);
		return ret;
	}
	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
				       skb, msg_header);
}

static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
				   size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u64 val;
	int ret;

	ret = kstrtou64(page, 0, &val);
	if (ret < 0)
		return ret;

	/* Check if device has been configured before */
	if (target_dev_configured(&udev->se_dev)) {
		ret = tcmu_send_dev_size_event(udev, val);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
	}
	udev->dev_size = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, dev_size);

static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
					    char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
}

static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	s8 val;
	int ret;

	ret = kstrtos8(page, 0, &val);
	if (ret < 0)
		return ret;

	udev->nl_reply_supported = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, nl_reply_supported);

static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
					     char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);

	return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
}

static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
	if (ret < 0) {
		nlmsg_free(skb);
		return ret;
	}
	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
				       skb, msg_header);
}

static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
					      const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u8 val;
	int ret;

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	/* Check if device has been configured before */
	if (target_dev_configured(&udev->se_dev)) {
		ret = tcmu_send_emulate_write_cache(udev, val);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
	}

	da->emulate_write_cache = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, emulate_write_cache);

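/*
 * Device "action" attributes follow.  block_dev takes 0 (unblock) or 1
 * (block); reset_ring takes 1 to fail outstanding ring commands back as
 * retryable (SAM_STAT_BUSY) or 2 to fail them hard (CHECK CONDITION),
 * matching the err_level handling in tcmu_reset_ring() above.
 */
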
static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);

	if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
		return snprintf(page, PAGE_SIZE, "%s\n", "blocked");
	else
		return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
}

static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
				    size_t count)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	u8 val;
	int ret;

	if (!target_dev_configured(&udev->se_dev)) {
		pr_err("Device is not configured.\n");
		return -EINVAL;
	}

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val > 1) {
		pr_err("Invalid block value %d\n", val);
		return -EINVAL;
	}

	if (!val)
		tcmu_unblock_dev(udev);
	else
		tcmu_block_dev(udev);
	return count;
}
CONFIGFS_ATTR(tcmu_, block_dev);

static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
				     size_t count)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	u8 val;
	int ret;

	if (!target_dev_configured(&udev->se_dev)) {
		pr_err("Device is not configured.\n");
		return -EINVAL;
	}

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val != 1 && val != 2) {
		pr_err("Invalid reset ring value %d\n", val);
		return -EINVAL;
	}

	tcmu_reset_ring(udev, val);
	return count;
}
CONFIGFS_ATTR_WO(tcmu_, reset_ring);

static struct configfs_attribute *tcmu_attrib_attrs[] = {
	&tcmu_attr_cmd_time_out,
	&tcmu_attr_qfull_time_out,
	&tcmu_attr_max_data_area_mb,
	&tcmu_attr_dev_config,
	&tcmu_attr_dev_size,
	&tcmu_attr_emulate_write_cache,
	&tcmu_attr_nl_reply_supported,
	NULL,
};

static struct configfs_attribute **tcmu_attrs;

static struct configfs_attribute *tcmu_action_attrs[] = {
	&tcmu_attr_block_dev,
	&tcmu_attr_reset_ring,
	NULL,
};

static struct target_backend_ops tcmu_ops = {
	.name = "user",
	.owner = THIS_MODULE,
	.transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH,
	.transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR |
				      TRANSPORT_FLAG_PASSTHROUGH_ALUA,
	.attach_hba = tcmu_attach_hba,
	.detach_hba = tcmu_detach_hba,
	.alloc_device = tcmu_alloc_device,
	.configure_device = tcmu_configure_device,
	.destroy_device = tcmu_destroy_device,
	.free_device = tcmu_free_device,
	.parse_cdb = tcmu_parse_cdb,
	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
	.get_device_type = sbc_get_device_type,
	.get_blocks = tcmu_get_blocks,
	.tb_dev_action_attrs = tcmu_action_attrs,
};

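/*
 * Global data-block reclaim: the delayed tcmu_unmap_work runs once the
 * number of data blocks allocated across all devices exceeds
 * tcmu_global_max_blocks.  find_free_blocks() walks every configured
 * device, unmaps the data-area pages above the last block still in use
 * from any userspace mapping, releases those pages, and reschedules
 * itself while the global count remains over the limit.
 */
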
static void find_free_blocks(void)
{
	struct tcmu_dev *udev;
	loff_t off;
	u32 start, end, block, total_freed = 0;

	if (atomic_read(&global_db_count) <= tcmu_global_max_blocks)
		return;

	mutex_lock(&root_udev_mutex);
	list_for_each_entry(udev, &root_udev, node) {
		mutex_lock(&udev->cmdr_lock);

		if (!target_dev_configured(&udev->se_dev)) {
			mutex_unlock(&udev->cmdr_lock);
			continue;
		}

		/* Try to complete the finished commands first */
		tcmu_handle_completions(udev);

		/* Skip the udevs in idle */
		if (!udev->dbi_thresh) {
			mutex_unlock(&udev->cmdr_lock);
			continue;
		}

		end = udev->dbi_max + 1;
		block = find_last_bit(udev->data_bitmap, end);
		if (block == udev->dbi_max) {
			/*
			 * The last bit is dbi_max, so it is not possible to
			 * reclaim any blocks.
			 */
			mutex_unlock(&udev->cmdr_lock);
			continue;
		} else if (block == end) {
			/* The current udev will go to idle state */
			udev->dbi_thresh = start = 0;
			udev->dbi_max = 0;
		} else {
			udev->dbi_thresh = start = block + 1;
			udev->dbi_max = block;
		}

		/* Truncate the data area from off */
		off = udev->data_off + start * DATA_BLOCK_SIZE;
		unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);

		/* Release the block pages */
		tcmu_blocks_release(&udev->data_blocks, start, end);
		mutex_unlock(&udev->cmdr_lock);

		total_freed += end - start;
		pr_debug("Freed %u blocks (total %u) from %s.\n", end - start,
			 total_freed, udev->name);
	}
	mutex_unlock(&root_udev_mutex);

	if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
		schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
}

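/*
 * Timed-out devices are collected on the timed_out_udevs list by the
 * ring and qfull timers; the function below splices that list, expires
 * any overdue commands on each device, and re-arms the per-device
 * timers for the next deadline.
 */
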
static void check_timedout_devices(void)
{
	struct tcmu_dev *udev, *tmp_dev;
	struct tcmu_cmd *cmd, *tmp_cmd;
	LIST_HEAD(devs);

	spin_lock_bh(&timed_out_udevs_lock);
	list_splice_init(&timed_out_udevs, &devs);

	list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
		list_del_init(&udev->timedout_entry);
		spin_unlock_bh(&timed_out_udevs_lock);

		mutex_lock(&udev->cmdr_lock);

		/*
		 * If cmd_time_out is disabled but qfull is set, deadline
		 * will only reflect the qfull timeout. Ignore it.
		 */
		if (udev->cmd_time_out) {
			list_for_each_entry_safe(cmd, tmp_cmd,
						 &udev->inflight_queue,
						 queue_entry) {
				tcmu_check_expired_ring_cmd(cmd);
			}
			tcmu_set_next_deadline(&udev->inflight_queue,
					       &udev->cmd_timer);
		}
		list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
					 queue_entry) {
			tcmu_check_expired_queue_cmd(cmd);
		}
		tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);

		mutex_unlock(&udev->cmdr_lock);

		spin_lock_bh(&timed_out_udevs_lock);
	}

	spin_unlock_bh(&timed_out_udevs_lock);
}

static void tcmu_unmap_work_fn(struct work_struct *work)
{
	check_timedout_devices();
	find_free_blocks();
}

static int __init tcmu_module_init(void)
{
	int ret, i, k, len = 0;

	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);

	INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);

	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
					   sizeof(struct tcmu_cmd),
					   __alignof__(struct tcmu_cmd),
					   0, NULL);
	if (!tcmu_cmd_cache)
		return -ENOMEM;

	tcmu_root_device = root_device_register("tcm_user");
	if (IS_ERR(tcmu_root_device)) {
		ret = PTR_ERR(tcmu_root_device);
		goto out_free_cache;
	}

	ret = genl_register_family(&tcmu_genl_family);
	if (ret < 0)
		goto out_unreg_device;

	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
		len += sizeof(struct configfs_attribute *);
	for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++)
		len += sizeof(struct configfs_attribute *);
	for (i = 0; tcmu_attrib_attrs[i] != NULL; i++)
		len += sizeof(struct configfs_attribute *);
	len += sizeof(struct configfs_attribute *);

	tcmu_attrs = kzalloc(len, GFP_KERNEL);
	if (!tcmu_attrs) {
		ret = -ENOMEM;
		goto out_unreg_genl;
	}

	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
		tcmu_attrs[i] = passthrough_attrib_attrs[i];
	for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++)
		tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k];
	for (k = 0; tcmu_attrib_attrs[k] != NULL; k++)
		tcmu_attrs[i++] = tcmu_attrib_attrs[k];
	tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;

	ret = transport_backend_register(&tcmu_ops);
	if (ret)
		goto out_attrs;

	return 0;

out_attrs:
	kfree(tcmu_attrs);
out_unreg_genl:
	genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
	root_device_unregister(tcmu_root_device);
out_free_cache:
	kmem_cache_destroy(tcmu_cmd_cache);

	return ret;
}

static void __exit tcmu_module_exit(void)
{
	cancel_delayed_work_sync(&tcmu_unmap_work);
	target_backend_unregister(&tcmu_ops);
	kfree(tcmu_attrs);
	genl_unregister_family(&tcmu_genl_family);
	root_device_unregister(tcmu_root_device);
	kmem_cache_destroy(tcmu_cmd_cache);
}

MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
MODULE_LICENSE("GPL");

module_init(tcmu_module_init);
module_exit(tcmu_module_exit);