// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 * Copyright (C) 2017 Chinamobile, Inc.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/radix-tree.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>

/**
 * DOC: Userspace I/O
 * Userspace I/O
 * -------------
 *
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This allows backends that are too
 * complex for in-kernel support to be implemented in userspace.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap-ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */

#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

/* For cmd area, the size is fixed 8MB */
#define CMDR_SIZE (8 * 1024 * 1024)

/*
 * For data area, the block size is PAGE_SIZE and
 * the total size is 256K * PAGE_SIZE.
 */
#define DATA_BLOCK_SIZE PAGE_SIZE
#define DATA_BLOCK_SHIFT PAGE_SHIFT
#define DATA_BLOCK_BITS_DEF (256 * 1024)

#define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
#define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))

/*
 * Default number of global data blocks (512K * PAGE_SIZE) above which
 * the unmap thread will be started.
 */
#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)

static u8 tcmu_kern_cmd_reply_supported;
static u8 tcmu_netlink_blocked;

static struct device *tcmu_root_device;

struct tcmu_hba {
	u32 host_id;
};

#define TCMU_CONFIG_LEN 256

static DEFINE_MUTEX(tcmu_nl_cmd_mutex);
static LIST_HEAD(tcmu_nl_cmd_list);

struct tcmu_dev;

struct tcmu_nl_cmd {
	/* wake up thread waiting for reply */
	struct completion complete;
	struct list_head nl_list;
	struct tcmu_dev *udev;
	int cmd;
	int status;
};

struct tcmu_dev {
	struct list_head node;
	struct kref kref;

	struct se_device se_dev;

	char *name;
	struct se_hba *hba;

#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
#define TCMU_DEV_BIT_BLOCKED 2
	unsigned long flags;

	struct uio_info uio_info;

	struct inode *inode;

	struct tcmu_mailbox *mb_addr;
	uint64_t dev_size;
	u32 cmdr_size;
	u32 cmdr_last_cleaned;
	/* Offset of data area from start of mb */
	/* Must add data_off and mb_addr to get the address */
	size_t data_off;
	size_t data_size;
	uint32_t max_blocks;
	size_t ring_size;

	struct mutex cmdr_lock;
	struct list_head qfull_queue;
	struct list_head tmr_queue;

	uint32_t dbi_max;
	uint32_t dbi_thresh;
	unsigned long *data_bitmap;
	struct radix_tree_root data_blocks;

	struct idr commands;

	struct timer_list cmd_timer;
	unsigned int cmd_time_out;
	struct list_head inflight_queue;

	struct timer_list qfull_timer;
	int qfull_time_out;

	struct list_head timedout_entry;

	struct tcmu_nl_cmd curr_nl_cmd;

	char dev_config[TCMU_CONFIG_LEN];

	int nl_reply_supported;
};

#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)

#define CMDR_OFF sizeof(struct tcmu_mailbox)

struct tcmu_cmd {
	struct se_cmd *se_cmd;
	struct tcmu_dev *tcmu_dev;
	struct list_head queue_entry;

	uint16_t cmd_id;

	/* Can't use se_cmd when cleaning up expired cmds, because if
	   cmd has been completed then accessing se_cmd is off limits */
	uint32_t dbi_cnt;
	uint32_t dbi_cur;
	uint32_t *dbi;

	unsigned long deadline;

#define TCMU_CMD_BIT_EXPIRED 0
	unsigned long flags;
};

struct tcmu_tmr {
	struct list_head queue_entry;

	uint8_t tmr_type;
	uint32_t tmr_cmd_cnt;
	int16_t tmr_cmd_ids[0];
};

/*
 * To avoid deadlock, the mutex lock order should always be:
 *
 * mutex_lock(&root_udev_mutex);
 * ...
 * mutex_lock(&tcmu_dev->cmdr_lock);
 * mutex_unlock(&tcmu_dev->cmdr_lock);
 * ...
 * mutex_unlock(&root_udev_mutex);
 */
static DEFINE_MUTEX(root_udev_mutex);
static LIST_HEAD(root_udev);

static DEFINE_SPINLOCK(timed_out_udevs_lock);
static LIST_HEAD(timed_out_udevs);

static struct kmem_cache *tcmu_cmd_cache;

static atomic_t global_db_count = ATOMIC_INIT(0);
static struct delayed_work tcmu_unmap_work;
static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF;

static int tcmu_set_global_max_data_area(const char *str,
					 const struct kernel_param *kp)
{
	int ret, max_area_mb;

	ret = kstrtoint(str, 10, &max_area_mb);
	if (ret)
		return -EINVAL;

	if (max_area_mb <= 0) {
		pr_err("global_max_data_area must be larger than 0.\n");
		return -EINVAL;
	}

	tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb);
	if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
		schedule_delayed_work(&tcmu_unmap_work, 0);
	else
		cancel_delayed_work_sync(&tcmu_unmap_work);

	return 0;
}

static int tcmu_get_global_max_data_area(char *buffer,
					 const struct kernel_param *kp)
{
	return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
}

static const struct kernel_param_ops tcmu_global_max_data_area_op = {
	.set = tcmu_set_global_max_data_area,
	.get = tcmu_get_global_max_data_area,
};

module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL,
		S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(global_max_data_area_mb,
		 "Max MBs allowed to be allocated to all the tcmu device's "
		 "data areas.");

static int tcmu_get_block_netlink(char *buffer,
				  const struct kernel_param *kp)
{
	return sprintf(buffer, "%s\n", tcmu_netlink_blocked ?
		       "blocked" : "unblocked");
}

static int tcmu_set_block_netlink(const char *str,
				  const struct kernel_param *kp)
{
	int ret;
	u8 val;

	ret = kstrtou8(str, 0, &val);
	if (ret < 0)
		return ret;

	if (val > 1) {
		pr_err("Invalid block netlink value %u\n", val);
		return -EINVAL;
	}

	tcmu_netlink_blocked = val;
	return 0;
}

static const struct kernel_param_ops tcmu_block_netlink_op = {
	.set = tcmu_set_block_netlink,
	.get = tcmu_get_block_netlink,
};

module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(block_netlink, "Block new netlink commands.");

static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd)
{
	struct tcmu_dev *udev = nl_cmd->udev;

	if (!tcmu_netlink_blocked) {
		pr_err("Could not reset device's netlink interface. Netlink is not blocked.\n");
		return -EBUSY;
	}

	if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
		pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name);
		nl_cmd->status = -EINTR;
		list_del(&nl_cmd->nl_list);
		complete(&nl_cmd->complete);
	}
	return 0;
}

static int tcmu_set_reset_netlink(const char *str,
				  const struct kernel_param *kp)
{
	struct tcmu_nl_cmd *nl_cmd, *tmp_cmd;
	int ret;
	u8 val;

	ret = kstrtou8(str, 0, &val);
	if (ret < 0)
		return ret;

	if (val != 1) {
		pr_err("Invalid reset netlink value %u\n", val);
		return -EINVAL;
	}

	mutex_lock(&tcmu_nl_cmd_mutex);
	list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) {
		ret = tcmu_fail_netlink_cmd(nl_cmd);
		if (ret)
			break;
	}
	mutex_unlock(&tcmu_nl_cmd_mutex);

	return ret;
}

static const struct kernel_param_ops tcmu_reset_netlink_op = {
	.set = tcmu_set_reset_netlink,
};

module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR);
MODULE_PARM_DESC(reset_netlink, "Reset netlink commands.");

/* multicast group */
enum tcmu_multicast_groups {
	TCMU_MCGRP_CONFIG,
};

static const struct genl_multicast_group tcmu_mcgrps[] = {
	[TCMU_MCGRP_CONFIG] = { .name = "config", },
};

static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = {
	[TCMU_ATTR_DEVICE] = { .type = NLA_STRING },
	[TCMU_ATTR_MINOR] = { .type = NLA_U32 },
	[TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 },
	[TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 },
	[TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 },
};

static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd)
{
	struct tcmu_dev *udev = NULL;
	struct tcmu_nl_cmd *nl_cmd;
	int dev_id, rc, ret = 0;

	if (!info->attrs[TCMU_ATTR_CMD_STATUS] ||
	    !info->attrs[TCMU_ATTR_DEVICE_ID]) {
		printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n");
		return -EINVAL;
	}

	dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]);
	rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]);

	mutex_lock(&tcmu_nl_cmd_mutex);
	list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) {
		if (nl_cmd->udev->se_dev.dev_index == dev_id) {
			udev = nl_cmd->udev;
			break;
		}
	}

	if (!udev) {
		pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n",
		       completed_cmd, rc, dev_id);
		ret = -ENODEV;
		goto unlock;
	}
	list_del(&nl_cmd->nl_list);

	pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n",
		 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc,
		 nl_cmd->status);

	if (nl_cmd->cmd != completed_cmd) {
		pr_err("Mismatched commands on %s (Expecting reply for %d. Current %d).\n",
		       udev->name, completed_cmd, nl_cmd->cmd);
		ret = -EINVAL;
		goto unlock;
	}

	nl_cmd->status = rc;
	complete(&nl_cmd->complete);
unlock:
	mutex_unlock(&tcmu_nl_cmd_mutex);
	return ret;
}

static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE);
}

static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE);
}

static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb,
				       struct genl_info *info)
{
	return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE);
}

static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) {
		tcmu_kern_cmd_reply_supported =
			nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]);
		printk(KERN_INFO "tcmu daemon: command reply support %u.\n",
		       tcmu_kern_cmd_reply_supported);
	}

	return 0;
}

static const struct genl_ops tcmu_genl_ops[] = {
	{
		.cmd = TCMU_CMD_SET_FEATURES,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = tcmu_genl_set_features,
	},
	{
		.cmd = TCMU_CMD_ADDED_DEVICE_DONE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = tcmu_genl_add_dev_done,
	},
	{
		.cmd = TCMU_CMD_REMOVED_DEVICE_DONE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = tcmu_genl_rm_dev_done,
	},
	{
		.cmd = TCMU_CMD_RECONFIG_DEVICE_DONE,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.flags = GENL_ADMIN_PERM,
		.doit = tcmu_genl_reconfig_dev_done,
	},
};

/* Our generic netlink family */
static struct genl_family tcmu_genl_family __ro_after_init = {
	.module = THIS_MODULE,
	.hdrsize = 0,
	.name = "TCM-USER",
	.version = 2,
	.maxattr = TCMU_ATTR_MAX,
	.policy = tcmu_attr_policy,
	.mcgrps = tcmu_mcgrps,
	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
	.netnsok = true,
	.ops = tcmu_genl_ops,
	.n_ops = ARRAY_SIZE(tcmu_genl_ops),
};

#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
#define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0)
#define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index))
#define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++])

static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	uint32_t i;

	for (i = 0; i < len; i++)
		clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
}

static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
					struct tcmu_cmd *tcmu_cmd)
{
	struct page *page;
	int ret, dbi;

	dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
	if (dbi == udev->dbi_thresh)
		return false;

	page = radix_tree_lookup(&udev->data_blocks, dbi);
	if (!page) {
		if (atomic_add_return(1, &global_db_count) >
		    tcmu_global_max_blocks)
			schedule_delayed_work(&tcmu_unmap_work, 0);

		/* try to get new page from the mm */
		page = alloc_page(GFP_NOIO);
		if (!page)
			goto err_alloc;

		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
		if (ret)
			goto err_insert;
	}

	if (dbi > udev->dbi_max)
		udev->dbi_max = dbi;

	set_bit(dbi, udev->data_bitmap);
	tcmu_cmd_set_dbi(tcmu_cmd, dbi);

	return true;
err_insert:
	__free_page(page);
err_alloc:
	atomic_dec(&global_db_count);
	return false;
}

static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
				  struct tcmu_cmd *tcmu_cmd)
{
	int i;

	for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
		if (!tcmu_get_empty_block(udev, tcmu_cmd))
			return false;
	}
	return true;
}

static inline struct page *
tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
	return radix_tree_lookup(&udev->data_blocks, dbi);
}

static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
	if (tcmu_cmd->se_cmd)
		tcmu_cmd->se_cmd->priv = NULL;
	kfree(tcmu_cmd->dbi);
	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}

static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);

	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
		data_length += round_up(se_cmd->t_bidi_data_sg->length,
					DATA_BLOCK_SIZE);
	}

	return data_length;
}

static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
{
	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);

	return data_length / DATA_BLOCK_SIZE;
}

static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;

	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO);
	if (!tcmu_cmd)
		return NULL;

	INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
	tcmu_cmd->se_cmd = se_cmd;
	tcmu_cmd->tcmu_dev = udev;

	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
	tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
				GFP_NOIO);
	if (!tcmu_cmd->dbi) {
		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
		return NULL;
	}

	return tcmu_cmd;
}

static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
	unsigned long offset = offset_in_page(vaddr);
	void *start = vaddr - offset;

	size = round_up(size + offset, PAGE_SIZE);

	while (size) {
		flush_dcache_page(vmalloc_to_page(start));
		start += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
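/*
 * Illustrative example (values chosen arbitrarily, not from the original
 * source): with size = 8, head = 2 and tail = 6, spc_used() below returns
 * 8 + (2 - 6) = 4 and spc_free() returns 8 - 4 - 1 = 3, since one byte is
 * always kept unused to distinguish a full ring from an empty one.
 */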
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
	int diff = head - tail;

	if (diff >= 0)
		return diff;
	else
		return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* Keep 1 byte unused or we can't tell full from empty */
	return (size - spc_used(head, tail, size) - 1);
}

static inline size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

static inline void new_iov(struct iovec **iov, int *iov_cnt)
{
	struct iovec *iovec;

	if (*iov_cnt != 0)
		(*iov)++;
	(*iov_cnt)++;

	iovec = *iov;
	memset(iovec, 0, sizeof(struct iovec));
}

#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)

/* offset is relative to mb_addr */
static inline size_t get_block_offset_user(struct tcmu_dev *dev,
					   int dbi, int remaining)
{
	return dev->data_off + dbi * DATA_BLOCK_SIZE +
		DATA_BLOCK_SIZE - remaining;
}

static inline size_t iov_tail(struct iovec *iov)
{
	return (size_t)iov->iov_base + iov->iov_len;
}

static void scatter_data_area(struct tcmu_dev *udev,
	struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
	unsigned int data_nents, struct iovec **iov,
	int *iov_cnt, bool copy_data)
{
	int i, dbi;
	int block_remaining = 0;
	void *from, *to = NULL;
	size_t copy_bytes, to_offset, offset;
	struct scatterlist *sg;
	struct page *page;

	for_each_sg(data_sg, sg, data_nents, i) {
		int sg_remaining = sg->length;
		from = kmap_atomic(sg_page(sg)) + sg->offset;
		while (sg_remaining > 0) {
			if (block_remaining == 0) {
				if (to) {
					flush_dcache_page(page);
					kunmap_atomic(to);
				}

				block_remaining = DATA_BLOCK_SIZE;
				dbi = tcmu_cmd_get_dbi(tcmu_cmd);
				page = tcmu_get_block_page(udev, dbi);
				to = kmap_atomic(page);
			}

			/*
			 * Convert to virtual offset of the ring data area.
			 */
			to_offset = get_block_offset_user(udev, dbi,
							  block_remaining);

			/*
			 * The following code will gather and map the blocks
			 * to the same iovec when the blocks are all next to
			 * each other.
			 */
			copy_bytes = min_t(size_t, sg_remaining,
					   block_remaining);
			if (*iov_cnt != 0 &&
			    to_offset == iov_tail(*iov)) {
				/*
				 * Will append to the current iovec, because
				 * the current block page is next to the
				 * previous one.
				 */
				(*iov)->iov_len += copy_bytes;
			} else {
				/*
				 * Will allocate a new iovec because this is
				 * the first time here or the current block
				 * page is not next to the previous one.
				 */
				new_iov(iov, iov_cnt);
				(*iov)->iov_base = (void __user *)to_offset;
				(*iov)->iov_len = copy_bytes;
			}

			if (copy_data) {
				offset = DATA_BLOCK_SIZE - block_remaining;
				memcpy(to + offset,
				       from + sg->length - sg_remaining,
				       copy_bytes);
			}

			sg_remaining -= copy_bytes;
			block_remaining -= copy_bytes;
		}
		kunmap_atomic(from - sg->offset);
	}

	if (to) {
		flush_dcache_page(page);
		kunmap_atomic(to);
	}
}

static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
			     bool bidi, uint32_t read_len)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	int i, dbi;
	int block_remaining = 0;
	void *from = NULL, *to;
	size_t copy_bytes, offset;
	struct scatterlist *sg, *data_sg;
	struct page *page;
	unsigned int data_nents;
	uint32_t count = 0;

	if (!bidi) {
		data_sg = se_cmd->t_data_sg;
		data_nents = se_cmd->t_data_nents;
	} else {

		/*
		 * For bidi case, the first count blocks are for Data-Out
		 * buffer blocks, and before gathering the Data-In buffer
		 * the Data-Out buffer blocks should be discarded.
		 */
		count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);

		data_sg = se_cmd->t_bidi_data_sg;
		data_nents = se_cmd->t_bidi_data_nents;
	}

	tcmu_cmd_set_dbi_cur(cmd, count);

	for_each_sg(data_sg, sg, data_nents, i) {
		int sg_remaining = sg->length;
		to = kmap_atomic(sg_page(sg)) + sg->offset;
		while (sg_remaining > 0 && read_len > 0) {
			if (block_remaining == 0) {
				if (from)
					kunmap_atomic(from);

				block_remaining = DATA_BLOCK_SIZE;
				dbi = tcmu_cmd_get_dbi(cmd);
				page = tcmu_get_block_page(udev, dbi);
				from = kmap_atomic(page);
				flush_dcache_page(page);
			}
			copy_bytes = min_t(size_t, sg_remaining,
					   block_remaining);
			if (read_len < copy_bytes)
				copy_bytes = read_len;
			offset = DATA_BLOCK_SIZE - block_remaining;
			memcpy(to + sg->length - sg_remaining, from + offset,
			       copy_bytes);

			sg_remaining -= copy_bytes;
			block_remaining -= copy_bytes;
			read_len -= copy_bytes;
		}
		kunmap_atomic(to - sg->offset);
		if (read_len == 0)
			break;
	}
	if (from)
		kunmap_atomic(from);
}

static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
{
	return thresh - bitmap_weight(bitmap, thresh);
}

/*
 * We can't queue a command until we have space available on the cmd ring *and*
 * space available on the data area.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
				size_t cmd_size, size_t data_needed)
{
	struct tcmu_mailbox *mb = udev->mb_addr;
	uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
				/ DATA_BLOCK_SIZE;
	size_t space, cmd_needed;
	u32 cmd_head;

	tcmu_flush_dcache_range(mb, sizeof(*mb));

	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/*
	 * If cmd end-of-ring space is too small then we need space for a NOP plus
	 * original cmd - cmds are internally contiguous.
	 */
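	/*
	 * Illustrative example (values chosen arbitrarily, not from the
	 * original source): with cmdr_size = 4096, cmd_head = 4000 and
	 * cmd_size = 256, only 96 bytes remain before the end of the ring,
	 * so a 96-byte PAD entry is inserted first and 256 + 96 bytes must
	 * be free.
	 */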
	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
	if (space < cmd_needed) {
		pr_debug("no cmd space: %u %u %u\n", cmd_head,
			 udev->cmdr_last_cleaned, udev->cmdr_size);
		return false;
	}

	if (!data_needed)
		return true;

	/* try to check and get the data blocks as needed */
	space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
	if ((space * DATA_BLOCK_SIZE) < data_needed) {
		unsigned long blocks_left =
				(udev->max_blocks - udev->dbi_thresh) + space;

		if (blocks_left < blocks_needed) {
			pr_debug("no data space: only %lu available, but ask for %zu\n",
				 blocks_left * DATA_BLOCK_SIZE,
				 data_needed);
			return false;
		}

		udev->dbi_thresh += blocks_needed;
		if (udev->dbi_thresh > udev->max_blocks)
			udev->dbi_thresh = udev->max_blocks;
	}

	return tcmu_get_empty_blocks(udev, cmd);
}

static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
{
	return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
		   sizeof(struct tcmu_cmd_entry));
}

static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
					   size_t base_command_size)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t command_size;

	command_size = base_command_size +
		round_up(scsi_command_size(se_cmd->t_task_cdb),
			 TCMU_OP_ALIGN_SIZE);

	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));

	return command_size;
}

static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
				 struct timer_list *timer)
{
	if (!tmo)
		return;

	tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
	if (!timer_pending(timer))
		mod_timer(timer, tcmu_cmd->deadline);

	pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd,
		 tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC);
}

static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	unsigned int tmo;

	/*
	 * For backwards compat if qfull_time_out is not set use
	 * cmd_time_out and if that's not set use the default time out.
	 */
	if (!udev->qfull_time_out)
		return -ETIMEDOUT;
	else if (udev->qfull_time_out > 0)
		tmo = udev->qfull_time_out;
	else if (udev->cmd_time_out)
		tmo = udev->cmd_time_out;
	else
		tmo = TCMU_TIME_OUT;

	tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer);

	list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
	pr_debug("adding cmd %p on dev %s to ring space wait queue\n",
		 tcmu_cmd, udev->name);
	return 0;
}

static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
{
	struct tcmu_cmd_entry_hdr *hdr;
	struct tcmu_mailbox *mb = udev->mb_addr;
	uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/* Insert a PAD if end-of-ring space is too small */
	if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) {
		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

		hdr = (void *) mb + CMDR_OFF + cmd_head;
		tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD);
		tcmu_hdr_set_len(&hdr->len_op, pad_size);
		hdr->cmd_id = 0; /* not used for PAD */
		hdr->kflags = 0;
		hdr->uflags = 0;
		tcmu_flush_dcache_range(hdr, sizeof(*hdr));

		UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
		tcmu_flush_dcache_range(mb, sizeof(*mb));

		cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
		WARN_ON(cmd_head != 0);
	}

	return cmd_head;
}

/**
 * queue_cmd_ring - queue cmd to ring or internally
 * @tcmu_cmd: cmd to queue
 * @scsi_err: TCM error code if failure (-1) returned.
 *
 * Returns:
 * -1 we cannot queue internally or to the ring.
 *  0 success
 *  1 internally queued to wait for ring memory to free.
 */
static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t base_command_size, command_size;
	struct tcmu_mailbox *mb = udev->mb_addr;
	struct tcmu_cmd_entry *entry;
	struct iovec *iov;
	int iov_cnt, cmd_id;
	uint32_t cmd_head;
	uint64_t cdb_off;
	bool copy_to_data_area;
	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);

	*scsi_err = TCM_NO_SENSE;

	if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) {
		*scsi_err = TCM_LUN_BUSY;
		return -1;
	}

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		*scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		return -1;
	}

	/*
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 *
	 * We prepare as many iovs as possible for potential uses here,
	 * because it's expensive to tell how many regions are freed in
	 * the bitmap & global data pool, as the size calculated here
	 * will only be used to do the checks.
	 *
	 * The size will be recalculated later as actually needed to save
	 * cmd area memory.
	 */
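	/*
	 * Illustrative example (not from the original source): a command
	 * touching 4 data blocks (dbi_cnt = 4) is sized here for up to 4
	 * iovecs; if the blocks turn out to be contiguous they collapse
	 * into fewer iovecs and the entry size is recalculated below.
	 */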
	base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

	if (!list_empty(&udev->qfull_queue))
		goto queue;

	if ((command_size > (udev->cmdr_size / 2)) ||
	    data_length > udev->data_size) {
		pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
			"cmd ring/data area\n", command_size, data_length,
			udev->cmdr_size, udev->data_size);
		*scsi_err = TCM_INVALID_CDB_FIELD;
		return -1;
	}

	if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
		/*
		 * Don't leave commands partially setup because the unmap
		 * thread might need the blocks to make forward progress.
		 */
		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
		tcmu_cmd_reset_dbi_cur(tcmu_cmd);
		goto queue;
	}

	cmd_head = ring_insert_padding(udev, command_size);

	entry = (void *) mb + CMDR_OFF + cmd_head;
	memset(entry, 0, command_size);
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);

	/* Handle allocating space from the data area */
	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	iov = &entry->req.iov[0];
	iov_cnt = 0;
	copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
		|| se_cmd->se_cmd_flags & SCF_BIDI);
	scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
			  se_cmd->t_data_nents, &iov, &iov_cnt,
			  copy_to_data_area);
	entry->req.iov_cnt = iov_cnt;

	/* Handle BIDI commands */
	iov_cnt = 0;
	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		iov++;
		scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg,
				  se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
				  false);
	}
	entry->req.iov_bidi_cnt = iov_cnt;

	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
	if (cmd_id < 0) {
		pr_err("tcmu: Could not allocate cmd id.\n");

		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
		*scsi_err = TCM_OUT_OF_RESOURCES;
		return -1;
	}
	tcmu_cmd->cmd_id = cmd_id;

	pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
		 tcmu_cmd, udev->name);

	tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);

	entry->hdr.cmd_id = tcmu_cmd->cmd_id;

	/*
	 * Recalculate the command's base size and size according
	 * to the actual needs
	 */
	base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
						       entry->req.iov_bidi_cnt);
	command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);

	tcmu_hdr_set_len(&entry->hdr.len_op, command_size);

	/* All offsets relative to mb_addr, not start of entry! */
	cdb_off = CMDR_OFF + cmd_head + base_command_size;
	memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
	entry->req.cdb_off = cdb_off;
	tcmu_flush_dcache_range(entry, command_size);

	UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);

	/* TODO: only if FLUSH and FUA? */
	uio_event_notify(&udev->uio_info);

	return 0;

queue:
	if (add_to_qfull_queue(tcmu_cmd)) {
		*scsi_err = TCM_OUT_OF_RESOURCES;
		return -1;
	}

	return 1;
}

/**
 * queue_tmr_ring - queue tmr info to ring or internally
 * @udev: related tcmu_dev
 * @tmr: tcmu_tmr containing tmr info to queue
 *
 * Returns:
 *  0 success
 *  1 internally queued to wait for ring memory to free.
 */
static int
queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr)
{
	struct tcmu_tmr_entry *entry;
	int cmd_size;
	int id_list_sz;
	struct tcmu_mailbox *mb = udev->mb_addr;
	uint32_t cmd_head;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
		goto out_free;

	id_list_sz = sizeof(tmr->tmr_cmd_ids[0]) * tmr->tmr_cmd_cnt;
	cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE);

	if (!list_empty(&udev->tmr_queue) ||
	    !is_ring_space_avail(udev, NULL, cmd_size, 0)) {
		list_add_tail(&tmr->queue_entry, &udev->tmr_queue);
		pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n",
			 tmr, udev->name);
		return 1;
	}

	cmd_head = ring_insert_padding(udev, cmd_size);

	entry = (void *)mb + CMDR_OFF + cmd_head;
	memset(entry, 0, cmd_size);
	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR);
	tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size);
	entry->tmr_type = tmr->tmr_type;
	entry->cmd_cnt = tmr->tmr_cmd_cnt;
	memcpy(&entry->cmd_ids[0], &tmr->tmr_cmd_ids[0], id_list_sz);
	tcmu_flush_dcache_range(entry, cmd_size);

	UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size);
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	uio_event_notify(&udev->uio_info);

out_free:
	kfree(tmr);

	return 0;
}

static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	sense_reason_t scsi_ret = TCM_CHECK_CONDITION_ABORT_CMD;
	int ret = -1;

	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
	if (!tcmu_cmd)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	mutex_lock(&udev->cmdr_lock);
	se_cmd->priv = tcmu_cmd;
	if (!(se_cmd->transport_state & CMD_T_ABORTED))
		ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
	if (ret < 0)
		tcmu_free_cmd(tcmu_cmd);
	mutex_unlock(&udev->cmdr_lock);
	return scsi_ret;
}

static void tcmu_set_next_deadline(struct list_head *queue,
				   struct timer_list *timer)
{
	struct tcmu_cmd *cmd;

	if (!list_empty(queue)) {
		cmd = list_first_entry(queue, struct tcmu_cmd, queue_entry);
		mod_timer(timer, cmd->deadline);
	} else
		del_timer(timer);
}

static int
tcmu_tmr_type(enum tcm_tmreq_table tmf)
{
	switch (tmf) {
	case TMR_ABORT_TASK: return TCMU_TMR_ABORT_TASK;
	case TMR_ABORT_TASK_SET: return TCMU_TMR_ABORT_TASK_SET;
	case TMR_CLEAR_ACA: return TCMU_TMR_CLEAR_ACA;
	case TMR_CLEAR_TASK_SET: return TCMU_TMR_CLEAR_TASK_SET;
	case TMR_LUN_RESET: return TCMU_TMR_LUN_RESET;
	case TMR_TARGET_WARM_RESET: return TCMU_TMR_TARGET_WARM_RESET;
	case TMR_TARGET_COLD_RESET: return TCMU_TMR_TARGET_COLD_RESET;
	case TMR_LUN_RESET_PRO: return TCMU_TMR_LUN_RESET_PRO;
	default: return TCMU_TMR_UNKNOWN;
	}
}

static void
tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
		struct list_head *cmd_list)
{
	int i = 0, cmd_cnt = 0;
	bool unqueued = false;
	uint16_t *cmd_ids = NULL;
	struct tcmu_cmd *cmd;
	struct se_cmd *se_cmd;
	struct tcmu_tmr *tmr;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);

	mutex_lock(&udev->cmdr_lock);

	/* First we check for aborted commands in qfull_queue */
	list_for_each_entry(se_cmd, cmd_list, state_list) {
		i++;
		if (!se_cmd->priv)
			continue;
		cmd = se_cmd->priv;
		/* Commands on qfull queue have no id yet */
		if (cmd->cmd_id) {
			cmd_cnt++;
			continue;
		}
		pr_debug("Removing aborted command %p from queue on dev %s.\n",
			 cmd, udev->name);

		list_del_init(&cmd->queue_entry);
		tcmu_free_cmd(cmd);
		target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED);
		unqueued = true;
	}
	if (unqueued)
		tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);

	pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n",
		 tcmu_tmr_type(tmf), udev->name, i, cmd_cnt);

	tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_KERNEL);
	if (!tmr)
		goto unlock;

	tmr->tmr_type = tcmu_tmr_type(tmf);
	tmr->tmr_cmd_cnt = cmd_cnt;

	if (cmd_cnt != 0) {
		cmd_cnt = 0;
		list_for_each_entry(se_cmd, cmd_list, state_list) {
			if (!se_cmd->priv)
				continue;
			cmd = se_cmd->priv;
			if (cmd->cmd_id)
				tmr->tmr_cmd_ids[cmd_cnt++] = cmd->cmd_id;
		}
	}

	queue_tmr_ring(udev, tmr);

unlock:
	mutex_unlock(&udev->cmdr_lock);
}

static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	struct tcmu_dev *udev = cmd->tcmu_dev;
	bool read_len_valid = false;
	uint32_t read_len;

	/*
	 * cmd has been completed already from timeout, just reclaim
	 * data area space and free cmd
	 */
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		WARN_ON_ONCE(se_cmd);
		goto out;
	}

	list_del_init(&cmd->queue_entry);

	tcmu_cmd_reset_dbi_cur(cmd);

	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
			cmd->se_cmd);
		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
		goto done;
	}

	read_len = se_cmd->data_length;
	if (se_cmd->data_direction == DMA_FROM_DEVICE &&
	    (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
		read_len_valid = true;
		if (entry->rsp.read_len < read_len)
			read_len = entry->rsp.read_len;
	}

	if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
		transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
		if (!read_len_valid)
			goto done;
		else
			se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
	}
	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		/* Get Data-In buffer before clean up */
		gather_data_area(udev, cmd, true, read_len);
	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		gather_data_area(udev, cmd, false, read_len);
	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
		/* TODO: */
	} else if (se_cmd->data_direction != DMA_NONE) {
		pr_warn("TCMU: data direction was %d!\n",
			se_cmd->data_direction);
	}

done:
	if (read_len_valid) {
		pr_debug("read_len = %d\n", read_len);
		target_complete_cmd_with_length(cmd->se_cmd,
						entry->rsp.scsi_status, read_len);
	} else
		target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);

out:
	tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
	tcmu_free_cmd(cmd);
}

static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
{
	struct tcmu_tmr *tmr, *tmp;
	LIST_HEAD(tmrs);

	if (list_empty(&udev->tmr_queue))
		return 1;

	pr_debug("running %s's tmr queue\n", udev->name);

	list_splice_init(&udev->tmr_queue, &tmrs);

	list_for_each_entry_safe(tmr, tmp, &tmrs, queue_entry) {
		list_del_init(&tmr->queue_entry);

		pr_debug("removing tmr %p on dev %s from queue\n",
			 tmr, udev->name);

		if (queue_tmr_ring(udev, tmr)) {
			pr_debug("ran out of space during tmr queue run\n");
			/*
			 * tmr was requeued, so just put all tmrs back in
			 * the queue
			 */
			list_splice_tail(&tmrs, &udev->tmr_queue);
			return 0;
		}
	}

	return 1;
}

static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
	struct tcmu_mailbox *mb;
	struct tcmu_cmd *cmd;
	bool free_space = false;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		pr_err("ring broken, not handling completions\n");
		return 0;
	}

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {

		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;

		/*
		 * Flush max. up to end of cmd ring since current entry might
		 * be a padding that is shorter than sizeof(*entry)
		 */
		size_t ring_left = head_to_end(udev->cmdr_last_cleaned,
					       udev->cmdr_size);
		tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
					ring_left : sizeof(*entry));

		free_space = true;

		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD ||
		    tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_TMR) {
			UPDATE_HEAD(udev->cmdr_last_cleaned,
				    tcmu_hdr_get_len(entry->hdr.len_op),
				    udev->cmdr_size);
			continue;
		}
		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

		cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
		if (!cmd) {
			pr_err("cmd_id %u not found, ring is broken\n",
			       entry->hdr.cmd_id);
			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
			break;
		}

		tcmu_handle_completion(cmd, entry);

		UPDATE_HEAD(udev->cmdr_last_cleaned,
			    tcmu_hdr_get_len(entry->hdr.len_op),
			    udev->cmdr_size);
	}
	if (free_space)
		free_space = tcmu_run_tmr_queue(udev);

	if (atomic_read(&global_db_count) > tcmu_global_max_blocks &&
	    idr_is_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
		/*
		 * Allocated blocks exceeded global block limit, currently no
		 * more pending or waiting commands so try to reclaim blocks.
		 */
		schedule_delayed_work(&tcmu_unmap_work, 0);
	}
	if (udev->cmd_time_out)
		tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);

	return free_space;
}

static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd)
{
	struct se_cmd *se_cmd;

	if (!time_after_eq(jiffies, cmd->deadline))
		return;

	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
	list_del_init(&cmd->queue_entry);
	se_cmd = cmd->se_cmd;
	se_cmd->priv = NULL;
	cmd->se_cmd = NULL;

	pr_debug("Timing out inflight cmd %u on dev %s.\n",
		 cmd->cmd_id, cmd->tcmu_dev->name);

	target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION);
}

static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd)
{
	struct se_cmd *se_cmd;

	if (!time_after_eq(jiffies, cmd->deadline))
		return;

	pr_debug("Timing out queued cmd %p on dev %s.\n",
		 cmd, cmd->tcmu_dev->name);

	list_del_init(&cmd->queue_entry);
	se_cmd = cmd->se_cmd;
	tcmu_free_cmd(cmd);

	target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL);
}

static void tcmu_device_timedout(struct tcmu_dev *udev)
{
	spin_lock(&timed_out_udevs_lock);
	if (list_empty(&udev->timedout_entry))
		list_add_tail(&udev->timedout_entry, &timed_out_udevs);
	spin_unlock(&timed_out_udevs_lock);

	schedule_delayed_work(&tcmu_unmap_work, 0);
}

static void tcmu_cmd_timedout(struct timer_list *t)
{
	struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);

	pr_debug("%s cmd timeout has expired\n", udev->name);
	tcmu_device_timedout(udev);
}

static void tcmu_qfull_timedout(struct timer_list *t)
{
	struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);

	pr_debug("%s qfull timeout has expired\n", udev->name);
	tcmu_device_timedout(udev);
}

static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct tcmu_hba *tcmu_hba;

	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
	if (!tcmu_hba)
		return -ENOMEM;

	tcmu_hba->host_id = host_id;
	hba->hba_ptr = tcmu_hba;

	return 0;
}

static void tcmu_detach_hba(struct se_hba *hba)
{
	kfree(hba->hba_ptr);
	hba->hba_ptr = NULL;
}

static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
	struct tcmu_dev *udev;

	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
	if (!udev)
		return NULL;
	kref_init(&udev->kref);

	udev->name = kstrdup(name, GFP_KERNEL);
	if (!udev->name) {
		kfree(udev);
		return NULL;
	}

	udev->hba = hba;
	udev->cmd_time_out = TCMU_TIME_OUT;
	udev->qfull_time_out = -1;

	udev->max_blocks = DATA_BLOCK_BITS_DEF;
	mutex_init(&udev->cmdr_lock);

	INIT_LIST_HEAD(&udev->node);
	INIT_LIST_HEAD(&udev->timedout_entry);
	INIT_LIST_HEAD(&udev->qfull_queue);
	INIT_LIST_HEAD(&udev->tmr_queue);
	INIT_LIST_HEAD(&udev->inflight_queue);
	idr_init(&udev->commands);

	timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
	timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);

	INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);

	return &udev->se_dev;
}

static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
	struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
	LIST_HEAD(cmds);
	sense_reason_t scsi_ret;
	int ret;

	if (list_empty(&udev->qfull_queue))
		return;

	pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);

	list_splice_init(&udev->qfull_queue, &cmds);

	list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
		list_del_init(&tcmu_cmd->queue_entry);

		pr_debug("removing cmd %p on dev %s from queue\n",
			 tcmu_cmd, udev->name);

		if (fail) {
			/*
			 * We were not able to even start the command, so
			 * fail with busy to allow a retry in case runner
			 * was only temporarily down. If the device is being
			 * removed then LIO core will do the right thing and
			 * fail the retry.
			 */
			target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
			tcmu_free_cmd(tcmu_cmd);
			continue;
		}

		ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
		if (ret < 0) {
			pr_debug("cmd %p on dev %s failed with %u\n",
				 tcmu_cmd, udev->name, scsi_ret);
			/*
			 * Ignore scsi_ret for now. target_complete_cmd
			 * drops it.
			 */
			target_complete_cmd(tcmu_cmd->se_cmd,
					    SAM_STAT_CHECK_CONDITION);
			tcmu_free_cmd(tcmu_cmd);
		} else if (ret > 0) {
			pr_debug("ran out of space during cmdr queue run\n");
			/*
			 * cmd was requeued, so just put all cmds back in
			 * the queue
			 */
			list_splice_tail(&cmds, &udev->qfull_queue);
			break;
		}
	}

	tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
}

static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	mutex_lock(&udev->cmdr_lock);
	if (tcmu_handle_completions(udev))
		run_qfull_queue(udev, false);
	mutex_unlock(&udev->cmdr_lock);

	return 0;
}

/*
 * mmap code from uio.c. Copied here because we want to hook mmap()
 * and this stuff must come along.
 */
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}

static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
	struct page *page;

	mutex_lock(&udev->cmdr_lock);
	page = tcmu_get_block_page(udev, dbi);
	if (likely(page)) {
		mutex_unlock(&udev->cmdr_lock);
		return page;
	}

	/*
	 * Userspace messed up and passed in an address not in the
	 * data iov passed to it.
	 */
	pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n",
	       dbi, udev->name);
	page = NULL;
	mutex_unlock(&udev->cmdr_lock);

	return page;
}

static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
{
	struct tcmu_dev *udev = vmf->vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;
	struct page *page;
	unsigned long offset;
	void *addr;

	int mi = tcmu_find_mem_index(vmf->vma);
	if (mi < 0)
		return VM_FAULT_SIGBUS;

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	if (offset < udev->data_off) {
		/* For the vmalloc()ed cmd area pages */
		addr = (void *)(unsigned long)info->mem[mi].addr + offset;
		page = vmalloc_to_page(addr);
	} else {
		uint32_t dbi;

		/* For the dynamically growing data area pages */
		dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
		page = tcmu_try_get_block_page(udev, dbi);
		if (!page)
			return VM_FAULT_SIGBUS;
	}

	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct tcmu_vm_ops = {
	.fault = tcmu_vma_fault,
};

static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &tcmu_vm_ops;

	vma->vm_private_data = udev;

	/* Ensure the mmap is exactly the right size */
	if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT))
		return -EINVAL;

	return 0;
}

static int tcmu_open(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	/* O_EXCL not supported for char devs, so fake it? */
	if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
		return -EBUSY;

	udev->inode = inode;
	kref_get(&udev->kref);

	pr_debug("open\n");

	return 0;
}

static void tcmu_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct tcmu_dev *udev = TCMU_DEV(dev);

	kfree(udev->uio_info.name);
	kfree(udev->name);
	kfree(udev);
}

static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		kmem_cache_free(tcmu_cmd_cache, cmd);
		return 0;
	}
	return -EINVAL;
}

static void tcmu_blocks_release(struct radix_tree_root *blocks,
				int start, int end)
{
	int i;
	struct page *page;

	for (i = start; i < end; i++) {
		page = radix_tree_delete(blocks, i);
		if (page) {
			__free_page(page);
			atomic_dec(&global_db_count);
		}
	}
}

static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
{
	struct tcmu_tmr *tmr, *tmp;

	list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) {
		list_del_init(&tmr->queue_entry);
		kfree(tmr);
	}
}

static void tcmu_dev_kref_release(struct kref *kref)
{
	struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
	struct se_device *dev = &udev->se_dev;
	struct tcmu_cmd *cmd;
	bool all_expired = true;
	int i;

	vfree(udev->mb_addr);
	udev->mb_addr = NULL;

	spin_lock_bh(&timed_out_udevs_lock);
	if (!list_empty(&udev->timedout_entry))
		list_del(&udev->timedout_entry);
	spin_unlock_bh(&timed_out_udevs_lock);

	/* Upper layer should drain all requests before calling this */
	mutex_lock(&udev->cmdr_lock);
	idr_for_each_entry(&udev->commands, cmd, i) {
		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
			all_expired = false;
	}
	/* There can be left over TMR cmds. Remove them. */
	tcmu_remove_all_queued_tmr(udev);
	if (!list_empty(&udev->qfull_queue))
		all_expired = false;
	idr_destroy(&udev->commands);
	WARN_ON(!all_expired);

	tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
	bitmap_free(udev->data_bitmap);
	mutex_unlock(&udev->cmdr_lock);

	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}

static int tcmu_release(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);

	pr_debug("close\n");
	/* release ref from open */
	kref_put(&udev->kref, tcmu_dev_kref_release);
	return 0;
}

static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd)
{
	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;

	if (!tcmu_kern_cmd_reply_supported)
		return 0;

	if (udev->nl_reply_supported <= 0)
		return 0;

	mutex_lock(&tcmu_nl_cmd_mutex);

	if (tcmu_netlink_blocked) {
		mutex_unlock(&tcmu_nl_cmd_mutex);
		pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd,
			udev->name);
		return -EAGAIN;
	}

	if (nl_cmd->cmd != TCMU_CMD_UNSPEC) {
		mutex_unlock(&tcmu_nl_cmd_mutex);
		pr_warn("netlink cmd %d already executing on %s\n",
			nl_cmd->cmd, udev->name);
		return -EBUSY;
	}

	memset(nl_cmd, 0, sizeof(*nl_cmd));
	nl_cmd->cmd = cmd;
	nl_cmd->udev = udev;
	init_completion(&nl_cmd->complete);
	INIT_LIST_HEAD(&nl_cmd->nl_list);

	list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list);

	mutex_unlock(&tcmu_nl_cmd_mutex);
	return 0;
}

static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev)
{
	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;

	if (!tcmu_kern_cmd_reply_supported)
		return;

	if (udev->nl_reply_supported <= 0)
		return;

	mutex_lock(&tcmu_nl_cmd_mutex);

	list_del(&nl_cmd->nl_list);
	memset(nl_cmd, 0, sizeof(*nl_cmd));

	mutex_unlock(&tcmu_nl_cmd_mutex);
}

static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev)
{
	struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd;
	int ret;

	if (!tcmu_kern_cmd_reply_supported)
		return 0;

	if (udev->nl_reply_supported <= 0)
		return 0;

	pr_debug("sleeping for nl reply\n");
	wait_for_completion(&nl_cmd->complete);

	mutex_lock(&tcmu_nl_cmd_mutex);
	nl_cmd->cmd = TCMU_CMD_UNSPEC;
	ret = nl_cmd->status;
	mutex_unlock(&tcmu_nl_cmd_mutex);

	return ret;
}

static int tcmu_netlink_event_init(struct tcmu_dev *udev,
				   enum tcmu_genl_cmd cmd,
				   struct sk_buff **buf, void **hdr)
{
	struct sk_buff *skb;
	void *msg_header;
	int ret = -ENOMEM;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return ret;

	msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
	if (!msg_header)
		goto free_skb;

	ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index);
	if (ret < 0)
		goto free_skb;

	*buf = skb;
	*hdr = msg_header;
	return ret;

free_skb:
	nlmsg_free(skb);
	return ret;
}

static int tcmu_netlink_event_send(struct tcmu_dev *udev,
				   enum tcmu_genl_cmd cmd,
				   struct sk_buff *skb, void *msg_header)
{
	int ret;

	genlmsg_end(skb, msg_header);

	ret = tcmu_init_genl_cmd_reply(udev, cmd);
	if (ret) {
		nlmsg_free(skb);
		return ret;
	}

	ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
				      TCMU_MCGRP_CONFIG, GFP_KERNEL);

	/* Wait during an add as the listener may not be up yet */
	if (ret == 0 ||
	    (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
		return tcmu_wait_genl_cmd_reply(udev);
	else
		tcmu_destroy_genl_cmd_reply(udev);

	return ret;
}

static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
				      &msg_header);
	if (ret < 0)
		return ret;
	return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
				       msg_header);
}

static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
				       skb, msg_header);
}

static int tcmu_update_uio_info(struct tcmu_dev *udev)
{
	struct tcmu_hba *hba = udev->hba->hba_ptr;
	struct uio_info *info;
	char *str;

	info = &udev->uio_info;

	if (udev->dev_config[0])
		str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
				udev->name, udev->dev_config);
	else
		str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
				udev->name);
	if (!str)
		return -ENOMEM;

	/* If the old string exists, free it */
	kfree(info->name);
	info->name = str;

	return 0;
}

static int tcmu_configure_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	struct uio_info *info;
	struct tcmu_mailbox *mb;
	int ret = 0;

	ret = tcmu_update_uio_info(udev);
	if (ret)
		return ret;

	info = &udev->uio_info;

	mutex_lock(&udev->cmdr_lock);
	udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
	mutex_unlock(&udev->cmdr_lock);
	if (!udev->data_bitmap) {
		ret = -ENOMEM;
		goto err_bitmap_alloc;
	}

	udev->mb_addr = vzalloc(CMDR_SIZE);
	if (!udev->mb_addr) {
		ret = -ENOMEM;
		goto err_vzalloc;
	}

	/* mailbox fits in first part of CMDR space */
	udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
	udev->data_off = CMDR_SIZE;
	udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE;
	udev->dbi_thresh = 0; /* Default in Idle state */

	/* Initialise the mailbox of the ring buffer */
	mb = udev->mb_addr;
	mb->version = TCMU_MAILBOX_VERSION;
	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
		    TCMU_MAILBOX_FLAG_CAP_READ_LEN |
		    TCMU_MAILBOX_FLAG_CAP_TMR;
	mb->cmdr_off = CMDR_OFF;
	mb->cmdr_size = udev->cmdr_size;

	WARN_ON(!PAGE_ALIGNED(udev->data_off));
	WARN_ON(udev->data_size % PAGE_SIZE);
	WARN_ON(udev->data_size % DATA_BLOCK_SIZE);

	info->version = __stringify(TCMU_MAILBOX_VERSION);

	info->mem[0].name = "tcm-user command & data buffer";
	info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
	info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE;
	info->mem[0].memtype = UIO_MEM_NONE;

	info->irqcontrol = tcmu_irqcontrol;
	info->irq = UIO_IRQ_CUSTOM;

	info->mmap = tcmu_mmap;
	info->open = tcmu_open;
	info->release = tcmu_release;

	ret = uio_register_device(tcmu_root_device, info);
	if (ret)
		goto err_register;

	/* User can set hw_block_size before enabling the device */
	if (dev->dev_attrib.hw_block_size == 0)
		dev->dev_attrib.hw_block_size = 512;
	/* Other attributes can be configured in userspace */
	if (!dev->dev_attrib.hw_max_sectors)
		dev->dev_attrib.hw_max_sectors = 128;
	if (!dev->dev_attrib.emulate_write_cache)
		dev->dev_attrib.emulate_write_cache = 0;
	dev->dev_attrib.hw_queue_depth = 128;

	/* If user didn't explicitly disable netlink reply support, use
	 * module scope setting.
	 */
	if (udev->nl_reply_supported >= 0)
		udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;

	/*
	 * Get a ref in case userspace does a close on the uio device before
	 * LIO has initiated tcmu_free_device.
	 */
	kref_get(&udev->kref);

	ret = tcmu_send_dev_add_event(udev);
	if (ret)
		goto err_netlink;

	mutex_lock(&root_udev_mutex);
	list_add(&udev->node, &root_udev);
	mutex_unlock(&root_udev_mutex);

	return 0;

err_netlink:
	kref_put(&udev->kref, tcmu_dev_kref_release);
	uio_unregister_device(&udev->uio_info);
err_register:
	vfree(udev->mb_addr);
	udev->mb_addr = NULL;
err_vzalloc:
	bitmap_free(udev->data_bitmap);
	udev->data_bitmap = NULL;
err_bitmap_alloc:
	kfree(info->name);
	info->name = NULL;

	return ret;
}

static void tcmu_free_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	/* release ref from init */
	kref_put(&udev->kref, tcmu_dev_kref_release);
}

static void tcmu_destroy_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	del_timer_sync(&udev->cmd_timer);
	del_timer_sync(&udev->qfull_timer);

	mutex_lock(&root_udev_mutex);
	list_del(&udev->node);
	mutex_unlock(&root_udev_mutex);

	tcmu_send_dev_remove_event(udev);

	uio_unregister_device(&udev->uio_info);

	/* release ref from configure */
	kref_put(&udev->kref, tcmu_dev_kref_release);
}

static void tcmu_unblock_dev(struct tcmu_dev *udev)
{
	mutex_lock(&udev->cmdr_lock);
	clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags);
	mutex_unlock(&udev->cmdr_lock);
}

static void tcmu_block_dev(struct tcmu_dev *udev)
{
	mutex_lock(&udev->cmdr_lock);

	if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
		goto unlock;

	/* complete IO that has executed successfully */
	tcmu_handle_completions(udev);
	/* fail IO waiting to be queued */
	run_qfull_queue(udev, true);

unlock:
	mutex_unlock(&udev->cmdr_lock);
}
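/*
 * Throw away everything still tracked in the ring. Commands that have not
 * already expired are completed back to the core (BUSY for err_level 1,
 * CHECK CONDITION otherwise), then the mailbox head and tail are rewound
 * so userspace can restart against an empty ring.
 */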
static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
{
	struct tcmu_mailbox *mb;
	struct tcmu_cmd *cmd;
	int i;

	mutex_lock(&udev->cmdr_lock);

	idr_for_each_entry(&udev->commands, cmd, i) {
		pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
			 cmd->cmd_id, udev->name,
			 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));

		idr_remove(&udev->commands, i);
		if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
			WARN_ON(!cmd->se_cmd);
			list_del_init(&cmd->queue_entry);
			if (err_level == 1) {
				/*
				 * Userspace was not able to start the
				 * command or it is retryable.
				 */
				target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY);
			} else {
				/* hard failure */
				target_complete_cmd(cmd->se_cmd,
						    SAM_STAT_CHECK_CONDITION);
			}
		}
		tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
		tcmu_free_cmd(cmd);
	}

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));
	pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned,
		 mb->cmd_tail, mb->cmd_head);

	udev->cmdr_last_cleaned = 0;
	mb->cmd_tail = 0;
	mb->cmd_head = 0;
	tcmu_flush_dcache_range(mb, sizeof(*mb));
	clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);

	del_timer(&udev->cmd_timer);

	/*
	 * The ring is empty and the qfull queue never contains aborted
	 * commands, so the TMRs in the tmr queue do not reference relevant
	 * cmd_ids. After a ring reset userspace should do a fresh start,
	 * so even a LUN RESET message is no longer relevant.
	 * Therefore remove all TMRs from the tmr queue.
	 */
	tcmu_remove_all_queued_tmr(udev);

	run_qfull_queue(udev, false);

	mutex_unlock(&udev->cmdr_lock);
}

enum {
	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
	Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err,
};

static match_table_t tokens = {
	{Opt_dev_config, "dev_config=%s"},
	{Opt_dev_size, "dev_size=%s"},
	{Opt_hw_block_size, "hw_block_size=%d"},
	{Opt_hw_max_sectors, "hw_max_sectors=%d"},
	{Opt_nl_reply_supported, "nl_reply_supported=%d"},
	{Opt_max_data_area_mb, "max_data_area_mb=%d"},
	{Opt_err, NULL}
};

static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
{
	int val, ret;

	ret = match_int(arg, &val);
	if (ret < 0) {
		pr_err("match_int() failed for dev attrib. Error %d.\n",
		       ret);
		return ret;
	}

	if (val <= 0) {
		pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
		       val);
		return -EINVAL;
	}
	*dev_attrib = val;
	return 0;
}

static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
{
	int val, ret;

	ret = match_int(arg, &val);
	if (ret < 0) {
		pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
		       ret);
		return ret;
	}

	if (val <= 0) {
		pr_err("Invalid max_data_area %d.\n", val);
		return -EINVAL;
	}

	mutex_lock(&udev->cmdr_lock);
	if (udev->data_bitmap) {
		pr_err("Cannot set max_data_area_mb after it has been enabled.\n");
		ret = -EINVAL;
		goto unlock;
	}

	udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
	if (udev->max_blocks > tcmu_global_max_blocks) {
		pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
		       val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
		udev->max_blocks = tcmu_global_max_blocks;
	}

unlock:
	mutex_unlock(&udev->cmdr_lock);
	return ret;
}
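/*
 * Parameters arrive from configfs as a comma- or newline-separated token
 * list matching the table above, for example (illustrative values only):
 *
 *   dev_config=<handler-specific string>,dev_size=1073741824,max_data_area_mb=16
 */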
static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_dev_config:
			if (match_strlcpy(udev->dev_config, &args[0],
					  TCMU_CONFIG_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
			break;
		case Opt_dev_size:
			ret = match_u64(&args[0], &udev->dev_size);
			if (ret < 0)
				pr_err("match_u64() failed for dev_size=. Error %d.\n",
				       ret);
			break;
		case Opt_hw_block_size:
			ret = tcmu_set_dev_attrib(&args[0],
						  &(dev->dev_attrib.hw_block_size));
			break;
		case Opt_hw_max_sectors:
			ret = tcmu_set_dev_attrib(&args[0],
						  &(dev->dev_attrib.hw_max_sectors));
			break;
		case Opt_nl_reply_supported:
			ret = match_int(&args[0], &udev->nl_reply_supported);
			if (ret < 0)
				pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
				       ret);
			break;
		case Opt_max_data_area_mb:
			ret = tcmu_set_max_blocks_param(udev, &args[0]);
			break;
		default:
			break;
		}

		if (ret)
			break;
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "Config: %s ",
		     udev->dev_config[0] ? udev->dev_config : "NULL");
	bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
	bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
		      TCMU_BLOCKS_TO_MBS(udev->max_blocks));

	return bl;
}
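/*
 * Note: the backend is expected to report the last addressable LBA here,
 * hence one block is subtracted from dev_size before dividing.
 */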
static sector_t tcmu_get_blocks(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	return div_u64(udev->dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}

static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
	return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
}

static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
}

static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
				       size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = container_of(da->da_dev,
					     struct tcmu_dev, se_dev);
	u32 val;
	int ret;

	if (da->da_dev->export_count) {
		pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
		return -EINVAL;
	}

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	udev->cmd_time_out = val * MSEC_PER_SEC;
	return count;
}
CONFIGFS_ATTR(tcmu_, cmd_time_out);

static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
			udev->qfull_time_out :
			udev->qfull_time_out / MSEC_PER_SEC);
}
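/*
 * qfull_time_out is taken in seconds from configfs and stored in
 * milliseconds; -1 is kept as-is and shown raw by the _show handler above.
 */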
static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
					 const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	s32 val;
	int ret;

	ret = kstrtos32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val >= 0) {
		udev->qfull_time_out = val * MSEC_PER_SEC;
	} else if (val == -1) {
		udev->qfull_time_out = val;
	} else {
		printk(KERN_ERR "Invalid qfull timeout value %d\n", val);
		return -EINVAL;
	}
	return count;
}
CONFIGFS_ATTR(tcmu_, qfull_time_out);

static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%u\n",
			TCMU_BLOCKS_TO_MBS(udev->max_blocks));
}
CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);

static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
}
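/*
 * The tcmu_send_*_event() helpers below all reuse the TCMU_CMD_RECONFIG_DEVICE
 * netlink command and only differ in which attribute (config string, size or
 * write-cache flag) they carry, so reconfiguring an already configured device
 * goes through the same kernel/userspace handshake as device add and remove.
 */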
static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
				      const char *reconfig_data)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
	if (ret < 0) {
		nlmsg_free(skb);
		return ret;
	}
	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
				       skb, msg_header);
}

static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
				     size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	int ret, len;

	len = strlen(page);
	if (!len || len > TCMU_CONFIG_LEN - 1)
		return -EINVAL;

	/* Check if device has been configured before */
	if (target_dev_configured(&udev->se_dev)) {
		ret = tcmu_send_dev_config_event(udev, page);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
		strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);

		ret = tcmu_update_uio_info(udev);
		if (ret)
			return ret;
		return count;
	}
	strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);

	return count;
}
CONFIGFS_ATTR(tcmu_, dev_config);

static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
}

static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
				size, TCMU_ATTR_PAD);
	if (ret < 0) {
		nlmsg_free(skb);
		return ret;
	}
	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
				       skb, msg_header);
}

static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
				   size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u64 val;
	int ret;

	ret = kstrtou64(page, 0, &val);
	if (ret < 0)
		return ret;

	/* Check if device has been configured before */
	if (target_dev_configured(&udev->se_dev)) {
		ret = tcmu_send_dev_size_event(udev, val);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
	}
	udev->dev_size = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, dev_size);

static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
					    char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
}

static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	s8 val;
	int ret;

	ret = kstrtos8(page, 0, &val);
	if (ret < 0)
		return ret;

	udev->nl_reply_supported = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, nl_reply_supported);

static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
					     char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);

	return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
}

static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
	if (ret < 0) {
		nlmsg_free(skb);
		return ret;
	}
	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
				       skb, msg_header);
}

static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
					      const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u8 val;
	int ret;

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	/* Check if device has been configured before */
	if (target_dev_configured(&udev->se_dev)) {
		ret = tcmu_send_emulate_write_cache(udev, val);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
	}

	da->emulate_write_cache = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, emulate_write_cache);
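/*
 * block_dev and reset_ring below are device action attributes. A plausible
 * recovery sequence when the userspace handler has died (assumption, not
 * enforced by the kernel): write 1 to block_dev, write 1 or 2 to reset_ring,
 * restart the handler, then write 0 to block_dev.
 */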
static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);

	if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
		return snprintf(page, PAGE_SIZE, "%s\n", "blocked");
	else
		return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
}

static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
				    size_t count)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	u8 val;
	int ret;

	if (!target_dev_configured(&udev->se_dev)) {
		pr_err("Device is not configured.\n");
		return -EINVAL;
	}

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val > 1) {
		pr_err("Invalid block value %d\n", val);
		return -EINVAL;
	}

	if (!val)
		tcmu_unblock_dev(udev);
	else
		tcmu_block_dev(udev);
	return count;
}
CONFIGFS_ATTR(tcmu_, block_dev);

static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
				     size_t count)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	u8 val;
	int ret;

	if (!target_dev_configured(&udev->se_dev)) {
		pr_err("Device is not configured.\n");
		return -EINVAL;
	}

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val != 1 && val != 2) {
		pr_err("Invalid reset ring value %d\n", val);
		return -EINVAL;
	}

	tcmu_reset_ring(udev, val);
	return count;
}
CONFIGFS_ATTR_WO(tcmu_, reset_ring);

static struct configfs_attribute *tcmu_attrib_attrs[] = {
	&tcmu_attr_cmd_time_out,
	&tcmu_attr_qfull_time_out,
	&tcmu_attr_max_data_area_mb,
	&tcmu_attr_dev_config,
	&tcmu_attr_dev_size,
	&tcmu_attr_emulate_write_cache,
	&tcmu_attr_nl_reply_supported,
	NULL,
};

static struct configfs_attribute **tcmu_attrs;

static struct configfs_attribute *tcmu_action_attrs[] = {
	&tcmu_attr_block_dev,
	&tcmu_attr_reset_ring,
	NULL,
};

static struct target_backend_ops tcmu_ops = {
	.name = "user",
	.owner = THIS_MODULE,
	.transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH,
	.transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR |
				      TRANSPORT_FLAG_PASSTHROUGH_ALUA,
	.attach_hba = tcmu_attach_hba,
	.detach_hba = tcmu_detach_hba,
	.alloc_device = tcmu_alloc_device,
	.configure_device = tcmu_configure_device,
	.destroy_device = tcmu_destroy_device,
	.free_device = tcmu_free_device,
	.parse_cdb = tcmu_parse_cdb,
	.tmr_notify = tcmu_tmr_notify,
	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
	.get_device_type = sbc_get_device_type,
	.get_blocks = tcmu_get_blocks,
	.tb_dev_action_attrs = tcmu_action_attrs,
};
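/*
 * Reclaim data blocks once the global pool exceeds tcmu_global_max_blocks:
 * shrink each configured, non-idle device's dbi_thresh, unmap the now unused
 * tail of its data area from the uio mapping and release the backing pages.
 */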
static void find_free_blocks(void)
{
	struct tcmu_dev *udev;
	loff_t off;
	u32 start, end, block, total_freed = 0;

	if (atomic_read(&global_db_count) <= tcmu_global_max_blocks)
		return;

	mutex_lock(&root_udev_mutex);
	list_for_each_entry(udev, &root_udev, node) {
		mutex_lock(&udev->cmdr_lock);

		if (!target_dev_configured(&udev->se_dev)) {
			mutex_unlock(&udev->cmdr_lock);
			continue;
		}

		/* Try to complete the finished commands first */
		if (tcmu_handle_completions(udev))
			run_qfull_queue(udev, false);

		/* Skip the udevs that are idle */
		if (!udev->dbi_thresh) {
			mutex_unlock(&udev->cmdr_lock);
			continue;
		}

		end = udev->dbi_max + 1;
		block = find_last_bit(udev->data_bitmap, end);
		if (block == udev->dbi_max) {
			/*
			 * The last bit is dbi_max, so it is not possible to
			 * reclaim any blocks.
			 */
			mutex_unlock(&udev->cmdr_lock);
			continue;
		} else if (block == end) {
			/* The current udev will go to the idle state */
			udev->dbi_thresh = start = 0;
			udev->dbi_max = 0;
		} else {
			udev->dbi_thresh = start = block + 1;
			udev->dbi_max = block;
		}

		/* Truncate the data area from offset 'off' */
		off = udev->data_off + start * DATA_BLOCK_SIZE;
		unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);

		/* Release the block pages */
		tcmu_blocks_release(&udev->data_blocks, start, end);
		mutex_unlock(&udev->cmdr_lock);

		total_freed += end - start;
		pr_debug("Freed %u blocks (total %u) from %s.\n", end - start,
			 total_freed, udev->name);
	}
	mutex_unlock(&root_udev_mutex);

	if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
		schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
}
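/*
 * Handle devices whose command or qfull timer fired: expire overdue commands
 * on the inflight and qfull queues and re-arm the timers for the next
 * pending deadline.
 */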
static void check_timedout_devices(void)
{
	struct tcmu_dev *udev, *tmp_dev;
	struct tcmu_cmd *cmd, *tmp_cmd;
	LIST_HEAD(devs);

	spin_lock_bh(&timed_out_udevs_lock);
	list_splice_init(&timed_out_udevs, &devs);

	list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
		list_del_init(&udev->timedout_entry);
		spin_unlock_bh(&timed_out_udevs_lock);

		mutex_lock(&udev->cmdr_lock);

		/*
		 * If cmd_time_out is disabled but qfull is set, the deadline
		 * will only reflect the qfull timeout. Ignore it.
		 */
		if (udev->cmd_time_out) {
			list_for_each_entry_safe(cmd, tmp_cmd,
						 &udev->inflight_queue,
						 queue_entry) {
				tcmu_check_expired_ring_cmd(cmd);
			}
			tcmu_set_next_deadline(&udev->inflight_queue,
					       &udev->cmd_timer);
		}
		list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue,
					 queue_entry) {
			tcmu_check_expired_queue_cmd(cmd);
		}
		tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);

		mutex_unlock(&udev->cmdr_lock);

		spin_lock_bh(&timed_out_udevs_lock);
	}

	spin_unlock_bh(&timed_out_udevs_lock);
}

static void tcmu_unmap_work_fn(struct work_struct *work)
{
	check_timedout_devices();
	find_free_blocks();
}

static int __init tcmu_module_init(void)
{
	int ret, i, k, len = 0;

	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);

	INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);

	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
					   sizeof(struct tcmu_cmd),
					   __alignof__(struct tcmu_cmd),
					   0, NULL);
	if (!tcmu_cmd_cache)
		return -ENOMEM;

	tcmu_root_device = root_device_register("tcm_user");
	if (IS_ERR(tcmu_root_device)) {
		ret = PTR_ERR(tcmu_root_device);
		goto out_free_cache;
	}

	ret = genl_register_family(&tcmu_genl_family);
	if (ret < 0) {
		goto out_unreg_device;
	}

	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
		len += sizeof(struct configfs_attribute *);
	for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++)
		len += sizeof(struct configfs_attribute *);
	for (i = 0; tcmu_attrib_attrs[i] != NULL; i++)
		len += sizeof(struct configfs_attribute *);
	len += sizeof(struct configfs_attribute *);

	tcmu_attrs = kzalloc(len, GFP_KERNEL);
	if (!tcmu_attrs) {
		ret = -ENOMEM;
		goto out_unreg_genl;
	}

	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++)
		tcmu_attrs[i] = passthrough_attrib_attrs[i];
	for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++)
		tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k];
	for (k = 0; tcmu_attrib_attrs[k] != NULL; k++)
		tcmu_attrs[i++] = tcmu_attrib_attrs[k];
	tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;

	ret = transport_backend_register(&tcmu_ops);
	if (ret)
		goto out_attrs;

	return 0;

out_attrs:
	kfree(tcmu_attrs);
out_unreg_genl:
	genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
	root_device_unregister(tcmu_root_device);
out_free_cache:
	kmem_cache_destroy(tcmu_cmd_cache);

	return ret;
}

static void __exit tcmu_module_exit(void)
{
	cancel_delayed_work_sync(&tcmu_unmap_work);
	target_backend_unregister(&tcmu_ops);
	kfree(tcmu_attrs);
	genl_unregister_family(&tcmu_genl_family);
	root_device_unregister(tcmu_root_device);
	kmem_cache_destroy(tcmu_cmd_cache);
}

MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
MODULE_LICENSE("GPL");

module_init(tcmu_module_init);
module_exit(tcmu_module_exit);