// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 * Copyright (C) 2017 Chinamobile, Inc.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/xarray.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>

/**
 * DOC: Userspace I/O
 * Userspace I/O
 * -------------
 *
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This allows backends that are too
 * complex for in-kernel support to be implemented in userspace.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap-ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it from breaking the command ring protocol if it wants, but
 * in order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */

#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

/* For mailbox plus cmd ring, the size is fixed at 8MB */
#define MB_CMDR_SIZE_DEF (8 * 1024 * 1024)
/* Offset of cmd ring is size of mailbox */
#define CMDR_OFF ((__u32)sizeof(struct tcmu_mailbox))
#define CMDR_SIZE_DEF (MB_CMDR_SIZE_DEF - CMDR_OFF)

/*
 * For data area, the default block size is PAGE_SIZE and
 * the default total size is 256K * PAGE_SIZE.
 */
#define DATA_PAGES_PER_BLK_DEF 1
#define DATA_AREA_PAGES_DEF (256 * 1024)

#define TCMU_MBS_TO_PAGES(_mbs) ((size_t)_mbs << (20 - PAGE_SHIFT))
#define TCMU_PAGES_TO_MBS(_pages) (_pages >> (20 - PAGE_SHIFT))

/*
 * Default number of global data blocks (512K * PAGE_SIZE) above which
 * the unmap thread will be started.
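 *
 * A worked example, assuming 4 KiB pages (PAGE_SHIFT == 12): the default
 * of 512K pages is TCMU_PAGES_TO_MBS(512 * 1024) == 524288 >> 8 == 2048 MB,
 * and conversely TCMU_MBS_TO_PAGES(2048) == 2048 << 8 == 524288 pages.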
82 */ 83 #define TCMU_GLOBAL_MAX_PAGES_DEF (512 * 1024) 84 85 static u8 tcmu_kern_cmd_reply_supported; 86 static u8 tcmu_netlink_blocked; 87 88 static struct device *tcmu_root_device; 89 90 struct tcmu_hba { 91 u32 host_id; 92 }; 93 94 #define TCMU_CONFIG_LEN 256 95 96 static DEFINE_MUTEX(tcmu_nl_cmd_mutex); 97 static LIST_HEAD(tcmu_nl_cmd_list); 98 99 struct tcmu_dev; 100 101 struct tcmu_nl_cmd { 102 /* wake up thread waiting for reply */ 103 struct completion complete; 104 struct list_head nl_list; 105 struct tcmu_dev *udev; 106 int cmd; 107 int status; 108 }; 109 110 struct tcmu_dev { 111 struct list_head node; 112 struct kref kref; 113 114 struct se_device se_dev; 115 struct se_dev_plug se_plug; 116 117 char *name; 118 struct se_hba *hba; 119 120 #define TCMU_DEV_BIT_OPEN 0 121 #define TCMU_DEV_BIT_BROKEN 1 122 #define TCMU_DEV_BIT_BLOCKED 2 123 #define TCMU_DEV_BIT_TMR_NOTIFY 3 124 #define TCMU_DEV_BIT_PLUGGED 4 125 unsigned long flags; 126 127 struct uio_info uio_info; 128 129 struct inode *inode; 130 131 uint64_t dev_size; 132 133 struct tcmu_mailbox *mb_addr; 134 void *cmdr; 135 u32 cmdr_size; 136 u32 cmdr_last_cleaned; 137 /* Offset of data area from start of mb */ 138 /* Must add data_off and mb_addr to get the address */ 139 size_t data_off; 140 int data_area_mb; 141 uint32_t max_blocks; 142 size_t mmap_pages; 143 144 struct mutex cmdr_lock; 145 struct list_head qfull_queue; 146 struct list_head tmr_queue; 147 148 uint32_t dbi_max; 149 uint32_t dbi_thresh; 150 unsigned long *data_bitmap; 151 struct xarray data_pages; 152 uint32_t data_pages_per_blk; 153 uint32_t data_blk_size; 154 155 struct xarray commands; 156 157 struct timer_list cmd_timer; 158 unsigned int cmd_time_out; 159 struct list_head inflight_queue; 160 161 struct timer_list qfull_timer; 162 int qfull_time_out; 163 164 struct list_head timedout_entry; 165 166 struct tcmu_nl_cmd curr_nl_cmd; 167 168 char dev_config[TCMU_CONFIG_LEN]; 169 170 int nl_reply_supported; 171 }; 172 173 #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev) 174 175 struct tcmu_cmd { 176 struct se_cmd *se_cmd; 177 struct tcmu_dev *tcmu_dev; 178 struct list_head queue_entry; 179 180 uint16_t cmd_id; 181 182 /* Can't use se_cmd when cleaning up expired cmds, because if 183 cmd has been completed then accessing se_cmd is off limits */ 184 uint32_t dbi_cnt; 185 uint32_t dbi_bidi_cnt; 186 uint32_t dbi_cur; 187 uint32_t *dbi; 188 189 uint32_t data_len_bidi; 190 191 unsigned long deadline; 192 193 #define TCMU_CMD_BIT_EXPIRED 0 194 #define TCMU_CMD_BIT_KEEP_BUF 1 195 unsigned long flags; 196 }; 197 198 struct tcmu_tmr { 199 struct list_head queue_entry; 200 201 uint8_t tmr_type; 202 uint32_t tmr_cmd_cnt; 203 int16_t tmr_cmd_ids[]; 204 }; 205 206 /* 207 * To avoid dead lock the mutex lock order should always be: 208 * 209 * mutex_lock(&root_udev_mutex); 210 * ... 211 * mutex_lock(&tcmu_dev->cmdr_lock); 212 * mutex_unlock(&tcmu_dev->cmdr_lock); 213 * ... 
214 * mutex_unlock(&root_udev_mutex); 215 */ 216 static DEFINE_MUTEX(root_udev_mutex); 217 static LIST_HEAD(root_udev); 218 219 static DEFINE_SPINLOCK(timed_out_udevs_lock); 220 static LIST_HEAD(timed_out_udevs); 221 222 static struct kmem_cache *tcmu_cmd_cache; 223 224 static atomic_t global_page_count = ATOMIC_INIT(0); 225 static struct delayed_work tcmu_unmap_work; 226 static int tcmu_global_max_pages = TCMU_GLOBAL_MAX_PAGES_DEF; 227 228 static int tcmu_set_global_max_data_area(const char *str, 229 const struct kernel_param *kp) 230 { 231 int ret, max_area_mb; 232 233 ret = kstrtoint(str, 10, &max_area_mb); 234 if (ret) 235 return -EINVAL; 236 237 if (max_area_mb <= 0) { 238 pr_err("global_max_data_area must be larger than 0.\n"); 239 return -EINVAL; 240 } 241 242 tcmu_global_max_pages = TCMU_MBS_TO_PAGES(max_area_mb); 243 if (atomic_read(&global_page_count) > tcmu_global_max_pages) 244 schedule_delayed_work(&tcmu_unmap_work, 0); 245 else 246 cancel_delayed_work_sync(&tcmu_unmap_work); 247 248 return 0; 249 } 250 251 static int tcmu_get_global_max_data_area(char *buffer, 252 const struct kernel_param *kp) 253 { 254 return sprintf(buffer, "%d\n", TCMU_PAGES_TO_MBS(tcmu_global_max_pages)); 255 } 256 257 static const struct kernel_param_ops tcmu_global_max_data_area_op = { 258 .set = tcmu_set_global_max_data_area, 259 .get = tcmu_get_global_max_data_area, 260 }; 261 262 module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL, 263 S_IWUSR | S_IRUGO); 264 MODULE_PARM_DESC(global_max_data_area_mb, 265 "Max MBs allowed to be allocated to all the tcmu device's " 266 "data areas."); 267 268 static int tcmu_get_block_netlink(char *buffer, 269 const struct kernel_param *kp) 270 { 271 return sprintf(buffer, "%s\n", tcmu_netlink_blocked ? 272 "blocked" : "unblocked"); 273 } 274 275 static int tcmu_set_block_netlink(const char *str, 276 const struct kernel_param *kp) 277 { 278 int ret; 279 u8 val; 280 281 ret = kstrtou8(str, 0, &val); 282 if (ret < 0) 283 return ret; 284 285 if (val > 1) { 286 pr_err("Invalid block netlink value %u\n", val); 287 return -EINVAL; 288 } 289 290 tcmu_netlink_blocked = val; 291 return 0; 292 } 293 294 static const struct kernel_param_ops tcmu_block_netlink_op = { 295 .set = tcmu_set_block_netlink, 296 .get = tcmu_get_block_netlink, 297 }; 298 299 module_param_cb(block_netlink, &tcmu_block_netlink_op, NULL, S_IWUSR | S_IRUGO); 300 MODULE_PARM_DESC(block_netlink, "Block new netlink commands."); 301 302 static int tcmu_fail_netlink_cmd(struct tcmu_nl_cmd *nl_cmd) 303 { 304 struct tcmu_dev *udev = nl_cmd->udev; 305 306 if (!tcmu_netlink_blocked) { 307 pr_err("Could not reset device's netlink interface. 
Netlink is not blocked.\n"); 308 return -EBUSY; 309 } 310 311 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { 312 pr_debug("Aborting nl cmd %d on %s\n", nl_cmd->cmd, udev->name); 313 nl_cmd->status = -EINTR; 314 list_del(&nl_cmd->nl_list); 315 complete(&nl_cmd->complete); 316 } 317 return 0; 318 } 319 320 static int tcmu_set_reset_netlink(const char *str, 321 const struct kernel_param *kp) 322 { 323 struct tcmu_nl_cmd *nl_cmd, *tmp_cmd; 324 int ret; 325 u8 val; 326 327 ret = kstrtou8(str, 0, &val); 328 if (ret < 0) 329 return ret; 330 331 if (val != 1) { 332 pr_err("Invalid reset netlink value %u\n", val); 333 return -EINVAL; 334 } 335 336 mutex_lock(&tcmu_nl_cmd_mutex); 337 list_for_each_entry_safe(nl_cmd, tmp_cmd, &tcmu_nl_cmd_list, nl_list) { 338 ret = tcmu_fail_netlink_cmd(nl_cmd); 339 if (ret) 340 break; 341 } 342 mutex_unlock(&tcmu_nl_cmd_mutex); 343 344 return ret; 345 } 346 347 static const struct kernel_param_ops tcmu_reset_netlink_op = { 348 .set = tcmu_set_reset_netlink, 349 }; 350 351 module_param_cb(reset_netlink, &tcmu_reset_netlink_op, NULL, S_IWUSR); 352 MODULE_PARM_DESC(reset_netlink, "Reset netlink commands."); 353 354 /* multicast group */ 355 enum tcmu_multicast_groups { 356 TCMU_MCGRP_CONFIG, 357 }; 358 359 static const struct genl_multicast_group tcmu_mcgrps[] = { 360 [TCMU_MCGRP_CONFIG] = { .name = "config", }, 361 }; 362 363 static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = { 364 [TCMU_ATTR_DEVICE] = { .type = NLA_STRING }, 365 [TCMU_ATTR_MINOR] = { .type = NLA_U32 }, 366 [TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 }, 367 [TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 }, 368 [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 }, 369 }; 370 371 static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd) 372 { 373 struct tcmu_dev *udev = NULL; 374 struct tcmu_nl_cmd *nl_cmd; 375 int dev_id, rc, ret = 0; 376 377 if (!info->attrs[TCMU_ATTR_CMD_STATUS] || 378 !info->attrs[TCMU_ATTR_DEVICE_ID]) { 379 printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n"); 380 return -EINVAL; 381 } 382 383 dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]); 384 rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]); 385 386 mutex_lock(&tcmu_nl_cmd_mutex); 387 list_for_each_entry(nl_cmd, &tcmu_nl_cmd_list, nl_list) { 388 if (nl_cmd->udev->se_dev.dev_index == dev_id) { 389 udev = nl_cmd->udev; 390 break; 391 } 392 } 393 394 if (!udev) { 395 pr_err("tcmu nl cmd %u/%d completion could not find device with dev id %u.\n", 396 completed_cmd, rc, dev_id); 397 ret = -ENODEV; 398 goto unlock; 399 } 400 list_del(&nl_cmd->nl_list); 401 402 pr_debug("%s genl cmd done got id %d curr %d done %d rc %d stat %d\n", 403 udev->name, dev_id, nl_cmd->cmd, completed_cmd, rc, 404 nl_cmd->status); 405 406 if (nl_cmd->cmd != completed_cmd) { 407 pr_err("Mismatched commands on %s (Expecting reply for %d. 
Current %d).\n", 408 udev->name, completed_cmd, nl_cmd->cmd); 409 ret = -EINVAL; 410 goto unlock; 411 } 412 413 nl_cmd->status = rc; 414 complete(&nl_cmd->complete); 415 unlock: 416 mutex_unlock(&tcmu_nl_cmd_mutex); 417 return ret; 418 } 419 420 static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info) 421 { 422 return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE); 423 } 424 425 static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info) 426 { 427 return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE); 428 } 429 430 static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb, 431 struct genl_info *info) 432 { 433 return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE); 434 } 435 436 static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info) 437 { 438 if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) { 439 tcmu_kern_cmd_reply_supported = 440 nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]); 441 printk(KERN_INFO "tcmu daemon: command reply support %u.\n", 442 tcmu_kern_cmd_reply_supported); 443 } 444 445 return 0; 446 } 447 448 static const struct genl_small_ops tcmu_genl_ops[] = { 449 { 450 .cmd = TCMU_CMD_SET_FEATURES, 451 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 452 .flags = GENL_ADMIN_PERM, 453 .doit = tcmu_genl_set_features, 454 }, 455 { 456 .cmd = TCMU_CMD_ADDED_DEVICE_DONE, 457 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 458 .flags = GENL_ADMIN_PERM, 459 .doit = tcmu_genl_add_dev_done, 460 }, 461 { 462 .cmd = TCMU_CMD_REMOVED_DEVICE_DONE, 463 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 464 .flags = GENL_ADMIN_PERM, 465 .doit = tcmu_genl_rm_dev_done, 466 }, 467 { 468 .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE, 469 .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, 470 .flags = GENL_ADMIN_PERM, 471 .doit = tcmu_genl_reconfig_dev_done, 472 }, 473 }; 474 475 /* Our generic netlink family */ 476 static struct genl_family tcmu_genl_family __ro_after_init = { 477 .module = THIS_MODULE, 478 .hdrsize = 0, 479 .name = "TCM-USER", 480 .version = 2, 481 .maxattr = TCMU_ATTR_MAX, 482 .policy = tcmu_attr_policy, 483 .mcgrps = tcmu_mcgrps, 484 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps), 485 .netnsok = true, 486 .small_ops = tcmu_genl_ops, 487 .n_small_ops = ARRAY_SIZE(tcmu_genl_ops), 488 }; 489 490 #define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index)) 491 #define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0) 492 #define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index)) 493 #define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++]) 494 495 static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len) 496 { 497 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 498 uint32_t i; 499 500 for (i = 0; i < len; i++) 501 clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap); 502 } 503 504 static inline int tcmu_get_empty_block(struct tcmu_dev *udev, 505 struct tcmu_cmd *tcmu_cmd, 506 int prev_dbi, int length, int *iov_cnt) 507 { 508 XA_STATE(xas, &udev->data_pages, 0); 509 struct page *page; 510 int i, cnt, dbi, dpi; 511 int page_cnt = DIV_ROUND_UP(length, PAGE_SIZE); 512 513 dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh); 514 if (dbi == udev->dbi_thresh) 515 return -1; 516 517 dpi = dbi * udev->data_pages_per_blk; 518 /* Count the number of already allocated pages */ 519 xas_set(&xas, dpi); 520 rcu_read_lock(); 521 for (cnt = 0; xas_next(&xas) && cnt < page_cnt;) 522 cnt++; 523 rcu_read_unlock(); 524 525 for (i = 
cnt; i < page_cnt; i++) { 526 /* try to get new zeroed page from the mm */ 527 page = alloc_page(GFP_NOIO | __GFP_ZERO); 528 if (!page) 529 break; 530 531 if (xa_store(&udev->data_pages, dpi + i, page, GFP_NOIO)) { 532 __free_page(page); 533 break; 534 } 535 } 536 if (atomic_add_return(i - cnt, &global_page_count) > 537 tcmu_global_max_pages) 538 schedule_delayed_work(&tcmu_unmap_work, 0); 539 540 if (i && dbi > udev->dbi_max) 541 udev->dbi_max = dbi; 542 543 set_bit(dbi, udev->data_bitmap); 544 tcmu_cmd_set_dbi(tcmu_cmd, dbi); 545 546 if (dbi != prev_dbi + 1) 547 *iov_cnt += 1; 548 549 return i == page_cnt ? dbi : -1; 550 } 551 552 static int tcmu_get_empty_blocks(struct tcmu_dev *udev, 553 struct tcmu_cmd *tcmu_cmd, int length) 554 { 555 /* start value of dbi + 1 must not be a valid dbi */ 556 int dbi = -2; 557 int blk_data_len, iov_cnt = 0; 558 uint32_t blk_size = udev->data_blk_size; 559 560 for (; length > 0; length -= blk_size) { 561 blk_data_len = min_t(uint32_t, length, blk_size); 562 dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len, 563 &iov_cnt); 564 if (dbi < 0) 565 return -1; 566 } 567 return iov_cnt; 568 } 569 570 static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd) 571 { 572 kfree(tcmu_cmd->dbi); 573 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); 574 } 575 576 static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd) 577 { 578 int i, len; 579 struct se_cmd *se_cmd = cmd->se_cmd; 580 uint32_t blk_size = cmd->tcmu_dev->data_blk_size; 581 582 cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, blk_size); 583 584 if (se_cmd->se_cmd_flags & SCF_BIDI) { 585 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents)); 586 for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++) 587 len += se_cmd->t_bidi_data_sg[i].length; 588 cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, blk_size); 589 cmd->dbi_cnt += cmd->dbi_bidi_cnt; 590 cmd->data_len_bidi = len; 591 } 592 } 593 594 static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd, 595 struct iovec **iov, int prev_dbi, int len) 596 { 597 /* Get the next dbi */ 598 int dbi = tcmu_cmd_get_dbi(cmd); 599 600 /* Do not add more than udev->data_blk_size to iov */ 601 len = min_t(int, len, udev->data_blk_size); 602 603 /* 604 * The following code will gather and map the blocks to the same iovec 605 * when the blocks are all next to each other. 
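	 *
	 * For example, if consecutive calls return dbis 4, 5 and 6, all three
	 * blocks are described by a single iovec of up to 3 * data_blk_size
	 * bytes starting at data_off + 4 * data_blk_size; a gap (e.g. the
	 * next dbi being 8) starts a new iovec.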
606 */ 607 if (dbi != prev_dbi + 1) { 608 /* dbi is not next to previous dbi, so start new iov */ 609 if (prev_dbi >= 0) 610 (*iov)++; 611 /* write offset relative to mb_addr */ 612 (*iov)->iov_base = (void __user *) 613 (udev->data_off + dbi * udev->data_blk_size); 614 } 615 (*iov)->iov_len += len; 616 617 return dbi; 618 } 619 620 static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd, 621 struct iovec **iov, int data_length) 622 { 623 /* start value of dbi + 1 must not be a valid dbi */ 624 int dbi = -2; 625 626 /* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */ 627 for (; data_length > 0; data_length -= udev->data_blk_size) 628 dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length); 629 } 630 631 static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) 632 { 633 struct se_device *se_dev = se_cmd->se_dev; 634 struct tcmu_dev *udev = TCMU_DEV(se_dev); 635 struct tcmu_cmd *tcmu_cmd; 636 637 tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO); 638 if (!tcmu_cmd) 639 return NULL; 640 641 INIT_LIST_HEAD(&tcmu_cmd->queue_entry); 642 tcmu_cmd->se_cmd = se_cmd; 643 tcmu_cmd->tcmu_dev = udev; 644 645 tcmu_cmd_set_block_cnts(tcmu_cmd); 646 tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t), 647 GFP_NOIO); 648 if (!tcmu_cmd->dbi) { 649 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); 650 return NULL; 651 } 652 653 return tcmu_cmd; 654 } 655 656 static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) 657 { 658 unsigned long offset = offset_in_page(vaddr); 659 void *start = vaddr - offset; 660 661 size = round_up(size+offset, PAGE_SIZE); 662 663 while (size) { 664 flush_dcache_page(vmalloc_to_page(start)); 665 start += PAGE_SIZE; 666 size -= PAGE_SIZE; 667 } 668 } 669 670 /* 671 * Some ring helper functions. We don't assume size is a power of 2 so 672 * we can't use circ_buf.h. 
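 *
 * A small worked example: with size == 8, head == 2 and tail == 6,
 * spc_used() returns 8 + (2 - 6) == 4 and spc_free() returns
 * 8 - 4 - 1 == 3 (one byte is always kept unused so that a full ring
 * can be told apart from an empty one).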
673 */ 674 static inline size_t spc_used(size_t head, size_t tail, size_t size) 675 { 676 int diff = head - tail; 677 678 if (diff >= 0) 679 return diff; 680 else 681 return size + diff; 682 } 683 684 static inline size_t spc_free(size_t head, size_t tail, size_t size) 685 { 686 /* Keep 1 byte unused or we can't tell full from empty */ 687 return (size - spc_used(head, tail, size) - 1); 688 } 689 690 static inline size_t head_to_end(size_t head, size_t size) 691 { 692 return size - head; 693 } 694 695 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size) 696 697 #define TCMU_SG_TO_DATA_AREA 1 698 #define TCMU_DATA_AREA_TO_SG 2 699 700 static inline void tcmu_copy_data(struct tcmu_dev *udev, 701 struct tcmu_cmd *tcmu_cmd, uint32_t direction, 702 struct scatterlist *sg, unsigned int sg_nents, 703 struct iovec **iov, size_t data_len) 704 { 705 /* start value of dbi + 1 must not be a valid dbi */ 706 int dbi = -2; 707 size_t page_remaining, cp_len; 708 int page_cnt, page_inx, dpi; 709 struct sg_mapping_iter sg_iter; 710 unsigned int sg_flags; 711 struct page *page; 712 void *data_page_start, *data_addr; 713 714 if (direction == TCMU_SG_TO_DATA_AREA) 715 sg_flags = SG_MITER_ATOMIC | SG_MITER_FROM_SG; 716 else 717 sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG; 718 sg_miter_start(&sg_iter, sg, sg_nents, sg_flags); 719 720 while (data_len) { 721 if (direction == TCMU_SG_TO_DATA_AREA) 722 dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi, 723 data_len); 724 else 725 dbi = tcmu_cmd_get_dbi(tcmu_cmd); 726 727 page_cnt = DIV_ROUND_UP(data_len, PAGE_SIZE); 728 if (page_cnt > udev->data_pages_per_blk) 729 page_cnt = udev->data_pages_per_blk; 730 731 dpi = dbi * udev->data_pages_per_blk; 732 for (page_inx = 0; page_inx < page_cnt && data_len; 733 page_inx++, dpi++) { 734 page = xa_load(&udev->data_pages, dpi); 735 736 if (direction == TCMU_DATA_AREA_TO_SG) 737 flush_dcache_page(page); 738 data_page_start = kmap_atomic(page); 739 page_remaining = PAGE_SIZE; 740 741 while (page_remaining && data_len) { 742 if (!sg_miter_next(&sg_iter)) { 743 /* set length to 0 to abort outer loop */ 744 data_len = 0; 745 pr_debug("%s: aborting data copy due to exhausted sg_list\n", 746 __func__); 747 break; 748 } 749 cp_len = min3(sg_iter.length, page_remaining, 750 data_len); 751 752 data_addr = data_page_start + 753 PAGE_SIZE - page_remaining; 754 if (direction == TCMU_SG_TO_DATA_AREA) 755 memcpy(data_addr, sg_iter.addr, cp_len); 756 else 757 memcpy(sg_iter.addr, data_addr, cp_len); 758 759 data_len -= cp_len; 760 page_remaining -= cp_len; 761 sg_iter.consumed = cp_len; 762 } 763 sg_miter_stop(&sg_iter); 764 765 kunmap_atomic(data_page_start); 766 if (direction == TCMU_SG_TO_DATA_AREA) 767 flush_dcache_page(page); 768 } 769 } 770 } 771 772 static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd, 773 struct iovec **iov) 774 { 775 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 776 777 tcmu_copy_data(udev, tcmu_cmd, TCMU_SG_TO_DATA_AREA, se_cmd->t_data_sg, 778 se_cmd->t_data_nents, iov, se_cmd->data_length); 779 } 780 781 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd, 782 bool bidi, uint32_t read_len) 783 { 784 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 785 struct scatterlist *data_sg; 786 unsigned int data_nents; 787 788 if (!bidi) { 789 data_sg = se_cmd->t_data_sg; 790 data_nents = se_cmd->t_data_nents; 791 } else { 792 /* 793 * For bidi case, the first count blocks are for Data-Out 794 * buffer blocks, and before gathering the Data-In 
buffer 795 * the Data-Out buffer blocks should be skipped. 796 */ 797 tcmu_cmd_set_dbi_cur(tcmu_cmd, 798 tcmu_cmd->dbi_cnt - tcmu_cmd->dbi_bidi_cnt); 799 800 data_sg = se_cmd->t_bidi_data_sg; 801 data_nents = se_cmd->t_bidi_data_nents; 802 } 803 804 tcmu_copy_data(udev, tcmu_cmd, TCMU_DATA_AREA_TO_SG, data_sg, 805 data_nents, NULL, read_len); 806 } 807 808 static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh) 809 { 810 return thresh - bitmap_weight(bitmap, thresh); 811 } 812 813 /* 814 * We can't queue a command until we have space available on the cmd ring. 815 * 816 * Called with ring lock held. 817 */ 818 static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size) 819 { 820 struct tcmu_mailbox *mb = udev->mb_addr; 821 size_t space, cmd_needed; 822 u32 cmd_head; 823 824 tcmu_flush_dcache_range(mb, sizeof(*mb)); 825 826 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 827 828 /* 829 * If cmd end-of-ring space is too small then we need space for a NOP plus 830 * original cmd - cmds are internally contiguous. 831 */ 832 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size) 833 cmd_needed = cmd_size; 834 else 835 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size); 836 837 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size); 838 if (space < cmd_needed) { 839 pr_debug("no cmd space: %u %u %u\n", cmd_head, 840 udev->cmdr_last_cleaned, udev->cmdr_size); 841 return false; 842 } 843 return true; 844 } 845 846 /* 847 * We have to allocate data buffers before we can queue a command. 848 * Returns -1 on error (not enough space) or number of needed iovs on success 849 * 850 * Called with ring lock held. 851 */ 852 static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd, 853 int *iov_bidi_cnt) 854 { 855 int space, iov_cnt = 0, ret = 0; 856 857 if (!cmd->dbi_cnt) 858 goto wr_iov_cnts; 859 860 /* try to check and get the data blocks as needed */ 861 space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh); 862 if (space < cmd->dbi_cnt) { 863 unsigned long blocks_left = 864 (udev->max_blocks - udev->dbi_thresh) + space; 865 866 if (blocks_left < cmd->dbi_cnt) { 867 pr_debug("no data space: only %lu available, but ask for %u\n", 868 blocks_left * udev->data_blk_size, 869 cmd->dbi_cnt * udev->data_blk_size); 870 return -1; 871 } 872 873 udev->dbi_thresh += cmd->dbi_cnt; 874 if (udev->dbi_thresh > udev->max_blocks) 875 udev->dbi_thresh = udev->max_blocks; 876 } 877 878 iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length); 879 if (iov_cnt < 0) 880 return -1; 881 882 if (cmd->dbi_bidi_cnt) { 883 ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi); 884 if (ret < 0) 885 return -1; 886 } 887 wr_iov_cnts: 888 *iov_bidi_cnt = ret; 889 return iov_cnt + ret; 890 } 891 892 static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt) 893 { 894 return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]), 895 sizeof(struct tcmu_cmd_entry)); 896 } 897 898 static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd, 899 size_t base_command_size) 900 { 901 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 902 size_t command_size; 903 904 command_size = base_command_size + 905 round_up(scsi_command_size(se_cmd->t_task_cdb), 906 TCMU_OP_ALIGN_SIZE); 907 908 WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1)); 909 910 return command_size; 911 } 912 913 static void tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo, 914 struct timer_list *timer) 915 { 916 if (!tmo) 917 return; 918 919 
tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); 920 if (!timer_pending(timer)) 921 mod_timer(timer, tcmu_cmd->deadline); 922 923 pr_debug("Timeout set up for cmd %p, dev = %s, tmo = %lu\n", tcmu_cmd, 924 tcmu_cmd->tcmu_dev->name, tmo / MSEC_PER_SEC); 925 } 926 927 static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd) 928 { 929 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 930 unsigned int tmo; 931 932 /* 933 * For backwards compat if qfull_time_out is not set use 934 * cmd_time_out and if that's not set use the default time out. 935 */ 936 if (!udev->qfull_time_out) 937 return -ETIMEDOUT; 938 else if (udev->qfull_time_out > 0) 939 tmo = udev->qfull_time_out; 940 else if (udev->cmd_time_out) 941 tmo = udev->cmd_time_out; 942 else 943 tmo = TCMU_TIME_OUT; 944 945 tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); 946 947 list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue); 948 pr_debug("adding cmd %p on dev %s to ring space wait queue\n", 949 tcmu_cmd, udev->name); 950 return 0; 951 } 952 953 static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size) 954 { 955 struct tcmu_cmd_entry_hdr *hdr; 956 struct tcmu_mailbox *mb = udev->mb_addr; 957 uint32_t cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 958 959 /* Insert a PAD if end-of-ring space is too small */ 960 if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) { 961 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); 962 963 hdr = udev->cmdr + cmd_head; 964 tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD); 965 tcmu_hdr_set_len(&hdr->len_op, pad_size); 966 hdr->cmd_id = 0; /* not used for PAD */ 967 hdr->kflags = 0; 968 hdr->uflags = 0; 969 tcmu_flush_dcache_range(hdr, sizeof(*hdr)); 970 971 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); 972 tcmu_flush_dcache_range(mb, sizeof(*mb)); 973 974 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 975 WARN_ON(cmd_head != 0); 976 } 977 978 return cmd_head; 979 } 980 981 static void tcmu_unplug_device(struct se_dev_plug *se_plug) 982 { 983 struct se_device *se_dev = se_plug->se_dev; 984 struct tcmu_dev *udev = TCMU_DEV(se_dev); 985 986 clear_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags); 987 uio_event_notify(&udev->uio_info); 988 } 989 990 static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev) 991 { 992 struct tcmu_dev *udev = TCMU_DEV(se_dev); 993 994 if (!test_and_set_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags)) 995 return &udev->se_plug; 996 997 return NULL; 998 } 999 1000 /** 1001 * queue_cmd_ring - queue cmd to ring or internally 1002 * @tcmu_cmd: cmd to queue 1003 * @scsi_err: TCM error code if failure (-1) returned. 1004 * 1005 * Returns: 1006 * -1 we cannot queue internally or to the ring. 1007 * 0 success 1008 * 1 internally queued to wait for ring memory to free. 
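 *
 * Must be called with cmdr_lock held.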
1009 */ 1010 static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) 1011 { 1012 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 1013 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 1014 size_t base_command_size, command_size; 1015 struct tcmu_mailbox *mb = udev->mb_addr; 1016 struct tcmu_cmd_entry *entry; 1017 struct iovec *iov; 1018 int iov_cnt, iov_bidi_cnt; 1019 uint32_t cmd_id, cmd_head; 1020 uint64_t cdb_off; 1021 uint32_t blk_size = udev->data_blk_size; 1022 /* size of data buffer needed */ 1023 size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size; 1024 1025 *scsi_err = TCM_NO_SENSE; 1026 1027 if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) { 1028 *scsi_err = TCM_LUN_BUSY; 1029 return -1; 1030 } 1031 1032 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { 1033 *scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1034 return -1; 1035 } 1036 1037 if (!list_empty(&udev->qfull_queue)) 1038 goto queue; 1039 1040 if (data_length > (size_t)udev->max_blocks * blk_size) { 1041 pr_warn("TCMU: Request of size %zu is too big for %zu data area\n", 1042 data_length, (size_t)udev->max_blocks * blk_size); 1043 *scsi_err = TCM_INVALID_CDB_FIELD; 1044 return -1; 1045 } 1046 1047 iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt); 1048 if (iov_cnt < 0) 1049 goto free_and_queue; 1050 1051 /* 1052 * Must be a certain minimum size for response sense info, but 1053 * also may be larger if the iov array is large. 1054 */ 1055 base_command_size = tcmu_cmd_get_base_cmd_size(iov_cnt); 1056 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); 1057 1058 if (command_size > (udev->cmdr_size / 2)) { 1059 pr_warn("TCMU: Request of size %zu is too big for %u cmd ring\n", 1060 command_size, udev->cmdr_size); 1061 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur); 1062 *scsi_err = TCM_INVALID_CDB_FIELD; 1063 return -1; 1064 } 1065 1066 if (!is_ring_space_avail(udev, command_size)) 1067 /* 1068 * Don't leave commands partially setup because the unmap 1069 * thread might need the blocks to make forward progress. 
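		 * The free_and_queue label below releases any data blocks
		 * already reserved above before parking the command on the
		 * qfull_queue.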
1070 */ 1071 goto free_and_queue; 1072 1073 if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff), 1074 GFP_NOWAIT) < 0) { 1075 pr_err("tcmu: Could not allocate cmd id.\n"); 1076 1077 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); 1078 *scsi_err = TCM_OUT_OF_RESOURCES; 1079 return -1; 1080 } 1081 tcmu_cmd->cmd_id = cmd_id; 1082 1083 pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id, 1084 tcmu_cmd, udev->name); 1085 1086 cmd_head = ring_insert_padding(udev, command_size); 1087 1088 entry = udev->cmdr + cmd_head; 1089 memset(entry, 0, command_size); 1090 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); 1091 1092 /* prepare iov list and copy data to data area if necessary */ 1093 tcmu_cmd_reset_dbi_cur(tcmu_cmd); 1094 iov = &entry->req.iov[0]; 1095 1096 if (se_cmd->data_direction == DMA_TO_DEVICE || 1097 se_cmd->se_cmd_flags & SCF_BIDI) 1098 scatter_data_area(udev, tcmu_cmd, &iov); 1099 else 1100 tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length); 1101 1102 entry->req.iov_cnt = iov_cnt - iov_bidi_cnt; 1103 1104 /* Handle BIDI commands */ 1105 if (se_cmd->se_cmd_flags & SCF_BIDI) { 1106 iov++; 1107 tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi); 1108 entry->req.iov_bidi_cnt = iov_bidi_cnt; 1109 } 1110 1111 tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer); 1112 1113 entry->hdr.cmd_id = tcmu_cmd->cmd_id; 1114 1115 tcmu_hdr_set_len(&entry->hdr.len_op, command_size); 1116 1117 /* All offsets relative to mb_addr, not start of entry! */ 1118 cdb_off = CMDR_OFF + cmd_head + base_command_size; 1119 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); 1120 entry->req.cdb_off = cdb_off; 1121 tcmu_flush_dcache_range(entry, command_size); 1122 1123 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); 1124 tcmu_flush_dcache_range(mb, sizeof(*mb)); 1125 1126 list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue); 1127 1128 if (!test_bit(TCMU_DEV_BIT_PLUGGED, &udev->flags)) 1129 uio_event_notify(&udev->uio_info); 1130 1131 return 0; 1132 1133 free_and_queue: 1134 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur); 1135 tcmu_cmd_reset_dbi_cur(tcmu_cmd); 1136 1137 queue: 1138 if (add_to_qfull_queue(tcmu_cmd)) { 1139 *scsi_err = TCM_OUT_OF_RESOURCES; 1140 return -1; 1141 } 1142 1143 return 1; 1144 } 1145 1146 /** 1147 * queue_tmr_ring - queue tmr info to ring or internally 1148 * @udev: related tcmu_dev 1149 * @tmr: tcmu_tmr containing tmr info to queue 1150 * 1151 * Returns: 1152 * 0 success 1153 * 1 internally queued to wait for ring memory to free. 
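 *
 * Must be called with cmdr_lock held. @tmr is freed here when 0 is
 * returned; on return 1 it was queued on tmr_queue and is kept.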
1154 */ 1155 static int 1156 queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr) 1157 { 1158 struct tcmu_tmr_entry *entry; 1159 int cmd_size; 1160 int id_list_sz; 1161 struct tcmu_mailbox *mb = udev->mb_addr; 1162 uint32_t cmd_head; 1163 1164 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) 1165 goto out_free; 1166 1167 id_list_sz = sizeof(tmr->tmr_cmd_ids[0]) * tmr->tmr_cmd_cnt; 1168 cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE); 1169 1170 if (!list_empty(&udev->tmr_queue) || 1171 !is_ring_space_avail(udev, cmd_size)) { 1172 list_add_tail(&tmr->queue_entry, &udev->tmr_queue); 1173 pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n", 1174 tmr, udev->name); 1175 return 1; 1176 } 1177 1178 cmd_head = ring_insert_padding(udev, cmd_size); 1179 1180 entry = udev->cmdr + cmd_head; 1181 memset(entry, 0, cmd_size); 1182 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR); 1183 tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size); 1184 entry->tmr_type = tmr->tmr_type; 1185 entry->cmd_cnt = tmr->tmr_cmd_cnt; 1186 memcpy(&entry->cmd_ids[0], &tmr->tmr_cmd_ids[0], id_list_sz); 1187 tcmu_flush_dcache_range(entry, cmd_size); 1188 1189 UPDATE_HEAD(mb->cmd_head, cmd_size, udev->cmdr_size); 1190 tcmu_flush_dcache_range(mb, sizeof(*mb)); 1191 1192 uio_event_notify(&udev->uio_info); 1193 1194 out_free: 1195 kfree(tmr); 1196 1197 return 0; 1198 } 1199 1200 static sense_reason_t 1201 tcmu_queue_cmd(struct se_cmd *se_cmd) 1202 { 1203 struct se_device *se_dev = se_cmd->se_dev; 1204 struct tcmu_dev *udev = TCMU_DEV(se_dev); 1205 struct tcmu_cmd *tcmu_cmd; 1206 sense_reason_t scsi_ret = TCM_CHECK_CONDITION_ABORT_CMD; 1207 int ret = -1; 1208 1209 tcmu_cmd = tcmu_alloc_cmd(se_cmd); 1210 if (!tcmu_cmd) 1211 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1212 1213 mutex_lock(&udev->cmdr_lock); 1214 if (!(se_cmd->transport_state & CMD_T_ABORTED)) 1215 ret = queue_cmd_ring(tcmu_cmd, &scsi_ret); 1216 if (ret < 0) 1217 tcmu_free_cmd(tcmu_cmd); 1218 else 1219 se_cmd->priv = tcmu_cmd; 1220 mutex_unlock(&udev->cmdr_lock); 1221 return scsi_ret; 1222 } 1223 1224 static void tcmu_set_next_deadline(struct list_head *queue, 1225 struct timer_list *timer) 1226 { 1227 struct tcmu_cmd *cmd; 1228 1229 if (!list_empty(queue)) { 1230 cmd = list_first_entry(queue, struct tcmu_cmd, queue_entry); 1231 mod_timer(timer, cmd->deadline); 1232 } else 1233 del_timer(timer); 1234 } 1235 1236 static int 1237 tcmu_tmr_type(enum tcm_tmreq_table tmf) 1238 { 1239 switch (tmf) { 1240 case TMR_ABORT_TASK: return TCMU_TMR_ABORT_TASK; 1241 case TMR_ABORT_TASK_SET: return TCMU_TMR_ABORT_TASK_SET; 1242 case TMR_CLEAR_ACA: return TCMU_TMR_CLEAR_ACA; 1243 case TMR_CLEAR_TASK_SET: return TCMU_TMR_CLEAR_TASK_SET; 1244 case TMR_LUN_RESET: return TCMU_TMR_LUN_RESET; 1245 case TMR_TARGET_WARM_RESET: return TCMU_TMR_TARGET_WARM_RESET; 1246 case TMR_TARGET_COLD_RESET: return TCMU_TMR_TARGET_COLD_RESET; 1247 case TMR_LUN_RESET_PRO: return TCMU_TMR_LUN_RESET_PRO; 1248 default: return TCMU_TMR_UNKNOWN; 1249 } 1250 } 1251 1252 static void 1253 tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf, 1254 struct list_head *cmd_list) 1255 { 1256 int i = 0, cmd_cnt = 0; 1257 bool unqueued = false; 1258 struct tcmu_cmd *cmd; 1259 struct se_cmd *se_cmd; 1260 struct tcmu_tmr *tmr; 1261 struct tcmu_dev *udev = TCMU_DEV(se_dev); 1262 1263 mutex_lock(&udev->cmdr_lock); 1264 1265 /* First we check for aborted commands in qfull_queue */ 1266 list_for_each_entry(se_cmd, cmd_list, state_list) { 1267 i++; 1268 if (!se_cmd->priv) 1269 
continue; 1270 cmd = se_cmd->priv; 1271 /* Commands on qfull queue have no id yet */ 1272 if (cmd->cmd_id) { 1273 cmd_cnt++; 1274 continue; 1275 } 1276 pr_debug("Removing aborted command %p from queue on dev %s.\n", 1277 cmd, udev->name); 1278 1279 list_del_init(&cmd->queue_entry); 1280 tcmu_free_cmd(cmd); 1281 se_cmd->priv = NULL; 1282 target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED); 1283 unqueued = true; 1284 } 1285 if (unqueued) 1286 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); 1287 1288 if (!test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags)) 1289 goto unlock; 1290 1291 pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n", 1292 tcmu_tmr_type(tmf), udev->name, i, cmd_cnt); 1293 1294 tmr = kmalloc(struct_size(tmr, tmr_cmd_ids, cmd_cnt), GFP_NOIO); 1295 if (!tmr) 1296 goto unlock; 1297 1298 tmr->tmr_type = tcmu_tmr_type(tmf); 1299 tmr->tmr_cmd_cnt = cmd_cnt; 1300 1301 if (cmd_cnt != 0) { 1302 cmd_cnt = 0; 1303 list_for_each_entry(se_cmd, cmd_list, state_list) { 1304 if (!se_cmd->priv) 1305 continue; 1306 cmd = se_cmd->priv; 1307 if (cmd->cmd_id) 1308 tmr->tmr_cmd_ids[cmd_cnt++] = cmd->cmd_id; 1309 } 1310 } 1311 1312 queue_tmr_ring(udev, tmr); 1313 1314 unlock: 1315 mutex_unlock(&udev->cmdr_lock); 1316 } 1317 1318 static bool tcmu_handle_completion(struct tcmu_cmd *cmd, 1319 struct tcmu_cmd_entry *entry, bool keep_buf) 1320 { 1321 struct se_cmd *se_cmd = cmd->se_cmd; 1322 struct tcmu_dev *udev = cmd->tcmu_dev; 1323 bool read_len_valid = false; 1324 bool ret = true; 1325 uint32_t read_len; 1326 1327 /* 1328 * cmd has been completed already from timeout, just reclaim 1329 * data area space and free cmd 1330 */ 1331 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 1332 WARN_ON_ONCE(se_cmd); 1333 goto out; 1334 } 1335 if (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) { 1336 pr_err("cmd_id %u already completed with KEEP_BUF, ring is broken\n", 1337 entry->hdr.cmd_id); 1338 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); 1339 ret = false; 1340 goto out; 1341 } 1342 1343 list_del_init(&cmd->queue_entry); 1344 1345 tcmu_cmd_reset_dbi_cur(cmd); 1346 1347 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { 1348 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n", 1349 cmd->se_cmd); 1350 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION; 1351 goto done; 1352 } 1353 1354 read_len = se_cmd->data_length; 1355 if (se_cmd->data_direction == DMA_FROM_DEVICE && 1356 (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) { 1357 read_len_valid = true; 1358 if (entry->rsp.read_len < read_len) 1359 read_len = entry->rsp.read_len; 1360 } 1361 1362 if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { 1363 transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer); 1364 if (!read_len_valid ) 1365 goto done; 1366 else 1367 se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL; 1368 } 1369 if (se_cmd->se_cmd_flags & SCF_BIDI) { 1370 /* Get Data-In buffer before clean up */ 1371 gather_data_area(udev, cmd, true, read_len); 1372 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { 1373 gather_data_area(udev, cmd, false, read_len); 1374 } else if (se_cmd->data_direction == DMA_TO_DEVICE) { 1375 /* TODO: */ 1376 } else if (se_cmd->data_direction != DMA_NONE) { 1377 pr_warn("TCMU: data direction was %d!\n", 1378 se_cmd->data_direction); 1379 } 1380 1381 done: 1382 se_cmd->priv = NULL; 1383 if (read_len_valid) { 1384 pr_debug("read_len = %d\n", read_len); 1385 target_complete_cmd_with_length(cmd->se_cmd, 1386 entry->rsp.scsi_status, read_len); 1387 } else 1388 
target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); 1389 1390 out: 1391 if (!keep_buf) { 1392 tcmu_cmd_free_data(cmd, cmd->dbi_cnt); 1393 tcmu_free_cmd(cmd); 1394 } else { 1395 /* 1396 * Keep this command after completion, since userspace still 1397 * needs the data buffer. Mark it with TCMU_CMD_BIT_KEEP_BUF 1398 * and reset potential TCMU_CMD_BIT_EXPIRED, so we don't accept 1399 * a second completion later. 1400 * Userspace can free the buffer later by writing the cmd_id 1401 * to new action attribute free_kept_buf. 1402 */ 1403 clear_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); 1404 set_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags); 1405 } 1406 return ret; 1407 } 1408 1409 static int tcmu_run_tmr_queue(struct tcmu_dev *udev) 1410 { 1411 struct tcmu_tmr *tmr, *tmp; 1412 LIST_HEAD(tmrs); 1413 1414 if (list_empty(&udev->tmr_queue)) 1415 return 1; 1416 1417 pr_debug("running %s's tmr queue\n", udev->name); 1418 1419 list_splice_init(&udev->tmr_queue, &tmrs); 1420 1421 list_for_each_entry_safe(tmr, tmp, &tmrs, queue_entry) { 1422 list_del_init(&tmr->queue_entry); 1423 1424 pr_debug("removing tmr %p on dev %s from queue\n", 1425 tmr, udev->name); 1426 1427 if (queue_tmr_ring(udev, tmr)) { 1428 pr_debug("ran out of space during tmr queue run\n"); 1429 /* 1430 * tmr was requeued, so just put all tmrs back in 1431 * the queue 1432 */ 1433 list_splice_tail(&tmrs, &udev->tmr_queue); 1434 return 0; 1435 } 1436 } 1437 1438 return 1; 1439 } 1440 1441 static bool tcmu_handle_completions(struct tcmu_dev *udev) 1442 { 1443 struct tcmu_mailbox *mb; 1444 struct tcmu_cmd *cmd; 1445 bool free_space = false; 1446 1447 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { 1448 pr_err("ring broken, not handling completions\n"); 1449 return false; 1450 } 1451 1452 mb = udev->mb_addr; 1453 tcmu_flush_dcache_range(mb, sizeof(*mb)); 1454 1455 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { 1456 1457 struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned; 1458 bool keep_buf; 1459 1460 /* 1461 * Flush max. up to end of cmd ring since current entry might 1462 * be a padding that is shorter than sizeof(*entry) 1463 */ 1464 size_t ring_left = head_to_end(udev->cmdr_last_cleaned, 1465 udev->cmdr_size); 1466 tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ? 
1467 ring_left : sizeof(*entry)); 1468 1469 free_space = true; 1470 1471 if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD || 1472 tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_TMR) { 1473 UPDATE_HEAD(udev->cmdr_last_cleaned, 1474 tcmu_hdr_get_len(entry->hdr.len_op), 1475 udev->cmdr_size); 1476 continue; 1477 } 1478 WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); 1479 1480 keep_buf = !!(entry->hdr.uflags & TCMU_UFLAG_KEEP_BUF); 1481 if (keep_buf) 1482 cmd = xa_load(&udev->commands, entry->hdr.cmd_id); 1483 else 1484 cmd = xa_erase(&udev->commands, entry->hdr.cmd_id); 1485 if (!cmd) { 1486 pr_err("cmd_id %u not found, ring is broken\n", 1487 entry->hdr.cmd_id); 1488 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); 1489 return false; 1490 } 1491 1492 if (!tcmu_handle_completion(cmd, entry, keep_buf)) 1493 break; 1494 1495 UPDATE_HEAD(udev->cmdr_last_cleaned, 1496 tcmu_hdr_get_len(entry->hdr.len_op), 1497 udev->cmdr_size); 1498 } 1499 if (free_space) 1500 free_space = tcmu_run_tmr_queue(udev); 1501 1502 if (atomic_read(&global_page_count) > tcmu_global_max_pages && 1503 xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) { 1504 /* 1505 * Allocated blocks exceeded global block limit, currently no 1506 * more pending or waiting commands so try to reclaim blocks. 1507 */ 1508 schedule_delayed_work(&tcmu_unmap_work, 0); 1509 } 1510 if (udev->cmd_time_out) 1511 tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer); 1512 1513 return free_space; 1514 } 1515 1516 static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd) 1517 { 1518 struct se_cmd *se_cmd; 1519 1520 if (!time_after_eq(jiffies, cmd->deadline)) 1521 return; 1522 1523 set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); 1524 list_del_init(&cmd->queue_entry); 1525 se_cmd = cmd->se_cmd; 1526 se_cmd->priv = NULL; 1527 cmd->se_cmd = NULL; 1528 1529 pr_debug("Timing out inflight cmd %u on dev %s.\n", 1530 cmd->cmd_id, cmd->tcmu_dev->name); 1531 1532 target_complete_cmd(se_cmd, SAM_STAT_CHECK_CONDITION); 1533 } 1534 1535 static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd) 1536 { 1537 struct se_cmd *se_cmd; 1538 1539 if (!time_after_eq(jiffies, cmd->deadline)) 1540 return; 1541 1542 pr_debug("Timing out queued cmd %p on dev %s.\n", 1543 cmd, cmd->tcmu_dev->name); 1544 1545 list_del_init(&cmd->queue_entry); 1546 se_cmd = cmd->se_cmd; 1547 tcmu_free_cmd(cmd); 1548 1549 se_cmd->priv = NULL; 1550 target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL); 1551 } 1552 1553 static void tcmu_device_timedout(struct tcmu_dev *udev) 1554 { 1555 spin_lock(&timed_out_udevs_lock); 1556 if (list_empty(&udev->timedout_entry)) 1557 list_add_tail(&udev->timedout_entry, &timed_out_udevs); 1558 spin_unlock(&timed_out_udevs_lock); 1559 1560 schedule_delayed_work(&tcmu_unmap_work, 0); 1561 } 1562 1563 static void tcmu_cmd_timedout(struct timer_list *t) 1564 { 1565 struct tcmu_dev *udev = from_timer(udev, t, cmd_timer); 1566 1567 pr_debug("%s cmd timeout has expired\n", udev->name); 1568 tcmu_device_timedout(udev); 1569 } 1570 1571 static void tcmu_qfull_timedout(struct timer_list *t) 1572 { 1573 struct tcmu_dev *udev = from_timer(udev, t, qfull_timer); 1574 1575 pr_debug("%s qfull timeout has expired\n", udev->name); 1576 tcmu_device_timedout(udev); 1577 } 1578 1579 static int tcmu_attach_hba(struct se_hba *hba, u32 host_id) 1580 { 1581 struct tcmu_hba *tcmu_hba; 1582 1583 tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL); 1584 if (!tcmu_hba) 1585 return -ENOMEM; 1586 1587 tcmu_hba->host_id = host_id; 1588 hba->hba_ptr = tcmu_hba; 
1589 1590 return 0; 1591 } 1592 1593 static void tcmu_detach_hba(struct se_hba *hba) 1594 { 1595 kfree(hba->hba_ptr); 1596 hba->hba_ptr = NULL; 1597 } 1598 1599 static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) 1600 { 1601 struct tcmu_dev *udev; 1602 1603 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); 1604 if (!udev) 1605 return NULL; 1606 kref_init(&udev->kref); 1607 1608 udev->name = kstrdup(name, GFP_KERNEL); 1609 if (!udev->name) { 1610 kfree(udev); 1611 return NULL; 1612 } 1613 1614 udev->hba = hba; 1615 udev->cmd_time_out = TCMU_TIME_OUT; 1616 udev->qfull_time_out = -1; 1617 1618 udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF; 1619 udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk; 1620 udev->cmdr_size = CMDR_SIZE_DEF; 1621 udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF); 1622 1623 mutex_init(&udev->cmdr_lock); 1624 1625 INIT_LIST_HEAD(&udev->node); 1626 INIT_LIST_HEAD(&udev->timedout_entry); 1627 INIT_LIST_HEAD(&udev->qfull_queue); 1628 INIT_LIST_HEAD(&udev->tmr_queue); 1629 INIT_LIST_HEAD(&udev->inflight_queue); 1630 xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1); 1631 1632 timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0); 1633 timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0); 1634 1635 xa_init(&udev->data_pages); 1636 1637 return &udev->se_dev; 1638 } 1639 1640 static void tcmu_dev_call_rcu(struct rcu_head *p) 1641 { 1642 struct se_device *dev = container_of(p, struct se_device, rcu_head); 1643 struct tcmu_dev *udev = TCMU_DEV(dev); 1644 1645 kfree(udev->uio_info.name); 1646 kfree(udev->name); 1647 kfree(udev); 1648 } 1649 1650 static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd) 1651 { 1652 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) || 1653 test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) { 1654 kmem_cache_free(tcmu_cmd_cache, cmd); 1655 return 0; 1656 } 1657 return -EINVAL; 1658 } 1659 1660 static u32 tcmu_blocks_release(struct tcmu_dev *udev, unsigned long first, 1661 unsigned long last) 1662 { 1663 XA_STATE(xas, &udev->data_pages, first * udev->data_pages_per_blk); 1664 struct page *page; 1665 u32 pages_freed = 0; 1666 1667 xas_lock(&xas); 1668 xas_for_each(&xas, page, (last + 1) * udev->data_pages_per_blk - 1) { 1669 xas_store(&xas, NULL); 1670 __free_page(page); 1671 pages_freed++; 1672 } 1673 xas_unlock(&xas); 1674 1675 atomic_sub(pages_freed, &global_page_count); 1676 1677 return pages_freed; 1678 } 1679 1680 static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev) 1681 { 1682 struct tcmu_tmr *tmr, *tmp; 1683 1684 list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) { 1685 list_del_init(&tmr->queue_entry); 1686 kfree(tmr); 1687 } 1688 } 1689 1690 static void tcmu_dev_kref_release(struct kref *kref) 1691 { 1692 struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); 1693 struct se_device *dev = &udev->se_dev; 1694 struct tcmu_cmd *cmd; 1695 bool all_expired = true; 1696 unsigned long i; 1697 1698 vfree(udev->mb_addr); 1699 udev->mb_addr = NULL; 1700 1701 spin_lock_bh(&timed_out_udevs_lock); 1702 if (!list_empty(&udev->timedout_entry)) 1703 list_del(&udev->timedout_entry); 1704 spin_unlock_bh(&timed_out_udevs_lock); 1705 1706 /* Upper layer should drain all requests before calling this */ 1707 mutex_lock(&udev->cmdr_lock); 1708 xa_for_each(&udev->commands, i, cmd) { 1709 if (tcmu_check_and_free_pending_cmd(cmd) != 0) 1710 all_expired = false; 1711 } 1712 /* There can be left over TMR cmds. Remove them. 
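	 * Queued TMRs hold no data-area blocks of their own, so freeing
	 * the structs is all that is needed.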
*/ 1713 tcmu_remove_all_queued_tmr(udev); 1714 if (!list_empty(&udev->qfull_queue)) 1715 all_expired = false; 1716 xa_destroy(&udev->commands); 1717 WARN_ON(!all_expired); 1718 1719 tcmu_blocks_release(udev, 0, udev->dbi_max); 1720 bitmap_free(udev->data_bitmap); 1721 mutex_unlock(&udev->cmdr_lock); 1722 1723 pr_debug("dev_kref_release\n"); 1724 1725 call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); 1726 } 1727 1728 static void run_qfull_queue(struct tcmu_dev *udev, bool fail) 1729 { 1730 struct tcmu_cmd *tcmu_cmd, *tmp_cmd; 1731 LIST_HEAD(cmds); 1732 sense_reason_t scsi_ret; 1733 int ret; 1734 1735 if (list_empty(&udev->qfull_queue)) 1736 return; 1737 1738 pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail); 1739 1740 list_splice_init(&udev->qfull_queue, &cmds); 1741 1742 list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) { 1743 list_del_init(&tcmu_cmd->queue_entry); 1744 1745 pr_debug("removing cmd %p on dev %s from queue\n", 1746 tcmu_cmd, udev->name); 1747 1748 if (fail) { 1749 /* 1750 * We were not able to even start the command, so 1751 * fail with busy to allow a retry in case runner 1752 * was only temporarily down. If the device is being 1753 * removed then LIO core will do the right thing and 1754 * fail the retry. 1755 */ 1756 tcmu_cmd->se_cmd->priv = NULL; 1757 target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY); 1758 tcmu_free_cmd(tcmu_cmd); 1759 continue; 1760 } 1761 1762 ret = queue_cmd_ring(tcmu_cmd, &scsi_ret); 1763 if (ret < 0) { 1764 pr_debug("cmd %p on dev %s failed with %u\n", 1765 tcmu_cmd, udev->name, scsi_ret); 1766 /* 1767 * Ignore scsi_ret for now. target_complete_cmd 1768 * drops it. 1769 */ 1770 tcmu_cmd->se_cmd->priv = NULL; 1771 target_complete_cmd(tcmu_cmd->se_cmd, 1772 SAM_STAT_CHECK_CONDITION); 1773 tcmu_free_cmd(tcmu_cmd); 1774 } else if (ret > 0) { 1775 pr_debug("ran out of space during cmdr queue run\n"); 1776 /* 1777 * cmd was requeued, so just put all cmds back in 1778 * the queue 1779 */ 1780 list_splice_tail(&cmds, &udev->qfull_queue); 1781 break; 1782 } 1783 } 1784 1785 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); 1786 } 1787 1788 static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) 1789 { 1790 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1791 1792 mutex_lock(&udev->cmdr_lock); 1793 if (tcmu_handle_completions(udev)) 1794 run_qfull_queue(udev, false); 1795 mutex_unlock(&udev->cmdr_lock); 1796 1797 return 0; 1798 } 1799 1800 /* 1801 * mmap code from uio.c. Copied here because we want to hook mmap() 1802 * and this stuff must come along. 1803 */ 1804 static int tcmu_find_mem_index(struct vm_area_struct *vma) 1805 { 1806 struct tcmu_dev *udev = vma->vm_private_data; 1807 struct uio_info *info = &udev->uio_info; 1808 1809 if (vma->vm_pgoff < MAX_UIO_MAPS) { 1810 if (info->mem[vma->vm_pgoff].size == 0) 1811 return -1; 1812 return (int)vma->vm_pgoff; 1813 } 1814 return -1; 1815 } 1816 1817 static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi) 1818 { 1819 struct page *page; 1820 1821 mutex_lock(&udev->cmdr_lock); 1822 page = xa_load(&udev->data_pages, dpi); 1823 if (likely(page)) { 1824 get_page(page); 1825 mutex_unlock(&udev->cmdr_lock); 1826 return page; 1827 } 1828 1829 /* 1830 * Userspace messed up and passed in a address not in the 1831 * data iov passed to it. 
1832 */ 1833 pr_err("Invalid addr to data page mapping (dpi %u) on device %s\n", 1834 dpi, udev->name); 1835 mutex_unlock(&udev->cmdr_lock); 1836 1837 return NULL; 1838 } 1839 1840 static void tcmu_vma_open(struct vm_area_struct *vma) 1841 { 1842 struct tcmu_dev *udev = vma->vm_private_data; 1843 1844 pr_debug("vma_open\n"); 1845 1846 kref_get(&udev->kref); 1847 } 1848 1849 static void tcmu_vma_close(struct vm_area_struct *vma) 1850 { 1851 struct tcmu_dev *udev = vma->vm_private_data; 1852 1853 pr_debug("vma_close\n"); 1854 1855 /* release ref from tcmu_vma_open */ 1856 kref_put(&udev->kref, tcmu_dev_kref_release); 1857 } 1858 1859 static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf) 1860 { 1861 struct tcmu_dev *udev = vmf->vma->vm_private_data; 1862 struct uio_info *info = &udev->uio_info; 1863 struct page *page; 1864 unsigned long offset; 1865 void *addr; 1866 1867 int mi = tcmu_find_mem_index(vmf->vma); 1868 if (mi < 0) 1869 return VM_FAULT_SIGBUS; 1870 1871 /* 1872 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE 1873 * to use mem[N]. 1874 */ 1875 offset = (vmf->pgoff - mi) << PAGE_SHIFT; 1876 1877 if (offset < udev->data_off) { 1878 /* For the vmalloc()ed cmd area pages */ 1879 addr = (void *)(unsigned long)info->mem[mi].addr + offset; 1880 page = vmalloc_to_page(addr); 1881 get_page(page); 1882 } else { 1883 uint32_t dpi; 1884 1885 /* For the dynamically growing data area pages */ 1886 dpi = (offset - udev->data_off) / PAGE_SIZE; 1887 page = tcmu_try_get_data_page(udev, dpi); 1888 if (!page) 1889 return VM_FAULT_SIGBUS; 1890 } 1891 1892 vmf->page = page; 1893 return 0; 1894 } 1895 1896 static const struct vm_operations_struct tcmu_vm_ops = { 1897 .open = tcmu_vma_open, 1898 .close = tcmu_vma_close, 1899 .fault = tcmu_vma_fault, 1900 }; 1901 1902 static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) 1903 { 1904 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1905 1906 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 1907 vma->vm_ops = &tcmu_vm_ops; 1908 1909 vma->vm_private_data = udev; 1910 1911 /* Ensure the mmap is exactly the right size */ 1912 if (vma_pages(vma) != udev->mmap_pages) 1913 return -EINVAL; 1914 1915 tcmu_vma_open(vma); 1916 1917 return 0; 1918 } 1919 1920 static int tcmu_open(struct uio_info *info, struct inode *inode) 1921 { 1922 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1923 1924 /* O_EXCL not supported for char devs, so fake it? */ 1925 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags)) 1926 return -EBUSY; 1927 1928 udev->inode = inode; 1929 1930 pr_debug("open\n"); 1931 1932 return 0; 1933 } 1934 1935 static int tcmu_release(struct uio_info *info, struct inode *inode) 1936 { 1937 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1938 struct tcmu_cmd *cmd; 1939 unsigned long i; 1940 bool freed = false; 1941 1942 mutex_lock(&udev->cmdr_lock); 1943 1944 xa_for_each(&udev->commands, i, cmd) { 1945 /* Cmds with KEEP_BUF set are no longer on the ring, but 1946 * userspace still holds the data buffer. If userspace closes 1947 * we implicitly free these cmds and buffers, since after new 1948 * open the (new ?) userspace cannot find the cmd in the ring 1949 * and thus never will release the buffer by writing cmd_id to 1950 * free_kept_buf action attribute. 
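		 * Freeing them here returns their data blocks to the data
		 * area so they can be reused by new commands.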
1951 */ 1952 if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) 1953 continue; 1954 pr_debug("removing KEEP_BUF cmd %u on dev %s from ring\n", 1955 cmd->cmd_id, udev->name); 1956 freed = true; 1957 1958 xa_erase(&udev->commands, i); 1959 tcmu_cmd_free_data(cmd, cmd->dbi_cnt); 1960 tcmu_free_cmd(cmd); 1961 } 1962 /* 1963 * We only freed data space, not ring space. Therefore we dont call 1964 * run_tmr_queue, but call run_qfull_queue if tmr_list is empty. 1965 */ 1966 if (freed && list_empty(&udev->tmr_queue)) 1967 run_qfull_queue(udev, false); 1968 1969 mutex_unlock(&udev->cmdr_lock); 1970 1971 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); 1972 1973 pr_debug("close\n"); 1974 1975 return 0; 1976 } 1977 1978 static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) 1979 { 1980 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 1981 1982 if (!tcmu_kern_cmd_reply_supported) 1983 return 0; 1984 1985 if (udev->nl_reply_supported <= 0) 1986 return 0; 1987 1988 mutex_lock(&tcmu_nl_cmd_mutex); 1989 1990 if (tcmu_netlink_blocked) { 1991 mutex_unlock(&tcmu_nl_cmd_mutex); 1992 pr_warn("Failing nl cmd %d on %s. Interface is blocked.\n", cmd, 1993 udev->name); 1994 return -EAGAIN; 1995 } 1996 1997 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { 1998 mutex_unlock(&tcmu_nl_cmd_mutex); 1999 pr_warn("netlink cmd %d already executing on %s\n", 2000 nl_cmd->cmd, udev->name); 2001 return -EBUSY; 2002 } 2003 2004 memset(nl_cmd, 0, sizeof(*nl_cmd)); 2005 nl_cmd->cmd = cmd; 2006 nl_cmd->udev = udev; 2007 init_completion(&nl_cmd->complete); 2008 INIT_LIST_HEAD(&nl_cmd->nl_list); 2009 2010 list_add_tail(&nl_cmd->nl_list, &tcmu_nl_cmd_list); 2011 2012 mutex_unlock(&tcmu_nl_cmd_mutex); 2013 return 0; 2014 } 2015 2016 static void tcmu_destroy_genl_cmd_reply(struct tcmu_dev *udev) 2017 { 2018 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 2019 2020 if (!tcmu_kern_cmd_reply_supported) 2021 return; 2022 2023 if (udev->nl_reply_supported <= 0) 2024 return; 2025 2026 mutex_lock(&tcmu_nl_cmd_mutex); 2027 2028 list_del(&nl_cmd->nl_list); 2029 memset(nl_cmd, 0, sizeof(*nl_cmd)); 2030 2031 mutex_unlock(&tcmu_nl_cmd_mutex); 2032 } 2033 2034 static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) 2035 { 2036 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 2037 int ret; 2038 2039 if (!tcmu_kern_cmd_reply_supported) 2040 return 0; 2041 2042 if (udev->nl_reply_supported <= 0) 2043 return 0; 2044 2045 pr_debug("sleeping for nl reply\n"); 2046 wait_for_completion(&nl_cmd->complete); 2047 2048 mutex_lock(&tcmu_nl_cmd_mutex); 2049 nl_cmd->cmd = TCMU_CMD_UNSPEC; 2050 ret = nl_cmd->status; 2051 mutex_unlock(&tcmu_nl_cmd_mutex); 2052 2053 return ret; 2054 } 2055 2056 static int tcmu_netlink_event_init(struct tcmu_dev *udev, 2057 enum tcmu_genl_cmd cmd, 2058 struct sk_buff **buf, void **hdr) 2059 { 2060 struct sk_buff *skb; 2061 void *msg_header; 2062 int ret = -ENOMEM; 2063 2064 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 2065 if (!skb) 2066 return ret; 2067 2068 msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd); 2069 if (!msg_header) 2070 goto free_skb; 2071 2072 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name); 2073 if (ret < 0) 2074 goto free_skb; 2075 2076 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor); 2077 if (ret < 0) 2078 goto free_skb; 2079 2080 ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index); 2081 if (ret < 0) 2082 goto free_skb; 2083 2084 *buf = skb; 2085 *hdr = msg_header; 2086 return ret; 2087 2088 free_skb: 2089 nlmsg_free(skb); 2090 return ret; 
static int tcmu_netlink_event_send(struct tcmu_dev *udev,
				   enum tcmu_genl_cmd cmd,
				   struct sk_buff *skb, void *msg_header)
{
	int ret;

	genlmsg_end(skb, msg_header);

	ret = tcmu_init_genl_cmd_reply(udev, cmd);
	if (ret) {
		nlmsg_free(skb);
		return ret;
	}

	ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
				      TCMU_MCGRP_CONFIG, GFP_KERNEL);

	/* Wait during an add as the listener may not be up yet */
	if (ret == 0 ||
	    (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
		return tcmu_wait_genl_cmd_reply(udev);
	else
		tcmu_destroy_genl_cmd_reply(udev);

	return ret;
}

static int tcmu_send_dev_add_event(struct tcmu_dev *udev)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb,
				      &msg_header);
	if (ret < 0)
		return ret;
	return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, skb,
				       msg_header);
}

static int tcmu_send_dev_remove_event(struct tcmu_dev *udev)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE,
				       skb, msg_header);
}

static int tcmu_update_uio_info(struct tcmu_dev *udev)
{
	struct tcmu_hba *hba = udev->hba->hba_ptr;
	struct uio_info *info;
	char *str;

	info = &udev->uio_info;

	if (udev->dev_config[0])
		str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s/%s", hba->host_id,
				udev->name, udev->dev_config);
	else
		str = kasprintf(GFP_KERNEL, "tcm-user/%u/%s", hba->host_id,
				udev->name);
	if (!str)
		return -ENOMEM;

	/* If the old string exists, free it */
	kfree(info->name);
	info->name = str;

	return 0;
}

static int tcmu_configure_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	struct uio_info *info;
	struct tcmu_mailbox *mb;
	size_t data_size;
	int ret = 0;

	ret = tcmu_update_uio_info(udev);
	if (ret)
		return ret;

	info = &udev->uio_info;

	mutex_lock(&udev->cmdr_lock);
	udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
	mutex_unlock(&udev->cmdr_lock);
	if (!udev->data_bitmap) {
		ret = -ENOMEM;
		goto err_bitmap_alloc;
	}

	mb = vzalloc(udev->cmdr_size + CMDR_OFF);
	if (!mb) {
		ret = -ENOMEM;
		goto err_vzalloc;
	}

	/* mailbox fits in first part of CMDR space */
	udev->mb_addr = mb;
	udev->cmdr = (void *)mb + CMDR_OFF;
	udev->data_off = udev->cmdr_size + CMDR_OFF;
	data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT;
	udev->mmap_pages = (data_size + udev->cmdr_size + CMDR_OFF) >> PAGE_SHIFT;
	udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE;
	udev->dbi_thresh = 0; /* Default in Idle state */

	/* Initialise the mailbox of the ring buffer */
	mb->version = TCMU_MAILBOX_VERSION;
	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
		    TCMU_MAILBOX_FLAG_CAP_READ_LEN |
		    TCMU_MAILBOX_FLAG_CAP_TMR |
		    TCMU_MAILBOX_FLAG_CAP_KEEP_BUF;
	mb->cmdr_off = CMDR_OFF;
	mb->cmdr_size = udev->cmdr_size;

	WARN_ON(!PAGE_ALIGNED(udev->data_off));
	WARN_ON(data_size % PAGE_SIZE);

	info->version = __stringify(TCMU_MAILBOX_VERSION);

	info->mem[0].name = "tcm-user command & data buffer";
	info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
	info->mem[0].size = data_size + udev->cmdr_size + CMDR_OFF;
	info->mem[0].memtype = UIO_MEM_NONE;

	info->irqcontrol = tcmu_irqcontrol;
	info->irq = UIO_IRQ_CUSTOM;

	info->mmap = tcmu_mmap;
	info->open = tcmu_open;
	info->release = tcmu_release;

	ret = uio_register_device(tcmu_root_device, info);
	if (ret)
		goto err_register;

	/* User can set hw_block_size before enabling the device */
	if (dev->dev_attrib.hw_block_size == 0)
		dev->dev_attrib.hw_block_size = 512;
	/* Other attributes can be configured in userspace */
	if (!dev->dev_attrib.hw_max_sectors)
		dev->dev_attrib.hw_max_sectors = 128;
	if (!dev->dev_attrib.emulate_write_cache)
		dev->dev_attrib.emulate_write_cache = 0;
	dev->dev_attrib.hw_queue_depth = 128;

	/* If user didn't explicitly disable netlink reply support, use
	 * module scope setting.
	 */
	if (udev->nl_reply_supported >= 0)
		udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;

	/*
	 * Get a ref in case userspace does a close on the uio device before
	 * LIO has initiated tcmu_free_device.
	 */
	kref_get(&udev->kref);

	ret = tcmu_send_dev_add_event(udev);
	if (ret)
		goto err_netlink;

	mutex_lock(&root_udev_mutex);
	list_add(&udev->node, &root_udev);
	mutex_unlock(&root_udev_mutex);

	return 0;

err_netlink:
	kref_put(&udev->kref, tcmu_dev_kref_release);
	uio_unregister_device(&udev->uio_info);
err_register:
	vfree(udev->mb_addr);
	udev->mb_addr = NULL;
err_vzalloc:
	bitmap_free(udev->data_bitmap);
	udev->data_bitmap = NULL;
err_bitmap_alloc:
	kfree(info->name);
	info->name = NULL;

	return ret;
}

static void tcmu_free_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	/* release ref from init */
	kref_put(&udev->kref, tcmu_dev_kref_release);
}

static void tcmu_destroy_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	del_timer_sync(&udev->cmd_timer);
	del_timer_sync(&udev->qfull_timer);

	mutex_lock(&root_udev_mutex);
	list_del(&udev->node);
	mutex_unlock(&root_udev_mutex);

	tcmu_send_dev_remove_event(udev);

	uio_unregister_device(&udev->uio_info);

	/* release ref from configure */
	kref_put(&udev->kref, tcmu_dev_kref_release);
}

static void tcmu_unblock_dev(struct tcmu_dev *udev)
{
	mutex_lock(&udev->cmdr_lock);
	clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags);
	mutex_unlock(&udev->cmdr_lock);
}

static void tcmu_block_dev(struct tcmu_dev *udev)
{
	mutex_lock(&udev->cmdr_lock);

	if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
		goto unlock;

	/* complete IO that has executed successfully */
	tcmu_handle_completions(udev);
	/* fail IO waiting to be queued */
	run_qfull_queue(udev, true);

unlock:
	mutex_unlock(&udev->cmdr_lock);
}
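
/*
 * Reset the command ring from the reset_ring action attribute. Every command
 * still tracked is dropped; commands that are neither expired nor kept back
 * (KEEP_BUF) are completed to the SCSI layer, with BUSY for err_level 1
 * (retryable) and CHECK CONDITION otherwise. The mailbox head/tail and the
 * BROKEN flag are cleared so userspace can start over.
 */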
static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
{
	struct tcmu_mailbox *mb;
	struct tcmu_cmd *cmd;
	unsigned long i;

	mutex_lock(&udev->cmdr_lock);

	xa_for_each(&udev->commands, i, cmd) {
		pr_debug("removing cmd %u on dev %s from ring %s\n",
			 cmd->cmd_id, udev->name,
			 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) ?
			 "(is expired)" :
			 (test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags) ?
			 "(is keep buffer)" : ""));

		xa_erase(&udev->commands, i);
		if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags) &&
		    !test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
			WARN_ON(!cmd->se_cmd);
			list_del_init(&cmd->queue_entry);
			cmd->se_cmd->priv = NULL;
			if (err_level == 1) {
				/*
				 * Userspace was not able to start the
				 * command or it is retryable.
				 */
				target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY);
			} else {
				/* hard failure */
				target_complete_cmd(cmd->se_cmd,
						    SAM_STAT_CHECK_CONDITION);
			}
		}
		tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
		tcmu_free_cmd(cmd);
	}

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));
	pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned,
		 mb->cmd_tail, mb->cmd_head);

	udev->cmdr_last_cleaned = 0;
	mb->cmd_tail = 0;
	mb->cmd_head = 0;
	tcmu_flush_dcache_range(mb, sizeof(*mb));
	clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);

	del_timer(&udev->cmd_timer);

	/*
	 * ring is empty and qfull queue never contains aborted commands.
	 * So TMRs in tmr queue do not contain relevant cmd_ids.
	 * After a ring reset userspace should do a fresh start, so
	 * even LUN RESET message is no longer relevant.
	 * Therefore remove all TMRs from qfull queue
	 */
	tcmu_remove_all_queued_tmr(udev);

	run_qfull_queue(udev, false);

	mutex_unlock(&udev->cmdr_lock);
}

enum {
	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
	Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_data_pages_per_blk,
	Opt_cmd_ring_size_mb, Opt_err,
};

static match_table_t tokens = {
	{Opt_dev_config, "dev_config=%s"},
	{Opt_dev_size, "dev_size=%s"},
	{Opt_hw_block_size, "hw_block_size=%d"},
	{Opt_hw_max_sectors, "hw_max_sectors=%d"},
	{Opt_nl_reply_supported, "nl_reply_supported=%d"},
	{Opt_max_data_area_mb, "max_data_area_mb=%d"},
	{Opt_data_pages_per_blk, "data_pages_per_blk=%d"},
	{Opt_cmd_ring_size_mb, "cmd_ring_size_mb=%d"},
	{Opt_err, NULL}
};

static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
{
	int val, ret;

	ret = match_int(arg, &val);
	if (ret < 0) {
		pr_err("match_int() failed for dev attrib. Error %d.\n",
		       ret);
		return ret;
	}

	if (val <= 0) {
		pr_err("Invalid dev attrib value %d. Must be greater than zero.\n",
		       val);
		return -EINVAL;
	}
	*dev_attrib = val;
	return 0;
}

static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
{
	int val, ret;
	uint32_t pages_per_blk = udev->data_pages_per_blk;

	ret = match_int(arg, &val);
	if (ret < 0) {
		pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
		       ret);
		return ret;
	}
	if (val <= 0) {
		pr_err("Invalid max_data_area %d.\n", val);
		return -EINVAL;
	}
	if (val > TCMU_PAGES_TO_MBS(tcmu_global_max_pages)) {
		pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
		       val, TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
		val = TCMU_PAGES_TO_MBS(tcmu_global_max_pages);
	}
	if (TCMU_MBS_TO_PAGES(val) < pages_per_blk) {
		pr_err("Invalid max_data_area %d (%zu pages): smaller than data_pages_per_blk (%u pages).\n",
		       val, TCMU_MBS_TO_PAGES(val), pages_per_blk);
		return -EINVAL;
	}

	mutex_lock(&udev->cmdr_lock);
	if (udev->data_bitmap) {
		pr_err("Cannot set max_data_area_mb after it has been enabled.\n");
		ret = -EINVAL;
		goto unlock;
	}

	udev->data_area_mb = val;
	udev->max_blocks = TCMU_MBS_TO_PAGES(val) / pages_per_blk;

unlock:
	mutex_unlock(&udev->cmdr_lock);
	return ret;
}

static int tcmu_set_data_pages_per_blk(struct tcmu_dev *udev, substring_t *arg)
{
	int val, ret;

	ret = match_int(arg, &val);
	if (ret < 0) {
		pr_err("match_int() failed for data_pages_per_blk=. Error %d.\n",
		       ret);
		return ret;
	}

	if (val > TCMU_MBS_TO_PAGES(udev->data_area_mb)) {
		pr_err("Invalid data_pages_per_blk %d: greater than max_data_area_mb %d -> %zd pages).\n",
		       val, udev->data_area_mb,
		       TCMU_MBS_TO_PAGES(udev->data_area_mb));
		return -EINVAL;
	}

	mutex_lock(&udev->cmdr_lock);
	if (udev->data_bitmap) {
		pr_err("Cannot set data_pages_per_blk after it has been enabled.\n");
		ret = -EINVAL;
		goto unlock;
	}

	udev->data_pages_per_blk = val;
	udev->max_blocks = TCMU_MBS_TO_PAGES(udev->data_area_mb) / val;

unlock:
	mutex_unlock(&udev->cmdr_lock);
	return ret;
}

static int tcmu_set_cmd_ring_size(struct tcmu_dev *udev, substring_t *arg)
{
	int val, ret;

	ret = match_int(arg, &val);
	if (ret < 0) {
		pr_err("match_int() failed for cmd_ring_size_mb=. Error %d.\n",
		       ret);
		return ret;
	}

	if (val <= 0) {
		pr_err("Invalid cmd_ring_size_mb %d.\n", val);
		return -EINVAL;
	}

	mutex_lock(&udev->cmdr_lock);
	if (udev->data_bitmap) {
		pr_err("Cannot set cmd_ring_size_mb after it has been enabled.\n");
		ret = -EINVAL;
		goto unlock;
	}

	udev->cmdr_size = (val << 20) - CMDR_OFF;
	if (val > (MB_CMDR_SIZE_DEF >> 20)) {
		pr_err("%d is too large. Adjusting cmd_ring_size_mb to global limit of %u\n",
		       val, (MB_CMDR_SIZE_DEF >> 20));
		udev->cmdr_size = CMDR_SIZE_DEF;
	}

unlock:
	mutex_unlock(&udev->cmdr_lock);
	return ret;
}

static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
					    const char *page, ssize_t count)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	char *orig, *ptr, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_dev_config:
			if (match_strlcpy(udev->dev_config, &args[0],
					  TCMU_CONFIG_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
			break;
		case Opt_dev_size:
			ret = match_u64(&args[0], &udev->dev_size);
			if (ret < 0)
				pr_err("match_u64() failed for dev_size=. Error %d.\n",
				       ret);
			break;
		case Opt_hw_block_size:
			ret = tcmu_set_dev_attrib(&args[0],
						  &(dev->dev_attrib.hw_block_size));
			break;
		case Opt_hw_max_sectors:
			ret = tcmu_set_dev_attrib(&args[0],
						  &(dev->dev_attrib.hw_max_sectors));
			break;
		case Opt_nl_reply_supported:
			ret = match_int(&args[0], &udev->nl_reply_supported);
			if (ret < 0)
				pr_err("match_int() failed for nl_reply_supported=. Error %d.\n",
				       ret);
			break;
		case Opt_max_data_area_mb:
			ret = tcmu_set_max_blocks_param(udev, &args[0]);
			break;
		case Opt_data_pages_per_blk:
			ret = tcmu_set_data_pages_per_blk(udev, &args[0]);
			break;
		case Opt_cmd_ring_size_mb:
			ret = tcmu_set_cmd_ring_size(udev, &args[0]);
			break;
		default:
			break;
		}

		if (ret)
			break;
	}

	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "Config: %s ",
		     udev->dev_config[0] ? udev->dev_config : "NULL");
	bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
	bl += sprintf(b + bl, "MaxDataAreaMB: %u ", udev->data_area_mb);
	bl += sprintf(b + bl, "DataPagesPerBlk: %u ", udev->data_pages_per_blk);
	bl += sprintf(b + bl, "CmdRingSizeMB: %u\n",
		      (udev->cmdr_size + CMDR_OFF) >> 20);

	return bl;
}

static sector_t tcmu_get_blocks(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	return div_u64(udev->dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}

static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
	return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
}

static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
}

static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
				       size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = container_of(da->da_dev,
					     struct tcmu_dev, se_dev);
	u32 val;
	int ret;

	if (da->da_dev->export_count) {
		pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
		return -EINVAL;
	}

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	udev->cmd_time_out = val * MSEC_PER_SEC;
	return count;
}
CONFIGFS_ATTR(tcmu_, cmd_time_out);
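
/*
 * qfull_time_out is kept in milliseconds internally; the configfs interface
 * uses seconds, while the values 0 and -1 are stored and shown unchanged.
 */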
static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
			udev->qfull_time_out :
			udev->qfull_time_out / MSEC_PER_SEC);
}

static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
					 const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	s32 val;
	int ret;

	ret = kstrtos32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val >= 0) {
		udev->qfull_time_out = val * MSEC_PER_SEC;
	} else if (val == -1) {
		udev->qfull_time_out = val;
	} else {
		printk(KERN_ERR "Invalid qfull timeout value %d\n", val);
		return -EINVAL;
	}
	return count;
}
CONFIGFS_ATTR(tcmu_, qfull_time_out);

static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb);
}
CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);

static ssize_t tcmu_data_pages_per_blk_show(struct config_item *item,
					    char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%u\n", udev->data_pages_per_blk);
}
CONFIGFS_ATTR_RO(tcmu_, data_pages_per_blk);

static ssize_t tcmu_cmd_ring_size_mb_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%u\n",
			(udev->cmdr_size + CMDR_OFF) >> 20);
}
CONFIGFS_ATTR_RO(tcmu_, cmd_ring_size_mb);

static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
}

static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
				      const char *reconfig_data)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
	if (ret < 0) {
		nlmsg_free(skb);
		return ret;
	}
	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
				       skb, msg_header);
}

static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
				     size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	int ret, len;

	len = strlen(page);
	if (!len || len > TCMU_CONFIG_LEN - 1)
		return -EINVAL;

	/* Check if device has been configured before */
	if (target_dev_configured(&udev->se_dev)) {
		ret = tcmu_send_dev_config_event(udev, page);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
		strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);

		ret = tcmu_update_uio_info(udev);
		if (ret)
			return ret;
		return count;
	}
	strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);

	return count;
}
CONFIGFS_ATTR(tcmu_, dev_config);

static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%llu\n", udev->dev_size);
}

static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
				size, TCMU_ATTR_PAD);
	if (ret < 0) {
		nlmsg_free(skb);
		return ret;
	}
	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
				       skb, msg_header);
}

static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
				   size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u64 val;
	int ret;

	ret = kstrtou64(page, 0, &val);
	if (ret < 0)
		return ret;

	/* Check if device has been configured before */
	if (target_dev_configured(&udev->se_dev)) {
		ret = tcmu_send_dev_size_event(udev, val);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
	}
	udev->dev_size = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, dev_size);

static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
					    char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
}

static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	s8 val;
	int ret;

	ret = kstrtos8(page, 0, &val);
	if (ret < 0)
		return ret;

	udev->nl_reply_supported = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, nl_reply_supported);

static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
					     char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);

	return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
}

static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
	if (ret < 0) {
		nlmsg_free(skb);
		return ret;
	}
	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
				       skb, msg_header);
}
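
/*
 * When the device is already configured, the new write cache setting is
 * first announced to the userspace daemon via a reconfigure netlink event;
 * the attribute is only updated if that event is delivered successfully.
 */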
static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
					      const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u8 val;
	int ret;

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	/* Check if device has been configured before */
	if (target_dev_configured(&udev->se_dev)) {
		ret = tcmu_send_emulate_write_cache(udev, val);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
	}

	da->emulate_write_cache = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, emulate_write_cache);

static ssize_t tcmu_tmr_notification_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%i\n",
			test_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags));
}

static ssize_t tcmu_tmr_notification_store(struct config_item *item,
					   const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u8 val;
	int ret;

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;
	if (val > 1)
		return -EINVAL;

	if (val)
		set_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
	else
		clear_bit(TCMU_DEV_BIT_TMR_NOTIFY, &udev->flags);
	return count;
}
CONFIGFS_ATTR(tcmu_, tmr_notification);

static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);

	if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
		return snprintf(page, PAGE_SIZE, "%s\n", "blocked");
	else
		return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
}

static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
				    size_t count)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	u8 val;
	int ret;

	if (!target_dev_configured(&udev->se_dev)) {
		pr_err("Device is not configured.\n");
		return -EINVAL;
	}

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val > 1) {
		pr_err("Invalid block value %d\n", val);
		return -EINVAL;
	}

	if (!val)
		tcmu_unblock_dev(udev);
	else
		tcmu_block_dev(udev);
	return count;
}
CONFIGFS_ATTR(tcmu_, block_dev);

static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
				     size_t count)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	u8 val;
	int ret;

	if (!target_dev_configured(&udev->se_dev)) {
		pr_err("Device is not configured.\n");
		return -EINVAL;
	}

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val != 1 && val != 2) {
		pr_err("Invalid reset ring value %d\n", val);
		return -EINVAL;
	}

	tcmu_reset_ring(udev, val);
	return count;
}
CONFIGFS_ATTR_WO(tcmu_, reset_ring);
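
/*
 * free_kept_buf action: userspace writes the cmd_id of a command it completed
 * with the KEEP_BUF flag once it is done with the data buffer, allowing the
 * kernel to release that command's data blocks.
 */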
static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *page,
					size_t count)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *cmd;
	u16 cmd_id;
	int ret;

	if (!target_dev_configured(&udev->se_dev)) {
		pr_err("Device is not configured.\n");
		return -EINVAL;
	}

	ret = kstrtou16(page, 0, &cmd_id);
	if (ret < 0)
		return ret;

	mutex_lock(&udev->cmdr_lock);

	{
		XA_STATE(xas, &udev->commands, cmd_id);

		xas_lock(&xas);
		cmd = xas_load(&xas);
		if (!cmd) {
			pr_err("free_kept_buf: cmd_id %d not found\n", cmd_id);
			count = -EINVAL;
			xas_unlock(&xas);
			goto out_unlock;
		}
		if (!test_bit(TCMU_CMD_BIT_KEEP_BUF, &cmd->flags)) {
			pr_err("free_kept_buf: cmd_id %d was not completed with KEEP_BUF\n",
			       cmd_id);
			count = -EINVAL;
			xas_unlock(&xas);
			goto out_unlock;
		}
		xas_store(&xas, NULL);
		xas_unlock(&xas);
	}

	tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
	tcmu_free_cmd(cmd);
	/*
	 * We only freed data space, not ring space. Therefore we don't call
	 * run_tmr_queue, but call run_qfull_queue if tmr_list is empty.
	 */
	if (list_empty(&udev->tmr_queue))
		run_qfull_queue(udev, false);

out_unlock:
	mutex_unlock(&udev->cmdr_lock);
	return count;
}
CONFIGFS_ATTR_WO(tcmu_, free_kept_buf);

static struct configfs_attribute *tcmu_attrib_attrs[] = {
	&tcmu_attr_cmd_time_out,
	&tcmu_attr_qfull_time_out,
	&tcmu_attr_max_data_area_mb,
	&tcmu_attr_data_pages_per_blk,
	&tcmu_attr_cmd_ring_size_mb,
	&tcmu_attr_dev_config,
	&tcmu_attr_dev_size,
	&tcmu_attr_emulate_write_cache,
	&tcmu_attr_tmr_notification,
	&tcmu_attr_nl_reply_supported,
	NULL,
};

static struct configfs_attribute **tcmu_attrs;

static struct configfs_attribute *tcmu_action_attrs[] = {
	&tcmu_attr_block_dev,
	&tcmu_attr_reset_ring,
	&tcmu_attr_free_kept_buf,
	NULL,
};

static struct target_backend_ops tcmu_ops = {
	.name = "user",
	.owner = THIS_MODULE,
	.transport_flags_default = TRANSPORT_FLAG_PASSTHROUGH,
	.transport_flags_changeable = TRANSPORT_FLAG_PASSTHROUGH_PGR |
				      TRANSPORT_FLAG_PASSTHROUGH_ALUA,
	.attach_hba = tcmu_attach_hba,
	.detach_hba = tcmu_detach_hba,
	.alloc_device = tcmu_alloc_device,
	.configure_device = tcmu_configure_device,
	.destroy_device = tcmu_destroy_device,
	.free_device = tcmu_free_device,
	.unplug_device = tcmu_unplug_device,
	.plug_device = tcmu_plug_device,
	.parse_cdb = tcmu_parse_cdb,
	.tmr_notify = tcmu_tmr_notify,
	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
	.get_device_type = sbc_get_device_type,
	.get_blocks = tcmu_get_blocks,
	.tb_dev_action_attrs = tcmu_action_attrs,
};
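
/*
 * Background reclaim, run from the unmap work: once the global data page
 * count exceeds tcmu_global_max_pages, walk all devices and release data
 * blocks beyond each device's last used block, lowering dbi_thresh and
 * unmapping the corresponding part of the data area.
 */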
static void find_free_blocks(void)
{
	struct tcmu_dev *udev;
	loff_t off;
	u32 pages_freed, total_pages_freed = 0;
	u32 start, end, block, total_blocks_freed = 0;

	if (atomic_read(&global_page_count) <= tcmu_global_max_pages)
		return;

	mutex_lock(&root_udev_mutex);
	list_for_each_entry(udev, &root_udev, node) {
		mutex_lock(&udev->cmdr_lock);

		if (!target_dev_configured(&udev->se_dev)) {
			mutex_unlock(&udev->cmdr_lock);
			continue;
		}

		/* Try to complete the finished commands first */
		if (tcmu_handle_completions(udev))
			run_qfull_queue(udev, false);

		/* Skip udevs that are idle */
		if (!udev->dbi_thresh) {
			mutex_unlock(&udev->cmdr_lock);
			continue;
		}

		end = udev->dbi_max + 1;
		block = find_last_bit(udev->data_bitmap, end);
		if (block == udev->dbi_max) {
			/*
			 * The last bit is dbi_max, so it is not possible to
			 * reclaim any blocks.
			 */
			mutex_unlock(&udev->cmdr_lock);
			continue;
		} else if (block == end) {
			/* The current udev will go to idle state */
			udev->dbi_thresh = start = 0;
			udev->dbi_max = 0;
		} else {
			udev->dbi_thresh = start = block + 1;
			udev->dbi_max = block;
		}

		/* Truncate the data area from off, i.e. unmap it to the end */
		off = udev->data_off + (loff_t)start * udev->data_blk_size;
		unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);

		/* Release the block pages */
		pages_freed = tcmu_blocks_release(udev, start, end - 1);
		mutex_unlock(&udev->cmdr_lock);

		total_pages_freed += pages_freed;
		total_blocks_freed += end - start;
		pr_debug("Freed %u pages (total %u) from %u blocks (total %u) from %s.\n",
			 pages_freed, total_pages_freed, end - start,
			 total_blocks_freed, udev->name);
	}
	mutex_unlock(&root_udev_mutex);

	if (atomic_read(&global_page_count) > tcmu_global_max_pages)
		schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
}
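
/*
 * Handle devices whose command or qfull timer fired: expire overdue commands
 * on the inflight and qfull queues and re-arm the timers for whatever is
 * still pending.
 */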
3246 */ 3247 if (udev->cmd_time_out) { 3248 list_for_each_entry_safe(cmd, tmp_cmd, 3249 &udev->inflight_queue, 3250 queue_entry) { 3251 tcmu_check_expired_ring_cmd(cmd); 3252 } 3253 tcmu_set_next_deadline(&udev->inflight_queue, 3254 &udev->cmd_timer); 3255 } 3256 list_for_each_entry_safe(cmd, tmp_cmd, &udev->qfull_queue, 3257 queue_entry) { 3258 tcmu_check_expired_queue_cmd(cmd); 3259 } 3260 tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer); 3261 3262 mutex_unlock(&udev->cmdr_lock); 3263 3264 spin_lock_bh(&timed_out_udevs_lock); 3265 } 3266 3267 spin_unlock_bh(&timed_out_udevs_lock); 3268 } 3269 3270 static void tcmu_unmap_work_fn(struct work_struct *work) 3271 { 3272 check_timedout_devices(); 3273 find_free_blocks(); 3274 } 3275 3276 static int __init tcmu_module_init(void) 3277 { 3278 int ret, i, k, len = 0; 3279 3280 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); 3281 3282 INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn); 3283 3284 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache", 3285 sizeof(struct tcmu_cmd), 3286 __alignof__(struct tcmu_cmd), 3287 0, NULL); 3288 if (!tcmu_cmd_cache) 3289 return -ENOMEM; 3290 3291 tcmu_root_device = root_device_register("tcm_user"); 3292 if (IS_ERR(tcmu_root_device)) { 3293 ret = PTR_ERR(tcmu_root_device); 3294 goto out_free_cache; 3295 } 3296 3297 ret = genl_register_family(&tcmu_genl_family); 3298 if (ret < 0) { 3299 goto out_unreg_device; 3300 } 3301 3302 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) 3303 len += sizeof(struct configfs_attribute *); 3304 for (i = 0; passthrough_pr_attrib_attrs[i] != NULL; i++) 3305 len += sizeof(struct configfs_attribute *); 3306 for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) 3307 len += sizeof(struct configfs_attribute *); 3308 len += sizeof(struct configfs_attribute *); 3309 3310 tcmu_attrs = kzalloc(len, GFP_KERNEL); 3311 if (!tcmu_attrs) { 3312 ret = -ENOMEM; 3313 goto out_unreg_genl; 3314 } 3315 3316 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) 3317 tcmu_attrs[i] = passthrough_attrib_attrs[i]; 3318 for (k = 0; passthrough_pr_attrib_attrs[k] != NULL; k++) 3319 tcmu_attrs[i++] = passthrough_pr_attrib_attrs[k]; 3320 for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) 3321 tcmu_attrs[i++] = tcmu_attrib_attrs[k]; 3322 tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs; 3323 3324 ret = transport_backend_register(&tcmu_ops); 3325 if (ret) 3326 goto out_attrs; 3327 3328 return 0; 3329 3330 out_attrs: 3331 kfree(tcmu_attrs); 3332 out_unreg_genl: 3333 genl_unregister_family(&tcmu_genl_family); 3334 out_unreg_device: 3335 root_device_unregister(tcmu_root_device); 3336 out_free_cache: 3337 kmem_cache_destroy(tcmu_cmd_cache); 3338 3339 return ret; 3340 } 3341 3342 static void __exit tcmu_module_exit(void) 3343 { 3344 cancel_delayed_work_sync(&tcmu_unmap_work); 3345 target_backend_unregister(&tcmu_ops); 3346 kfree(tcmu_attrs); 3347 genl_unregister_family(&tcmu_genl_family); 3348 root_device_unregister(tcmu_root_device); 3349 kmem_cache_destroy(tcmu_cmd_cache); 3350 } 3351 3352 MODULE_DESCRIPTION("TCM USER subsystem plugin"); 3353 MODULE_AUTHOR("Shaohua Li <shli@kernel.org>"); 3354 MODULE_AUTHOR("Andy Grover <agrover@redhat.com>"); 3355 MODULE_LICENSE("GPL"); 3356 3357 module_init(tcmu_module_init); 3358 module_exit(tcmu_module_exit); 3359