/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 * Copyright (C) 2017 Chinamobile, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/radix-tree.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>

/*
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This is to allow backends that
 * are too complex for in-kernel support to be possible.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap()ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */

#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

/* For cmd area, the size is fixed 8MB */
#define CMDR_SIZE (8 * 1024 * 1024)

/*
 * For data area, the block size is PAGE_SIZE and
 * the total size is 256K * PAGE_SIZE.
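 *
 * A worked example of the sizing (illustration only, assuming 4 KiB
 * pages; the real values follow PAGE_SIZE on the running kernel):
 *
 *	DATA_BLOCK_SIZE = 4 KiB
 *	DATA_SIZE       = 256K blocks * 4 KiB = 1 GiB of address space
 *	TCMU_RING_SIZE  = CMDR_SIZE + DATA_SIZE = 8 MiB + 1 GiB
 *
 * Only a fraction of that address space is normally backed by pages:
 * data blocks are allocated on demand and tracked in data_bitmap and
 * the data_blocks radix tree below.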
78 */ 79 #define DATA_BLOCK_SIZE PAGE_SIZE 80 #define DATA_BLOCK_BITS (256 * 1024) 81 #define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE) 82 #define DATA_BLOCK_INIT_BITS 128 83 84 /* The total size of the ring is 8M + 256K * PAGE_SIZE */ 85 #define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE) 86 87 /* Default maximum of the global data blocks(512K * PAGE_SIZE) */ 88 #define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024) 89 90 static u8 tcmu_kern_cmd_reply_supported; 91 92 static struct device *tcmu_root_device; 93 94 struct tcmu_hba { 95 u32 host_id; 96 }; 97 98 #define TCMU_CONFIG_LEN 256 99 100 struct tcmu_nl_cmd { 101 /* wake up thread waiting for reply */ 102 struct completion complete; 103 int cmd; 104 int status; 105 }; 106 107 struct tcmu_dev { 108 struct list_head node; 109 struct kref kref; 110 struct se_device se_dev; 111 112 char *name; 113 struct se_hba *hba; 114 115 #define TCMU_DEV_BIT_OPEN 0 116 #define TCMU_DEV_BIT_BROKEN 1 117 unsigned long flags; 118 119 struct uio_info uio_info; 120 121 struct inode *inode; 122 123 struct tcmu_mailbox *mb_addr; 124 size_t dev_size; 125 u32 cmdr_size; 126 u32 cmdr_last_cleaned; 127 /* Offset of data area from start of mb */ 128 /* Must add data_off and mb_addr to get the address */ 129 size_t data_off; 130 size_t data_size; 131 132 wait_queue_head_t wait_cmdr; 133 struct mutex cmdr_lock; 134 135 bool waiting_global; 136 uint32_t dbi_max; 137 uint32_t dbi_thresh; 138 DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS); 139 struct radix_tree_root data_blocks; 140 141 struct idr commands; 142 spinlock_t commands_lock; 143 144 struct timer_list timeout; 145 unsigned int cmd_time_out; 146 147 spinlock_t nl_cmd_lock; 148 struct tcmu_nl_cmd curr_nl_cmd; 149 /* wake up threads waiting on curr_nl_cmd */ 150 wait_queue_head_t nl_cmd_wq; 151 152 char dev_config[TCMU_CONFIG_LEN]; 153 154 int nl_reply_supported; 155 }; 156 157 #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev) 158 159 #define CMDR_OFF sizeof(struct tcmu_mailbox) 160 161 struct tcmu_cmd { 162 struct se_cmd *se_cmd; 163 struct tcmu_dev *tcmu_dev; 164 165 uint16_t cmd_id; 166 167 /* Can't use se_cmd when cleaning up expired cmds, because if 168 cmd has been completed then accessing se_cmd is off limits */ 169 uint32_t dbi_cnt; 170 uint32_t dbi_cur; 171 uint32_t *dbi; 172 173 unsigned long deadline; 174 175 #define TCMU_CMD_BIT_EXPIRED 0 176 unsigned long flags; 177 }; 178 179 static struct task_struct *unmap_thread; 180 static wait_queue_head_t unmap_wait; 181 static DEFINE_MUTEX(root_udev_mutex); 182 static LIST_HEAD(root_udev); 183 184 static atomic_t global_db_count = ATOMIC_INIT(0); 185 186 static struct kmem_cache *tcmu_cmd_cache; 187 188 /* multicast group */ 189 enum tcmu_multicast_groups { 190 TCMU_MCGRP_CONFIG, 191 }; 192 193 static const struct genl_multicast_group tcmu_mcgrps[] = { 194 [TCMU_MCGRP_CONFIG] = { .name = "config", }, 195 }; 196 197 static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = { 198 [TCMU_ATTR_DEVICE] = { .type = NLA_STRING }, 199 [TCMU_ATTR_MINOR] = { .type = NLA_U32 }, 200 [TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 }, 201 [TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 }, 202 [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 }, 203 }; 204 205 static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd) 206 { 207 struct se_device *dev; 208 struct tcmu_dev *udev; 209 struct tcmu_nl_cmd *nl_cmd; 210 int dev_id, rc, ret = 0; 211 bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE); 212 213 if (!info->attrs[TCMU_ATTR_CMD_STATUS] || 214 
!info->attrs[TCMU_ATTR_DEVICE_ID]) { 215 printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n"); 216 return -EINVAL; 217 } 218 219 dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]); 220 rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]); 221 222 dev = target_find_device(dev_id, !is_removed); 223 if (!dev) { 224 printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n", 225 completed_cmd, rc, dev_id); 226 return -ENODEV; 227 } 228 udev = TCMU_DEV(dev); 229 230 spin_lock(&udev->nl_cmd_lock); 231 nl_cmd = &udev->curr_nl_cmd; 232 233 pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id, 234 nl_cmd->cmd, completed_cmd, rc); 235 236 if (nl_cmd->cmd != completed_cmd) { 237 printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n", 238 completed_cmd, nl_cmd->cmd); 239 ret = -EINVAL; 240 } else { 241 nl_cmd->status = rc; 242 } 243 244 spin_unlock(&udev->nl_cmd_lock); 245 if (!is_removed) 246 target_undepend_item(&dev->dev_group.cg_item); 247 if (!ret) 248 complete(&nl_cmd->complete); 249 return ret; 250 } 251 252 static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info) 253 { 254 return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE); 255 } 256 257 static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info) 258 { 259 return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE); 260 } 261 262 static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb, 263 struct genl_info *info) 264 { 265 return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE); 266 } 267 268 static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info) 269 { 270 if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) { 271 tcmu_kern_cmd_reply_supported = 272 nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]); 273 printk(KERN_INFO "tcmu daemon: command reply support %u.\n", 274 tcmu_kern_cmd_reply_supported); 275 } 276 277 return 0; 278 } 279 280 static const struct genl_ops tcmu_genl_ops[] = { 281 { 282 .cmd = TCMU_CMD_SET_FEATURES, 283 .flags = GENL_ADMIN_PERM, 284 .policy = tcmu_attr_policy, 285 .doit = tcmu_genl_set_features, 286 }, 287 { 288 .cmd = TCMU_CMD_ADDED_DEVICE_DONE, 289 .flags = GENL_ADMIN_PERM, 290 .policy = tcmu_attr_policy, 291 .doit = tcmu_genl_add_dev_done, 292 }, 293 { 294 .cmd = TCMU_CMD_REMOVED_DEVICE_DONE, 295 .flags = GENL_ADMIN_PERM, 296 .policy = tcmu_attr_policy, 297 .doit = tcmu_genl_rm_dev_done, 298 }, 299 { 300 .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE, 301 .flags = GENL_ADMIN_PERM, 302 .policy = tcmu_attr_policy, 303 .doit = tcmu_genl_reconfig_dev_done, 304 }, 305 }; 306 307 /* Our generic netlink family */ 308 static struct genl_family tcmu_genl_family __ro_after_init = { 309 .module = THIS_MODULE, 310 .hdrsize = 0, 311 .name = "TCM-USER", 312 .version = 2, 313 .maxattr = TCMU_ATTR_MAX, 314 .mcgrps = tcmu_mcgrps, 315 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps), 316 .netnsok = true, 317 .ops = tcmu_genl_ops, 318 .n_ops = ARRAY_SIZE(tcmu_genl_ops), 319 }; 320 321 #define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index)) 322 #define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0) 323 #define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index)) 324 #define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++]) 325 326 static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len) 327 { 328 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 329 uint32_t i; 330 331 for (i = 0; i < len; i++) 332 clear_bit(tcmu_cmd->dbi[i], 
udev->data_bitmap);
}

static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
					struct tcmu_cmd *tcmu_cmd)
{
	struct page *page;
	int ret, dbi;

	dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
	if (dbi == udev->dbi_thresh)
		return false;

	page = radix_tree_lookup(&udev->data_blocks, dbi);
	if (!page) {
		if (atomic_add_return(1, &global_db_count) >
					TCMU_GLOBAL_MAX_BLOCKS) {
			atomic_dec(&global_db_count);
			return false;
		}

		/* try to get new page from the mm */
		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto err_alloc;

		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
		if (ret)
			goto err_insert;
	}

	if (dbi > udev->dbi_max)
		udev->dbi_max = dbi;

	set_bit(dbi, udev->data_bitmap);
	tcmu_cmd_set_dbi(tcmu_cmd, dbi);

	return true;
err_insert:
	__free_page(page);
err_alloc:
	atomic_dec(&global_db_count);
	return false;
}

static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
				  struct tcmu_cmd *tcmu_cmd)
{
	int i;

	udev->waiting_global = false;

	for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
		if (!tcmu_get_empty_block(udev, tcmu_cmd))
			goto err;
	}
	return true;

err:
	udev->waiting_global = true;
	/* Try to wake up the unmap thread */
	wake_up(&unmap_wait);
	return false;
}

static inline struct page *
tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
	return radix_tree_lookup(&udev->data_blocks, dbi);
}

static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
{
	kfree(tcmu_cmd->dbi);
	kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}

static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);

	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
		data_length += round_up(se_cmd->t_bidi_data_sg->length,
				DATA_BLOCK_SIZE);
	}

	return data_length;
}

static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
{
	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);

	return data_length / DATA_BLOCK_SIZE;
}

static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;

	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
	if (!tcmu_cmd)
		return NULL;

	tcmu_cmd->se_cmd = se_cmd;
	tcmu_cmd->tcmu_dev = udev;

	tcmu_cmd_reset_dbi_cur(tcmu_cmd);
	tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
	tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
				GFP_KERNEL);
	if (!tcmu_cmd->dbi) {
		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
		return NULL;
	}

	return tcmu_cmd;
}

static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
	unsigned long offset = offset_in_page(vaddr);

	size = round_up(size+offset, PAGE_SIZE);
	vaddr -= offset;

	while (size) {
		/* flush this page, then step to the next page in the range */
		flush_dcache_page(virt_to_page(vaddr));
		size -= PAGE_SIZE;
		vaddr += PAGE_SIZE;
	}
}

/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
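 *
 * A quick worked example for the helpers below (numbers are purely
 * illustrative): with size = 16, head = 3 and tail = 11 the used span
 * wraps around, so spc_used() returns 16 + (3 - 11) = 8 and spc_free()
 * returns 16 - 8 - 1 = 7, the "- 1" being the byte that is always kept
 * unused so a full ring can be told apart from an empty one.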
471 */ 472 static inline size_t spc_used(size_t head, size_t tail, size_t size) 473 { 474 int diff = head - tail; 475 476 if (diff >= 0) 477 return diff; 478 else 479 return size + diff; 480 } 481 482 static inline size_t spc_free(size_t head, size_t tail, size_t size) 483 { 484 /* Keep 1 byte unused or we can't tell full from empty */ 485 return (size - spc_used(head, tail, size) - 1); 486 } 487 488 static inline size_t head_to_end(size_t head, size_t size) 489 { 490 return size - head; 491 } 492 493 static inline void new_iov(struct iovec **iov, int *iov_cnt, 494 struct tcmu_dev *udev) 495 { 496 struct iovec *iovec; 497 498 if (*iov_cnt != 0) 499 (*iov)++; 500 (*iov_cnt)++; 501 502 iovec = *iov; 503 memset(iovec, 0, sizeof(struct iovec)); 504 } 505 506 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size) 507 508 /* offset is relative to mb_addr */ 509 static inline size_t get_block_offset_user(struct tcmu_dev *dev, 510 int dbi, int remaining) 511 { 512 return dev->data_off + dbi * DATA_BLOCK_SIZE + 513 DATA_BLOCK_SIZE - remaining; 514 } 515 516 static inline size_t iov_tail(struct iovec *iov) 517 { 518 return (size_t)iov->iov_base + iov->iov_len; 519 } 520 521 static int scatter_data_area(struct tcmu_dev *udev, 522 struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg, 523 unsigned int data_nents, struct iovec **iov, 524 int *iov_cnt, bool copy_data) 525 { 526 int i, dbi; 527 int block_remaining = 0; 528 void *from, *to = NULL; 529 size_t copy_bytes, to_offset, offset; 530 struct scatterlist *sg; 531 struct page *page; 532 533 for_each_sg(data_sg, sg, data_nents, i) { 534 int sg_remaining = sg->length; 535 from = kmap_atomic(sg_page(sg)) + sg->offset; 536 while (sg_remaining > 0) { 537 if (block_remaining == 0) { 538 if (to) 539 kunmap_atomic(to); 540 541 block_remaining = DATA_BLOCK_SIZE; 542 dbi = tcmu_cmd_get_dbi(tcmu_cmd); 543 page = tcmu_get_block_page(udev, dbi); 544 to = kmap_atomic(page); 545 } 546 547 copy_bytes = min_t(size_t, sg_remaining, 548 block_remaining); 549 to_offset = get_block_offset_user(udev, dbi, 550 block_remaining); 551 552 if (*iov_cnt != 0 && 553 to_offset == iov_tail(*iov)) { 554 (*iov)->iov_len += copy_bytes; 555 } else { 556 new_iov(iov, iov_cnt, udev); 557 (*iov)->iov_base = (void __user *)to_offset; 558 (*iov)->iov_len = copy_bytes; 559 } 560 if (copy_data) { 561 offset = DATA_BLOCK_SIZE - block_remaining; 562 memcpy(to + offset, 563 from + sg->length - sg_remaining, 564 copy_bytes); 565 tcmu_flush_dcache_range(to, copy_bytes); 566 } 567 sg_remaining -= copy_bytes; 568 block_remaining -= copy_bytes; 569 } 570 kunmap_atomic(from - sg->offset); 571 } 572 if (to) 573 kunmap_atomic(to); 574 575 return 0; 576 } 577 578 static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, 579 bool bidi) 580 { 581 struct se_cmd *se_cmd = cmd->se_cmd; 582 int i, dbi; 583 int block_remaining = 0; 584 void *from = NULL, *to; 585 size_t copy_bytes, offset; 586 struct scatterlist *sg, *data_sg; 587 struct page *page; 588 unsigned int data_nents; 589 uint32_t count = 0; 590 591 if (!bidi) { 592 data_sg = se_cmd->t_data_sg; 593 data_nents = se_cmd->t_data_nents; 594 } else { 595 596 /* 597 * For bidi case, the first count blocks are for Data-Out 598 * buffer blocks, and before gathering the Data-In buffer 599 * the Data-Out buffer blocks should be discarded. 
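		 *
		 * For example (hypothetical numbers, assuming 4 KiB blocks):
		 * a bidi command with a 10 KiB Data-Out buffer and a 4 KiB
		 * Data-In buffer uses DIV_ROUND_UP(10 KiB, 4 KiB) = 3 blocks
		 * for Data-Out, so dbi_cur is advanced past those 3 dbi
		 * entries and the Data-In data is gathered starting from the
		 * command's 4th data block.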
600 */ 601 count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE); 602 603 data_sg = se_cmd->t_bidi_data_sg; 604 data_nents = se_cmd->t_bidi_data_nents; 605 } 606 607 tcmu_cmd_set_dbi_cur(cmd, count); 608 609 for_each_sg(data_sg, sg, data_nents, i) { 610 int sg_remaining = sg->length; 611 to = kmap_atomic(sg_page(sg)) + sg->offset; 612 while (sg_remaining > 0) { 613 if (block_remaining == 0) { 614 if (from) 615 kunmap_atomic(from); 616 617 block_remaining = DATA_BLOCK_SIZE; 618 dbi = tcmu_cmd_get_dbi(cmd); 619 page = tcmu_get_block_page(udev, dbi); 620 from = kmap_atomic(page); 621 } 622 copy_bytes = min_t(size_t, sg_remaining, 623 block_remaining); 624 offset = DATA_BLOCK_SIZE - block_remaining; 625 tcmu_flush_dcache_range(from, copy_bytes); 626 memcpy(to + sg->length - sg_remaining, from + offset, 627 copy_bytes); 628 629 sg_remaining -= copy_bytes; 630 block_remaining -= copy_bytes; 631 } 632 kunmap_atomic(to - sg->offset); 633 } 634 if (from) 635 kunmap_atomic(from); 636 } 637 638 static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh) 639 { 640 return DATA_BLOCK_SIZE * (thresh - bitmap_weight(bitmap, thresh)); 641 } 642 643 /* 644 * We can't queue a command until we have space available on the cmd ring *and* 645 * space available on the data area. 646 * 647 * Called with ring lock held. 648 */ 649 static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd, 650 size_t cmd_size, size_t data_needed) 651 { 652 struct tcmu_mailbox *mb = udev->mb_addr; 653 uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1) 654 / DATA_BLOCK_SIZE; 655 size_t space, cmd_needed; 656 u32 cmd_head; 657 658 tcmu_flush_dcache_range(mb, sizeof(*mb)); 659 660 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 661 662 /* 663 * If cmd end-of-ring space is too small then we need space for a NOP plus 664 * original cmd - cmds are internally contiguous. 665 */ 666 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size) 667 cmd_needed = cmd_size; 668 else 669 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size); 670 671 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size); 672 if (space < cmd_needed) { 673 pr_debug("no cmd space: %u %u %u\n", cmd_head, 674 udev->cmdr_last_cleaned, udev->cmdr_size); 675 return false; 676 } 677 678 /* try to check and get the data blocks as needed */ 679 space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh); 680 if (space < data_needed) { 681 unsigned long blocks_left = DATA_BLOCK_BITS - udev->dbi_thresh; 682 unsigned long grow; 683 684 if (blocks_left < blocks_needed) { 685 pr_debug("no data space: only %lu available, but ask for %zu\n", 686 blocks_left * DATA_BLOCK_SIZE, 687 data_needed); 688 return false; 689 } 690 691 /* Try to expand the thresh */ 692 if (!udev->dbi_thresh) { 693 /* From idle state */ 694 uint32_t init_thresh = DATA_BLOCK_INIT_BITS; 695 696 udev->dbi_thresh = max(blocks_needed, init_thresh); 697 } else { 698 /* 699 * Grow the data area by max(blocks needed, 700 * dbi_thresh / 2), but limited to the max 701 * DATA_BLOCK_BITS size. 
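			 *
			 * E.g. (illustrative numbers): with dbi_thresh = 128
			 * and 16 more blocks needed, grow = max(16, 128 / 2)
			 * = 64 and dbi_thresh becomes 192; the clamp below
			 * keeps it from ever exceeding DATA_BLOCK_BITS.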
702 */ 703 grow = max(blocks_needed, udev->dbi_thresh / 2); 704 udev->dbi_thresh += grow; 705 if (udev->dbi_thresh > DATA_BLOCK_BITS) 706 udev->dbi_thresh = DATA_BLOCK_BITS; 707 } 708 } 709 710 return tcmu_get_empty_blocks(udev, cmd); 711 } 712 713 static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt) 714 { 715 return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]), 716 sizeof(struct tcmu_cmd_entry)); 717 } 718 719 static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd, 720 size_t base_command_size) 721 { 722 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 723 size_t command_size; 724 725 command_size = base_command_size + 726 round_up(scsi_command_size(se_cmd->t_task_cdb), 727 TCMU_OP_ALIGN_SIZE); 728 729 WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1)); 730 731 return command_size; 732 } 733 734 static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd) 735 { 736 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 737 unsigned long tmo = udev->cmd_time_out; 738 int cmd_id; 739 740 if (tcmu_cmd->cmd_id) 741 return 0; 742 743 cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT); 744 if (cmd_id < 0) { 745 pr_err("tcmu: Could not allocate cmd id.\n"); 746 return cmd_id; 747 } 748 tcmu_cmd->cmd_id = cmd_id; 749 750 if (!tmo) 751 return 0; 752 753 tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); 754 mod_timer(&udev->timeout, tcmu_cmd->deadline); 755 return 0; 756 } 757 758 static sense_reason_t 759 tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) 760 { 761 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 762 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 763 size_t base_command_size, command_size; 764 struct tcmu_mailbox *mb; 765 struct tcmu_cmd_entry *entry; 766 struct iovec *iov; 767 int iov_cnt, ret; 768 uint32_t cmd_head; 769 uint64_t cdb_off; 770 bool copy_to_data_area; 771 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd); 772 773 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) 774 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 775 776 /* 777 * Must be a certain minimum size for response sense info, but 778 * also may be larger if the iov array is large. 779 * 780 * We prepare as many iovs as possbile for potential uses here, 781 * because it's expensive to tell how many regions are freed in 782 * the bitmap & global data pool, as the size calculated here 783 * will only be used to do the checks. 784 * 785 * The size will be recalculated later as actually needed to save 786 * cmd area memories. 
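	 *
	 * For instance (illustrative, assuming 4 KiB blocks): a 1 MiB write
	 * has dbi_cnt = 256, so the size checked here assumes a worst case
	 * of 256 iovec entries. If the data area then hands back mostly
	 * contiguous blocks, scatter_data_area() coalesces them into far
	 * fewer iovecs, and the entry length is recomputed further down
	 * using tcmu_cmd_get_base_cmd_size(iov_cnt + iov_bidi_cnt).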
787 */ 788 base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); 789 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); 790 791 mutex_lock(&udev->cmdr_lock); 792 793 mb = udev->mb_addr; 794 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 795 if ((command_size > (udev->cmdr_size / 2)) || 796 data_length > udev->data_size) { 797 pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu " 798 "cmd ring/data area\n", command_size, data_length, 799 udev->cmdr_size, udev->data_size); 800 mutex_unlock(&udev->cmdr_lock); 801 return TCM_INVALID_CDB_FIELD; 802 } 803 804 while (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) { 805 int ret; 806 DEFINE_WAIT(__wait); 807 808 prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE); 809 810 pr_debug("sleeping for ring space\n"); 811 mutex_unlock(&udev->cmdr_lock); 812 if (udev->cmd_time_out) 813 ret = schedule_timeout( 814 msecs_to_jiffies(udev->cmd_time_out)); 815 else 816 ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); 817 finish_wait(&udev->wait_cmdr, &__wait); 818 if (!ret) { 819 pr_warn("tcmu: command timed out\n"); 820 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 821 } 822 823 mutex_lock(&udev->cmdr_lock); 824 825 /* We dropped cmdr_lock, cmd_head is stale */ 826 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 827 } 828 829 /* Insert a PAD if end-of-ring space is too small */ 830 if (head_to_end(cmd_head, udev->cmdr_size) < command_size) { 831 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); 832 833 entry = (void *) mb + CMDR_OFF + cmd_head; 834 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD); 835 tcmu_hdr_set_len(&entry->hdr.len_op, pad_size); 836 entry->hdr.cmd_id = 0; /* not used for PAD */ 837 entry->hdr.kflags = 0; 838 entry->hdr.uflags = 0; 839 tcmu_flush_dcache_range(entry, sizeof(*entry)); 840 841 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); 842 tcmu_flush_dcache_range(mb, sizeof(*mb)); 843 844 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 845 WARN_ON(cmd_head != 0); 846 } 847 848 entry = (void *) mb + CMDR_OFF + cmd_head; 849 memset(entry, 0, command_size); 850 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); 851 852 /* Handle allocating space from the data area */ 853 tcmu_cmd_reset_dbi_cur(tcmu_cmd); 854 iov = &entry->req.iov[0]; 855 iov_cnt = 0; 856 copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE 857 || se_cmd->se_cmd_flags & SCF_BIDI); 858 ret = scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg, 859 se_cmd->t_data_nents, &iov, &iov_cnt, 860 copy_to_data_area); 861 if (ret) { 862 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); 863 mutex_unlock(&udev->cmdr_lock); 864 865 pr_err("tcmu: alloc and scatter data failed\n"); 866 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 867 } 868 entry->req.iov_cnt = iov_cnt; 869 870 /* Handle BIDI commands */ 871 iov_cnt = 0; 872 if (se_cmd->se_cmd_flags & SCF_BIDI) { 873 iov++; 874 ret = scatter_data_area(udev, tcmu_cmd, 875 se_cmd->t_bidi_data_sg, 876 se_cmd->t_bidi_data_nents, 877 &iov, &iov_cnt, false); 878 if (ret) { 879 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); 880 mutex_unlock(&udev->cmdr_lock); 881 882 pr_err("tcmu: alloc and scatter bidi data failed\n"); 883 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 884 } 885 } 886 entry->req.iov_bidi_cnt = iov_cnt; 887 888 ret = tcmu_setup_cmd_timer(tcmu_cmd); 889 if (ret) { 890 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); 891 mutex_unlock(&udev->cmdr_lock); 892 return TCM_OUT_OF_RESOURCES; 893 } 894 entry->hdr.cmd_id = 
tcmu_cmd->cmd_id; 895 896 /* 897 * Recalaulate the command's base size and size according 898 * to the actual needs 899 */ 900 base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt + 901 entry->req.iov_bidi_cnt); 902 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); 903 904 tcmu_hdr_set_len(&entry->hdr.len_op, command_size); 905 906 /* All offsets relative to mb_addr, not start of entry! */ 907 cdb_off = CMDR_OFF + cmd_head + base_command_size; 908 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); 909 entry->req.cdb_off = cdb_off; 910 tcmu_flush_dcache_range(entry, sizeof(*entry)); 911 912 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); 913 tcmu_flush_dcache_range(mb, sizeof(*mb)); 914 mutex_unlock(&udev->cmdr_lock); 915 916 /* TODO: only if FLUSH and FUA? */ 917 uio_event_notify(&udev->uio_info); 918 919 if (udev->cmd_time_out) 920 mod_timer(&udev->timeout, round_jiffies_up(jiffies + 921 msecs_to_jiffies(udev->cmd_time_out))); 922 923 return TCM_NO_SENSE; 924 } 925 926 static sense_reason_t 927 tcmu_queue_cmd(struct se_cmd *se_cmd) 928 { 929 struct tcmu_cmd *tcmu_cmd; 930 sense_reason_t ret; 931 932 tcmu_cmd = tcmu_alloc_cmd(se_cmd); 933 if (!tcmu_cmd) 934 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 935 936 ret = tcmu_queue_cmd_ring(tcmu_cmd); 937 if (ret != TCM_NO_SENSE) { 938 pr_err("TCMU: Could not queue command\n"); 939 940 tcmu_free_cmd(tcmu_cmd); 941 } 942 943 return ret; 944 } 945 946 static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry) 947 { 948 struct se_cmd *se_cmd = cmd->se_cmd; 949 struct tcmu_dev *udev = cmd->tcmu_dev; 950 951 /* 952 * cmd has been completed already from timeout, just reclaim 953 * data area space and free cmd 954 */ 955 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) 956 goto out; 957 958 tcmu_cmd_reset_dbi_cur(cmd); 959 960 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { 961 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n", 962 cmd->se_cmd); 963 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION; 964 } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { 965 transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer); 966 } else if (se_cmd->se_cmd_flags & SCF_BIDI) { 967 /* Get Data-In buffer before clean up */ 968 gather_data_area(udev, cmd, true); 969 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { 970 gather_data_area(udev, cmd, false); 971 } else if (se_cmd->data_direction == DMA_TO_DEVICE) { 972 /* TODO: */ 973 } else if (se_cmd->data_direction != DMA_NONE) { 974 pr_warn("TCMU: data direction was %d!\n", 975 se_cmd->data_direction); 976 } 977 978 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); 979 980 out: 981 cmd->se_cmd = NULL; 982 tcmu_cmd_free_data(cmd, cmd->dbi_cnt); 983 tcmu_free_cmd(cmd); 984 } 985 986 static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) 987 { 988 struct tcmu_mailbox *mb; 989 int handled = 0; 990 991 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { 992 pr_err("ring broken, not handling completions\n"); 993 return 0; 994 } 995 996 mb = udev->mb_addr; 997 tcmu_flush_dcache_range(mb, sizeof(*mb)); 998 999 while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { 1000 1001 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; 1002 struct tcmu_cmd *cmd; 1003 1004 tcmu_flush_dcache_range(entry, sizeof(*entry)); 1005 1006 if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) { 1007 UPDATE_HEAD(udev->cmdr_last_cleaned, 1008 
tcmu_hdr_get_len(entry->hdr.len_op), 1009 udev->cmdr_size); 1010 continue; 1011 } 1012 WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); 1013 1014 spin_lock(&udev->commands_lock); 1015 cmd = idr_remove(&udev->commands, entry->hdr.cmd_id); 1016 spin_unlock(&udev->commands_lock); 1017 1018 if (!cmd) { 1019 pr_err("cmd_id not found, ring is broken\n"); 1020 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); 1021 break; 1022 } 1023 1024 tcmu_handle_completion(cmd, entry); 1025 1026 UPDATE_HEAD(udev->cmdr_last_cleaned, 1027 tcmu_hdr_get_len(entry->hdr.len_op), 1028 udev->cmdr_size); 1029 1030 handled++; 1031 } 1032 1033 if (mb->cmd_tail == mb->cmd_head) 1034 del_timer(&udev->timeout); /* no more pending cmds */ 1035 1036 wake_up(&udev->wait_cmdr); 1037 1038 return handled; 1039 } 1040 1041 static int tcmu_check_expired_cmd(int id, void *p, void *data) 1042 { 1043 struct tcmu_cmd *cmd = p; 1044 1045 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) 1046 return 0; 1047 1048 if (!time_after(jiffies, cmd->deadline)) 1049 return 0; 1050 1051 set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); 1052 target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION); 1053 cmd->se_cmd = NULL; 1054 1055 return 0; 1056 } 1057 1058 static void tcmu_device_timedout(struct timer_list *t) 1059 { 1060 struct tcmu_dev *udev = from_timer(udev, t, timeout); 1061 unsigned long flags; 1062 1063 spin_lock_irqsave(&udev->commands_lock, flags); 1064 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); 1065 spin_unlock_irqrestore(&udev->commands_lock, flags); 1066 1067 /* Try to wake up the ummap thread */ 1068 wake_up(&unmap_wait); 1069 1070 /* 1071 * We don't need to wakeup threads on wait_cmdr since they have their 1072 * own timeout. 1073 */ 1074 } 1075 1076 static int tcmu_attach_hba(struct se_hba *hba, u32 host_id) 1077 { 1078 struct tcmu_hba *tcmu_hba; 1079 1080 tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL); 1081 if (!tcmu_hba) 1082 return -ENOMEM; 1083 1084 tcmu_hba->host_id = host_id; 1085 hba->hba_ptr = tcmu_hba; 1086 1087 return 0; 1088 } 1089 1090 static void tcmu_detach_hba(struct se_hba *hba) 1091 { 1092 kfree(hba->hba_ptr); 1093 hba->hba_ptr = NULL; 1094 } 1095 1096 static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) 1097 { 1098 struct tcmu_dev *udev; 1099 1100 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); 1101 if (!udev) 1102 return NULL; 1103 kref_init(&udev->kref); 1104 1105 udev->name = kstrdup(name, GFP_KERNEL); 1106 if (!udev->name) { 1107 kfree(udev); 1108 return NULL; 1109 } 1110 1111 udev->hba = hba; 1112 udev->cmd_time_out = TCMU_TIME_OUT; 1113 1114 init_waitqueue_head(&udev->wait_cmdr); 1115 mutex_init(&udev->cmdr_lock); 1116 1117 idr_init(&udev->commands); 1118 spin_lock_init(&udev->commands_lock); 1119 1120 timer_setup(&udev->timeout, tcmu_device_timedout, 0); 1121 1122 init_waitqueue_head(&udev->nl_cmd_wq); 1123 spin_lock_init(&udev->nl_cmd_lock); 1124 1125 INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL); 1126 1127 return &udev->se_dev; 1128 } 1129 1130 static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) 1131 { 1132 struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info); 1133 1134 mutex_lock(&tcmu_dev->cmdr_lock); 1135 tcmu_handle_completions(tcmu_dev); 1136 mutex_unlock(&tcmu_dev->cmdr_lock); 1137 1138 return 0; 1139 } 1140 1141 /* 1142 * mmap code from uio.c. Copied here because we want to hook mmap() 1143 * and this stuff must come along. 
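 *
 * For orientation, a userspace handler typically maps the whole region
 * in one shot (an illustrative sketch, not part of this driver; the uio
 * node name depends on which minor the device was registered as, and
 * ring_size is read from the uio map0 size attribute in sysfs):
 *
 *	int fd = open("/dev/uio0", O_RDWR);
 *	void *ring = mmap(NULL, ring_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 * Every page touched through that mapping is served by tcmu_vma_fault()
 * below.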
 */
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}

static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
	struct page *page;
	int ret;

	mutex_lock(&udev->cmdr_lock);
	page = tcmu_get_block_page(udev, dbi);
	if (likely(page)) {
		mutex_unlock(&udev->cmdr_lock);
		return page;
	}

	/*
	 * Normally we should not get here: it only happens when userspace
	 * has touched data blocks that are outside the tcmu_cmd's data
	 * iov[]. In that case hand back a zeroed page.
	 */
	pr_warn("Block(%u) out of cmd's iov[] has been touched!\n", dbi);
	pr_warn("This is most likely a userspace bug, please check the handler!\n");

	if (dbi >= udev->dbi_thresh) {
		/* Extend udev->dbi_thresh to dbi + 1 */
		udev->dbi_thresh = dbi + 1;
		udev->dbi_max = dbi;
	}

	page = radix_tree_lookup(&udev->data_blocks, dbi);
	if (!page) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page) {
			mutex_unlock(&udev->cmdr_lock);
			return NULL;
		}

		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
		if (ret) {
			mutex_unlock(&udev->cmdr_lock);
			__free_page(page);
			return NULL;
		}

		/*
		 * Since this case is rare in the page fault path, allow
		 * global_db_count to exceed TCMU_GLOBAL_MAX_BLOCKS here
		 * rather than failing the fault.
		 */
		atomic_inc(&global_db_count);
	}
	mutex_unlock(&udev->cmdr_lock);

	return page;
}

static int tcmu_vma_fault(struct vm_fault *vmf)
{
	struct tcmu_dev *udev = vmf->vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;
	struct page *page;
	unsigned long offset;
	void *addr;

	int mi = tcmu_find_mem_index(vmf->vma);
	if (mi < 0)
		return VM_FAULT_SIGBUS;

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
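	 *
	 * Example (illustrative): for mem[0], mi == 0, so a fault at
	 * vmf->pgoff == 3 becomes byte offset 3 * PAGE_SIZE into the ring;
	 * offsets below data_off fall in the vmalloc()ed command ring,
	 * while anything at or beyond data_off selects data block
	 * (offset - data_off) / DATA_BLOCK_SIZE.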
1227 */ 1228 offset = (vmf->pgoff - mi) << PAGE_SHIFT; 1229 1230 if (offset < udev->data_off) { 1231 /* For the vmalloc()ed cmd area pages */ 1232 addr = (void *)(unsigned long)info->mem[mi].addr + offset; 1233 page = vmalloc_to_page(addr); 1234 } else { 1235 uint32_t dbi; 1236 1237 /* For the dynamically growing data area pages */ 1238 dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE; 1239 page = tcmu_try_get_block_page(udev, dbi); 1240 if (!page) 1241 return VM_FAULT_NOPAGE; 1242 } 1243 1244 get_page(page); 1245 vmf->page = page; 1246 return 0; 1247 } 1248 1249 static const struct vm_operations_struct tcmu_vm_ops = { 1250 .fault = tcmu_vma_fault, 1251 }; 1252 1253 static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) 1254 { 1255 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1256 1257 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 1258 vma->vm_ops = &tcmu_vm_ops; 1259 1260 vma->vm_private_data = udev; 1261 1262 /* Ensure the mmap is exactly the right size */ 1263 if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT)) 1264 return -EINVAL; 1265 1266 return 0; 1267 } 1268 1269 static int tcmu_open(struct uio_info *info, struct inode *inode) 1270 { 1271 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1272 1273 /* O_EXCL not supported for char devs, so fake it? */ 1274 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags)) 1275 return -EBUSY; 1276 1277 udev->inode = inode; 1278 kref_get(&udev->kref); 1279 1280 pr_debug("open\n"); 1281 1282 return 0; 1283 } 1284 1285 static void tcmu_dev_call_rcu(struct rcu_head *p) 1286 { 1287 struct se_device *dev = container_of(p, struct se_device, rcu_head); 1288 struct tcmu_dev *udev = TCMU_DEV(dev); 1289 1290 kfree(udev->uio_info.name); 1291 kfree(udev->name); 1292 kfree(udev); 1293 } 1294 1295 static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd) 1296 { 1297 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 1298 kmem_cache_free(tcmu_cmd_cache, cmd); 1299 return 0; 1300 } 1301 return -EINVAL; 1302 } 1303 1304 static void tcmu_blocks_release(struct tcmu_dev *udev) 1305 { 1306 int i; 1307 struct page *page; 1308 1309 /* Try to release all block pages */ 1310 mutex_lock(&udev->cmdr_lock); 1311 for (i = 0; i <= udev->dbi_max; i++) { 1312 page = radix_tree_delete(&udev->data_blocks, i); 1313 if (page) { 1314 __free_page(page); 1315 atomic_dec(&global_db_count); 1316 } 1317 } 1318 mutex_unlock(&udev->cmdr_lock); 1319 } 1320 1321 static void tcmu_dev_kref_release(struct kref *kref) 1322 { 1323 struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); 1324 struct se_device *dev = &udev->se_dev; 1325 struct tcmu_cmd *cmd; 1326 bool all_expired = true; 1327 int i; 1328 1329 vfree(udev->mb_addr); 1330 udev->mb_addr = NULL; 1331 1332 /* Upper layer should drain all requests before calling this */ 1333 spin_lock_irq(&udev->commands_lock); 1334 idr_for_each_entry(&udev->commands, cmd, i) { 1335 if (tcmu_check_and_free_pending_cmd(cmd) != 0) 1336 all_expired = false; 1337 } 1338 idr_destroy(&udev->commands); 1339 spin_unlock_irq(&udev->commands_lock); 1340 WARN_ON(!all_expired); 1341 1342 tcmu_blocks_release(udev); 1343 1344 call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); 1345 } 1346 1347 static int tcmu_release(struct uio_info *info, struct inode *inode) 1348 { 1349 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1350 1351 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); 1352 1353 pr_debug("close\n"); 1354 /* release ref from open */ 1355 kref_put(&udev->kref, 
tcmu_dev_kref_release); 1356 return 0; 1357 } 1358 1359 static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) 1360 { 1361 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 1362 1363 if (!tcmu_kern_cmd_reply_supported) 1364 return; 1365 1366 if (udev->nl_reply_supported <= 0) 1367 return; 1368 1369 relock: 1370 spin_lock(&udev->nl_cmd_lock); 1371 1372 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { 1373 spin_unlock(&udev->nl_cmd_lock); 1374 pr_debug("sleeping for open nl cmd\n"); 1375 wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC)); 1376 goto relock; 1377 } 1378 1379 memset(nl_cmd, 0, sizeof(*nl_cmd)); 1380 nl_cmd->cmd = cmd; 1381 init_completion(&nl_cmd->complete); 1382 1383 spin_unlock(&udev->nl_cmd_lock); 1384 } 1385 1386 static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) 1387 { 1388 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 1389 int ret; 1390 DEFINE_WAIT(__wait); 1391 1392 if (!tcmu_kern_cmd_reply_supported) 1393 return 0; 1394 1395 if (udev->nl_reply_supported <= 0) 1396 return 0; 1397 1398 pr_debug("sleeping for nl reply\n"); 1399 wait_for_completion(&nl_cmd->complete); 1400 1401 spin_lock(&udev->nl_cmd_lock); 1402 nl_cmd->cmd = TCMU_CMD_UNSPEC; 1403 ret = nl_cmd->status; 1404 nl_cmd->status = 0; 1405 spin_unlock(&udev->nl_cmd_lock); 1406 1407 wake_up_all(&udev->nl_cmd_wq); 1408 1409 return ret;; 1410 } 1411 1412 static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd, 1413 int reconfig_attr, const void *reconfig_data) 1414 { 1415 struct sk_buff *skb; 1416 void *msg_header; 1417 int ret = -ENOMEM; 1418 1419 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1420 if (!skb) 1421 return ret; 1422 1423 msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd); 1424 if (!msg_header) 1425 goto free_skb; 1426 1427 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name); 1428 if (ret < 0) 1429 goto free_skb; 1430 1431 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor); 1432 if (ret < 0) 1433 goto free_skb; 1434 1435 ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index); 1436 if (ret < 0) 1437 goto free_skb; 1438 1439 if (cmd == TCMU_CMD_RECONFIG_DEVICE) { 1440 switch (reconfig_attr) { 1441 case TCMU_ATTR_DEV_CFG: 1442 ret = nla_put_string(skb, reconfig_attr, reconfig_data); 1443 break; 1444 case TCMU_ATTR_DEV_SIZE: 1445 ret = nla_put_u64_64bit(skb, reconfig_attr, 1446 *((u64 *)reconfig_data), 1447 TCMU_ATTR_PAD); 1448 break; 1449 case TCMU_ATTR_WRITECACHE: 1450 ret = nla_put_u8(skb, reconfig_attr, 1451 *((u8 *)reconfig_data)); 1452 break; 1453 default: 1454 BUG(); 1455 } 1456 1457 if (ret < 0) 1458 goto free_skb; 1459 } 1460 1461 genlmsg_end(skb, msg_header); 1462 1463 tcmu_init_genl_cmd_reply(udev, cmd); 1464 1465 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0, 1466 TCMU_MCGRP_CONFIG, GFP_KERNEL); 1467 /* We don't care if no one is listening */ 1468 if (ret == -ESRCH) 1469 ret = 0; 1470 if (!ret) 1471 ret = tcmu_wait_genl_cmd_reply(udev); 1472 1473 return ret; 1474 free_skb: 1475 nlmsg_free(skb); 1476 return ret; 1477 } 1478 1479 static int tcmu_update_uio_info(struct tcmu_dev *udev) 1480 { 1481 struct tcmu_hba *hba = udev->hba->hba_ptr; 1482 struct uio_info *info; 1483 size_t size, used; 1484 char *str; 1485 1486 info = &udev->uio_info; 1487 size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name, 1488 udev->dev_config); 1489 size += 1; /* for \0 */ 1490 str = kmalloc(size, GFP_KERNEL); 1491 if (!str) 1492 return -ENOMEM; 1493 1494 used = snprintf(str, size, 
"tcm-user/%u/%s", hba->host_id, udev->name); 1495 if (udev->dev_config[0]) 1496 snprintf(str + used, size - used, "/%s", udev->dev_config); 1497 1498 /* If the old string exists, free it */ 1499 kfree(info->name); 1500 info->name = str; 1501 1502 return 0; 1503 } 1504 1505 static int tcmu_configure_device(struct se_device *dev) 1506 { 1507 struct tcmu_dev *udev = TCMU_DEV(dev); 1508 struct uio_info *info; 1509 struct tcmu_mailbox *mb; 1510 int ret = 0; 1511 1512 ret = tcmu_update_uio_info(udev); 1513 if (ret) 1514 return ret; 1515 1516 info = &udev->uio_info; 1517 1518 udev->mb_addr = vzalloc(CMDR_SIZE); 1519 if (!udev->mb_addr) { 1520 ret = -ENOMEM; 1521 goto err_vzalloc; 1522 } 1523 1524 /* mailbox fits in first part of CMDR space */ 1525 udev->cmdr_size = CMDR_SIZE - CMDR_OFF; 1526 udev->data_off = CMDR_SIZE; 1527 udev->data_size = DATA_SIZE; 1528 udev->dbi_thresh = 0; /* Default in Idle state */ 1529 udev->waiting_global = false; 1530 1531 /* Initialise the mailbox of the ring buffer */ 1532 mb = udev->mb_addr; 1533 mb->version = TCMU_MAILBOX_VERSION; 1534 mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC; 1535 mb->cmdr_off = CMDR_OFF; 1536 mb->cmdr_size = udev->cmdr_size; 1537 1538 WARN_ON(!PAGE_ALIGNED(udev->data_off)); 1539 WARN_ON(udev->data_size % PAGE_SIZE); 1540 WARN_ON(udev->data_size % DATA_BLOCK_SIZE); 1541 1542 info->version = __stringify(TCMU_MAILBOX_VERSION); 1543 1544 info->mem[0].name = "tcm-user command & data buffer"; 1545 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr; 1546 info->mem[0].size = TCMU_RING_SIZE; 1547 info->mem[0].memtype = UIO_MEM_NONE; 1548 1549 info->irqcontrol = tcmu_irqcontrol; 1550 info->irq = UIO_IRQ_CUSTOM; 1551 1552 info->mmap = tcmu_mmap; 1553 info->open = tcmu_open; 1554 info->release = tcmu_release; 1555 1556 ret = uio_register_device(tcmu_root_device, info); 1557 if (ret) 1558 goto err_register; 1559 1560 /* User can set hw_block_size before enable the device */ 1561 if (dev->dev_attrib.hw_block_size == 0) 1562 dev->dev_attrib.hw_block_size = 512; 1563 /* Other attributes can be configured in userspace */ 1564 if (!dev->dev_attrib.hw_max_sectors) 1565 dev->dev_attrib.hw_max_sectors = 128; 1566 if (!dev->dev_attrib.emulate_write_cache) 1567 dev->dev_attrib.emulate_write_cache = 0; 1568 dev->dev_attrib.hw_queue_depth = 128; 1569 1570 /* If user didn't explicitly disable netlink reply support, use 1571 * module scope setting. 1572 */ 1573 if (udev->nl_reply_supported >= 0) 1574 udev->nl_reply_supported = tcmu_kern_cmd_reply_supported; 1575 1576 /* 1577 * Get a ref incase userspace does a close on the uio device before 1578 * LIO has initiated tcmu_free_device. 1579 */ 1580 kref_get(&udev->kref); 1581 1582 ret = tcmu_netlink_event(udev, TCMU_CMD_ADDED_DEVICE, 0, NULL); 1583 if (ret) 1584 goto err_netlink; 1585 1586 mutex_lock(&root_udev_mutex); 1587 list_add(&udev->node, &root_udev); 1588 mutex_unlock(&root_udev_mutex); 1589 1590 return 0; 1591 1592 err_netlink: 1593 kref_put(&udev->kref, tcmu_dev_kref_release); 1594 uio_unregister_device(&udev->uio_info); 1595 err_register: 1596 vfree(udev->mb_addr); 1597 udev->mb_addr = NULL; 1598 err_vzalloc: 1599 kfree(info->name); 1600 info->name = NULL; 1601 1602 return ret; 1603 } 1604 1605 static bool tcmu_dev_configured(struct tcmu_dev *udev) 1606 { 1607 return udev->uio_info.uio_dev ? 
true : false; 1608 } 1609 1610 static void tcmu_free_device(struct se_device *dev) 1611 { 1612 struct tcmu_dev *udev = TCMU_DEV(dev); 1613 1614 /* release ref from init */ 1615 kref_put(&udev->kref, tcmu_dev_kref_release); 1616 } 1617 1618 static void tcmu_destroy_device(struct se_device *dev) 1619 { 1620 struct tcmu_dev *udev = TCMU_DEV(dev); 1621 1622 del_timer_sync(&udev->timeout); 1623 1624 mutex_lock(&root_udev_mutex); 1625 list_del(&udev->node); 1626 mutex_unlock(&root_udev_mutex); 1627 1628 tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL); 1629 1630 uio_unregister_device(&udev->uio_info); 1631 1632 /* release ref from configure */ 1633 kref_put(&udev->kref, tcmu_dev_kref_release); 1634 } 1635 1636 enum { 1637 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors, 1638 Opt_nl_reply_supported, Opt_err, 1639 }; 1640 1641 static match_table_t tokens = { 1642 {Opt_dev_config, "dev_config=%s"}, 1643 {Opt_dev_size, "dev_size=%u"}, 1644 {Opt_hw_block_size, "hw_block_size=%u"}, 1645 {Opt_hw_max_sectors, "hw_max_sectors=%u"}, 1646 {Opt_nl_reply_supported, "nl_reply_supported=%d"}, 1647 {Opt_err, NULL} 1648 }; 1649 1650 static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib) 1651 { 1652 unsigned long tmp_ul; 1653 char *arg_p; 1654 int ret; 1655 1656 arg_p = match_strdup(arg); 1657 if (!arg_p) 1658 return -ENOMEM; 1659 1660 ret = kstrtoul(arg_p, 0, &tmp_ul); 1661 kfree(arg_p); 1662 if (ret < 0) { 1663 pr_err("kstrtoul() failed for dev attrib\n"); 1664 return ret; 1665 } 1666 if (!tmp_ul) { 1667 pr_err("dev attrib must be nonzero\n"); 1668 return -EINVAL; 1669 } 1670 *dev_attrib = tmp_ul; 1671 return 0; 1672 } 1673 1674 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, 1675 const char *page, ssize_t count) 1676 { 1677 struct tcmu_dev *udev = TCMU_DEV(dev); 1678 char *orig, *ptr, *opts, *arg_p; 1679 substring_t args[MAX_OPT_ARGS]; 1680 int ret = 0, token; 1681 1682 opts = kstrdup(page, GFP_KERNEL); 1683 if (!opts) 1684 return -ENOMEM; 1685 1686 orig = opts; 1687 1688 while ((ptr = strsep(&opts, ",\n")) != NULL) { 1689 if (!*ptr) 1690 continue; 1691 1692 token = match_token(ptr, tokens, args); 1693 switch (token) { 1694 case Opt_dev_config: 1695 if (match_strlcpy(udev->dev_config, &args[0], 1696 TCMU_CONFIG_LEN) == 0) { 1697 ret = -EINVAL; 1698 break; 1699 } 1700 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config); 1701 break; 1702 case Opt_dev_size: 1703 arg_p = match_strdup(&args[0]); 1704 if (!arg_p) { 1705 ret = -ENOMEM; 1706 break; 1707 } 1708 ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size); 1709 kfree(arg_p); 1710 if (ret < 0) 1711 pr_err("kstrtoul() failed for dev_size=\n"); 1712 break; 1713 case Opt_hw_block_size: 1714 ret = tcmu_set_dev_attrib(&args[0], 1715 &(dev->dev_attrib.hw_block_size)); 1716 break; 1717 case Opt_hw_max_sectors: 1718 ret = tcmu_set_dev_attrib(&args[0], 1719 &(dev->dev_attrib.hw_max_sectors)); 1720 break; 1721 case Opt_nl_reply_supported: 1722 arg_p = match_strdup(&args[0]); 1723 if (!arg_p) { 1724 ret = -ENOMEM; 1725 break; 1726 } 1727 ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported); 1728 kfree(arg_p); 1729 if (ret < 0) 1730 pr_err("kstrtoint() failed for nl_reply_supported=\n"); 1731 break; 1732 default: 1733 break; 1734 } 1735 1736 if (ret) 1737 break; 1738 } 1739 1740 kfree(orig); 1741 return (!ret) ? 
count : ret; 1742 } 1743 1744 static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) 1745 { 1746 struct tcmu_dev *udev = TCMU_DEV(dev); 1747 ssize_t bl = 0; 1748 1749 bl = sprintf(b + bl, "Config: %s ", 1750 udev->dev_config[0] ? udev->dev_config : "NULL"); 1751 bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size); 1752 1753 return bl; 1754 } 1755 1756 static sector_t tcmu_get_blocks(struct se_device *dev) 1757 { 1758 struct tcmu_dev *udev = TCMU_DEV(dev); 1759 1760 return div_u64(udev->dev_size - dev->dev_attrib.block_size, 1761 dev->dev_attrib.block_size); 1762 } 1763 1764 static sense_reason_t 1765 tcmu_parse_cdb(struct se_cmd *cmd) 1766 { 1767 return passthrough_parse_cdb(cmd, tcmu_queue_cmd); 1768 } 1769 1770 static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page) 1771 { 1772 struct se_dev_attrib *da = container_of(to_config_group(item), 1773 struct se_dev_attrib, da_group); 1774 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1775 1776 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); 1777 } 1778 1779 static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page, 1780 size_t count) 1781 { 1782 struct se_dev_attrib *da = container_of(to_config_group(item), 1783 struct se_dev_attrib, da_group); 1784 struct tcmu_dev *udev = container_of(da->da_dev, 1785 struct tcmu_dev, se_dev); 1786 u32 val; 1787 int ret; 1788 1789 if (da->da_dev->export_count) { 1790 pr_err("Unable to set tcmu cmd_time_out while exports exist\n"); 1791 return -EINVAL; 1792 } 1793 1794 ret = kstrtou32(page, 0, &val); 1795 if (ret < 0) 1796 return ret; 1797 1798 udev->cmd_time_out = val * MSEC_PER_SEC; 1799 return count; 1800 } 1801 CONFIGFS_ATTR(tcmu_, cmd_time_out); 1802 1803 static ssize_t tcmu_dev_config_show(struct config_item *item, char *page) 1804 { 1805 struct se_dev_attrib *da = container_of(to_config_group(item), 1806 struct se_dev_attrib, da_group); 1807 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1808 1809 return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config); 1810 } 1811 1812 static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page, 1813 size_t count) 1814 { 1815 struct se_dev_attrib *da = container_of(to_config_group(item), 1816 struct se_dev_attrib, da_group); 1817 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1818 int ret, len; 1819 1820 len = strlen(page); 1821 if (!len || len > TCMU_CONFIG_LEN - 1) 1822 return -EINVAL; 1823 1824 /* Check if device has been configured before */ 1825 if (tcmu_dev_configured(udev)) { 1826 ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE, 1827 TCMU_ATTR_DEV_CFG, page); 1828 if (ret) { 1829 pr_err("Unable to reconfigure device\n"); 1830 return ret; 1831 } 1832 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); 1833 1834 ret = tcmu_update_uio_info(udev); 1835 if (ret) 1836 return ret; 1837 return count; 1838 } 1839 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); 1840 1841 return count; 1842 } 1843 CONFIGFS_ATTR(tcmu_, dev_config); 1844 1845 static ssize_t tcmu_dev_size_show(struct config_item *item, char *page) 1846 { 1847 struct se_dev_attrib *da = container_of(to_config_group(item), 1848 struct se_dev_attrib, da_group); 1849 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1850 1851 return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size); 1852 } 1853 1854 static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page, 1855 size_t count) 1856 { 1857 struct se_dev_attrib *da = container_of(to_config_group(item), 1858 struct 
se_dev_attrib, da_group); 1859 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1860 u64 val; 1861 int ret; 1862 1863 ret = kstrtou64(page, 0, &val); 1864 if (ret < 0) 1865 return ret; 1866 1867 /* Check if device has been configured before */ 1868 if (tcmu_dev_configured(udev)) { 1869 ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE, 1870 TCMU_ATTR_DEV_SIZE, &val); 1871 if (ret) { 1872 pr_err("Unable to reconfigure device\n"); 1873 return ret; 1874 } 1875 } 1876 udev->dev_size = val; 1877 return count; 1878 } 1879 CONFIGFS_ATTR(tcmu_, dev_size); 1880 1881 static ssize_t tcmu_nl_reply_supported_show(struct config_item *item, 1882 char *page) 1883 { 1884 struct se_dev_attrib *da = container_of(to_config_group(item), 1885 struct se_dev_attrib, da_group); 1886 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1887 1888 return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported); 1889 } 1890 1891 static ssize_t tcmu_nl_reply_supported_store(struct config_item *item, 1892 const char *page, size_t count) 1893 { 1894 struct se_dev_attrib *da = container_of(to_config_group(item), 1895 struct se_dev_attrib, da_group); 1896 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1897 s8 val; 1898 int ret; 1899 1900 ret = kstrtos8(page, 0, &val); 1901 if (ret < 0) 1902 return ret; 1903 1904 udev->nl_reply_supported = val; 1905 return count; 1906 } 1907 CONFIGFS_ATTR(tcmu_, nl_reply_supported); 1908 1909 static ssize_t tcmu_emulate_write_cache_show(struct config_item *item, 1910 char *page) 1911 { 1912 struct se_dev_attrib *da = container_of(to_config_group(item), 1913 struct se_dev_attrib, da_group); 1914 1915 return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache); 1916 } 1917 1918 static ssize_t tcmu_emulate_write_cache_store(struct config_item *item, 1919 const char *page, size_t count) 1920 { 1921 struct se_dev_attrib *da = container_of(to_config_group(item), 1922 struct se_dev_attrib, da_group); 1923 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1924 u8 val; 1925 int ret; 1926 1927 ret = kstrtou8(page, 0, &val); 1928 if (ret < 0) 1929 return ret; 1930 1931 /* Check if device has been configured before */ 1932 if (tcmu_dev_configured(udev)) { 1933 ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE, 1934 TCMU_ATTR_WRITECACHE, &val); 1935 if (ret) { 1936 pr_err("Unable to reconfigure device\n"); 1937 return ret; 1938 } 1939 } 1940 1941 da->emulate_write_cache = val; 1942 return count; 1943 } 1944 CONFIGFS_ATTR(tcmu_, emulate_write_cache); 1945 1946 static struct configfs_attribute *tcmu_attrib_attrs[] = { 1947 &tcmu_attr_cmd_time_out, 1948 &tcmu_attr_dev_config, 1949 &tcmu_attr_dev_size, 1950 &tcmu_attr_emulate_write_cache, 1951 &tcmu_attr_nl_reply_supported, 1952 NULL, 1953 }; 1954 1955 static struct configfs_attribute **tcmu_attrs; 1956 1957 static struct target_backend_ops tcmu_ops = { 1958 .name = "user", 1959 .owner = THIS_MODULE, 1960 .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, 1961 .attach_hba = tcmu_attach_hba, 1962 .detach_hba = tcmu_detach_hba, 1963 .alloc_device = tcmu_alloc_device, 1964 .configure_device = tcmu_configure_device, 1965 .destroy_device = tcmu_destroy_device, 1966 .free_device = tcmu_free_device, 1967 .parse_cdb = tcmu_parse_cdb, 1968 .set_configfs_dev_params = tcmu_set_configfs_dev_params, 1969 .show_configfs_dev_params = tcmu_show_configfs_dev_params, 1970 .get_device_type = sbc_get_device_type, 1971 .get_blocks = tcmu_get_blocks, 1972 .tb_dev_attrib_attrs = NULL, 1973 }; 1974 1975 static int unmap_thread_fn(void *data) 1976 { 1977 struct tcmu_dev *udev; 
1978 loff_t off; 1979 uint32_t start, end, block; 1980 struct page *page; 1981 int i; 1982 1983 while (!kthread_should_stop()) { 1984 DEFINE_WAIT(__wait); 1985 1986 prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE); 1987 schedule(); 1988 finish_wait(&unmap_wait, &__wait); 1989 1990 if (kthread_should_stop()) 1991 break; 1992 1993 mutex_lock(&root_udev_mutex); 1994 list_for_each_entry(udev, &root_udev, node) { 1995 mutex_lock(&udev->cmdr_lock); 1996 1997 /* Try to complete the finished commands first */ 1998 tcmu_handle_completions(udev); 1999 2000 /* Skip the udevs waiting the global pool or in idle */ 2001 if (udev->waiting_global || !udev->dbi_thresh) { 2002 mutex_unlock(&udev->cmdr_lock); 2003 continue; 2004 } 2005 2006 end = udev->dbi_max + 1; 2007 block = find_last_bit(udev->data_bitmap, end); 2008 if (block == udev->dbi_max) { 2009 /* 2010 * The last bit is dbi_max, so there is 2011 * no need to shrink any blocks. 2012 */ 2013 mutex_unlock(&udev->cmdr_lock); 2014 continue; 2015 } else if (block == end) { 2016 /* The current udev will goto idle state */ 2017 udev->dbi_thresh = start = 0; 2018 udev->dbi_max = 0; 2019 } else { 2020 udev->dbi_thresh = start = block + 1; 2021 udev->dbi_max = block; 2022 } 2023 2024 /* Here will truncate the data area from off */ 2025 off = udev->data_off + start * DATA_BLOCK_SIZE; 2026 unmap_mapping_range(udev->inode->i_mapping, off, 0, 1); 2027 2028 /* Release the block pages */ 2029 for (i = start; i < end; i++) { 2030 page = radix_tree_delete(&udev->data_blocks, i); 2031 if (page) { 2032 __free_page(page); 2033 atomic_dec(&global_db_count); 2034 } 2035 } 2036 mutex_unlock(&udev->cmdr_lock); 2037 } 2038 2039 /* 2040 * Try to wake up the udevs who are waiting 2041 * for the global data pool. 2042 */ 2043 list_for_each_entry(udev, &root_udev, node) { 2044 if (udev->waiting_global) 2045 wake_up(&udev->wait_cmdr); 2046 } 2047 mutex_unlock(&root_udev_mutex); 2048 } 2049 2050 return 0; 2051 } 2052 2053 static int __init tcmu_module_init(void) 2054 { 2055 int ret, i, k, len = 0; 2056 2057 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); 2058 2059 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache", 2060 sizeof(struct tcmu_cmd), 2061 __alignof__(struct tcmu_cmd), 2062 0, NULL); 2063 if (!tcmu_cmd_cache) 2064 return -ENOMEM; 2065 2066 tcmu_root_device = root_device_register("tcm_user"); 2067 if (IS_ERR(tcmu_root_device)) { 2068 ret = PTR_ERR(tcmu_root_device); 2069 goto out_free_cache; 2070 } 2071 2072 ret = genl_register_family(&tcmu_genl_family); 2073 if (ret < 0) { 2074 goto out_unreg_device; 2075 } 2076 2077 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { 2078 len += sizeof(struct configfs_attribute *); 2079 } 2080 for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) { 2081 len += sizeof(struct configfs_attribute *); 2082 } 2083 len += sizeof(struct configfs_attribute *); 2084 2085 tcmu_attrs = kzalloc(len, GFP_KERNEL); 2086 if (!tcmu_attrs) { 2087 ret = -ENOMEM; 2088 goto out_unreg_genl; 2089 } 2090 2091 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { 2092 tcmu_attrs[i] = passthrough_attrib_attrs[i]; 2093 } 2094 for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) { 2095 tcmu_attrs[i] = tcmu_attrib_attrs[k]; 2096 i++; 2097 } 2098 tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs; 2099 2100 ret = transport_backend_register(&tcmu_ops); 2101 if (ret) 2102 goto out_attrs; 2103 2104 init_waitqueue_head(&unmap_wait); 2105 unmap_thread = kthread_run(unmap_thread_fn, NULL, "tcmu_unmap"); 2106 if (IS_ERR(unmap_thread)) { 2107 ret = 
PTR_ERR(unmap_thread);
		goto out_unreg_transport;
	}

	return 0;

out_unreg_transport:
	target_backend_unregister(&tcmu_ops);
out_attrs:
	kfree(tcmu_attrs);
out_unreg_genl:
	genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
	root_device_unregister(tcmu_root_device);
out_free_cache:
	kmem_cache_destroy(tcmu_cmd_cache);

	return ret;
}

static void __exit tcmu_module_exit(void)
{
	kthread_stop(unmap_thread);
	target_backend_unregister(&tcmu_ops);
	kfree(tcmu_attrs);
	genl_unregister_family(&tcmu_genl_family);
	root_device_unregister(tcmu_root_device);
	kmem_cache_destroy(tcmu_cmd_cache);
}

MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
MODULE_LICENSE("GPL");

module_init(tcmu_module_init);
module_exit(tcmu_module_exit);
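
/*
 * Userspace side of the ring protocol, for orientation only. This is an
 * illustrative sketch rather than part of the driver: handle_scsi_cmd()
 * is a hypothetical helper, the uio node name depends on the assigned
 * minor, ring_size comes from the uio map0 size attribute in sysfs, and
 * error handling is omitted. The structures and tcmu_hdr_*() helpers are
 * the ones from the uapi header <linux/target_core_user.h> included above.
 *
 *	int fd = open("/dev/uio0", O_RDWR);
 *	void *ring = mmap(NULL, ring_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *	struct tcmu_mailbox *mb = ring;
 *	uint32_t events;
 *
 *	for (;;) {
 *		read(fd, &events, sizeof(events));
 *		while (mb->cmd_tail != mb->cmd_head) {
 *			struct tcmu_cmd_entry *ent =
 *				ring + mb->cmdr_off + mb->cmd_tail;
 *
 *			if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD)
 *				handle_scsi_cmd(ring, ent);
 *
 *			mb->cmd_tail = (mb->cmd_tail +
 *					tcmu_hdr_get_len(ent->hdr.len_op)) %
 *				       mb->cmdr_size;
 *		}
 *		write(fd, &events, sizeof(events));
 *	}
 *
 * The blocking read() waits for the uio_event_notify() issued above, the
 * 4-byte write() lands in tcmu_irqcontrol() so completions get reaped,
 * TCMU_OP_PAD entries are simply skipped over, and handle_scsi_cmd()
 * would parse the CDB at ring + ent->req.cdb_off, move data through
 * ent->req.iov[], and fill in ent->rsp.scsi_status (plus rsp.sense_buffer
 * on CHECK CONDITION) before the tail is advanced.
 */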