/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 * Copyright (C) 2017 Chinamobile, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/radix-tree.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
#include <linux/configfs.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>

/*
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This allows backends that are too
 * complex for in-kernel support to be implemented in userspace.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap()ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */

#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

/* For the cmd area, the size is fixed at 8MB */
#define CMDR_SIZE (8 * 1024 * 1024)

/*
 * For the data area, the block size is PAGE_SIZE and
 * the total size is 256K * PAGE_SIZE.
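 * (With 4K pages this gives a 1GB data area in the mapping; backing
 * pages are allocated on demand and tracked in a per-device radix
 * tree, so the full size costs address space, not memory.)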
78 */ 79 #define DATA_BLOCK_SIZE PAGE_SIZE 80 #define DATA_BLOCK_BITS (256 * 1024) 81 #define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE) 82 #define DATA_BLOCK_INIT_BITS 128 83 84 /* The total size of the ring is 8M + 256K * PAGE_SIZE */ 85 #define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE) 86 87 /* Default maximum of the global data blocks(512K * PAGE_SIZE) */ 88 #define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024) 89 90 static u8 tcmu_kern_cmd_reply_supported; 91 92 static struct device *tcmu_root_device; 93 94 struct tcmu_hba { 95 u32 host_id; 96 }; 97 98 #define TCMU_CONFIG_LEN 256 99 100 struct tcmu_nl_cmd { 101 /* wake up thread waiting for reply */ 102 struct completion complete; 103 int cmd; 104 int status; 105 }; 106 107 struct tcmu_dev { 108 struct list_head node; 109 struct kref kref; 110 struct se_device se_dev; 111 112 char *name; 113 struct se_hba *hba; 114 115 #define TCMU_DEV_BIT_OPEN 0 116 #define TCMU_DEV_BIT_BROKEN 1 117 unsigned long flags; 118 119 struct uio_info uio_info; 120 121 struct inode *inode; 122 123 struct tcmu_mailbox *mb_addr; 124 size_t dev_size; 125 u32 cmdr_size; 126 u32 cmdr_last_cleaned; 127 /* Offset of data area from start of mb */ 128 /* Must add data_off and mb_addr to get the address */ 129 size_t data_off; 130 size_t data_size; 131 132 wait_queue_head_t wait_cmdr; 133 struct mutex cmdr_lock; 134 135 bool waiting_global; 136 uint32_t dbi_max; 137 uint32_t dbi_thresh; 138 DECLARE_BITMAP(data_bitmap, DATA_BLOCK_BITS); 139 struct radix_tree_root data_blocks; 140 141 struct idr commands; 142 spinlock_t commands_lock; 143 144 struct timer_list timeout; 145 unsigned int cmd_time_out; 146 147 spinlock_t nl_cmd_lock; 148 struct tcmu_nl_cmd curr_nl_cmd; 149 /* wake up threads waiting on curr_nl_cmd */ 150 wait_queue_head_t nl_cmd_wq; 151 152 char dev_config[TCMU_CONFIG_LEN]; 153 }; 154 155 #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev) 156 157 #define CMDR_OFF sizeof(struct tcmu_mailbox) 158 159 struct tcmu_cmd { 160 struct se_cmd *se_cmd; 161 struct tcmu_dev *tcmu_dev; 162 163 uint16_t cmd_id; 164 165 /* Can't use se_cmd when cleaning up expired cmds, because if 166 cmd has been completed then accessing se_cmd is off limits */ 167 uint32_t dbi_cnt; 168 uint32_t dbi_cur; 169 uint32_t *dbi; 170 171 unsigned long deadline; 172 173 #define TCMU_CMD_BIT_EXPIRED 0 174 unsigned long flags; 175 }; 176 177 static struct task_struct *unmap_thread; 178 static wait_queue_head_t unmap_wait; 179 static DEFINE_MUTEX(root_udev_mutex); 180 static LIST_HEAD(root_udev); 181 182 static atomic_t global_db_count = ATOMIC_INIT(0); 183 184 static struct kmem_cache *tcmu_cmd_cache; 185 186 /* multicast group */ 187 enum tcmu_multicast_groups { 188 TCMU_MCGRP_CONFIG, 189 }; 190 191 static const struct genl_multicast_group tcmu_mcgrps[] = { 192 [TCMU_MCGRP_CONFIG] = { .name = "config", }, 193 }; 194 195 static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = { 196 [TCMU_ATTR_DEVICE] = { .type = NLA_STRING }, 197 [TCMU_ATTR_MINOR] = { .type = NLA_U32 }, 198 [TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 }, 199 [TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 }, 200 [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 }, 201 }; 202 203 static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd) 204 { 205 struct se_device *dev; 206 struct tcmu_dev *udev; 207 struct tcmu_nl_cmd *nl_cmd; 208 int dev_id, rc, ret = 0; 209 bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE); 210 211 if (!info->attrs[TCMU_ATTR_CMD_STATUS] || 212 
!info->attrs[TCMU_ATTR_DEVICE_ID]) { 213 printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n"); 214 return -EINVAL; 215 } 216 217 dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]); 218 rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]); 219 220 dev = target_find_device(dev_id, !is_removed); 221 if (!dev) { 222 printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n", 223 completed_cmd, rc, dev_id); 224 return -ENODEV; 225 } 226 udev = TCMU_DEV(dev); 227 228 spin_lock(&udev->nl_cmd_lock); 229 nl_cmd = &udev->curr_nl_cmd; 230 231 pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id, 232 nl_cmd->cmd, completed_cmd, rc); 233 234 if (nl_cmd->cmd != completed_cmd) { 235 printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n", 236 completed_cmd, nl_cmd->cmd); 237 ret = -EINVAL; 238 } else { 239 nl_cmd->status = rc; 240 } 241 242 spin_unlock(&udev->nl_cmd_lock); 243 if (!is_removed) 244 target_undepend_item(&dev->dev_group.cg_item); 245 if (!ret) 246 complete(&nl_cmd->complete); 247 return ret; 248 } 249 250 static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info) 251 { 252 return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE); 253 } 254 255 static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info) 256 { 257 return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE); 258 } 259 260 static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb, 261 struct genl_info *info) 262 { 263 return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE); 264 } 265 266 static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info) 267 { 268 if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) { 269 tcmu_kern_cmd_reply_supported = 270 nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]); 271 printk(KERN_INFO "tcmu daemon: command reply support %u.\n", 272 tcmu_kern_cmd_reply_supported); 273 } 274 275 return 0; 276 } 277 278 static const struct genl_ops tcmu_genl_ops[] = { 279 { 280 .cmd = TCMU_CMD_SET_FEATURES, 281 .flags = GENL_ADMIN_PERM, 282 .policy = tcmu_attr_policy, 283 .doit = tcmu_genl_set_features, 284 }, 285 { 286 .cmd = TCMU_CMD_ADDED_DEVICE_DONE, 287 .flags = GENL_ADMIN_PERM, 288 .policy = tcmu_attr_policy, 289 .doit = tcmu_genl_add_dev_done, 290 }, 291 { 292 .cmd = TCMU_CMD_REMOVED_DEVICE_DONE, 293 .flags = GENL_ADMIN_PERM, 294 .policy = tcmu_attr_policy, 295 .doit = tcmu_genl_rm_dev_done, 296 }, 297 { 298 .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE, 299 .flags = GENL_ADMIN_PERM, 300 .policy = tcmu_attr_policy, 301 .doit = tcmu_genl_reconfig_dev_done, 302 }, 303 }; 304 305 /* Our generic netlink family */ 306 static struct genl_family tcmu_genl_family __ro_after_init = { 307 .module = THIS_MODULE, 308 .hdrsize = 0, 309 .name = "TCM-USER", 310 .version = 2, 311 .maxattr = TCMU_ATTR_MAX, 312 .mcgrps = tcmu_mcgrps, 313 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps), 314 .netnsok = true, 315 .ops = tcmu_genl_ops, 316 .n_ops = ARRAY_SIZE(tcmu_genl_ops), 317 }; 318 319 #define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index)) 320 #define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0) 321 #define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index)) 322 #define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++]) 323 324 static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len) 325 { 326 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 327 uint32_t i; 328 329 for (i = 0; i < len; i++) 330 clear_bit(tcmu_cmd->dbi[i], 
udev->data_bitmap); 331 } 332 333 static inline bool tcmu_get_empty_block(struct tcmu_dev *udev, 334 struct tcmu_cmd *tcmu_cmd) 335 { 336 struct page *page; 337 int ret, dbi; 338 339 dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh); 340 if (dbi == udev->dbi_thresh) 341 return false; 342 343 page = radix_tree_lookup(&udev->data_blocks, dbi); 344 if (!page) { 345 if (atomic_add_return(1, &global_db_count) > 346 TCMU_GLOBAL_MAX_BLOCKS) { 347 atomic_dec(&global_db_count); 348 return false; 349 } 350 351 /* try to get new page from the mm */ 352 page = alloc_page(GFP_KERNEL); 353 if (!page) 354 goto err_alloc; 355 356 ret = radix_tree_insert(&udev->data_blocks, dbi, page); 357 if (ret) 358 goto err_insert; 359 } 360 361 if (dbi > udev->dbi_max) 362 udev->dbi_max = dbi; 363 364 set_bit(dbi, udev->data_bitmap); 365 tcmu_cmd_set_dbi(tcmu_cmd, dbi); 366 367 return true; 368 err_insert: 369 __free_page(page); 370 err_alloc: 371 atomic_dec(&global_db_count); 372 return false; 373 } 374 375 static bool tcmu_get_empty_blocks(struct tcmu_dev *udev, 376 struct tcmu_cmd *tcmu_cmd) 377 { 378 int i; 379 380 udev->waiting_global = false; 381 382 for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) { 383 if (!tcmu_get_empty_block(udev, tcmu_cmd)) 384 goto err; 385 } 386 return true; 387 388 err: 389 udev->waiting_global = true; 390 /* Try to wake up the unmap thread */ 391 wake_up(&unmap_wait); 392 return false; 393 } 394 395 static inline struct page * 396 tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi) 397 { 398 return radix_tree_lookup(&udev->data_blocks, dbi); 399 } 400 401 static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd) 402 { 403 kfree(tcmu_cmd->dbi); 404 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); 405 } 406 407 static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd) 408 { 409 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 410 size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE); 411 412 if (se_cmd->se_cmd_flags & SCF_BIDI) { 413 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents)); 414 data_length += round_up(se_cmd->t_bidi_data_sg->length, 415 DATA_BLOCK_SIZE); 416 } 417 418 return data_length; 419 } 420 421 static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd) 422 { 423 size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd); 424 425 return data_length / DATA_BLOCK_SIZE; 426 } 427 428 static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) 429 { 430 struct se_device *se_dev = se_cmd->se_dev; 431 struct tcmu_dev *udev = TCMU_DEV(se_dev); 432 struct tcmu_cmd *tcmu_cmd; 433 int cmd_id; 434 435 tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL); 436 if (!tcmu_cmd) 437 return NULL; 438 439 tcmu_cmd->se_cmd = se_cmd; 440 tcmu_cmd->tcmu_dev = udev; 441 if (udev->cmd_time_out) 442 tcmu_cmd->deadline = jiffies + 443 msecs_to_jiffies(udev->cmd_time_out); 444 445 tcmu_cmd_reset_dbi_cur(tcmu_cmd); 446 tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd); 447 tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t), 448 GFP_KERNEL); 449 if (!tcmu_cmd->dbi) { 450 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); 451 return NULL; 452 } 453 454 idr_preload(GFP_KERNEL); 455 spin_lock_irq(&udev->commands_lock); 456 cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0, 457 USHRT_MAX, GFP_NOWAIT); 458 spin_unlock_irq(&udev->commands_lock); 459 idr_preload_end(); 460 461 if (cmd_id < 0) { 462 tcmu_free_cmd(tcmu_cmd); 463 return NULL; 464 } 465 tcmu_cmd->cmd_id = cmd_id; 466 467 return tcmu_cmd; 468 } 469 470 static inline void 
tcmu_flush_dcache_range(void *vaddr, size_t size)
{
	unsigned long offset = offset_in_page(vaddr);

	size = round_up(size + offset, PAGE_SIZE);
	vaddr -= offset;

	while (size) {
		/* walk the range page by page so every page gets flushed */
		flush_dcache_page(virt_to_page(vaddr));
		vaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
	int diff = head - tail;

	if (diff >= 0)
		return diff;
	else
		return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* Keep 1 byte unused or we can't tell full from empty */
	return (size - spc_used(head, tail, size) - 1);
}

static inline size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

static inline void new_iov(struct iovec **iov, int *iov_cnt,
			   struct tcmu_dev *udev)
{
	struct iovec *iovec;

	if (*iov_cnt != 0)
		(*iov)++;
	(*iov_cnt)++;

	iovec = *iov;
	memset(iovec, 0, sizeof(struct iovec));
}

#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)

/* offset is relative to mb_addr */
static inline size_t get_block_offset_user(struct tcmu_dev *dev,
		int dbi, int remaining)
{
	return dev->data_off + dbi * DATA_BLOCK_SIZE +
		DATA_BLOCK_SIZE - remaining;
}

static inline size_t iov_tail(struct iovec *iov)
{
	return (size_t)iov->iov_base + iov->iov_len;
}

static int scatter_data_area(struct tcmu_dev *udev,
	struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
	unsigned int data_nents, struct iovec **iov,
	int *iov_cnt, bool copy_data)
{
	int i, dbi;
	int block_remaining = 0;
	void *from, *to = NULL;
	size_t copy_bytes, to_offset, offset;
	struct scatterlist *sg;
	struct page *page;

	for_each_sg(data_sg, sg, data_nents, i) {
		int sg_remaining = sg->length;
		from = kmap_atomic(sg_page(sg)) + sg->offset;
		while (sg_remaining > 0) {
			if (block_remaining == 0) {
				if (to)
					kunmap_atomic(to);

				block_remaining = DATA_BLOCK_SIZE;
				dbi = tcmu_cmd_get_dbi(tcmu_cmd);
				page = tcmu_get_block_page(udev, dbi);
				to = kmap_atomic(page);
			}

			copy_bytes = min_t(size_t, sg_remaining,
					block_remaining);
			to_offset = get_block_offset_user(udev, dbi,
					block_remaining);

			if (*iov_cnt != 0 &&
			    to_offset == iov_tail(*iov)) {
				(*iov)->iov_len += copy_bytes;
			} else {
				new_iov(iov, iov_cnt, udev);
				(*iov)->iov_base = (void __user *)to_offset;
				(*iov)->iov_len = copy_bytes;
			}
			if (copy_data) {
				offset = DATA_BLOCK_SIZE - block_remaining;
				memcpy(to + offset,
				       from + sg->length - sg_remaining,
				       copy_bytes);
				tcmu_flush_dcache_range(to, copy_bytes);
			}
			sg_remaining -= copy_bytes;
			block_remaining -= copy_bytes;
		}
		kunmap_atomic(from - sg->offset);
	}
	if (to)
		kunmap_atomic(to);

	return 0;
}

static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
			     bool bidi)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	int i, dbi;
	int block_remaining = 0;
	void *from = NULL, *to;
	size_t copy_bytes, offset;
	struct scatterlist *sg, *data_sg;
	struct page *page;
	unsigned int data_nents;
	uint32_t count = 0;

	if (!bidi) {
		data_sg = se_cmd->t_data_sg;
data_nents = se_cmd->t_data_nents; 609 } else { 610 611 /* 612 * For bidi case, the first count blocks are for Data-Out 613 * buffer blocks, and before gathering the Data-In buffer 614 * the Data-Out buffer blocks should be discarded. 615 */ 616 count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE); 617 618 data_sg = se_cmd->t_bidi_data_sg; 619 data_nents = se_cmd->t_bidi_data_nents; 620 } 621 622 tcmu_cmd_set_dbi_cur(cmd, count); 623 624 for_each_sg(data_sg, sg, data_nents, i) { 625 int sg_remaining = sg->length; 626 to = kmap_atomic(sg_page(sg)) + sg->offset; 627 while (sg_remaining > 0) { 628 if (block_remaining == 0) { 629 if (from) 630 kunmap_atomic(from); 631 632 block_remaining = DATA_BLOCK_SIZE; 633 dbi = tcmu_cmd_get_dbi(cmd); 634 page = tcmu_get_block_page(udev, dbi); 635 from = kmap_atomic(page); 636 } 637 copy_bytes = min_t(size_t, sg_remaining, 638 block_remaining); 639 offset = DATA_BLOCK_SIZE - block_remaining; 640 tcmu_flush_dcache_range(from, copy_bytes); 641 memcpy(to + sg->length - sg_remaining, from + offset, 642 copy_bytes); 643 644 sg_remaining -= copy_bytes; 645 block_remaining -= copy_bytes; 646 } 647 kunmap_atomic(to - sg->offset); 648 } 649 if (from) 650 kunmap_atomic(from); 651 } 652 653 static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh) 654 { 655 return DATA_BLOCK_SIZE * (thresh - bitmap_weight(bitmap, thresh)); 656 } 657 658 /* 659 * We can't queue a command until we have space available on the cmd ring *and* 660 * space available on the data area. 661 * 662 * Called with ring lock held. 663 */ 664 static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd, 665 size_t cmd_size, size_t data_needed) 666 { 667 struct tcmu_mailbox *mb = udev->mb_addr; 668 uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1) 669 / DATA_BLOCK_SIZE; 670 size_t space, cmd_needed; 671 u32 cmd_head; 672 673 tcmu_flush_dcache_range(mb, sizeof(*mb)); 674 675 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 676 677 /* 678 * If cmd end-of-ring space is too small then we need space for a NOP plus 679 * original cmd - cmds are internally contiguous. 680 */ 681 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size) 682 cmd_needed = cmd_size; 683 else 684 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size); 685 686 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size); 687 if (space < cmd_needed) { 688 pr_debug("no cmd space: %u %u %u\n", cmd_head, 689 udev->cmdr_last_cleaned, udev->cmdr_size); 690 return false; 691 } 692 693 /* try to check and get the data blocks as needed */ 694 space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh); 695 if (space < data_needed) { 696 unsigned long blocks_left = DATA_BLOCK_BITS - udev->dbi_thresh; 697 unsigned long grow; 698 699 if (blocks_left < blocks_needed) { 700 pr_debug("no data space: only %lu available, but ask for %zu\n", 701 blocks_left * DATA_BLOCK_SIZE, 702 data_needed); 703 return false; 704 } 705 706 /* Try to expand the thresh */ 707 if (!udev->dbi_thresh) { 708 /* From idle state */ 709 uint32_t init_thresh = DATA_BLOCK_INIT_BITS; 710 711 udev->dbi_thresh = max(blocks_needed, init_thresh); 712 } else { 713 /* 714 * Grow the data area by max(blocks needed, 715 * dbi_thresh / 2), but limited to the max 716 * DATA_BLOCK_BITS size. 
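			 * For example, with dbi_thresh at its initial 128
			 * blocks and 8 more blocks needed, the threshold
			 * grows by max(8, 64) = 64 to 192 blocks.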
			 */
			grow = max(blocks_needed, udev->dbi_thresh / 2);
			udev->dbi_thresh += grow;
			if (udev->dbi_thresh > DATA_BLOCK_BITS)
				udev->dbi_thresh = DATA_BLOCK_BITS;
		}
	}

	return tcmu_get_empty_blocks(udev, cmd);
}

static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
{
	return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
			sizeof(struct tcmu_cmd_entry));
}

static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd,
					   size_t base_command_size)
{
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t command_size;

	command_size = base_command_size +
		round_up(scsi_command_size(se_cmd->t_task_cdb),
				TCMU_OP_ALIGN_SIZE);

	WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));

	return command_size;
}

static sense_reason_t
tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t base_command_size, command_size;
	struct tcmu_mailbox *mb;
	struct tcmu_cmd_entry *entry;
	struct iovec *iov;
	int iov_cnt, ret;
	uint32_t cmd_head;
	uint64_t cdb_off;
	bool copy_to_data_area;
	size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 *
	 * We prepare as many iovs as possible here, because it is
	 * expensive to tell exactly how many regions are free in the
	 * bitmap and the global data pool; the size calculated here is
	 * only used for the space checks.
	 *
	 * The size is recalculated later from the number of iovs actually
	 * used, so that no command ring space is wasted.
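	 * (For example, a 64K write over 4K data blocks may need up to 16
	 * iovs if its blocks end up non-contiguous, so room for 16 iov
	 * entries is reserved even though contiguous blocks are later
	 * merged into a single iov.)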
778 */ 779 base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); 780 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); 781 782 mutex_lock(&udev->cmdr_lock); 783 784 mb = udev->mb_addr; 785 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 786 if ((command_size > (udev->cmdr_size / 2)) || 787 data_length > udev->data_size) { 788 pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu " 789 "cmd ring/data area\n", command_size, data_length, 790 udev->cmdr_size, udev->data_size); 791 mutex_unlock(&udev->cmdr_lock); 792 return TCM_INVALID_CDB_FIELD; 793 } 794 795 while (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) { 796 int ret; 797 DEFINE_WAIT(__wait); 798 799 prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE); 800 801 pr_debug("sleeping for ring space\n"); 802 mutex_unlock(&udev->cmdr_lock); 803 if (udev->cmd_time_out) 804 ret = schedule_timeout( 805 msecs_to_jiffies(udev->cmd_time_out)); 806 else 807 ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); 808 finish_wait(&udev->wait_cmdr, &__wait); 809 if (!ret) { 810 pr_warn("tcmu: command timed out\n"); 811 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 812 } 813 814 mutex_lock(&udev->cmdr_lock); 815 816 /* We dropped cmdr_lock, cmd_head is stale */ 817 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 818 } 819 820 /* Insert a PAD if end-of-ring space is too small */ 821 if (head_to_end(cmd_head, udev->cmdr_size) < command_size) { 822 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); 823 824 entry = (void *) mb + CMDR_OFF + cmd_head; 825 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD); 826 tcmu_hdr_set_len(&entry->hdr.len_op, pad_size); 827 entry->hdr.cmd_id = 0; /* not used for PAD */ 828 entry->hdr.kflags = 0; 829 entry->hdr.uflags = 0; 830 tcmu_flush_dcache_range(entry, sizeof(*entry)); 831 832 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); 833 tcmu_flush_dcache_range(mb, sizeof(*mb)); 834 835 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 836 WARN_ON(cmd_head != 0); 837 } 838 839 entry = (void *) mb + CMDR_OFF + cmd_head; 840 memset(entry, 0, command_size); 841 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); 842 entry->hdr.cmd_id = tcmu_cmd->cmd_id; 843 844 /* Handle allocating space from the data area */ 845 tcmu_cmd_reset_dbi_cur(tcmu_cmd); 846 iov = &entry->req.iov[0]; 847 iov_cnt = 0; 848 copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE 849 || se_cmd->se_cmd_flags & SCF_BIDI); 850 ret = scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg, 851 se_cmd->t_data_nents, &iov, &iov_cnt, 852 copy_to_data_area); 853 if (ret) { 854 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); 855 mutex_unlock(&udev->cmdr_lock); 856 857 pr_err("tcmu: alloc and scatter data failed\n"); 858 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 859 } 860 entry->req.iov_cnt = iov_cnt; 861 862 /* Handle BIDI commands */ 863 iov_cnt = 0; 864 if (se_cmd->se_cmd_flags & SCF_BIDI) { 865 iov++; 866 ret = scatter_data_area(udev, tcmu_cmd, 867 se_cmd->t_bidi_data_sg, 868 se_cmd->t_bidi_data_nents, 869 &iov, &iov_cnt, false); 870 if (ret) { 871 tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); 872 mutex_unlock(&udev->cmdr_lock); 873 874 pr_err("tcmu: alloc and scatter bidi data failed\n"); 875 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 876 } 877 } 878 entry->req.iov_bidi_cnt = iov_cnt; 879 880 /* 881 * Recalaulate the command's base size and size according 882 * to the actual needs 883 */ 884 base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt + 
885 entry->req.iov_bidi_cnt); 886 command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); 887 888 tcmu_hdr_set_len(&entry->hdr.len_op, command_size); 889 890 /* All offsets relative to mb_addr, not start of entry! */ 891 cdb_off = CMDR_OFF + cmd_head + base_command_size; 892 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); 893 entry->req.cdb_off = cdb_off; 894 tcmu_flush_dcache_range(entry, sizeof(*entry)); 895 896 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); 897 tcmu_flush_dcache_range(mb, sizeof(*mb)); 898 mutex_unlock(&udev->cmdr_lock); 899 900 /* TODO: only if FLUSH and FUA? */ 901 uio_event_notify(&udev->uio_info); 902 903 if (udev->cmd_time_out) 904 mod_timer(&udev->timeout, round_jiffies_up(jiffies + 905 msecs_to_jiffies(udev->cmd_time_out))); 906 907 return TCM_NO_SENSE; 908 } 909 910 static sense_reason_t 911 tcmu_queue_cmd(struct se_cmd *se_cmd) 912 { 913 struct se_device *se_dev = se_cmd->se_dev; 914 struct tcmu_dev *udev = TCMU_DEV(se_dev); 915 struct tcmu_cmd *tcmu_cmd; 916 sense_reason_t ret; 917 918 tcmu_cmd = tcmu_alloc_cmd(se_cmd); 919 if (!tcmu_cmd) 920 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 921 922 ret = tcmu_queue_cmd_ring(tcmu_cmd); 923 if (ret != TCM_NO_SENSE) { 924 pr_err("TCMU: Could not queue command\n"); 925 spin_lock_irq(&udev->commands_lock); 926 idr_remove(&udev->commands, tcmu_cmd->cmd_id); 927 spin_unlock_irq(&udev->commands_lock); 928 929 tcmu_free_cmd(tcmu_cmd); 930 } 931 932 return ret; 933 } 934 935 static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry) 936 { 937 struct se_cmd *se_cmd = cmd->se_cmd; 938 struct tcmu_dev *udev = cmd->tcmu_dev; 939 940 /* 941 * cmd has been completed already from timeout, just reclaim 942 * data area space and free cmd 943 */ 944 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) 945 goto out; 946 947 tcmu_cmd_reset_dbi_cur(cmd); 948 949 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { 950 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n", 951 cmd->se_cmd); 952 entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION; 953 } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { 954 transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer); 955 } else if (se_cmd->se_cmd_flags & SCF_BIDI) { 956 /* Get Data-In buffer before clean up */ 957 gather_data_area(udev, cmd, true); 958 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { 959 gather_data_area(udev, cmd, false); 960 } else if (se_cmd->data_direction == DMA_TO_DEVICE) { 961 /* TODO: */ 962 } else if (se_cmd->data_direction != DMA_NONE) { 963 pr_warn("TCMU: data direction was %d!\n", 964 se_cmd->data_direction); 965 } 966 967 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); 968 969 out: 970 cmd->se_cmd = NULL; 971 tcmu_cmd_free_data(cmd, cmd->dbi_cnt); 972 tcmu_free_cmd(cmd); 973 } 974 975 static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) 976 { 977 struct tcmu_mailbox *mb; 978 int handled = 0; 979 980 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { 981 pr_err("ring broken, not handling completions\n"); 982 return 0; 983 } 984 985 mb = udev->mb_addr; 986 tcmu_flush_dcache_range(mb, sizeof(*mb)); 987 988 while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) { 989 990 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; 991 struct tcmu_cmd *cmd; 992 993 tcmu_flush_dcache_range(entry, sizeof(*entry)); 994 995 if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) { 996 
UPDATE_HEAD(udev->cmdr_last_cleaned, 997 tcmu_hdr_get_len(entry->hdr.len_op), 998 udev->cmdr_size); 999 continue; 1000 } 1001 WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); 1002 1003 spin_lock(&udev->commands_lock); 1004 cmd = idr_remove(&udev->commands, entry->hdr.cmd_id); 1005 spin_unlock(&udev->commands_lock); 1006 1007 if (!cmd) { 1008 pr_err("cmd_id not found, ring is broken\n"); 1009 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); 1010 break; 1011 } 1012 1013 tcmu_handle_completion(cmd, entry); 1014 1015 UPDATE_HEAD(udev->cmdr_last_cleaned, 1016 tcmu_hdr_get_len(entry->hdr.len_op), 1017 udev->cmdr_size); 1018 1019 handled++; 1020 } 1021 1022 if (mb->cmd_tail == mb->cmd_head) 1023 del_timer(&udev->timeout); /* no more pending cmds */ 1024 1025 wake_up(&udev->wait_cmdr); 1026 1027 return handled; 1028 } 1029 1030 static int tcmu_check_expired_cmd(int id, void *p, void *data) 1031 { 1032 struct tcmu_cmd *cmd = p; 1033 1034 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) 1035 return 0; 1036 1037 if (!time_after(jiffies, cmd->deadline)) 1038 return 0; 1039 1040 set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); 1041 target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION); 1042 cmd->se_cmd = NULL; 1043 1044 return 0; 1045 } 1046 1047 static void tcmu_device_timedout(unsigned long data) 1048 { 1049 struct tcmu_dev *udev = (struct tcmu_dev *)data; 1050 unsigned long flags; 1051 1052 spin_lock_irqsave(&udev->commands_lock, flags); 1053 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); 1054 spin_unlock_irqrestore(&udev->commands_lock, flags); 1055 1056 /* Try to wake up the ummap thread */ 1057 wake_up(&unmap_wait); 1058 1059 /* 1060 * We don't need to wakeup threads on wait_cmdr since they have their 1061 * own timeout. 1062 */ 1063 } 1064 1065 static int tcmu_attach_hba(struct se_hba *hba, u32 host_id) 1066 { 1067 struct tcmu_hba *tcmu_hba; 1068 1069 tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL); 1070 if (!tcmu_hba) 1071 return -ENOMEM; 1072 1073 tcmu_hba->host_id = host_id; 1074 hba->hba_ptr = tcmu_hba; 1075 1076 return 0; 1077 } 1078 1079 static void tcmu_detach_hba(struct se_hba *hba) 1080 { 1081 kfree(hba->hba_ptr); 1082 hba->hba_ptr = NULL; 1083 } 1084 1085 static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) 1086 { 1087 struct tcmu_dev *udev; 1088 1089 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); 1090 if (!udev) 1091 return NULL; 1092 kref_init(&udev->kref); 1093 1094 udev->name = kstrdup(name, GFP_KERNEL); 1095 if (!udev->name) { 1096 kfree(udev); 1097 return NULL; 1098 } 1099 1100 udev->hba = hba; 1101 udev->cmd_time_out = TCMU_TIME_OUT; 1102 1103 init_waitqueue_head(&udev->wait_cmdr); 1104 mutex_init(&udev->cmdr_lock); 1105 1106 idr_init(&udev->commands); 1107 spin_lock_init(&udev->commands_lock); 1108 1109 setup_timer(&udev->timeout, tcmu_device_timedout, 1110 (unsigned long)udev); 1111 1112 init_waitqueue_head(&udev->nl_cmd_wq); 1113 spin_lock_init(&udev->nl_cmd_lock); 1114 1115 return &udev->se_dev; 1116 } 1117 1118 static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) 1119 { 1120 struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info); 1121 1122 mutex_lock(&tcmu_dev->cmdr_lock); 1123 tcmu_handle_completions(tcmu_dev); 1124 mutex_unlock(&tcmu_dev->cmdr_lock); 1125 1126 return 0; 1127 } 1128 1129 /* 1130 * mmap code from uio.c. Copied here because we want to hook mmap() 1131 * and this stuff must come along. 
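 * Hooking mmap() lets tcmu_vma_fault() resolve faults itself: command
 * ring pages come from the vmalloc()ed area and data area pages from
 * the per-device radix tree.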
 */
static int tcmu_find_mem_index(struct vm_area_struct *vma)
{
	struct tcmu_dev *udev = vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;

	if (vma->vm_pgoff < MAX_UIO_MAPS) {
		if (info->mem[vma->vm_pgoff].size == 0)
			return -1;
		return (int)vma->vm_pgoff;
	}
	return -1;
}

static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
{
	struct page *page;
	int ret;

	mutex_lock(&udev->cmdr_lock);
	page = tcmu_get_block_page(udev, dbi);
	if (likely(page)) {
		mutex_unlock(&udev->cmdr_lock);
		return page;
	}

	/*
	 * Normally we should not get here: it only happens when userspace
	 * has touched a block that is outside the data iov[] of any
	 * tcmu_cmd. In that case hand back a zeroed page.
	 */
	pr_warn("Block(%u) outside any cmd's iov[] has been touched!\n", dbi);
	pr_warn("This is most likely a userspace bug, please check the handler!\n");

	if (dbi >= udev->dbi_thresh) {
		/* Extend udev->dbi_thresh to dbi + 1 */
		udev->dbi_thresh = dbi + 1;
		udev->dbi_max = dbi;
	}

	page = radix_tree_lookup(&udev->data_blocks, dbi);
	if (!page) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page) {
			mutex_unlock(&udev->cmdr_lock);
			return NULL;
		}

		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
		if (ret) {
			mutex_unlock(&udev->cmdr_lock);
			__free_page(page);
			return NULL;
		}

		/*
		 * Since this case is rare in the page fault path, allow
		 * global_db_count to exceed TCMU_GLOBAL_MAX_BLOCKS here
		 * instead of failing the fault.
		 */
		atomic_inc(&global_db_count);
	}
	mutex_unlock(&udev->cmdr_lock);

	return page;
}

static int tcmu_vma_fault(struct vm_fault *vmf)
{
	struct tcmu_dev *udev = vmf->vma->vm_private_data;
	struct uio_info *info = &udev->uio_info;
	struct page *page;
	unsigned long offset;
	void *addr;

	int mi = tcmu_find_mem_index(vmf->vma);
	if (mi < 0)
		return VM_FAULT_SIGBUS;

	/*
	 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
	 * to use mem[N].
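	 * For tcmu only mem[0] is registered, so mi is normally 0 here.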
1215 */ 1216 offset = (vmf->pgoff - mi) << PAGE_SHIFT; 1217 1218 if (offset < udev->data_off) { 1219 /* For the vmalloc()ed cmd area pages */ 1220 addr = (void *)(unsigned long)info->mem[mi].addr + offset; 1221 page = vmalloc_to_page(addr); 1222 } else { 1223 uint32_t dbi; 1224 1225 /* For the dynamically growing data area pages */ 1226 dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE; 1227 page = tcmu_try_get_block_page(udev, dbi); 1228 if (!page) 1229 return VM_FAULT_NOPAGE; 1230 } 1231 1232 get_page(page); 1233 vmf->page = page; 1234 return 0; 1235 } 1236 1237 static const struct vm_operations_struct tcmu_vm_ops = { 1238 .fault = tcmu_vma_fault, 1239 }; 1240 1241 static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) 1242 { 1243 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1244 1245 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 1246 vma->vm_ops = &tcmu_vm_ops; 1247 1248 vma->vm_private_data = udev; 1249 1250 /* Ensure the mmap is exactly the right size */ 1251 if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT)) 1252 return -EINVAL; 1253 1254 return 0; 1255 } 1256 1257 static int tcmu_open(struct uio_info *info, struct inode *inode) 1258 { 1259 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1260 1261 /* O_EXCL not supported for char devs, so fake it? */ 1262 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags)) 1263 return -EBUSY; 1264 1265 udev->inode = inode; 1266 kref_get(&udev->kref); 1267 1268 pr_debug("open\n"); 1269 1270 return 0; 1271 } 1272 1273 static void tcmu_dev_call_rcu(struct rcu_head *p) 1274 { 1275 struct se_device *dev = container_of(p, struct se_device, rcu_head); 1276 struct tcmu_dev *udev = TCMU_DEV(dev); 1277 1278 kfree(udev->uio_info.name); 1279 kfree(udev->name); 1280 kfree(udev); 1281 } 1282 1283 static void tcmu_dev_kref_release(struct kref *kref) 1284 { 1285 struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); 1286 struct se_device *dev = &udev->se_dev; 1287 1288 call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); 1289 } 1290 1291 static int tcmu_release(struct uio_info *info, struct inode *inode) 1292 { 1293 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 1294 1295 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); 1296 1297 pr_debug("close\n"); 1298 /* release ref from open */ 1299 kref_put(&udev->kref, tcmu_dev_kref_release); 1300 return 0; 1301 } 1302 1303 static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) 1304 { 1305 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 1306 1307 if (!tcmu_kern_cmd_reply_supported) 1308 return; 1309 relock: 1310 spin_lock(&udev->nl_cmd_lock); 1311 1312 if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { 1313 spin_unlock(&udev->nl_cmd_lock); 1314 pr_debug("sleeping for open nl cmd\n"); 1315 wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC)); 1316 goto relock; 1317 } 1318 1319 memset(nl_cmd, 0, sizeof(*nl_cmd)); 1320 nl_cmd->cmd = cmd; 1321 init_completion(&nl_cmd->complete); 1322 1323 spin_unlock(&udev->nl_cmd_lock); 1324 } 1325 1326 static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) 1327 { 1328 struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 1329 int ret; 1330 DEFINE_WAIT(__wait); 1331 1332 if (!tcmu_kern_cmd_reply_supported) 1333 return 0; 1334 1335 pr_debug("sleeping for nl reply\n"); 1336 wait_for_completion(&nl_cmd->complete); 1337 1338 spin_lock(&udev->nl_cmd_lock); 1339 nl_cmd->cmd = TCMU_CMD_UNSPEC; 1340 ret = nl_cmd->status; 1341 nl_cmd->status = 0; 1342 spin_unlock(&udev->nl_cmd_lock); 1343 1344 
wake_up_all(&udev->nl_cmd_wq); 1345 1346 return ret;; 1347 } 1348 1349 static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd, 1350 int reconfig_attr, const void *reconfig_data) 1351 { 1352 struct sk_buff *skb; 1353 void *msg_header; 1354 int ret = -ENOMEM; 1355 1356 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 1357 if (!skb) 1358 return ret; 1359 1360 msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd); 1361 if (!msg_header) 1362 goto free_skb; 1363 1364 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name); 1365 if (ret < 0) 1366 goto free_skb; 1367 1368 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor); 1369 if (ret < 0) 1370 goto free_skb; 1371 1372 ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index); 1373 if (ret < 0) 1374 goto free_skb; 1375 1376 if (cmd == TCMU_CMD_RECONFIG_DEVICE) { 1377 switch (reconfig_attr) { 1378 case TCMU_ATTR_DEV_CFG: 1379 ret = nla_put_string(skb, reconfig_attr, reconfig_data); 1380 break; 1381 case TCMU_ATTR_DEV_SIZE: 1382 ret = nla_put_u64_64bit(skb, reconfig_attr, 1383 *((u64 *)reconfig_data), 1384 TCMU_ATTR_PAD); 1385 break; 1386 case TCMU_ATTR_WRITECACHE: 1387 ret = nla_put_u8(skb, reconfig_attr, 1388 *((u8 *)reconfig_data)); 1389 break; 1390 default: 1391 BUG(); 1392 } 1393 1394 if (ret < 0) 1395 goto free_skb; 1396 } 1397 1398 genlmsg_end(skb, msg_header); 1399 1400 tcmu_init_genl_cmd_reply(udev, cmd); 1401 1402 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0, 1403 TCMU_MCGRP_CONFIG, GFP_KERNEL); 1404 /* We don't care if no one is listening */ 1405 if (ret == -ESRCH) 1406 ret = 0; 1407 if (!ret) 1408 ret = tcmu_wait_genl_cmd_reply(udev); 1409 1410 return ret; 1411 free_skb: 1412 nlmsg_free(skb); 1413 return ret; 1414 } 1415 1416 static int tcmu_update_uio_info(struct tcmu_dev *udev) 1417 { 1418 struct tcmu_hba *hba = udev->hba->hba_ptr; 1419 struct uio_info *info; 1420 size_t size, used; 1421 char *str; 1422 1423 info = &udev->uio_info; 1424 size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name, 1425 udev->dev_config); 1426 size += 1; /* for \0 */ 1427 str = kmalloc(size, GFP_KERNEL); 1428 if (!str) 1429 return -ENOMEM; 1430 1431 used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name); 1432 if (udev->dev_config[0]) 1433 snprintf(str + used, size - used, "/%s", udev->dev_config); 1434 1435 /* If the old string exists, free it */ 1436 kfree(info->name); 1437 info->name = str; 1438 1439 return 0; 1440 } 1441 1442 static int tcmu_configure_device(struct se_device *dev) 1443 { 1444 struct tcmu_dev *udev = TCMU_DEV(dev); 1445 struct uio_info *info; 1446 struct tcmu_mailbox *mb; 1447 int ret = 0; 1448 1449 ret = tcmu_update_uio_info(udev); 1450 if (ret) 1451 return ret; 1452 1453 info = &udev->uio_info; 1454 1455 udev->mb_addr = vzalloc(CMDR_SIZE); 1456 if (!udev->mb_addr) { 1457 ret = -ENOMEM; 1458 goto err_vzalloc; 1459 } 1460 1461 /* mailbox fits in first part of CMDR space */ 1462 udev->cmdr_size = CMDR_SIZE - CMDR_OFF; 1463 udev->data_off = CMDR_SIZE; 1464 udev->data_size = DATA_SIZE; 1465 udev->dbi_thresh = 0; /* Default in Idle state */ 1466 udev->waiting_global = false; 1467 1468 /* Initialise the mailbox of the ring buffer */ 1469 mb = udev->mb_addr; 1470 mb->version = TCMU_MAILBOX_VERSION; 1471 mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC; 1472 mb->cmdr_off = CMDR_OFF; 1473 mb->cmdr_size = udev->cmdr_size; 1474 1475 WARN_ON(!PAGE_ALIGNED(udev->data_off)); 1476 WARN_ON(udev->data_size % PAGE_SIZE); 1477 WARN_ON(udev->data_size % 
DATA_BLOCK_SIZE); 1478 1479 INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL); 1480 1481 info->version = __stringify(TCMU_MAILBOX_VERSION); 1482 1483 info->mem[0].name = "tcm-user command & data buffer"; 1484 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr; 1485 info->mem[0].size = TCMU_RING_SIZE; 1486 info->mem[0].memtype = UIO_MEM_NONE; 1487 1488 info->irqcontrol = tcmu_irqcontrol; 1489 info->irq = UIO_IRQ_CUSTOM; 1490 1491 info->mmap = tcmu_mmap; 1492 info->open = tcmu_open; 1493 info->release = tcmu_release; 1494 1495 ret = uio_register_device(tcmu_root_device, info); 1496 if (ret) 1497 goto err_register; 1498 1499 /* User can set hw_block_size before enable the device */ 1500 if (dev->dev_attrib.hw_block_size == 0) 1501 dev->dev_attrib.hw_block_size = 512; 1502 /* Other attributes can be configured in userspace */ 1503 if (!dev->dev_attrib.hw_max_sectors) 1504 dev->dev_attrib.hw_max_sectors = 128; 1505 if (!dev->dev_attrib.emulate_write_cache) 1506 dev->dev_attrib.emulate_write_cache = 0; 1507 dev->dev_attrib.hw_queue_depth = 128; 1508 1509 /* 1510 * Get a ref incase userspace does a close on the uio device before 1511 * LIO has initiated tcmu_free_device. 1512 */ 1513 kref_get(&udev->kref); 1514 1515 ret = tcmu_netlink_event(udev, TCMU_CMD_ADDED_DEVICE, 0, NULL); 1516 if (ret) 1517 goto err_netlink; 1518 1519 mutex_lock(&root_udev_mutex); 1520 list_add(&udev->node, &root_udev); 1521 mutex_unlock(&root_udev_mutex); 1522 1523 return 0; 1524 1525 err_netlink: 1526 kref_put(&udev->kref, tcmu_dev_kref_release); 1527 uio_unregister_device(&udev->uio_info); 1528 err_register: 1529 vfree(udev->mb_addr); 1530 err_vzalloc: 1531 kfree(info->name); 1532 info->name = NULL; 1533 1534 return ret; 1535 } 1536 1537 static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd) 1538 { 1539 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 1540 kmem_cache_free(tcmu_cmd_cache, cmd); 1541 return 0; 1542 } 1543 return -EINVAL; 1544 } 1545 1546 static bool tcmu_dev_configured(struct tcmu_dev *udev) 1547 { 1548 return udev->uio_info.uio_dev ? 
true : false; 1549 } 1550 1551 static void tcmu_blocks_release(struct tcmu_dev *udev) 1552 { 1553 int i; 1554 struct page *page; 1555 1556 /* Try to release all block pages */ 1557 mutex_lock(&udev->cmdr_lock); 1558 for (i = 0; i <= udev->dbi_max; i++) { 1559 page = radix_tree_delete(&udev->data_blocks, i); 1560 if (page) { 1561 __free_page(page); 1562 atomic_dec(&global_db_count); 1563 } 1564 } 1565 mutex_unlock(&udev->cmdr_lock); 1566 } 1567 1568 static void tcmu_free_device(struct se_device *dev) 1569 { 1570 struct tcmu_dev *udev = TCMU_DEV(dev); 1571 1572 /* release ref from init */ 1573 kref_put(&udev->kref, tcmu_dev_kref_release); 1574 } 1575 1576 static void tcmu_destroy_device(struct se_device *dev) 1577 { 1578 struct tcmu_dev *udev = TCMU_DEV(dev); 1579 struct tcmu_cmd *cmd; 1580 bool all_expired = true; 1581 int i; 1582 1583 del_timer_sync(&udev->timeout); 1584 1585 mutex_lock(&root_udev_mutex); 1586 list_del(&udev->node); 1587 mutex_unlock(&root_udev_mutex); 1588 1589 vfree(udev->mb_addr); 1590 1591 /* Upper layer should drain all requests before calling this */ 1592 spin_lock_irq(&udev->commands_lock); 1593 idr_for_each_entry(&udev->commands, cmd, i) { 1594 if (tcmu_check_and_free_pending_cmd(cmd) != 0) 1595 all_expired = false; 1596 } 1597 idr_destroy(&udev->commands); 1598 spin_unlock_irq(&udev->commands_lock); 1599 WARN_ON(!all_expired); 1600 1601 tcmu_blocks_release(udev); 1602 1603 tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL); 1604 1605 uio_unregister_device(&udev->uio_info); 1606 1607 /* release ref from configure */ 1608 kref_put(&udev->kref, tcmu_dev_kref_release); 1609 } 1610 1611 enum { 1612 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors, 1613 Opt_err, 1614 }; 1615 1616 static match_table_t tokens = { 1617 {Opt_dev_config, "dev_config=%s"}, 1618 {Opt_dev_size, "dev_size=%u"}, 1619 {Opt_hw_block_size, "hw_block_size=%u"}, 1620 {Opt_hw_max_sectors, "hw_max_sectors=%u"}, 1621 {Opt_err, NULL} 1622 }; 1623 1624 static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib) 1625 { 1626 unsigned long tmp_ul; 1627 char *arg_p; 1628 int ret; 1629 1630 arg_p = match_strdup(arg); 1631 if (!arg_p) 1632 return -ENOMEM; 1633 1634 ret = kstrtoul(arg_p, 0, &tmp_ul); 1635 kfree(arg_p); 1636 if (ret < 0) { 1637 pr_err("kstrtoul() failed for dev attrib\n"); 1638 return ret; 1639 } 1640 if (!tmp_ul) { 1641 pr_err("dev attrib must be nonzero\n"); 1642 return -EINVAL; 1643 } 1644 *dev_attrib = tmp_ul; 1645 return 0; 1646 } 1647 1648 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, 1649 const char *page, ssize_t count) 1650 { 1651 struct tcmu_dev *udev = TCMU_DEV(dev); 1652 char *orig, *ptr, *opts, *arg_p; 1653 substring_t args[MAX_OPT_ARGS]; 1654 int ret = 0, token; 1655 1656 opts = kstrdup(page, GFP_KERNEL); 1657 if (!opts) 1658 return -ENOMEM; 1659 1660 orig = opts; 1661 1662 while ((ptr = strsep(&opts, ",\n")) != NULL) { 1663 if (!*ptr) 1664 continue; 1665 1666 token = match_token(ptr, tokens, args); 1667 switch (token) { 1668 case Opt_dev_config: 1669 if (match_strlcpy(udev->dev_config, &args[0], 1670 TCMU_CONFIG_LEN) == 0) { 1671 ret = -EINVAL; 1672 break; 1673 } 1674 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config); 1675 break; 1676 case Opt_dev_size: 1677 arg_p = match_strdup(&args[0]); 1678 if (!arg_p) { 1679 ret = -ENOMEM; 1680 break; 1681 } 1682 ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size); 1683 kfree(arg_p); 1684 if (ret < 0) 1685 pr_err("kstrtoul() failed for dev_size=\n"); 1686 break; 1687 case 
Opt_hw_block_size: 1688 ret = tcmu_set_dev_attrib(&args[0], 1689 &(dev->dev_attrib.hw_block_size)); 1690 break; 1691 case Opt_hw_max_sectors: 1692 ret = tcmu_set_dev_attrib(&args[0], 1693 &(dev->dev_attrib.hw_max_sectors)); 1694 break; 1695 default: 1696 break; 1697 } 1698 1699 if (ret) 1700 break; 1701 } 1702 1703 kfree(orig); 1704 return (!ret) ? count : ret; 1705 } 1706 1707 static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) 1708 { 1709 struct tcmu_dev *udev = TCMU_DEV(dev); 1710 ssize_t bl = 0; 1711 1712 bl = sprintf(b + bl, "Config: %s ", 1713 udev->dev_config[0] ? udev->dev_config : "NULL"); 1714 bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size); 1715 1716 return bl; 1717 } 1718 1719 static sector_t tcmu_get_blocks(struct se_device *dev) 1720 { 1721 struct tcmu_dev *udev = TCMU_DEV(dev); 1722 1723 return div_u64(udev->dev_size - dev->dev_attrib.block_size, 1724 dev->dev_attrib.block_size); 1725 } 1726 1727 static sense_reason_t 1728 tcmu_parse_cdb(struct se_cmd *cmd) 1729 { 1730 return passthrough_parse_cdb(cmd, tcmu_queue_cmd); 1731 } 1732 1733 static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page) 1734 { 1735 struct se_dev_attrib *da = container_of(to_config_group(item), 1736 struct se_dev_attrib, da_group); 1737 struct tcmu_dev *udev = container_of(da->da_dev, 1738 struct tcmu_dev, se_dev); 1739 1740 return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); 1741 } 1742 1743 static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page, 1744 size_t count) 1745 { 1746 struct se_dev_attrib *da = container_of(to_config_group(item), 1747 struct se_dev_attrib, da_group); 1748 struct tcmu_dev *udev = container_of(da->da_dev, 1749 struct tcmu_dev, se_dev); 1750 u32 val; 1751 int ret; 1752 1753 if (da->da_dev->export_count) { 1754 pr_err("Unable to set tcmu cmd_time_out while exports exist\n"); 1755 return -EINVAL; 1756 } 1757 1758 ret = kstrtou32(page, 0, &val); 1759 if (ret < 0) 1760 return ret; 1761 1762 udev->cmd_time_out = val * MSEC_PER_SEC; 1763 return count; 1764 } 1765 CONFIGFS_ATTR(tcmu_, cmd_time_out); 1766 1767 static ssize_t tcmu_dev_config_show(struct config_item *item, char *page) 1768 { 1769 struct se_dev_attrib *da = container_of(to_config_group(item), 1770 struct se_dev_attrib, da_group); 1771 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1772 1773 return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config); 1774 } 1775 1776 static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page, 1777 size_t count) 1778 { 1779 struct se_dev_attrib *da = container_of(to_config_group(item), 1780 struct se_dev_attrib, da_group); 1781 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1782 int ret, len; 1783 1784 len = strlen(page); 1785 if (!len || len > TCMU_CONFIG_LEN - 1) 1786 return -EINVAL; 1787 1788 /* Check if device has been configured before */ 1789 if (tcmu_dev_configured(udev)) { 1790 ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE, 1791 TCMU_ATTR_DEV_CFG, page); 1792 if (ret) { 1793 pr_err("Unable to reconfigure device\n"); 1794 return ret; 1795 } 1796 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); 1797 1798 ret = tcmu_update_uio_info(udev); 1799 if (ret) 1800 return ret; 1801 return count; 1802 } 1803 strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); 1804 1805 return count; 1806 } 1807 CONFIGFS_ATTR(tcmu_, dev_config); 1808 1809 static ssize_t tcmu_dev_size_show(struct config_item *item, char *page) 1810 { 1811 struct se_dev_attrib *da = 
container_of(to_config_group(item), 1812 struct se_dev_attrib, da_group); 1813 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1814 1815 return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size); 1816 } 1817 1818 static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page, 1819 size_t count) 1820 { 1821 struct se_dev_attrib *da = container_of(to_config_group(item), 1822 struct se_dev_attrib, da_group); 1823 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1824 u64 val; 1825 int ret; 1826 1827 ret = kstrtou64(page, 0, &val); 1828 if (ret < 0) 1829 return ret; 1830 1831 /* Check if device has been configured before */ 1832 if (tcmu_dev_configured(udev)) { 1833 ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE, 1834 TCMU_ATTR_DEV_SIZE, &val); 1835 if (ret) { 1836 pr_err("Unable to reconfigure device\n"); 1837 return ret; 1838 } 1839 } 1840 udev->dev_size = val; 1841 return count; 1842 } 1843 CONFIGFS_ATTR(tcmu_, dev_size); 1844 1845 static ssize_t tcmu_emulate_write_cache_show(struct config_item *item, 1846 char *page) 1847 { 1848 struct se_dev_attrib *da = container_of(to_config_group(item), 1849 struct se_dev_attrib, da_group); 1850 1851 return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache); 1852 } 1853 1854 static ssize_t tcmu_emulate_write_cache_store(struct config_item *item, 1855 const char *page, size_t count) 1856 { 1857 struct se_dev_attrib *da = container_of(to_config_group(item), 1858 struct se_dev_attrib, da_group); 1859 struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 1860 u8 val; 1861 int ret; 1862 1863 ret = kstrtou8(page, 0, &val); 1864 if (ret < 0) 1865 return ret; 1866 1867 /* Check if device has been configured before */ 1868 if (tcmu_dev_configured(udev)) { 1869 ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE, 1870 TCMU_ATTR_WRITECACHE, &val); 1871 if (ret) { 1872 pr_err("Unable to reconfigure device\n"); 1873 return ret; 1874 } 1875 } 1876 1877 da->emulate_write_cache = val; 1878 return count; 1879 } 1880 CONFIGFS_ATTR(tcmu_, emulate_write_cache); 1881 1882 static struct configfs_attribute *tcmu_attrib_attrs[] = { 1883 &tcmu_attr_cmd_time_out, 1884 &tcmu_attr_dev_config, 1885 &tcmu_attr_dev_size, 1886 &tcmu_attr_emulate_write_cache, 1887 NULL, 1888 }; 1889 1890 static struct configfs_attribute **tcmu_attrs; 1891 1892 static struct target_backend_ops tcmu_ops = { 1893 .name = "user", 1894 .owner = THIS_MODULE, 1895 .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, 1896 .attach_hba = tcmu_attach_hba, 1897 .detach_hba = tcmu_detach_hba, 1898 .alloc_device = tcmu_alloc_device, 1899 .configure_device = tcmu_configure_device, 1900 .destroy_device = tcmu_destroy_device, 1901 .free_device = tcmu_free_device, 1902 .parse_cdb = tcmu_parse_cdb, 1903 .set_configfs_dev_params = tcmu_set_configfs_dev_params, 1904 .show_configfs_dev_params = tcmu_show_configfs_dev_params, 1905 .get_device_type = sbc_get_device_type, 1906 .get_blocks = tcmu_get_blocks, 1907 .tb_dev_attrib_attrs = NULL, 1908 }; 1909 1910 static int unmap_thread_fn(void *data) 1911 { 1912 struct tcmu_dev *udev; 1913 loff_t off; 1914 uint32_t start, end, block; 1915 struct page *page; 1916 int i; 1917 1918 while (!kthread_should_stop()) { 1919 DEFINE_WAIT(__wait); 1920 1921 prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE); 1922 schedule(); 1923 finish_wait(&unmap_wait, &__wait); 1924 1925 if (kthread_should_stop()) 1926 break; 1927 1928 mutex_lock(&root_udev_mutex); 1929 list_for_each_entry(udev, &root_udev, node) { 1930 mutex_lock(&udev->cmdr_lock); 1931 1932 /* Try to complete the 
finished commands first */ 1933 tcmu_handle_completions(udev); 1934 1935 /* Skip the udevs waiting the global pool or in idle */ 1936 if (udev->waiting_global || !udev->dbi_thresh) { 1937 mutex_unlock(&udev->cmdr_lock); 1938 continue; 1939 } 1940 1941 end = udev->dbi_max + 1; 1942 block = find_last_bit(udev->data_bitmap, end); 1943 if (block == udev->dbi_max) { 1944 /* 1945 * The last bit is dbi_max, so there is 1946 * no need to shrink any blocks. 1947 */ 1948 mutex_unlock(&udev->cmdr_lock); 1949 continue; 1950 } else if (block == end) { 1951 /* The current udev will goto idle state */ 1952 udev->dbi_thresh = start = 0; 1953 udev->dbi_max = 0; 1954 } else { 1955 udev->dbi_thresh = start = block + 1; 1956 udev->dbi_max = block; 1957 } 1958 1959 /* Here will truncate the data area from off */ 1960 off = udev->data_off + start * DATA_BLOCK_SIZE; 1961 unmap_mapping_range(udev->inode->i_mapping, off, 0, 1); 1962 1963 /* Release the block pages */ 1964 for (i = start; i < end; i++) { 1965 page = radix_tree_delete(&udev->data_blocks, i); 1966 if (page) { 1967 __free_page(page); 1968 atomic_dec(&global_db_count); 1969 } 1970 } 1971 mutex_unlock(&udev->cmdr_lock); 1972 } 1973 1974 /* 1975 * Try to wake up the udevs who are waiting 1976 * for the global data pool. 1977 */ 1978 list_for_each_entry(udev, &root_udev, node) { 1979 if (udev->waiting_global) 1980 wake_up(&udev->wait_cmdr); 1981 } 1982 mutex_unlock(&root_udev_mutex); 1983 } 1984 1985 return 0; 1986 } 1987 1988 static int __init tcmu_module_init(void) 1989 { 1990 int ret, i, k, len = 0; 1991 1992 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); 1993 1994 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache", 1995 sizeof(struct tcmu_cmd), 1996 __alignof__(struct tcmu_cmd), 1997 0, NULL); 1998 if (!tcmu_cmd_cache) 1999 return -ENOMEM; 2000 2001 tcmu_root_device = root_device_register("tcm_user"); 2002 if (IS_ERR(tcmu_root_device)) { 2003 ret = PTR_ERR(tcmu_root_device); 2004 goto out_free_cache; 2005 } 2006 2007 ret = genl_register_family(&tcmu_genl_family); 2008 if (ret < 0) { 2009 goto out_unreg_device; 2010 } 2011 2012 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { 2013 len += sizeof(struct configfs_attribute *); 2014 } 2015 for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) { 2016 len += sizeof(struct configfs_attribute *); 2017 } 2018 len += sizeof(struct configfs_attribute *); 2019 2020 tcmu_attrs = kzalloc(len, GFP_KERNEL); 2021 if (!tcmu_attrs) { 2022 ret = -ENOMEM; 2023 goto out_unreg_genl; 2024 } 2025 2026 for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { 2027 tcmu_attrs[i] = passthrough_attrib_attrs[i]; 2028 } 2029 for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) { 2030 tcmu_attrs[i] = tcmu_attrib_attrs[k]; 2031 i++; 2032 } 2033 tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs; 2034 2035 ret = transport_backend_register(&tcmu_ops); 2036 if (ret) 2037 goto out_attrs; 2038 2039 init_waitqueue_head(&unmap_wait); 2040 unmap_thread = kthread_run(unmap_thread_fn, NULL, "tcmu_unmap"); 2041 if (IS_ERR(unmap_thread)) { 2042 ret = PTR_ERR(unmap_thread); 2043 goto out_unreg_transport; 2044 } 2045 2046 return 0; 2047 2048 out_unreg_transport: 2049 target_backend_unregister(&tcmu_ops); 2050 out_attrs: 2051 kfree(tcmu_attrs); 2052 out_unreg_genl: 2053 genl_unregister_family(&tcmu_genl_family); 2054 out_unreg_device: 2055 root_device_unregister(tcmu_root_device); 2056 out_free_cache: 2057 kmem_cache_destroy(tcmu_cmd_cache); 2058 2059 return ret; 2060 } 2061 2062 static void __exit tcmu_module_exit(void) 2063 { 2064 
kthread_stop(unmap_thread); 2065 target_backend_unregister(&tcmu_ops); 2066 kfree(tcmu_attrs); 2067 genl_unregister_family(&tcmu_genl_family); 2068 root_device_unregister(tcmu_root_device); 2069 kmem_cache_destroy(tcmu_cmd_cache); 2070 } 2071 2072 MODULE_DESCRIPTION("TCM USER subsystem plugin"); 2073 MODULE_AUTHOR("Shaohua Li <shli@kernel.org>"); 2074 MODULE_AUTHOR("Andy Grover <agrover@redhat.com>"); 2075 MODULE_LICENSE("GPL"); 2076 2077 module_init(tcmu_module_init); 2078 module_exit(tcmu_module_exit); 2079
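/*
 * Illustrative sketch only (not part of the driver): assuming the device
 * appears as /dev/uio0 and the mapping size has been read from sysfs, a
 * minimal userspace handler loop over this interface looks roughly like:
 *
 *	fd = open("/dev/uio0", O_RDWR);
 *	mb = mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	while (read(fd, &event, sizeof(event)) == sizeof(event)) {
 *		while (mb->cmd_tail != mb->cmd_head) {
 *			entry = (void *)mb + mb->cmdr_off + mb->cmd_tail;
 *			if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_CMD) {
 *				- CDB is at (void *)mb + entry->req.cdb_off
 *				- data is described by entry->req.iov[]; each
 *				  iov_base is an offset from the mapping start
 *				- fill in entry->rsp before advancing
 *			}
 *			mb->cmd_tail = (mb->cmd_tail +
 *				tcmu_hdr_get_len(entry->hdr.len_op)) %
 *				mb->cmdr_size;
 *		}
 *		write(fd, &event, sizeof(event));
 *	}
 */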