/*
 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
 * Copyright (C) 2014 Red Hat, Inc.
 * Copyright (C) 2015 Arrikto, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/parser.h>
#include <linux/vmalloc.h>
#include <linux/uio_driver.h>
#include <linux/stringify.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>

#include <linux/target_core_user.h>

/*
 * Define a shared-memory interface for LIO to pass SCSI commands and
 * data to userspace for processing. This allows backends that are too
 * complex for in-kernel support to be implemented in userspace instead.
 *
 * It uses the UIO framework to do a lot of the device-creation and
 * introspection work for us.
 *
 * See the .h file for how the ring is laid out. Note that while the
 * command ring is defined, the particulars of the data area are
 * not. Offset values in the command entry point to other locations
 * internal to the mmap()ed area. There is separate space outside the
 * command ring for data buffers. This leaves maximum flexibility for
 * moving buffer allocations, or even page flipping or other
 * allocation techniques, without altering the command ring layout.
 *
 * SECURITY:
 * The user process must be assumed to be malicious. There's no way to
 * prevent it breaking the command ring protocol if it wants, but in
 * order to prevent other issues we must only ever read *data* from
 * the shared memory area, not offsets or sizes. This applies to
 * command ring entries as well as the mailbox. Extra code needed for
 * this may have a 'UAM' comment.
 */

#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

#define CMDR_SIZE (16 * 4096)
#define DATA_SIZE (257 * 4096)

#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)

static struct device *tcmu_root_device;

struct tcmu_hba {
	u32 host_id;
};

#define TCMU_CONFIG_LEN 256

struct tcmu_dev {
	struct se_device se_dev;

	char *name;
	struct se_hba *hba;

#define TCMU_DEV_BIT_OPEN 0
#define TCMU_DEV_BIT_BROKEN 1
	unsigned long flags;

	struct uio_info uio_info;

	struct tcmu_mailbox *mb_addr;
	size_t dev_size;
	u32 cmdr_size;
	u32 cmdr_last_cleaned;
	/* Offset of data ring from start of mb */
	size_t data_off;
	size_t data_size;
	/* Ring head + tail values. */
	/* Must add data_off and mb_addr to get the address */
	size_t data_head;
	size_t data_tail;

	wait_queue_head_t wait_cmdr;
	/* TODO should this be a mutex? */
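	/*
	 * Taken by both the submission and completion paths; guards the
	 * command ring as well as the data_head/data_tail bookkeeping above.
	 */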
	spinlock_t cmdr_lock;

	struct idr commands;
	spinlock_t commands_lock;

	struct timer_list timeout;

	char dev_config[TCMU_CONFIG_LEN];
};

#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)

#define CMDR_OFF sizeof(struct tcmu_mailbox)

struct tcmu_cmd {
	struct se_cmd *se_cmd;
	struct tcmu_dev *tcmu_dev;

	uint16_t cmd_id;

	/*
	 * Can't use se_cmd->data_length when cleaning up expired cmds:
	 * if the cmd has already been completed, accessing se_cmd is off
	 * limits.
	 */
	size_t data_length;

	unsigned long deadline;

#define TCMU_CMD_BIT_EXPIRED 0
	unsigned long flags;
};

static struct kmem_cache *tcmu_cmd_cache;

/* multicast group */
enum tcmu_multicast_groups {
	TCMU_MCGRP_CONFIG,
};

static const struct genl_multicast_group tcmu_mcgrps[] = {
	[TCMU_MCGRP_CONFIG] = { .name = "config", },
};

/* Our generic netlink family */
static struct genl_family tcmu_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = 0,
	.name = "TCM-USER",
	.version = 1,
	.maxattr = TCMU_ATTR_MAX,
	.mcgrps = tcmu_mcgrps,
	.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
	.netnsok = true,
};

static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	int cmd_id;

	tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
	if (!tcmu_cmd)
		return NULL;

	tcmu_cmd->se_cmd = se_cmd;
	tcmu_cmd->tcmu_dev = udev;
	tcmu_cmd->data_length = se_cmd->data_length;

	if (se_cmd->se_cmd_flags & SCF_BIDI) {
		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
		tcmu_cmd->data_length += se_cmd->t_bidi_data_sg->length;
	}

	tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);

	idr_preload(GFP_KERNEL);
	spin_lock_irq(&udev->commands_lock);
	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
			   USHRT_MAX, GFP_NOWAIT);
	spin_unlock_irq(&udev->commands_lock);
	idr_preload_end();

	if (cmd_id < 0) {
		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
		return NULL;
	}
	tcmu_cmd->cmd_id = cmd_id;

	return tcmu_cmd;
}

static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
{
	unsigned long offset = offset_in_page(vaddr);

	size = round_up(size+offset, PAGE_SIZE);
	vaddr -= offset;

	while (size) {
		flush_dcache_page(virt_to_page(vaddr));
		vaddr += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}

/*
 * Some ring helper functions. We don't assume size is a power of 2 so
 * we can't use circ_buf.h.
 */
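/*
 * Worked example (illustrative): with size = 8, head = 2 and tail = 6,
 * spc_used() returns 8 + (2 - 6) = 4 and spc_free() returns 8 - 4 - 1 = 3.
 * The "- 1" keeps one byte unused so that head == tail always means empty,
 * never full.
 */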
static inline size_t spc_used(size_t head, size_t tail, size_t size)
{
	int diff = head - tail;

	if (diff >= 0)
		return diff;
	else
		return size + diff;
}

static inline size_t spc_free(size_t head, size_t tail, size_t size)
{
	/* Keep 1 byte unused or we can't tell full from empty */
	return (size - spc_used(head, tail, size) - 1);
}

static inline size_t head_to_end(size_t head, size_t size)
{
	return size - head;
}

#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)

static void alloc_and_scatter_data_area(struct tcmu_dev *udev,
	struct scatterlist *data_sg, unsigned int data_nents,
	struct iovec **iov, int *iov_cnt, bool copy_data)
{
	int i;
	void *from, *to;
	size_t copy_bytes;
	struct scatterlist *sg;

	for_each_sg(data_sg, sg, data_nents, i) {
		copy_bytes = min_t(size_t, sg->length,
				 head_to_end(udev->data_head, udev->data_size));
		from = kmap_atomic(sg_page(sg)) + sg->offset;
		to = (void *) udev->mb_addr + udev->data_off + udev->data_head;

		if (copy_data) {
			memcpy(to, from, copy_bytes);
			tcmu_flush_dcache_range(to, copy_bytes);
		}

		/* Even iov_base is relative to mb_addr */
		(*iov)->iov_len = copy_bytes;
		(*iov)->iov_base = (void __user *) udev->data_off +
						udev->data_head;
		(*iov_cnt)++;
		(*iov)++;

		UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);

		/* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
		if (sg->length != copy_bytes) {
			void *from_skip = from + copy_bytes;

			copy_bytes = sg->length - copy_bytes;

			(*iov)->iov_len = copy_bytes;
			(*iov)->iov_base = (void __user *) udev->data_off +
							udev->data_head;

			if (copy_data) {
				to = (void *) udev->mb_addr +
					udev->data_off + udev->data_head;
				memcpy(to, from_skip, copy_bytes);
				tcmu_flush_dcache_range(to, copy_bytes);
			}

			(*iov_cnt)++;
			(*iov)++;

			UPDATE_HEAD(udev->data_head,
				copy_bytes, udev->data_size);
		}

		kunmap_atomic(from - sg->offset);
	}
}

static void gather_and_free_data_area(struct tcmu_dev *udev,
	struct scatterlist *data_sg, unsigned int data_nents)
{
	int i;
	void *from, *to;
	size_t copy_bytes;
	struct scatterlist *sg;

	/* It'd be easier to look at entry's iovec again, but UAM */
	for_each_sg(data_sg, sg, data_nents, i) {
		copy_bytes = min_t(size_t, sg->length,
				 head_to_end(udev->data_tail, udev->data_size));

		to = kmap_atomic(sg_page(sg)) + sg->offset;
		WARN_ON(sg->length + sg->offset > PAGE_SIZE);
		from = (void *) udev->mb_addr +
			udev->data_off + udev->data_tail;
		tcmu_flush_dcache_range(from, copy_bytes);
		memcpy(to, from, copy_bytes);

		UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);

		/* Uh oh, wrapped the data buffer for this sg's data */
		if (sg->length != copy_bytes) {
			void *to_skip = to + copy_bytes;

			from = (void *) udev->mb_addr +
				udev->data_off + udev->data_tail;
			WARN_ON(udev->data_tail);
			copy_bytes = sg->length - copy_bytes;
			tcmu_flush_dcache_range(from, copy_bytes);
			memcpy(to_skip, from, copy_bytes);

			UPDATE_HEAD(udev->data_tail,
				copy_bytes, udev->data_size);
		}
		kunmap_atomic(to - sg->offset);
	}
}

/*
 * We can't queue a command until we have space available on the cmd ring *and*
 * space available on the data ring.
 *
 * Called with ring lock held.
 */
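/*
 * Worked example (illustrative): with cmdr_size = 1000, cmd_head = 990 and a
 * 64-byte command, only head_to_end() = 10 bytes remain before the end of the
 * ring, so we must budget 10 bytes of PAD plus the command after the wrap:
 * cmd_needed = 64 + 10 = 74.
 */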
static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed)
{
	struct tcmu_mailbox *mb = udev->mb_addr;
	size_t space;
	u32 cmd_head;
	size_t cmd_needed;

	tcmu_flush_dcache_range(mb, sizeof(*mb));

	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/*
	 * If cmd end-of-ring space is too small then we need space for a PAD
	 * plus the original cmd - cmds are internally contiguous.
	 */
	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
	if (space < cmd_needed) {
		pr_debug("no cmd space: %u %u %u\n", cmd_head,
			 udev->cmdr_last_cleaned, udev->cmdr_size);
		return false;
	}

	space = spc_free(udev->data_head, udev->data_tail, udev->data_size);
	if (space < data_needed) {
		pr_debug("no data space: %zu %zu %zu\n", udev->data_head,
			 udev->data_tail, udev->data_size);
		return false;
	}

	return true;
}

static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
	struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
	struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
	size_t base_command_size, command_size;
	struct tcmu_mailbox *mb;
	struct tcmu_cmd_entry *entry;
	struct iovec *iov;
	int iov_cnt;
	uint32_t cmd_head;
	uint64_t cdb_off;
	bool copy_to_data_area;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
		return -EINVAL;

	/*
	 * Must be a certain minimum size for response sense info, but
	 * also may be larger if the iov array is large.
	 *
	 * iovs = sgl_nents+1, for the end-of-ring case, plus another 1
	 * because the size is computed as offsetof() one past the last
	 * element.
	 */
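	/*
	 * Illustrative arithmetic (assumes a 64-bit build, where struct iovec
	 * is 16 bytes): a command with t_data_nents = 4 and
	 * t_bidi_data_nents = 0 reserves 4 + 0 + 2 = 6 iovec slots, i.e.
	 * offsetof(struct tcmu_cmd_entry, req.iov[6]) bytes of header.
	 */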
398 */ 399 base_command_size = max(offsetof(struct tcmu_cmd_entry, 400 req.iov[se_cmd->t_bidi_data_nents + 401 se_cmd->t_data_nents + 2]), 402 sizeof(struct tcmu_cmd_entry)); 403 command_size = base_command_size 404 + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE); 405 406 WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1)); 407 408 spin_lock_irq(&udev->cmdr_lock); 409 410 mb = udev->mb_addr; 411 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 412 if ((command_size > (udev->cmdr_size / 2)) 413 || tcmu_cmd->data_length > (udev->data_size - 1)) 414 pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu " 415 "cmd/data ring buffers\n", command_size, tcmu_cmd->data_length, 416 udev->cmdr_size, udev->data_size); 417 418 while (!is_ring_space_avail(udev, command_size, tcmu_cmd->data_length)) { 419 int ret; 420 DEFINE_WAIT(__wait); 421 422 prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE); 423 424 pr_debug("sleeping for ring space\n"); 425 spin_unlock_irq(&udev->cmdr_lock); 426 ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); 427 finish_wait(&udev->wait_cmdr, &__wait); 428 if (!ret) { 429 pr_warn("tcmu: command timed out\n"); 430 return -ETIMEDOUT; 431 } 432 433 spin_lock_irq(&udev->cmdr_lock); 434 435 /* We dropped cmdr_lock, cmd_head is stale */ 436 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 437 } 438 439 /* Insert a PAD if end-of-ring space is too small */ 440 if (head_to_end(cmd_head, udev->cmdr_size) < command_size) { 441 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); 442 443 entry = (void *) mb + CMDR_OFF + cmd_head; 444 tcmu_flush_dcache_range(entry, sizeof(*entry)); 445 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD); 446 tcmu_hdr_set_len(&entry->hdr.len_op, pad_size); 447 entry->hdr.cmd_id = 0; /* not used for PAD */ 448 entry->hdr.kflags = 0; 449 entry->hdr.uflags = 0; 450 451 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); 452 453 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 454 WARN_ON(cmd_head != 0); 455 } 456 457 entry = (void *) mb + CMDR_OFF + cmd_head; 458 tcmu_flush_dcache_range(entry, sizeof(*entry)); 459 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); 460 tcmu_hdr_set_len(&entry->hdr.len_op, command_size); 461 entry->hdr.cmd_id = tcmu_cmd->cmd_id; 462 entry->hdr.kflags = 0; 463 entry->hdr.uflags = 0; 464 465 /* 466 * Fix up iovecs, and handle if allocation in data ring wrapped. 467 */ 468 iov = &entry->req.iov[0]; 469 iov_cnt = 0; 470 copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE 471 || se_cmd->se_cmd_flags & SCF_BIDI); 472 alloc_and_scatter_data_area(udev, se_cmd->t_data_sg, 473 se_cmd->t_data_nents, &iov, &iov_cnt, copy_to_data_area); 474 entry->req.iov_cnt = iov_cnt; 475 entry->req.iov_dif_cnt = 0; 476 477 /* Handle BIDI commands */ 478 iov_cnt = 0; 479 alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg, 480 se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false); 481 entry->req.iov_bidi_cnt = iov_cnt; 482 483 /* All offsets relative to mb_addr, not start of entry! */ 484 cdb_off = CMDR_OFF + cmd_head + base_command_size; 485 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); 486 entry->req.cdb_off = cdb_off; 487 tcmu_flush_dcache_range(entry, sizeof(*entry)); 488 489 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); 490 tcmu_flush_dcache_range(mb, sizeof(*mb)); 491 492 spin_unlock_irq(&udev->cmdr_lock); 493 494 /* TODO: only if FLUSH and FUA? 
	uio_event_notify(&udev->uio_info);

	mod_timer(&udev->timeout,
		  round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));

	return 0;
}

static int tcmu_queue_cmd(struct se_cmd *se_cmd)
{
	struct se_device *se_dev = se_cmd->se_dev;
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	struct tcmu_cmd *tcmu_cmd;
	int ret;

	tcmu_cmd = tcmu_alloc_cmd(se_cmd);
	if (!tcmu_cmd)
		return -ENOMEM;

	ret = tcmu_queue_cmd_ring(tcmu_cmd);
	if (ret < 0) {
		pr_err("TCMU: Could not queue command\n");
		spin_lock_irq(&udev->commands_lock);
		idr_remove(&udev->commands, tcmu_cmd->cmd_id);
		spin_unlock_irq(&udev->commands_lock);

		kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
	}

	return ret;
}

static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	struct tcmu_dev *udev = cmd->tcmu_dev;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
		/*
		 * cmd has been completed already from timeout, just reclaim
		 * data ring space
		 */
		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
		return;
	}

	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
			cmd->se_cmd);
		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
	} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
		memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
		       se_cmd->scsi_sense_length);

		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
		/* Discard data_out buffer */
		UPDATE_HEAD(udev->data_tail,
			(size_t)se_cmd->t_data_sg->length, udev->data_size);

		/* Get Data-In buffer */
		gather_and_free_data_area(udev,
			se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents);
	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		gather_and_free_data_area(udev,
			se_cmd->t_data_sg, se_cmd->t_data_nents);
	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
		UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
	} else if (se_cmd->data_direction != DMA_NONE) {
		pr_warn("TCMU: data direction was %d!\n",
			se_cmd->data_direction);
	}

	target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
	cmd->se_cmd = NULL;

	kmem_cache_free(tcmu_cmd_cache, cmd);
}
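/*
 * Walk the command ring from cmdr_last_cleaned to the tail userspace has
 * published in mb->cmd_tail, skipping PAD entries and completing each CMD
 * entry found. Returns the number of commands completed.
 */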
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
	struct tcmu_mailbox *mb;
	unsigned long flags;
	int handled = 0;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		pr_err("ring broken, not handling completions\n");
		return 0;
	}

	spin_lock_irqsave(&udev->cmdr_lock, flags);

	mb = udev->mb_addr;
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {

		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
		struct tcmu_cmd *cmd;

		tcmu_flush_dcache_range(entry, sizeof(*entry));

		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
			UPDATE_HEAD(udev->cmdr_last_cleaned,
				tcmu_hdr_get_len(entry->hdr.len_op),
				udev->cmdr_size);
			continue;
		}
		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

		spin_lock(&udev->commands_lock);
		cmd = idr_find(&udev->commands, entry->hdr.cmd_id);
		if (cmd)
			idr_remove(&udev->commands, cmd->cmd_id);
		spin_unlock(&udev->commands_lock);

		if (!cmd) {
			pr_err("cmd_id not found, ring is broken\n");
			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
			break;
		}

		tcmu_handle_completion(cmd, entry);

		UPDATE_HEAD(udev->cmdr_last_cleaned,
			tcmu_hdr_get_len(entry->hdr.len_op),
			udev->cmdr_size);

		handled++;
	}

	if (mb->cmd_tail == mb->cmd_head)
		del_timer(&udev->timeout); /* no more pending cmds */

	spin_unlock_irqrestore(&udev->cmdr_lock, flags);

	wake_up(&udev->wait_cmdr);

	return handled;
}

static int tcmu_check_expired_cmd(int id, void *p, void *data)
{
	struct tcmu_cmd *cmd = p;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		return 0;

	if (!time_after(jiffies, cmd->deadline))
		return 0;

	set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
	target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
	cmd->se_cmd = NULL;

	kmem_cache_free(tcmu_cmd_cache, cmd);

	return 0;
}

static void tcmu_device_timedout(unsigned long data)
{
	struct tcmu_dev *udev = (struct tcmu_dev *)data;
	unsigned long flags;
	int handled;

	handled = tcmu_handle_completions(udev);

	pr_warn("%d completions handled from timeout\n", handled);

	spin_lock_irqsave(&udev->commands_lock, flags);
	idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
	spin_unlock_irqrestore(&udev->commands_lock, flags);

	/*
	 * We don't need to wakeup threads on wait_cmdr since they have their
	 * own timeout.
	 */
}

static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct tcmu_hba *tcmu_hba;

	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
	if (!tcmu_hba)
		return -ENOMEM;

	tcmu_hba->host_id = host_id;
	hba->hba_ptr = tcmu_hba;

	return 0;
}

static void tcmu_detach_hba(struct se_hba *hba)
{
	kfree(hba->hba_ptr);
	hba->hba_ptr = NULL;
}

static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
	struct tcmu_dev *udev;

	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
	if (!udev)
		return NULL;

	udev->name = kstrdup(name, GFP_KERNEL);
	if (!udev->name) {
		kfree(udev);
		return NULL;
	}

	udev->hba = hba;

	init_waitqueue_head(&udev->wait_cmdr);
	spin_lock_init(&udev->cmdr_lock);

	idr_init(&udev->commands);
	spin_lock_init(&udev->commands_lock);

	setup_timer(&udev->timeout, tcmu_device_timedout,
		    (unsigned long)udev);

	return &udev->se_dev;
}

static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
	struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info);

	tcmu_handle_completions(tcmu_dev);

	return 0;
}

/*
 * mmap code from uio.c. Copied here because we want to hook mmap()
 * and this stuff must come along.
 */
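/*
 * Userspace side (illustrative sketch, not part of this driver): the whole
 * ring is mapped in one go through the uio device, e.g.
 *
 *	ring = mmap(NULL, ring_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    uio_fd, 0);
 *
 * where ring_size is read from /sys/class/uio/uioN/maps/map0/size and must
 * equal TCMU_RING_SIZE, or tcmu_mmap() below rejects the mapping.
 */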
734 */ 735 static int tcmu_find_mem_index(struct vm_area_struct *vma) 736 { 737 struct tcmu_dev *udev = vma->vm_private_data; 738 struct uio_info *info = &udev->uio_info; 739 740 if (vma->vm_pgoff < MAX_UIO_MAPS) { 741 if (info->mem[vma->vm_pgoff].size == 0) 742 return -1; 743 return (int)vma->vm_pgoff; 744 } 745 return -1; 746 } 747 748 static int tcmu_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 749 { 750 struct tcmu_dev *udev = vma->vm_private_data; 751 struct uio_info *info = &udev->uio_info; 752 struct page *page; 753 unsigned long offset; 754 void *addr; 755 756 int mi = tcmu_find_mem_index(vma); 757 if (mi < 0) 758 return VM_FAULT_SIGBUS; 759 760 /* 761 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE 762 * to use mem[N]. 763 */ 764 offset = (vmf->pgoff - mi) << PAGE_SHIFT; 765 766 addr = (void *)(unsigned long)info->mem[mi].addr + offset; 767 if (info->mem[mi].memtype == UIO_MEM_LOGICAL) 768 page = virt_to_page(addr); 769 else 770 page = vmalloc_to_page(addr); 771 get_page(page); 772 vmf->page = page; 773 return 0; 774 } 775 776 static const struct vm_operations_struct tcmu_vm_ops = { 777 .fault = tcmu_vma_fault, 778 }; 779 780 static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) 781 { 782 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 783 784 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 785 vma->vm_ops = &tcmu_vm_ops; 786 787 vma->vm_private_data = udev; 788 789 /* Ensure the mmap is exactly the right size */ 790 if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT)) 791 return -EINVAL; 792 793 return 0; 794 } 795 796 static int tcmu_open(struct uio_info *info, struct inode *inode) 797 { 798 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 799 800 /* O_EXCL not supported for char devs, so fake it? 
static int tcmu_open(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	/* O_EXCL not supported for char devs, so fake it? */
	if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
		return -EBUSY;

	pr_debug("open\n");

	return 0;
}

static int tcmu_release(struct uio_info *info, struct inode *inode)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

	clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);

	pr_debug("close\n");

	return 0;
}

static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor)
{
	struct sk_buff *skb;
	void *msg_header;
	int ret = -ENOMEM;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return ret;

	msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
	if (!msg_header)
		goto free_skb;

	ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name);
	if (ret < 0)
		goto free_skb;

	ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor);
	if (ret < 0)
		goto free_skb;

	genlmsg_end(skb, msg_header);

	ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
				      TCMU_MCGRP_CONFIG, GFP_KERNEL);

	/* We don't care if no one is listening */
	if (ret == -ESRCH)
		ret = 0;

	return ret;
free_skb:
	nlmsg_free(skb);
	return ret;
}

static int tcmu_configure_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	struct tcmu_hba *hba = udev->hba->hba_ptr;
	struct uio_info *info;
	struct tcmu_mailbox *mb;
	size_t size;
	size_t used;
	int ret = 0;
	char *str;

	info = &udev->uio_info;

	size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
			udev->dev_config);
	size += 1; /* for \0 */
	str = kmalloc(size, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);

	if (udev->dev_config[0])
		snprintf(str + used, size - used, "/%s", udev->dev_config);

	info->name = str;

	udev->mb_addr = vzalloc(TCMU_RING_SIZE);
	if (!udev->mb_addr) {
		ret = -ENOMEM;
		goto err_vzalloc;
	}

	/* mailbox fits in first part of CMDR space */
	udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
	udev->data_off = CMDR_SIZE;
	udev->data_size = TCMU_RING_SIZE - CMDR_SIZE;

	mb = udev->mb_addr;
	mb->version = TCMU_MAILBOX_VERSION;
	mb->cmdr_off = CMDR_OFF;
	mb->cmdr_size = udev->cmdr_size;

	WARN_ON(!PAGE_ALIGNED(udev->data_off));
	WARN_ON(udev->data_size % PAGE_SIZE);

	info->version = __stringify(TCMU_MAILBOX_VERSION);

	info->mem[0].name = "tcm-user command & data buffer";
	info->mem[0].addr = (phys_addr_t) udev->mb_addr;
	info->mem[0].size = TCMU_RING_SIZE;
	info->mem[0].memtype = UIO_MEM_VIRTUAL;

	info->irqcontrol = tcmu_irqcontrol;
	info->irq = UIO_IRQ_CUSTOM;

	info->mmap = tcmu_mmap;
	info->open = tcmu_open;
	info->release = tcmu_release;

	ret = uio_register_device(tcmu_root_device, info);
	if (ret)
		goto err_register;

	/* User can set hw_block_size before enabling the device */
	if (dev->dev_attrib.hw_block_size == 0)
		dev->dev_attrib.hw_block_size = 512;
	/* Other attributes can be configured in userspace */
	dev->dev_attrib.hw_max_sectors = 128;
	dev->dev_attrib.hw_queue_depth = 128;

	ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
				 udev->uio_info.uio_dev->minor);
	if (ret)
		goto err_netlink;

	return 0;

err_netlink:
	uio_unregister_device(&udev->uio_info);
err_register:
	vfree(udev->mb_addr);
err_vzalloc:
	kfree(info->name);

	return ret;
}
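/*
 * Note (illustrative): tcmu_netlink_event() above broadcasts on the "config"
 * multicast group of the "TCM-USER" generic netlink family, carrying
 * TCMU_ATTR_DEVICE (the uio_info name) and TCMU_ATTR_MINOR. A userspace
 * daemon subscribed to that group learns when to open /dev/uio<minor>.
 */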
static int tcmu_check_pending_cmd(int id, void *p, void *data)
{
	struct tcmu_cmd *cmd = p;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		return 0;
	return -EINVAL;
}

static void tcmu_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct tcmu_dev *udev = TCMU_DEV(dev);

	kfree(udev);
}

static void tcmu_free_device(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	int i;

	del_timer_sync(&udev->timeout);

	vfree(udev->mb_addr);

	/* Upper layer should drain all requests before calling this */
	spin_lock_irq(&udev->commands_lock);
	i = idr_for_each(&udev->commands, tcmu_check_pending_cmd, NULL);
	idr_destroy(&udev->commands);
	spin_unlock_irq(&udev->commands_lock);
	WARN_ON(i);

	/* Device was configured */
	if (udev->uio_info.uio_dev) {
		tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
				   udev->uio_info.uio_dev->minor);

		uio_unregister_device(&udev->uio_info);
		kfree(udev->uio_info.name);
		kfree(udev->name);
	}
	call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}

enum {
	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
};

static match_table_t tokens = {
	{Opt_dev_config, "dev_config=%s"},
	{Opt_dev_size, "dev_size=%u"},
	{Opt_hw_block_size, "hw_block_size=%u"},
	{Opt_err, NULL}
};

static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	char *orig, *ptr, *opts, *arg_p;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_ul;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_dev_config:
			if (match_strlcpy(udev->dev_config, &args[0],
					  TCMU_CONFIG_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
			break;
		case Opt_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
			kfree(arg_p);
			if (ret < 0)
				pr_err("kstrtoul() failed for dev_size=\n");
			break;
		case Opt_hw_block_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_ul);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for hw_block_size=\n");
				break;
			}
			if (!tmp_ul) {
				pr_err("hw_block_size must be nonzero\n");
				break;
			}
			dev->dev_attrib.hw_block_size = tmp_ul;
			break;
		default:
			break;
		}
	}

	kfree(orig);
	return (!ret) ? count : ret;
}
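/*
 * Example usage (illustrative; assumes the usual LIO configfs layout where
 * these params are written to the backstore's "control" attribute):
 *
 *	echo "dev_config=foo/bar,dev_size=1073741824" > control
 *
 * Options are comma- or newline-separated, matching the strsep() above.
 */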
static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "Config: %s ",
		     udev->dev_config[0] ? udev->dev_config : "NULL");
	bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);

	return bl;
}

static sector_t tcmu_get_blocks(struct se_device *dev)
{
	struct tcmu_dev *udev = TCMU_DEV(dev);

	return div_u64(udev->dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}

static sense_reason_t
tcmu_pass_op(struct se_cmd *se_cmd)
{
	int ret = tcmu_queue_cmd(se_cmd);

	if (ret != 0)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	else
		return TCM_NO_SENSE;
}

static sense_reason_t
tcmu_parse_cdb(struct se_cmd *cmd)
{
	return passthrough_parse_cdb(cmd, tcmu_pass_op);
}

static const struct target_backend_ops tcmu_ops = {
	.name			= "user",
	.owner			= THIS_MODULE,
	.transport_flags	= TRANSPORT_FLAG_PASSTHROUGH,
	.attach_hba		= tcmu_attach_hba,
	.detach_hba		= tcmu_detach_hba,
	.alloc_device		= tcmu_alloc_device,
	.configure_device	= tcmu_configure_device,
	.free_device		= tcmu_free_device,
	.parse_cdb		= tcmu_parse_cdb,
	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= tcmu_get_blocks,
	.tb_dev_attrib_attrs	= passthrough_attrib_attrs,
};

static int __init tcmu_module_init(void)
{
	int ret;

	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);

	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
				sizeof(struct tcmu_cmd),
				__alignof__(struct tcmu_cmd),
				0, NULL);
	if (!tcmu_cmd_cache)
		return -ENOMEM;

	tcmu_root_device = root_device_register("tcm_user");
	if (IS_ERR(tcmu_root_device)) {
		ret = PTR_ERR(tcmu_root_device);
		goto out_free_cache;
	}

	ret = genl_register_family(&tcmu_genl_family);
	if (ret < 0)
		goto out_unreg_device;

	ret = transport_backend_register(&tcmu_ops);
	if (ret)
		goto out_unreg_genl;

	return 0;

out_unreg_genl:
	genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
	root_device_unregister(tcmu_root_device);
out_free_cache:
	kmem_cache_destroy(tcmu_cmd_cache);

	return ret;
}

static void __exit tcmu_module_exit(void)
{
	target_backend_unregister(&tcmu_ops);
	genl_unregister_family(&tcmu_genl_family);
	root_device_unregister(tcmu_root_device);
	kmem_cache_destroy(tcmu_cmd_cache);
}

MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
MODULE_LICENSE("GPL");

module_init(tcmu_module_init);
module_exit(tcmu_module_exit);