1 /* 2 * Copyright (C) 2013 Shaohua Li <shli@kernel.org> 3 * Copyright (C) 2014 Red Hat, Inc. 4 * Copyright (C) 2015 Arrikto, Inc. 5 * 6 * This program is free software; you can redistribute it and/or modify it 7 * under the terms and conditions of the GNU General Public License, 8 * version 2, as published by the Free Software Foundation. 9 * 10 * This program is distributed in the hope it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 * more details. 14 * 15 * You should have received a copy of the GNU General Public License along with 16 * this program; if not, write to the Free Software Foundation, Inc., 17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 */ 19 20 #include <linux/spinlock.h> 21 #include <linux/module.h> 22 #include <linux/idr.h> 23 #include <linux/timer.h> 24 #include <linux/parser.h> 25 #include <scsi/scsi.h> 26 #include <scsi/scsi_host.h> 27 #include <linux/uio_driver.h> 28 #include <net/genetlink.h> 29 #include <target/target_core_base.h> 30 #include <target/target_core_fabric.h> 31 #include <target/target_core_backend.h> 32 #include <target/target_core_backend_configfs.h> 33 34 #include <linux/target_core_user.h> 35 36 /* 37 * Define a shared-memory interface for LIO to pass SCSI commands and 38 * data to userspace for processing. This is to allow backends that 39 * are too complex for in-kernel support to be possible. 40 * 41 * It uses the UIO framework to do a lot of the device-creation and 42 * introspection work for us. 43 * 44 * See the .h file for how the ring is laid out. Note that while the 45 * command ring is defined, the particulars of the data area are 46 * not. Offset values in the command entry point to other locations 47 * internal to the mmap()ed area. There is separate space outside the 48 * command ring for data buffers. This leaves maximum flexibility for 49 * moving buffer allocations, or even page flipping or other 50 * allocation techniques, without altering the command ring layout. 51 * 52 * SECURITY: 53 * The user process must be assumed to be malicious. There's no way to 54 * prevent it breaking the command ring protocol if it wants, but in 55 * order to prevent other issues we must only ever read *data* from 56 * the shared memory area, not offsets or sizes. This applies to 57 * command ring entries as well as the mailbox. Extra code needed for 58 * this may have a 'UAM' comment. 59 */ 60 61 62 #define TCMU_TIME_OUT (30 * MSEC_PER_SEC) 63 64 #define CMDR_SIZE (16 * 4096) 65 #define DATA_SIZE (257 * 4096) 66 67 #define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE) 68 69 static struct device *tcmu_root_device; 70 71 struct tcmu_hba { 72 u32 host_id; 73 }; 74 75 #define TCMU_CONFIG_LEN 256 76 77 struct tcmu_dev { 78 struct se_device se_dev; 79 80 char *name; 81 struct se_hba *hba; 82 83 #define TCMU_DEV_BIT_OPEN 0 84 #define TCMU_DEV_BIT_BROKEN 1 85 unsigned long flags; 86 87 struct uio_info uio_info; 88 89 struct tcmu_mailbox *mb_addr; 90 size_t dev_size; 91 u32 cmdr_size; 92 u32 cmdr_last_cleaned; 93 /* Offset of data ring from start of mb */ 94 size_t data_off; 95 size_t data_size; 96 /* Ring head + tail values. */ 97 /* Must add data_off and mb_addr to get the address */ 98 size_t data_head; 99 size_t data_tail; 100 101 wait_queue_head_t wait_cmdr; 102 /* TODO should this be a mutex? */ 103 spinlock_t cmdr_lock; 104 105 struct idr commands; 106 spinlock_t commands_lock; 107 108 struct timer_list timeout; 109 110 char dev_config[TCMU_CONFIG_LEN]; 111 }; 112 113 #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev) 114 115 #define CMDR_OFF sizeof(struct tcmu_mailbox) 116 117 struct tcmu_cmd { 118 struct se_cmd *se_cmd; 119 struct tcmu_dev *tcmu_dev; 120 121 uint16_t cmd_id; 122 123 /* Can't use se_cmd->data_length when cleaning up expired cmds, because if 124 cmd has been completed then accessing se_cmd is off limits */ 125 size_t data_length; 126 127 unsigned long deadline; 128 129 #define TCMU_CMD_BIT_EXPIRED 0 130 unsigned long flags; 131 }; 132 133 static struct kmem_cache *tcmu_cmd_cache; 134 135 /* multicast group */ 136 enum tcmu_multicast_groups { 137 TCMU_MCGRP_CONFIG, 138 }; 139 140 static const struct genl_multicast_group tcmu_mcgrps[] = { 141 [TCMU_MCGRP_CONFIG] = { .name = "config", }, 142 }; 143 144 /* Our generic netlink family */ 145 static struct genl_family tcmu_genl_family = { 146 .id = GENL_ID_GENERATE, 147 .hdrsize = 0, 148 .name = "TCM-USER", 149 .version = 1, 150 .maxattr = TCMU_ATTR_MAX, 151 .mcgrps = tcmu_mcgrps, 152 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps), 153 }; 154 155 static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) 156 { 157 struct se_device *se_dev = se_cmd->se_dev; 158 struct tcmu_dev *udev = TCMU_DEV(se_dev); 159 struct tcmu_cmd *tcmu_cmd; 160 int cmd_id; 161 162 tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL); 163 if (!tcmu_cmd) 164 return NULL; 165 166 tcmu_cmd->se_cmd = se_cmd; 167 tcmu_cmd->tcmu_dev = udev; 168 tcmu_cmd->data_length = se_cmd->data_length; 169 170 if (se_cmd->se_cmd_flags & SCF_BIDI) { 171 BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents)); 172 tcmu_cmd->data_length += se_cmd->t_bidi_data_sg->length; 173 } 174 175 tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT); 176 177 idr_preload(GFP_KERNEL); 178 spin_lock_irq(&udev->commands_lock); 179 cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0, 180 USHRT_MAX, GFP_NOWAIT); 181 spin_unlock_irq(&udev->commands_lock); 182 idr_preload_end(); 183 184 if (cmd_id < 0) { 185 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); 186 return NULL; 187 } 188 tcmu_cmd->cmd_id = cmd_id; 189 190 return tcmu_cmd; 191 } 192 193 static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) 194 { 195 unsigned long offset = (unsigned long) vaddr & ~PAGE_MASK; 196 197 size = round_up(size+offset, PAGE_SIZE); 198 vaddr -= offset; 199 200 while (size) { 201 flush_dcache_page(virt_to_page(vaddr)); 202 size -= PAGE_SIZE; 203 } 204 } 205 206 /* 207 * Some ring helper functions. We don't assume size is a power of 2 so 208 * we can't use circ_buf.h. 209 */ 210 static inline size_t spc_used(size_t head, size_t tail, size_t size) 211 { 212 int diff = head - tail; 213 214 if (diff >= 0) 215 return diff; 216 else 217 return size + diff; 218 } 219 220 static inline size_t spc_free(size_t head, size_t tail, size_t size) 221 { 222 /* Keep 1 byte unused or we can't tell full from empty */ 223 return (size - spc_used(head, tail, size) - 1); 224 } 225 226 static inline size_t head_to_end(size_t head, size_t size) 227 { 228 return size - head; 229 } 230 231 #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size) 232 233 static void alloc_and_scatter_data_area(struct tcmu_dev *udev, 234 struct scatterlist *data_sg, unsigned int data_nents, 235 struct iovec **iov, int *iov_cnt, bool copy_data) 236 { 237 int i; 238 void *from, *to; 239 size_t copy_bytes; 240 struct scatterlist *sg; 241 242 for_each_sg(data_sg, sg, data_nents, i) { 243 copy_bytes = min_t(size_t, sg->length, 244 head_to_end(udev->data_head, udev->data_size)); 245 from = kmap_atomic(sg_page(sg)) + sg->offset; 246 to = (void *) udev->mb_addr + udev->data_off + udev->data_head; 247 248 if (copy_data) { 249 memcpy(to, from, copy_bytes); 250 tcmu_flush_dcache_range(to, copy_bytes); 251 } 252 253 /* Even iov_base is relative to mb_addr */ 254 (*iov)->iov_len = copy_bytes; 255 (*iov)->iov_base = (void __user *) udev->data_off + 256 udev->data_head; 257 (*iov_cnt)++; 258 (*iov)++; 259 260 UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size); 261 262 /* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */ 263 if (sg->length != copy_bytes) { 264 from += copy_bytes; 265 copy_bytes = sg->length - copy_bytes; 266 267 (*iov)->iov_len = copy_bytes; 268 (*iov)->iov_base = (void __user *) udev->data_off + 269 udev->data_head; 270 271 if (copy_data) { 272 to = (void *) udev->mb_addr + 273 udev->data_off + udev->data_head; 274 memcpy(to, from, copy_bytes); 275 tcmu_flush_dcache_range(to, copy_bytes); 276 } 277 278 (*iov_cnt)++; 279 (*iov)++; 280 281 UPDATE_HEAD(udev->data_head, 282 copy_bytes, udev->data_size); 283 } 284 285 kunmap_atomic(from); 286 } 287 } 288 289 static void gather_and_free_data_area(struct tcmu_dev *udev, 290 struct scatterlist *data_sg, unsigned int data_nents) 291 { 292 int i; 293 void *from, *to; 294 size_t copy_bytes; 295 struct scatterlist *sg; 296 297 /* It'd be easier to look at entry's iovec again, but UAM */ 298 for_each_sg(data_sg, sg, data_nents, i) { 299 copy_bytes = min_t(size_t, sg->length, 300 head_to_end(udev->data_tail, udev->data_size)); 301 302 to = kmap_atomic(sg_page(sg)) + sg->offset; 303 WARN_ON(sg->length + sg->offset > PAGE_SIZE); 304 from = (void *) udev->mb_addr + 305 udev->data_off + udev->data_tail; 306 tcmu_flush_dcache_range(from, copy_bytes); 307 memcpy(to, from, copy_bytes); 308 309 UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size); 310 311 /* Uh oh, wrapped the data buffer for this sg's data */ 312 if (sg->length != copy_bytes) { 313 from = (void *) udev->mb_addr + 314 udev->data_off + udev->data_tail; 315 WARN_ON(udev->data_tail); 316 to += copy_bytes; 317 copy_bytes = sg->length - copy_bytes; 318 tcmu_flush_dcache_range(from, copy_bytes); 319 memcpy(to, from, copy_bytes); 320 321 UPDATE_HEAD(udev->data_tail, 322 copy_bytes, udev->data_size); 323 } 324 kunmap_atomic(to); 325 } 326 } 327 328 /* 329 * We can't queue a command until we have space available on the cmd ring *and* 330 * space available on the data ring. 331 * 332 * Called with ring lock held. 333 */ 334 static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed) 335 { 336 struct tcmu_mailbox *mb = udev->mb_addr; 337 size_t space; 338 u32 cmd_head; 339 size_t cmd_needed; 340 341 tcmu_flush_dcache_range(mb, sizeof(*mb)); 342 343 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 344 345 /* 346 * If cmd end-of-ring space is too small then we need space for a NOP plus 347 * original cmd - cmds are internally contiguous. 348 */ 349 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size) 350 cmd_needed = cmd_size; 351 else 352 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size); 353 354 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size); 355 if (space < cmd_needed) { 356 pr_debug("no cmd space: %u %u %u\n", cmd_head, 357 udev->cmdr_last_cleaned, udev->cmdr_size); 358 return false; 359 } 360 361 space = spc_free(udev->data_head, udev->data_tail, udev->data_size); 362 if (space < data_needed) { 363 pr_debug("no data space: %zu %zu %zu\n", udev->data_head, 364 udev->data_tail, udev->data_size); 365 return false; 366 } 367 368 return true; 369 } 370 371 static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) 372 { 373 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 374 struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 375 size_t base_command_size, command_size; 376 struct tcmu_mailbox *mb; 377 struct tcmu_cmd_entry *entry; 378 struct iovec *iov; 379 int iov_cnt; 380 uint32_t cmd_head; 381 uint64_t cdb_off; 382 bool copy_to_data_area; 383 384 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) 385 return -EINVAL; 386 387 /* 388 * Must be a certain minimum size for response sense info, but 389 * also may be larger if the iov array is large. 390 * 391 * iovs = sgl_nents+1, for end-of-ring case, plus another 1 392 * b/c size == offsetof one-past-element. 393 */ 394 base_command_size = max(offsetof(struct tcmu_cmd_entry, 395 req.iov[se_cmd->t_bidi_data_nents + 396 se_cmd->t_data_nents + 2]), 397 sizeof(struct tcmu_cmd_entry)); 398 command_size = base_command_size 399 + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE); 400 401 WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1)); 402 403 spin_lock_irq(&udev->cmdr_lock); 404 405 mb = udev->mb_addr; 406 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 407 if ((command_size > (udev->cmdr_size / 2)) 408 || tcmu_cmd->data_length > (udev->data_size - 1)) 409 pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu " 410 "cmd/data ring buffers\n", command_size, tcmu_cmd->data_length, 411 udev->cmdr_size, udev->data_size); 412 413 while (!is_ring_space_avail(udev, command_size, tcmu_cmd->data_length)) { 414 int ret; 415 DEFINE_WAIT(__wait); 416 417 prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE); 418 419 pr_debug("sleeping for ring space\n"); 420 spin_unlock_irq(&udev->cmdr_lock); 421 ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); 422 finish_wait(&udev->wait_cmdr, &__wait); 423 if (!ret) { 424 pr_warn("tcmu: command timed out\n"); 425 return -ETIMEDOUT; 426 } 427 428 spin_lock_irq(&udev->cmdr_lock); 429 430 /* We dropped cmdr_lock, cmd_head is stale */ 431 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 432 } 433 434 /* Insert a PAD if end-of-ring space is too small */ 435 if (head_to_end(cmd_head, udev->cmdr_size) < command_size) { 436 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); 437 438 entry = (void *) mb + CMDR_OFF + cmd_head; 439 tcmu_flush_dcache_range(entry, sizeof(*entry)); 440 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD); 441 tcmu_hdr_set_len(&entry->hdr.len_op, pad_size); 442 entry->hdr.cmd_id = 0; /* not used for PAD */ 443 entry->hdr.kflags = 0; 444 entry->hdr.uflags = 0; 445 446 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); 447 448 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 449 WARN_ON(cmd_head != 0); 450 } 451 452 entry = (void *) mb + CMDR_OFF + cmd_head; 453 tcmu_flush_dcache_range(entry, sizeof(*entry)); 454 tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); 455 tcmu_hdr_set_len(&entry->hdr.len_op, command_size); 456 entry->hdr.cmd_id = tcmu_cmd->cmd_id; 457 entry->hdr.kflags = 0; 458 entry->hdr.uflags = 0; 459 460 /* 461 * Fix up iovecs, and handle if allocation in data ring wrapped. 462 */ 463 iov = &entry->req.iov[0]; 464 iov_cnt = 0; 465 copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE 466 || se_cmd->se_cmd_flags & SCF_BIDI); 467 alloc_and_scatter_data_area(udev, se_cmd->t_data_sg, 468 se_cmd->t_data_nents, &iov, &iov_cnt, copy_to_data_area); 469 entry->req.iov_cnt = iov_cnt; 470 entry->req.iov_dif_cnt = 0; 471 472 /* Handle BIDI commands */ 473 iov_cnt = 0; 474 alloc_and_scatter_data_area(udev, se_cmd->t_bidi_data_sg, 475 se_cmd->t_bidi_data_nents, &iov, &iov_cnt, false); 476 entry->req.iov_bidi_cnt = iov_cnt; 477 478 /* All offsets relative to mb_addr, not start of entry! */ 479 cdb_off = CMDR_OFF + cmd_head + base_command_size; 480 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); 481 entry->req.cdb_off = cdb_off; 482 tcmu_flush_dcache_range(entry, sizeof(*entry)); 483 484 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); 485 tcmu_flush_dcache_range(mb, sizeof(*mb)); 486 487 spin_unlock_irq(&udev->cmdr_lock); 488 489 /* TODO: only if FLUSH and FUA? */ 490 uio_event_notify(&udev->uio_info); 491 492 mod_timer(&udev->timeout, 493 round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT))); 494 495 return 0; 496 } 497 498 static int tcmu_queue_cmd(struct se_cmd *se_cmd) 499 { 500 struct se_device *se_dev = se_cmd->se_dev; 501 struct tcmu_dev *udev = TCMU_DEV(se_dev); 502 struct tcmu_cmd *tcmu_cmd; 503 int ret; 504 505 tcmu_cmd = tcmu_alloc_cmd(se_cmd); 506 if (!tcmu_cmd) 507 return -ENOMEM; 508 509 ret = tcmu_queue_cmd_ring(tcmu_cmd); 510 if (ret < 0) { 511 pr_err("TCMU: Could not queue command\n"); 512 spin_lock_irq(&udev->commands_lock); 513 idr_remove(&udev->commands, tcmu_cmd->cmd_id); 514 spin_unlock_irq(&udev->commands_lock); 515 516 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); 517 } 518 519 return ret; 520 } 521 522 static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry) 523 { 524 struct se_cmd *se_cmd = cmd->se_cmd; 525 struct tcmu_dev *udev = cmd->tcmu_dev; 526 527 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 528 /* cmd has been completed already from timeout, just reclaim data 529 ring space */ 530 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); 531 return; 532 } 533 534 if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) { 535 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); 536 pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n", 537 cmd->se_cmd); 538 transport_generic_request_failure(cmd->se_cmd, 539 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); 540 cmd->se_cmd = NULL; 541 kmem_cache_free(tcmu_cmd_cache, cmd); 542 return; 543 } 544 545 if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { 546 memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer, 547 se_cmd->scsi_sense_length); 548 549 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); 550 } else if (se_cmd->se_cmd_flags & SCF_BIDI) { 551 /* Discard data_out buffer */ 552 UPDATE_HEAD(udev->data_tail, 553 (size_t)se_cmd->t_data_sg->length, udev->data_size); 554 555 /* Get Data-In buffer */ 556 gather_and_free_data_area(udev, 557 se_cmd->t_bidi_data_sg, se_cmd->t_bidi_data_nents); 558 } else if (se_cmd->data_direction == DMA_FROM_DEVICE) { 559 gather_and_free_data_area(udev, 560 se_cmd->t_data_sg, se_cmd->t_data_nents); 561 } else if (se_cmd->data_direction == DMA_TO_DEVICE) { 562 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size); 563 } else if (se_cmd->data_direction != DMA_NONE) { 564 pr_warn("TCMU: data direction was %d!\n", 565 se_cmd->data_direction); 566 } 567 568 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status); 569 cmd->se_cmd = NULL; 570 571 kmem_cache_free(tcmu_cmd_cache, cmd); 572 } 573 574 static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) 575 { 576 struct tcmu_mailbox *mb; 577 LIST_HEAD(cpl_cmds); 578 unsigned long flags; 579 int handled = 0; 580 581 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { 582 pr_err("ring broken, not handling completions\n"); 583 return 0; 584 } 585 586 spin_lock_irqsave(&udev->cmdr_lock, flags); 587 588 mb = udev->mb_addr; 589 tcmu_flush_dcache_range(mb, sizeof(*mb)); 590 591 while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) { 592 593 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; 594 struct tcmu_cmd *cmd; 595 596 tcmu_flush_dcache_range(entry, sizeof(*entry)); 597 598 if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) { 599 UPDATE_HEAD(udev->cmdr_last_cleaned, 600 tcmu_hdr_get_len(entry->hdr.len_op), 601 udev->cmdr_size); 602 continue; 603 } 604 WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD); 605 606 spin_lock(&udev->commands_lock); 607 cmd = idr_find(&udev->commands, entry->hdr.cmd_id); 608 if (cmd) 609 idr_remove(&udev->commands, cmd->cmd_id); 610 spin_unlock(&udev->commands_lock); 611 612 if (!cmd) { 613 pr_err("cmd_id not found, ring is broken\n"); 614 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); 615 break; 616 } 617 618 tcmu_handle_completion(cmd, entry); 619 620 UPDATE_HEAD(udev->cmdr_last_cleaned, 621 tcmu_hdr_get_len(entry->hdr.len_op), 622 udev->cmdr_size); 623 624 handled++; 625 } 626 627 if (mb->cmd_tail == mb->cmd_head) 628 del_timer(&udev->timeout); /* no more pending cmds */ 629 630 spin_unlock_irqrestore(&udev->cmdr_lock, flags); 631 632 wake_up(&udev->wait_cmdr); 633 634 return handled; 635 } 636 637 static int tcmu_check_expired_cmd(int id, void *p, void *data) 638 { 639 struct tcmu_cmd *cmd = p; 640 641 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) 642 return 0; 643 644 if (!time_after(cmd->deadline, jiffies)) 645 return 0; 646 647 set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); 648 target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION); 649 cmd->se_cmd = NULL; 650 651 kmem_cache_free(tcmu_cmd_cache, cmd); 652 653 return 0; 654 } 655 656 static void tcmu_device_timedout(unsigned long data) 657 { 658 struct tcmu_dev *udev = (struct tcmu_dev *)data; 659 unsigned long flags; 660 int handled; 661 662 handled = tcmu_handle_completions(udev); 663 664 pr_warn("%d completions handled from timeout\n", handled); 665 666 spin_lock_irqsave(&udev->commands_lock, flags); 667 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL); 668 spin_unlock_irqrestore(&udev->commands_lock, flags); 669 670 /* 671 * We don't need to wakeup threads on wait_cmdr since they have their 672 * own timeout. 673 */ 674 } 675 676 static int tcmu_attach_hba(struct se_hba *hba, u32 host_id) 677 { 678 struct tcmu_hba *tcmu_hba; 679 680 tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL); 681 if (!tcmu_hba) 682 return -ENOMEM; 683 684 tcmu_hba->host_id = host_id; 685 hba->hba_ptr = tcmu_hba; 686 687 return 0; 688 } 689 690 static void tcmu_detach_hba(struct se_hba *hba) 691 { 692 kfree(hba->hba_ptr); 693 hba->hba_ptr = NULL; 694 } 695 696 static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) 697 { 698 struct tcmu_dev *udev; 699 700 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); 701 if (!udev) 702 return NULL; 703 704 udev->name = kstrdup(name, GFP_KERNEL); 705 if (!udev->name) { 706 kfree(udev); 707 return NULL; 708 } 709 710 udev->hba = hba; 711 712 init_waitqueue_head(&udev->wait_cmdr); 713 spin_lock_init(&udev->cmdr_lock); 714 715 idr_init(&udev->commands); 716 spin_lock_init(&udev->commands_lock); 717 718 setup_timer(&udev->timeout, tcmu_device_timedout, 719 (unsigned long)udev); 720 721 return &udev->se_dev; 722 } 723 724 static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on) 725 { 726 struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info); 727 728 tcmu_handle_completions(tcmu_dev); 729 730 return 0; 731 } 732 733 /* 734 * mmap code from uio.c. Copied here because we want to hook mmap() 735 * and this stuff must come along. 736 */ 737 static int tcmu_find_mem_index(struct vm_area_struct *vma) 738 { 739 struct tcmu_dev *udev = vma->vm_private_data; 740 struct uio_info *info = &udev->uio_info; 741 742 if (vma->vm_pgoff < MAX_UIO_MAPS) { 743 if (info->mem[vma->vm_pgoff].size == 0) 744 return -1; 745 return (int)vma->vm_pgoff; 746 } 747 return -1; 748 } 749 750 static int tcmu_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 751 { 752 struct tcmu_dev *udev = vma->vm_private_data; 753 struct uio_info *info = &udev->uio_info; 754 struct page *page; 755 unsigned long offset; 756 void *addr; 757 758 int mi = tcmu_find_mem_index(vma); 759 if (mi < 0) 760 return VM_FAULT_SIGBUS; 761 762 /* 763 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE 764 * to use mem[N]. 765 */ 766 offset = (vmf->pgoff - mi) << PAGE_SHIFT; 767 768 addr = (void *)(unsigned long)info->mem[mi].addr + offset; 769 if (info->mem[mi].memtype == UIO_MEM_LOGICAL) 770 page = virt_to_page(addr); 771 else 772 page = vmalloc_to_page(addr); 773 get_page(page); 774 vmf->page = page; 775 return 0; 776 } 777 778 static const struct vm_operations_struct tcmu_vm_ops = { 779 .fault = tcmu_vma_fault, 780 }; 781 782 static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) 783 { 784 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 785 786 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 787 vma->vm_ops = &tcmu_vm_ops; 788 789 vma->vm_private_data = udev; 790 791 /* Ensure the mmap is exactly the right size */ 792 if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT)) 793 return -EINVAL; 794 795 return 0; 796 } 797 798 static int tcmu_open(struct uio_info *info, struct inode *inode) 799 { 800 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 801 802 /* O_EXCL not supported for char devs, so fake it? */ 803 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags)) 804 return -EBUSY; 805 806 pr_debug("open\n"); 807 808 return 0; 809 } 810 811 static int tcmu_release(struct uio_info *info, struct inode *inode) 812 { 813 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 814 815 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); 816 817 pr_debug("close\n"); 818 819 return 0; 820 } 821 822 static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor) 823 { 824 struct sk_buff *skb; 825 void *msg_header; 826 int ret = -ENOMEM; 827 828 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 829 if (!skb) 830 return ret; 831 832 msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd); 833 if (!msg_header) 834 goto free_skb; 835 836 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name); 837 if (ret < 0) 838 goto free_skb; 839 840 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor); 841 if (ret < 0) 842 goto free_skb; 843 844 genlmsg_end(skb, msg_header); 845 846 ret = genlmsg_multicast(&tcmu_genl_family, skb, 0, 847 TCMU_MCGRP_CONFIG, GFP_KERNEL); 848 849 /* We don't care if no one is listening */ 850 if (ret == -ESRCH) 851 ret = 0; 852 853 return ret; 854 free_skb: 855 nlmsg_free(skb); 856 return ret; 857 } 858 859 static int tcmu_configure_device(struct se_device *dev) 860 { 861 struct tcmu_dev *udev = TCMU_DEV(dev); 862 struct tcmu_hba *hba = udev->hba->hba_ptr; 863 struct uio_info *info; 864 struct tcmu_mailbox *mb; 865 size_t size; 866 size_t used; 867 int ret = 0; 868 char *str; 869 870 info = &udev->uio_info; 871 872 size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name, 873 udev->dev_config); 874 size += 1; /* for \0 */ 875 str = kmalloc(size, GFP_KERNEL); 876 if (!str) 877 return -ENOMEM; 878 879 used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name); 880 881 if (udev->dev_config[0]) 882 snprintf(str + used, size - used, "/%s", udev->dev_config); 883 884 info->name = str; 885 886 udev->mb_addr = vzalloc(TCMU_RING_SIZE); 887 if (!udev->mb_addr) { 888 ret = -ENOMEM; 889 goto err_vzalloc; 890 } 891 892 /* mailbox fits in first part of CMDR space */ 893 udev->cmdr_size = CMDR_SIZE - CMDR_OFF; 894 udev->data_off = CMDR_SIZE; 895 udev->data_size = TCMU_RING_SIZE - CMDR_SIZE; 896 897 mb = udev->mb_addr; 898 mb->version = TCMU_MAILBOX_VERSION; 899 mb->cmdr_off = CMDR_OFF; 900 mb->cmdr_size = udev->cmdr_size; 901 902 WARN_ON(!PAGE_ALIGNED(udev->data_off)); 903 WARN_ON(udev->data_size % PAGE_SIZE); 904 905 info->version = xstr(TCMU_MAILBOX_VERSION); 906 907 info->mem[0].name = "tcm-user command & data buffer"; 908 info->mem[0].addr = (phys_addr_t) udev->mb_addr; 909 info->mem[0].size = TCMU_RING_SIZE; 910 info->mem[0].memtype = UIO_MEM_VIRTUAL; 911 912 info->irqcontrol = tcmu_irqcontrol; 913 info->irq = UIO_IRQ_CUSTOM; 914 915 info->mmap = tcmu_mmap; 916 info->open = tcmu_open; 917 info->release = tcmu_release; 918 919 ret = uio_register_device(tcmu_root_device, info); 920 if (ret) 921 goto err_register; 922 923 /* Other attributes can be configured in userspace */ 924 dev->dev_attrib.hw_block_size = 512; 925 dev->dev_attrib.hw_max_sectors = 128; 926 dev->dev_attrib.hw_queue_depth = 128; 927 928 ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, 929 udev->uio_info.uio_dev->minor); 930 if (ret) 931 goto err_netlink; 932 933 return 0; 934 935 err_netlink: 936 uio_unregister_device(&udev->uio_info); 937 err_register: 938 vfree(udev->mb_addr); 939 err_vzalloc: 940 kfree(info->name); 941 942 return ret; 943 } 944 945 static int tcmu_check_pending_cmd(int id, void *p, void *data) 946 { 947 struct tcmu_cmd *cmd = p; 948 949 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) 950 return 0; 951 return -EINVAL; 952 } 953 954 static void tcmu_free_device(struct se_device *dev) 955 { 956 struct tcmu_dev *udev = TCMU_DEV(dev); 957 int i; 958 959 del_timer_sync(&udev->timeout); 960 961 vfree(udev->mb_addr); 962 963 /* Upper layer should drain all requests before calling this */ 964 spin_lock_irq(&udev->commands_lock); 965 i = idr_for_each(&udev->commands, tcmu_check_pending_cmd, NULL); 966 idr_destroy(&udev->commands); 967 spin_unlock_irq(&udev->commands_lock); 968 WARN_ON(i); 969 970 /* Device was configured */ 971 if (udev->uio_info.uio_dev) { 972 tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name, 973 udev->uio_info.uio_dev->minor); 974 975 uio_unregister_device(&udev->uio_info); 976 kfree(udev->uio_info.name); 977 kfree(udev->name); 978 } 979 980 kfree(udev); 981 } 982 983 enum { 984 Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err, 985 }; 986 987 static match_table_t tokens = { 988 {Opt_dev_config, "dev_config=%s"}, 989 {Opt_dev_size, "dev_size=%u"}, 990 {Opt_hw_block_size, "hw_block_size=%u"}, 991 {Opt_err, NULL} 992 }; 993 994 static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, 995 const char *page, ssize_t count) 996 { 997 struct tcmu_dev *udev = TCMU_DEV(dev); 998 char *orig, *ptr, *opts, *arg_p; 999 substring_t args[MAX_OPT_ARGS]; 1000 int ret = 0, token; 1001 unsigned long tmp_ul; 1002 1003 opts = kstrdup(page, GFP_KERNEL); 1004 if (!opts) 1005 return -ENOMEM; 1006 1007 orig = opts; 1008 1009 while ((ptr = strsep(&opts, ",\n")) != NULL) { 1010 if (!*ptr) 1011 continue; 1012 1013 token = match_token(ptr, tokens, args); 1014 switch (token) { 1015 case Opt_dev_config: 1016 if (match_strlcpy(udev->dev_config, &args[0], 1017 TCMU_CONFIG_LEN) == 0) { 1018 ret = -EINVAL; 1019 break; 1020 } 1021 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config); 1022 break; 1023 case Opt_dev_size: 1024 arg_p = match_strdup(&args[0]); 1025 if (!arg_p) { 1026 ret = -ENOMEM; 1027 break; 1028 } 1029 ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size); 1030 kfree(arg_p); 1031 if (ret < 0) 1032 pr_err("kstrtoul() failed for dev_size=\n"); 1033 break; 1034 case Opt_hw_block_size: 1035 arg_p = match_strdup(&args[0]); 1036 if (!arg_p) { 1037 ret = -ENOMEM; 1038 break; 1039 } 1040 ret = kstrtoul(arg_p, 0, &tmp_ul); 1041 kfree(arg_p); 1042 if (ret < 0) { 1043 pr_err("kstrtoul() failed for hw_block_size=\n"); 1044 break; 1045 } 1046 if (!tmp_ul) { 1047 pr_err("hw_block_size must be nonzero\n"); 1048 break; 1049 } 1050 dev->dev_attrib.hw_block_size = tmp_ul; 1051 break; 1052 default: 1053 break; 1054 } 1055 } 1056 1057 kfree(orig); 1058 return (!ret) ? count : ret; 1059 } 1060 1061 static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) 1062 { 1063 struct tcmu_dev *udev = TCMU_DEV(dev); 1064 ssize_t bl = 0; 1065 1066 bl = sprintf(b + bl, "Config: %s ", 1067 udev->dev_config[0] ? udev->dev_config : "NULL"); 1068 bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size); 1069 1070 return bl; 1071 } 1072 1073 static sector_t tcmu_get_blocks(struct se_device *dev) 1074 { 1075 struct tcmu_dev *udev = TCMU_DEV(dev); 1076 1077 return div_u64(udev->dev_size - dev->dev_attrib.block_size, 1078 dev->dev_attrib.block_size); 1079 } 1080 1081 static sense_reason_t 1082 tcmu_pass_op(struct se_cmd *se_cmd) 1083 { 1084 int ret = tcmu_queue_cmd(se_cmd); 1085 1086 if (ret != 0) 1087 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1088 else 1089 return TCM_NO_SENSE; 1090 } 1091 1092 static sense_reason_t 1093 tcmu_parse_cdb(struct se_cmd *cmd) 1094 { 1095 return passthrough_parse_cdb(cmd, tcmu_pass_op); 1096 } 1097 1098 DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type); 1099 TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type); 1100 1101 DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size); 1102 TB_DEV_ATTR_RO(tcmu, hw_block_size); 1103 1104 DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors); 1105 TB_DEV_ATTR_RO(tcmu, hw_max_sectors); 1106 1107 DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth); 1108 TB_DEV_ATTR_RO(tcmu, hw_queue_depth); 1109 1110 static struct configfs_attribute *tcmu_backend_dev_attrs[] = { 1111 &tcmu_dev_attrib_hw_pi_prot_type.attr, 1112 &tcmu_dev_attrib_hw_block_size.attr, 1113 &tcmu_dev_attrib_hw_max_sectors.attr, 1114 &tcmu_dev_attrib_hw_queue_depth.attr, 1115 NULL, 1116 }; 1117 1118 static struct se_subsystem_api tcmu_template = { 1119 .name = "user", 1120 .inquiry_prod = "USER", 1121 .inquiry_rev = TCMU_VERSION, 1122 .owner = THIS_MODULE, 1123 .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, 1124 .attach_hba = tcmu_attach_hba, 1125 .detach_hba = tcmu_detach_hba, 1126 .alloc_device = tcmu_alloc_device, 1127 .configure_device = tcmu_configure_device, 1128 .free_device = tcmu_free_device, 1129 .parse_cdb = tcmu_parse_cdb, 1130 .set_configfs_dev_params = tcmu_set_configfs_dev_params, 1131 .show_configfs_dev_params = tcmu_show_configfs_dev_params, 1132 .get_device_type = sbc_get_device_type, 1133 .get_blocks = tcmu_get_blocks, 1134 }; 1135 1136 static int __init tcmu_module_init(void) 1137 { 1138 struct target_backend_cits *tbc = &tcmu_template.tb_cits; 1139 int ret; 1140 1141 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); 1142 1143 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache", 1144 sizeof(struct tcmu_cmd), 1145 __alignof__(struct tcmu_cmd), 1146 0, NULL); 1147 if (!tcmu_cmd_cache) 1148 return -ENOMEM; 1149 1150 tcmu_root_device = root_device_register("tcm_user"); 1151 if (IS_ERR(tcmu_root_device)) { 1152 ret = PTR_ERR(tcmu_root_device); 1153 goto out_free_cache; 1154 } 1155 1156 ret = genl_register_family(&tcmu_genl_family); 1157 if (ret < 0) { 1158 goto out_unreg_device; 1159 } 1160 1161 target_core_setup_sub_cits(&tcmu_template); 1162 tbc->tb_dev_attrib_cit.ct_attrs = tcmu_backend_dev_attrs; 1163 1164 ret = transport_subsystem_register(&tcmu_template); 1165 if (ret) 1166 goto out_unreg_genl; 1167 1168 return 0; 1169 1170 out_unreg_genl: 1171 genl_unregister_family(&tcmu_genl_family); 1172 out_unreg_device: 1173 root_device_unregister(tcmu_root_device); 1174 out_free_cache: 1175 kmem_cache_destroy(tcmu_cmd_cache); 1176 1177 return ret; 1178 } 1179 1180 static void __exit tcmu_module_exit(void) 1181 { 1182 transport_subsystem_release(&tcmu_template); 1183 genl_unregister_family(&tcmu_genl_family); 1184 root_device_unregister(tcmu_root_device); 1185 kmem_cache_destroy(tcmu_cmd_cache); 1186 } 1187 1188 MODULE_DESCRIPTION("TCM USER subsystem plugin"); 1189 MODULE_AUTHOR("Shaohua Li <shli@kernel.org>"); 1190 MODULE_AUTHOR("Andy Grover <agrover@redhat.com>"); 1191 MODULE_LICENSE("GPL"); 1192 1193 module_init(tcmu_module_init); 1194 module_exit(tcmu_module_exit); 1195