17c9e7a6fSAndy Grover /* 27c9e7a6fSAndy Grover * Copyright (C) 2013 Shaohua Li <shli@kernel.org> 37c9e7a6fSAndy Grover * Copyright (C) 2014 Red Hat, Inc. 4f97ec7dbSIlias Tsitsimpis * Copyright (C) 2015 Arrikto, Inc. 5141685a3SXiubo Li * Copyright (C) 2017 Chinamobile, Inc. 67c9e7a6fSAndy Grover * 77c9e7a6fSAndy Grover * This program is free software; you can redistribute it and/or modify it 87c9e7a6fSAndy Grover * under the terms and conditions of the GNU General Public License, 97c9e7a6fSAndy Grover * version 2, as published by the Free Software Foundation. 107c9e7a6fSAndy Grover * 117c9e7a6fSAndy Grover * This program is distributed in the hope it will be useful, but WITHOUT 127c9e7a6fSAndy Grover * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 137c9e7a6fSAndy Grover * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 147c9e7a6fSAndy Grover * more details. 157c9e7a6fSAndy Grover * 167c9e7a6fSAndy Grover * You should have received a copy of the GNU General Public License along with 177c9e7a6fSAndy Grover * this program; if not, write to the Free Software Foundation, Inc., 187c9e7a6fSAndy Grover * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 197c9e7a6fSAndy Grover */ 207c9e7a6fSAndy Grover 217c9e7a6fSAndy Grover #include <linux/spinlock.h> 227c9e7a6fSAndy Grover #include <linux/module.h> 237c9e7a6fSAndy Grover #include <linux/idr.h> 24ba929992SBart Van Assche #include <linux/kernel.h> 257c9e7a6fSAndy Grover #include <linux/timer.h> 267c9e7a6fSAndy Grover #include <linux/parser.h> 275538d294SDavid S. 
Miller #include <linux/vmalloc.h> 287c9e7a6fSAndy Grover #include <linux/uio_driver.h> 29141685a3SXiubo Li #include <linux/radix-tree.h> 30ac64a2ceSDavid Disseldorp #include <linux/stringify.h> 3126418649SSheng Yang #include <linux/bitops.h> 32f5045724SBart Van Assche #include <linux/highmem.h> 337d7a7435SNicholas Bellinger #include <linux/configfs.h> 34b6df4b79SXiubo Li #include <linux/mutex.h> 359972cebbSMike Christie #include <linux/workqueue.h> 367c9e7a6fSAndy Grover #include <net/genetlink.h> 37ba929992SBart Van Assche #include <scsi/scsi_common.h> 38ba929992SBart Van Assche #include <scsi/scsi_proto.h> 397c9e7a6fSAndy Grover #include <target/target_core_base.h> 407c9e7a6fSAndy Grover #include <target/target_core_fabric.h> 417c9e7a6fSAndy Grover #include <target/target_core_backend.h> 42e9f720d6SNicholas Bellinger 437c9e7a6fSAndy Grover #include <linux/target_core_user.h> 447c9e7a6fSAndy Grover 45572ccdabSRandy Dunlap /** 46572ccdabSRandy Dunlap * DOC: Userspace I/O 47572ccdabSRandy Dunlap * Userspace I/O 48572ccdabSRandy Dunlap * ------------- 49572ccdabSRandy Dunlap * 507c9e7a6fSAndy Grover * Define a shared-memory interface for LIO to pass SCSI commands and 517c9e7a6fSAndy Grover * data to userspace for processing. This is to allow backends that 527c9e7a6fSAndy Grover * are too complex for in-kernel support to be possible. 537c9e7a6fSAndy Grover * 547c9e7a6fSAndy Grover * It uses the UIO framework to do a lot of the device-creation and 557c9e7a6fSAndy Grover * introspection work for us. 567c9e7a6fSAndy Grover * 577c9e7a6fSAndy Grover * See the .h file for how the ring is laid out. Note that while the 587c9e7a6fSAndy Grover * command ring is defined, the particulars of the data area are 597c9e7a6fSAndy Grover * not. Offset values in the command entry point to other locations 60572ccdabSRandy Dunlap * internal to the mmap-ed area. There is separate space outside the 617c9e7a6fSAndy Grover * command ring for data buffers. 
This leaves maximum flexibility for 627c9e7a6fSAndy Grover * moving buffer allocations, or even page flipping or other 637c9e7a6fSAndy Grover * allocation techniques, without altering the command ring layout. 647c9e7a6fSAndy Grover * 657c9e7a6fSAndy Grover * SECURITY: 667c9e7a6fSAndy Grover * The user process must be assumed to be malicious. There's no way to 677c9e7a6fSAndy Grover * prevent it breaking the command ring protocol if it wants, but in 687c9e7a6fSAndy Grover * order to prevent other issues we must only ever read *data* from 697c9e7a6fSAndy Grover * the shared memory area, not offsets or sizes. This applies to 707c9e7a6fSAndy Grover * command ring entries as well as the mailbox. Extra code needed for 717c9e7a6fSAndy Grover * this may have a 'UAM' comment. 727c9e7a6fSAndy Grover */ 737c9e7a6fSAndy Grover 747c9e7a6fSAndy Grover #define TCMU_TIME_OUT (30 * MSEC_PER_SEC) 757c9e7a6fSAndy Grover 76b6df4b79SXiubo Li /* For cmd area, the size is fixed 8MB */ 77b6df4b79SXiubo Li #define CMDR_SIZE (8 * 1024 * 1024) 7826418649SSheng Yang 79b6df4b79SXiubo Li /* 80b6df4b79SXiubo Li * For data area, the block size is PAGE_SIZE and 81b6df4b79SXiubo Li * the total size is 256K * PAGE_SIZE. 
82b6df4b79SXiubo Li */ 83b6df4b79SXiubo Li #define DATA_BLOCK_SIZE PAGE_SIZE 8480eb8761SMike Christie #define DATA_BLOCK_SHIFT PAGE_SHIFT 8580eb8761SMike Christie #define DATA_BLOCK_BITS_DEF (256 * 1024) 8626418649SSheng Yang #define DATA_SIZE (DATA_BLOCK_BITS * DATA_BLOCK_SIZE) 877c9e7a6fSAndy Grover 8880eb8761SMike Christie #define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT)) 8980eb8761SMike Christie #define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT)) 9080eb8761SMike Christie 91b6df4b79SXiubo Li /* The total size of the ring is 8M + 256K * PAGE_SIZE */ 927c9e7a6fSAndy Grover #define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE) 937c9e7a6fSAndy Grover 94af1dd7ffSMike Christie /* 95af1dd7ffSMike Christie * Default number of global data blocks(512K * PAGE_SIZE) 96af1dd7ffSMike Christie * when the unmap thread will be started. 97af1dd7ffSMike Christie */ 9880eb8761SMike Christie #define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024) 99b6df4b79SXiubo Li 100b3af66e2SMike Christie static u8 tcmu_kern_cmd_reply_supported; 101b3af66e2SMike Christie 1027c9e7a6fSAndy Grover static struct device *tcmu_root_device; 1037c9e7a6fSAndy Grover 1047c9e7a6fSAndy Grover struct tcmu_hba { 1057c9e7a6fSAndy Grover u32 host_id; 1067c9e7a6fSAndy Grover }; 1077c9e7a6fSAndy Grover 1087c9e7a6fSAndy Grover #define TCMU_CONFIG_LEN 256 1097c9e7a6fSAndy Grover 110b3af66e2SMike Christie struct tcmu_nl_cmd { 111b3af66e2SMike Christie /* wake up thread waiting for reply */ 112b3af66e2SMike Christie struct completion complete; 113b3af66e2SMike Christie int cmd; 114b3af66e2SMike Christie int status; 115b3af66e2SMike Christie }; 116b3af66e2SMike Christie 1177c9e7a6fSAndy Grover struct tcmu_dev { 118b6df4b79SXiubo Li struct list_head node; 119f3cdbe39SMike Christie struct kref kref; 120af1dd7ffSMike Christie 1217c9e7a6fSAndy Grover struct se_device se_dev; 1227c9e7a6fSAndy Grover 1237c9e7a6fSAndy Grover char *name; 1247c9e7a6fSAndy Grover struct se_hba *hba; 1257c9e7a6fSAndy Grover 
1267c9e7a6fSAndy Grover #define TCMU_DEV_BIT_OPEN 0 1277c9e7a6fSAndy Grover #define TCMU_DEV_BIT_BROKEN 1 128892782caSMike Christie #define TCMU_DEV_BIT_BLOCKED 2 1297c9e7a6fSAndy Grover unsigned long flags; 1307c9e7a6fSAndy Grover 1317c9e7a6fSAndy Grover struct uio_info uio_info; 1327c9e7a6fSAndy Grover 133b6df4b79SXiubo Li struct inode *inode; 134b6df4b79SXiubo Li 1357c9e7a6fSAndy Grover struct tcmu_mailbox *mb_addr; 1367c9e7a6fSAndy Grover size_t dev_size; 1377c9e7a6fSAndy Grover u32 cmdr_size; 1387c9e7a6fSAndy Grover u32 cmdr_last_cleaned; 1393d9b9555SAndy Grover /* Offset of data area from start of mb */ 14026418649SSheng Yang /* Must add data_off and mb_addr to get the address */ 1417c9e7a6fSAndy Grover size_t data_off; 1427c9e7a6fSAndy Grover size_t data_size; 14380eb8761SMike Christie uint32_t max_blocks; 14480eb8761SMike Christie size_t ring_size; 14526418649SSheng Yang 146b6df4b79SXiubo Li struct mutex cmdr_lock; 147af1dd7ffSMike Christie struct list_head cmdr_queue; 1487c9e7a6fSAndy Grover 149141685a3SXiubo Li uint32_t dbi_max; 150b6df4b79SXiubo Li uint32_t dbi_thresh; 15180eb8761SMike Christie unsigned long *data_bitmap; 152141685a3SXiubo Li struct radix_tree_root data_blocks; 153141685a3SXiubo Li 1547c9e7a6fSAndy Grover struct idr commands; 1557c9e7a6fSAndy Grover 1569103575aSMike Christie struct timer_list cmd_timer; 157af980e46SMike Christie unsigned int cmd_time_out; 1589103575aSMike Christie 1599103575aSMike Christie struct timer_list qfull_timer; 1609103575aSMike Christie int qfull_time_out; 1619103575aSMike Christie 162488ebe4cSMike Christie struct list_head timedout_entry; 1637c9e7a6fSAndy Grover 164b3af66e2SMike Christie spinlock_t nl_cmd_lock; 165b3af66e2SMike Christie struct tcmu_nl_cmd curr_nl_cmd; 166b3af66e2SMike Christie /* wake up threads waiting on curr_nl_cmd */ 167b3af66e2SMike Christie wait_queue_head_t nl_cmd_wq; 168b3af66e2SMike Christie 1697c9e7a6fSAndy Grover char dev_config[TCMU_CONFIG_LEN]; 170b849b456SKenjiro Nakayama 
171b849b456SKenjiro Nakayama int nl_reply_supported; 1727c9e7a6fSAndy Grover }; 1737c9e7a6fSAndy Grover 1747c9e7a6fSAndy Grover #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev) 1757c9e7a6fSAndy Grover 1767c9e7a6fSAndy Grover #define CMDR_OFF sizeof(struct tcmu_mailbox) 1777c9e7a6fSAndy Grover 1787c9e7a6fSAndy Grover struct tcmu_cmd { 1797c9e7a6fSAndy Grover struct se_cmd *se_cmd; 1807c9e7a6fSAndy Grover struct tcmu_dev *tcmu_dev; 181af1dd7ffSMike Christie struct list_head cmdr_queue_entry; 1827c9e7a6fSAndy Grover 1837c9e7a6fSAndy Grover uint16_t cmd_id; 1847c9e7a6fSAndy Grover 18526418649SSheng Yang /* Can't use se_cmd when cleaning up expired cmds, because if 1867c9e7a6fSAndy Grover cmd has been completed then accessing se_cmd is off limits */ 187141685a3SXiubo Li uint32_t dbi_cnt; 188141685a3SXiubo Li uint32_t dbi_cur; 189141685a3SXiubo Li uint32_t *dbi; 1907c9e7a6fSAndy Grover 1917c9e7a6fSAndy Grover unsigned long deadline; 1927c9e7a6fSAndy Grover 1937c9e7a6fSAndy Grover #define TCMU_CMD_BIT_EXPIRED 0 1947c9e7a6fSAndy Grover unsigned long flags; 1957c9e7a6fSAndy Grover }; 196af1dd7ffSMike Christie /* 197af1dd7ffSMike Christie * To avoid dead lock the mutex lock order should always be: 198af1dd7ffSMike Christie * 199af1dd7ffSMike Christie * mutex_lock(&root_udev_mutex); 200af1dd7ffSMike Christie * ... 201af1dd7ffSMike Christie * mutex_lock(&tcmu_dev->cmdr_lock); 202af1dd7ffSMike Christie * mutex_unlock(&tcmu_dev->cmdr_lock); 203af1dd7ffSMike Christie * ... 
204af1dd7ffSMike Christie * mutex_unlock(&root_udev_mutex); 205af1dd7ffSMike Christie */ 206b6df4b79SXiubo Li static DEFINE_MUTEX(root_udev_mutex); 207b6df4b79SXiubo Li static LIST_HEAD(root_udev); 208b6df4b79SXiubo Li 209488ebe4cSMike Christie static DEFINE_SPINLOCK(timed_out_udevs_lock); 210488ebe4cSMike Christie static LIST_HEAD(timed_out_udevs); 211488ebe4cSMike Christie 21280eb8761SMike Christie static struct kmem_cache *tcmu_cmd_cache; 21380eb8761SMike Christie 214b6df4b79SXiubo Li static atomic_t global_db_count = ATOMIC_INIT(0); 215af1dd7ffSMike Christie static struct delayed_work tcmu_unmap_work; 21680eb8761SMike Christie static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF; 217b6df4b79SXiubo Li 21880eb8761SMike Christie static int tcmu_set_global_max_data_area(const char *str, 21980eb8761SMike Christie const struct kernel_param *kp) 22080eb8761SMike Christie { 22180eb8761SMike Christie int ret, max_area_mb; 22280eb8761SMike Christie 22380eb8761SMike Christie ret = kstrtoint(str, 10, &max_area_mb); 22480eb8761SMike Christie if (ret) 22580eb8761SMike Christie return -EINVAL; 22680eb8761SMike Christie 22780eb8761SMike Christie if (max_area_mb <= 0) { 22880eb8761SMike Christie pr_err("global_max_data_area must be larger than 0.\n"); 22980eb8761SMike Christie return -EINVAL; 23080eb8761SMike Christie } 23180eb8761SMike Christie 23280eb8761SMike Christie tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb); 23380eb8761SMike Christie if (atomic_read(&global_db_count) > tcmu_global_max_blocks) 23480eb8761SMike Christie schedule_delayed_work(&tcmu_unmap_work, 0); 23580eb8761SMike Christie else 23680eb8761SMike Christie cancel_delayed_work_sync(&tcmu_unmap_work); 23780eb8761SMike Christie 23880eb8761SMike Christie return 0; 23980eb8761SMike Christie } 24080eb8761SMike Christie 24180eb8761SMike Christie static int tcmu_get_global_max_data_area(char *buffer, 24280eb8761SMike Christie const struct kernel_param *kp) 24380eb8761SMike Christie { 
24480eb8761SMike Christie return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks)); 24580eb8761SMike Christie } 24680eb8761SMike Christie 24780eb8761SMike Christie static const struct kernel_param_ops tcmu_global_max_data_area_op = { 24880eb8761SMike Christie .set = tcmu_set_global_max_data_area, 24980eb8761SMike Christie .get = tcmu_get_global_max_data_area, 25080eb8761SMike Christie }; 25180eb8761SMike Christie 25280eb8761SMike Christie module_param_cb(global_max_data_area_mb, &tcmu_global_max_data_area_op, NULL, 25380eb8761SMike Christie S_IWUSR | S_IRUGO); 25480eb8761SMike Christie MODULE_PARM_DESC(global_max_data_area_mb, 25580eb8761SMike Christie "Max MBs allowed to be allocated to all the tcmu device's " 25680eb8761SMike Christie "data areas."); 2577c9e7a6fSAndy Grover 2587c9e7a6fSAndy Grover /* multicast group */ 2597c9e7a6fSAndy Grover enum tcmu_multicast_groups { 2607c9e7a6fSAndy Grover TCMU_MCGRP_CONFIG, 2617c9e7a6fSAndy Grover }; 2627c9e7a6fSAndy Grover 2637c9e7a6fSAndy Grover static const struct genl_multicast_group tcmu_mcgrps[] = { 2647c9e7a6fSAndy Grover [TCMU_MCGRP_CONFIG] = { .name = "config", }, 2657c9e7a6fSAndy Grover }; 2667c9e7a6fSAndy Grover 267b3af66e2SMike Christie static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = { 268b3af66e2SMike Christie [TCMU_ATTR_DEVICE] = { .type = NLA_STRING }, 269b3af66e2SMike Christie [TCMU_ATTR_MINOR] = { .type = NLA_U32 }, 270b3af66e2SMike Christie [TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 }, 271b3af66e2SMike Christie [TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 }, 272b3af66e2SMike Christie [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 }, 273b3af66e2SMike Christie }; 274b3af66e2SMike Christie 275b3af66e2SMike Christie static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd) 276b3af66e2SMike Christie { 277b3af66e2SMike Christie struct se_device *dev; 278b3af66e2SMike Christie struct tcmu_dev *udev; 279b3af66e2SMike Christie struct tcmu_nl_cmd *nl_cmd; 280b3af66e2SMike 
Christie int dev_id, rc, ret = 0; 281b3af66e2SMike Christie bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE); 282b3af66e2SMike Christie 283b3af66e2SMike Christie if (!info->attrs[TCMU_ATTR_CMD_STATUS] || 284b3af66e2SMike Christie !info->attrs[TCMU_ATTR_DEVICE_ID]) { 285b3af66e2SMike Christie printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n"); 286b3af66e2SMike Christie return -EINVAL; 287b3af66e2SMike Christie } 288b3af66e2SMike Christie 289b3af66e2SMike Christie dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]); 290b3af66e2SMike Christie rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]); 291b3af66e2SMike Christie 292b3af66e2SMike Christie dev = target_find_device(dev_id, !is_removed); 293b3af66e2SMike Christie if (!dev) { 294b3af66e2SMike Christie printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n", 295b3af66e2SMike Christie completed_cmd, rc, dev_id); 296b3af66e2SMike Christie return -ENODEV; 297b3af66e2SMike Christie } 298b3af66e2SMike Christie udev = TCMU_DEV(dev); 299b3af66e2SMike Christie 300b3af66e2SMike Christie spin_lock(&udev->nl_cmd_lock); 301b3af66e2SMike Christie nl_cmd = &udev->curr_nl_cmd; 302b3af66e2SMike Christie 303b3af66e2SMike Christie pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id, 304b3af66e2SMike Christie nl_cmd->cmd, completed_cmd, rc); 305b3af66e2SMike Christie 306b3af66e2SMike Christie if (nl_cmd->cmd != completed_cmd) { 307b3af66e2SMike Christie printk(KERN_ERR "Mismatched commands (Expecting reply for %d. 
Current %d).\n", 308b3af66e2SMike Christie completed_cmd, nl_cmd->cmd); 309b3af66e2SMike Christie ret = -EINVAL; 310b3af66e2SMike Christie } else { 311b3af66e2SMike Christie nl_cmd->status = rc; 312b3af66e2SMike Christie } 313b3af66e2SMike Christie 314b3af66e2SMike Christie spin_unlock(&udev->nl_cmd_lock); 315b3af66e2SMike Christie if (!is_removed) 316b3af66e2SMike Christie target_undepend_item(&dev->dev_group.cg_item); 317b3af66e2SMike Christie if (!ret) 318b3af66e2SMike Christie complete(&nl_cmd->complete); 319b3af66e2SMike Christie return ret; 320b3af66e2SMike Christie } 321b3af66e2SMike Christie 322b3af66e2SMike Christie static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info) 323b3af66e2SMike Christie { 324b3af66e2SMike Christie return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE); 325b3af66e2SMike Christie } 326b3af66e2SMike Christie 327b3af66e2SMike Christie static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info) 328b3af66e2SMike Christie { 329b3af66e2SMike Christie return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE); 330b3af66e2SMike Christie } 331b3af66e2SMike Christie 332b3af66e2SMike Christie static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb, 333b3af66e2SMike Christie struct genl_info *info) 334b3af66e2SMike Christie { 335b3af66e2SMike Christie return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE); 336b3af66e2SMike Christie } 337b3af66e2SMike Christie 338b3af66e2SMike Christie static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info) 339b3af66e2SMike Christie { 340b3af66e2SMike Christie if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) { 341b3af66e2SMike Christie tcmu_kern_cmd_reply_supported = 342b3af66e2SMike Christie nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]); 343b3af66e2SMike Christie printk(KERN_INFO "tcmu daemon: command reply support %u.\n", 344b3af66e2SMike Christie tcmu_kern_cmd_reply_supported); 345b3af66e2SMike Christie } 346b3af66e2SMike Christie 
347b3af66e2SMike Christie return 0; 348b3af66e2SMike Christie } 349b3af66e2SMike Christie 350b3af66e2SMike Christie static const struct genl_ops tcmu_genl_ops[] = { 351b3af66e2SMike Christie { 352b3af66e2SMike Christie .cmd = TCMU_CMD_SET_FEATURES, 353b3af66e2SMike Christie .flags = GENL_ADMIN_PERM, 354b3af66e2SMike Christie .policy = tcmu_attr_policy, 355b3af66e2SMike Christie .doit = tcmu_genl_set_features, 356b3af66e2SMike Christie }, 357b3af66e2SMike Christie { 358b3af66e2SMike Christie .cmd = TCMU_CMD_ADDED_DEVICE_DONE, 359b3af66e2SMike Christie .flags = GENL_ADMIN_PERM, 360b3af66e2SMike Christie .policy = tcmu_attr_policy, 361b3af66e2SMike Christie .doit = tcmu_genl_add_dev_done, 362b3af66e2SMike Christie }, 363b3af66e2SMike Christie { 364b3af66e2SMike Christie .cmd = TCMU_CMD_REMOVED_DEVICE_DONE, 365b3af66e2SMike Christie .flags = GENL_ADMIN_PERM, 366b3af66e2SMike Christie .policy = tcmu_attr_policy, 367b3af66e2SMike Christie .doit = tcmu_genl_rm_dev_done, 368b3af66e2SMike Christie }, 369b3af66e2SMike Christie { 370b3af66e2SMike Christie .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE, 371b3af66e2SMike Christie .flags = GENL_ADMIN_PERM, 372b3af66e2SMike Christie .policy = tcmu_attr_policy, 373b3af66e2SMike Christie .doit = tcmu_genl_reconfig_dev_done, 374b3af66e2SMike Christie }, 375b3af66e2SMike Christie }; 376b3af66e2SMike Christie 3777c9e7a6fSAndy Grover /* Our generic netlink family */ 37856989f6dSJohannes Berg static struct genl_family tcmu_genl_family __ro_after_init = { 379489111e5SJohannes Berg .module = THIS_MODULE, 3807c9e7a6fSAndy Grover .hdrsize = 0, 3817c9e7a6fSAndy Grover .name = "TCM-USER", 382b3af66e2SMike Christie .version = 2, 3837c9e7a6fSAndy Grover .maxattr = TCMU_ATTR_MAX, 3847c9e7a6fSAndy Grover .mcgrps = tcmu_mcgrps, 3857c9e7a6fSAndy Grover .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps), 38620c08b36SSheng Yang .netnsok = true, 387b3af66e2SMike Christie .ops = tcmu_genl_ops, 388b3af66e2SMike Christie .n_ops = ARRAY_SIZE(tcmu_genl_ops), 3897c9e7a6fSAndy 
Grover }; 3907c9e7a6fSAndy Grover 391141685a3SXiubo Li #define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index)) 392141685a3SXiubo Li #define tcmu_cmd_reset_dbi_cur(cmd) tcmu_cmd_set_dbi_cur(cmd, 0) 393141685a3SXiubo Li #define tcmu_cmd_set_dbi(cmd, index) ((cmd)->dbi[(cmd)->dbi_cur++] = (index)) 394141685a3SXiubo Li #define tcmu_cmd_get_dbi(cmd) ((cmd)->dbi[(cmd)->dbi_cur++]) 395141685a3SXiubo Li 396b6df4b79SXiubo Li static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len) 397141685a3SXiubo Li { 398141685a3SXiubo Li struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 399141685a3SXiubo Li uint32_t i; 400141685a3SXiubo Li 401b6df4b79SXiubo Li for (i = 0; i < len; i++) 402141685a3SXiubo Li clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap); 403141685a3SXiubo Li } 404141685a3SXiubo Li 405b6df4b79SXiubo Li static inline bool tcmu_get_empty_block(struct tcmu_dev *udev, 406b6df4b79SXiubo Li struct tcmu_cmd *tcmu_cmd) 407141685a3SXiubo Li { 408b6df4b79SXiubo Li struct page *page; 409b6df4b79SXiubo Li int ret, dbi; 410141685a3SXiubo Li 411b6df4b79SXiubo Li dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh); 412b6df4b79SXiubo Li if (dbi == udev->dbi_thresh) 413b6df4b79SXiubo Li return false; 414b6df4b79SXiubo Li 415b6df4b79SXiubo Li page = radix_tree_lookup(&udev->data_blocks, dbi); 416b6df4b79SXiubo Li if (!page) { 417b6df4b79SXiubo Li if (atomic_add_return(1, &global_db_count) > 41880eb8761SMike Christie tcmu_global_max_blocks) 419af1dd7ffSMike Christie schedule_delayed_work(&tcmu_unmap_work, 0); 420b6df4b79SXiubo Li 421b6df4b79SXiubo Li /* try to get new page from the mm */ 422b6df4b79SXiubo Li page = alloc_page(GFP_KERNEL); 423b6df4b79SXiubo Li if (!page) 424daf78c30SXiubo Li goto err_alloc; 425b6df4b79SXiubo Li 426b6df4b79SXiubo Li ret = radix_tree_insert(&udev->data_blocks, dbi, page); 427daf78c30SXiubo Li if (ret) 428daf78c30SXiubo Li goto err_insert; 429b6df4b79SXiubo Li } 430b6df4b79SXiubo Li 431141685a3SXiubo Li if (dbi > udev->dbi_max) 
432141685a3SXiubo Li udev->dbi_max = dbi; 433141685a3SXiubo Li 434141685a3SXiubo Li set_bit(dbi, udev->data_bitmap); 435b6df4b79SXiubo Li tcmu_cmd_set_dbi(tcmu_cmd, dbi); 436141685a3SXiubo Li 437b6df4b79SXiubo Li return true; 438daf78c30SXiubo Li err_insert: 439daf78c30SXiubo Li __free_page(page); 440daf78c30SXiubo Li err_alloc: 441daf78c30SXiubo Li atomic_dec(&global_db_count); 442daf78c30SXiubo Li return false; 443141685a3SXiubo Li } 444141685a3SXiubo Li 445b6df4b79SXiubo Li static bool tcmu_get_empty_blocks(struct tcmu_dev *udev, 446b6df4b79SXiubo Li struct tcmu_cmd *tcmu_cmd) 447b6df4b79SXiubo Li { 448b6df4b79SXiubo Li int i; 449b6df4b79SXiubo Li 450b6df4b79SXiubo Li for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) { 451b6df4b79SXiubo Li if (!tcmu_get_empty_block(udev, tcmu_cmd)) 452af1dd7ffSMike Christie return false; 453141685a3SXiubo Li } 454b6df4b79SXiubo Li return true; 455141685a3SXiubo Li } 456141685a3SXiubo Li 457b6df4b79SXiubo Li static inline struct page * 458b6df4b79SXiubo Li tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi) 459141685a3SXiubo Li { 460141685a3SXiubo Li return radix_tree_lookup(&udev->data_blocks, dbi); 461141685a3SXiubo Li } 462141685a3SXiubo Li 463141685a3SXiubo Li static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd) 464141685a3SXiubo Li { 465141685a3SXiubo Li kfree(tcmu_cmd->dbi); 466141685a3SXiubo Li kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); 467141685a3SXiubo Li } 468141685a3SXiubo Li 469141685a3SXiubo Li static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd) 470141685a3SXiubo Li { 471141685a3SXiubo Li struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 472141685a3SXiubo Li size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE); 473141685a3SXiubo Li 474141685a3SXiubo Li if (se_cmd->se_cmd_flags & SCF_BIDI) { 475141685a3SXiubo Li BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents)); 476141685a3SXiubo Li data_length += round_up(se_cmd->t_bidi_data_sg->length, 477141685a3SXiubo Li 
DATA_BLOCK_SIZE); 478141685a3SXiubo Li } 479141685a3SXiubo Li 480141685a3SXiubo Li return data_length; 481141685a3SXiubo Li } 482141685a3SXiubo Li 483141685a3SXiubo Li static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd) 484141685a3SXiubo Li { 485141685a3SXiubo Li size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd); 486141685a3SXiubo Li 487141685a3SXiubo Li return data_length / DATA_BLOCK_SIZE; 488141685a3SXiubo Li } 489141685a3SXiubo Li 4907c9e7a6fSAndy Grover static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) 4917c9e7a6fSAndy Grover { 4927c9e7a6fSAndy Grover struct se_device *se_dev = se_cmd->se_dev; 4937c9e7a6fSAndy Grover struct tcmu_dev *udev = TCMU_DEV(se_dev); 4947c9e7a6fSAndy Grover struct tcmu_cmd *tcmu_cmd; 4957c9e7a6fSAndy Grover 4967c9e7a6fSAndy Grover tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL); 4977c9e7a6fSAndy Grover if (!tcmu_cmd) 4987c9e7a6fSAndy Grover return NULL; 4997c9e7a6fSAndy Grover 500af1dd7ffSMike Christie INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry); 5017c9e7a6fSAndy Grover tcmu_cmd->se_cmd = se_cmd; 5027c9e7a6fSAndy Grover tcmu_cmd->tcmu_dev = udev; 5037c9e7a6fSAndy Grover 504141685a3SXiubo Li tcmu_cmd_reset_dbi_cur(tcmu_cmd); 505141685a3SXiubo Li tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd); 506141685a3SXiubo Li tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t), 507141685a3SXiubo Li GFP_KERNEL); 508141685a3SXiubo Li if (!tcmu_cmd->dbi) { 509141685a3SXiubo Li kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); 510141685a3SXiubo Li return NULL; 511141685a3SXiubo Li } 512141685a3SXiubo Li 5137c9e7a6fSAndy Grover return tcmu_cmd; 5147c9e7a6fSAndy Grover } 5157c9e7a6fSAndy Grover 5167c9e7a6fSAndy Grover static inline void tcmu_flush_dcache_range(void *vaddr, size_t size) 5177c9e7a6fSAndy Grover { 518b75d8063SGeliang Tang unsigned long offset = offset_in_page(vaddr); 51926d2b310Stangwenji void *start = vaddr - offset; 5207c9e7a6fSAndy Grover 5217c9e7a6fSAndy Grover size = 
round_up(size+offset, PAGE_SIZE); 5227c9e7a6fSAndy Grover 5237c9e7a6fSAndy Grover while (size) { 52426d2b310Stangwenji flush_dcache_page(virt_to_page(start)); 52526d2b310Stangwenji start += PAGE_SIZE; 5267c9e7a6fSAndy Grover size -= PAGE_SIZE; 5277c9e7a6fSAndy Grover } 5287c9e7a6fSAndy Grover } 5297c9e7a6fSAndy Grover 5307c9e7a6fSAndy Grover /* 5317c9e7a6fSAndy Grover * Some ring helper functions. We don't assume size is a power of 2 so 5327c9e7a6fSAndy Grover * we can't use circ_buf.h. 5337c9e7a6fSAndy Grover */ 5347c9e7a6fSAndy Grover static inline size_t spc_used(size_t head, size_t tail, size_t size) 5357c9e7a6fSAndy Grover { 5367c9e7a6fSAndy Grover int diff = head - tail; 5377c9e7a6fSAndy Grover 5387c9e7a6fSAndy Grover if (diff >= 0) 5397c9e7a6fSAndy Grover return diff; 5407c9e7a6fSAndy Grover else 5417c9e7a6fSAndy Grover return size + diff; 5427c9e7a6fSAndy Grover } 5437c9e7a6fSAndy Grover 5447c9e7a6fSAndy Grover static inline size_t spc_free(size_t head, size_t tail, size_t size) 5457c9e7a6fSAndy Grover { 5467c9e7a6fSAndy Grover /* Keep 1 byte unused or we can't tell full from empty */ 5477c9e7a6fSAndy Grover return (size - spc_used(head, tail, size) - 1); 5487c9e7a6fSAndy Grover } 5497c9e7a6fSAndy Grover 5507c9e7a6fSAndy Grover static inline size_t head_to_end(size_t head, size_t size) 5517c9e7a6fSAndy Grover { 5527c9e7a6fSAndy Grover return size - head; 5537c9e7a6fSAndy Grover } 5547c9e7a6fSAndy Grover 5553e609135SXiubo Li static inline void new_iov(struct iovec **iov, int *iov_cnt) 556f1dbd087SSheng Yang { 557f1dbd087SSheng Yang struct iovec *iovec; 558f1dbd087SSheng Yang 559f1dbd087SSheng Yang if (*iov_cnt != 0) 560f1dbd087SSheng Yang (*iov)++; 561f1dbd087SSheng Yang (*iov_cnt)++; 562f1dbd087SSheng Yang 563f1dbd087SSheng Yang iovec = *iov; 564f1dbd087SSheng Yang memset(iovec, 0, sizeof(struct iovec)); 565f1dbd087SSheng Yang } 566f1dbd087SSheng Yang 5677c9e7a6fSAndy Grover #define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + 
used) % size) 5687c9e7a6fSAndy Grover 56926418649SSheng Yang /* offset is relative to mb_addr */ 570141685a3SXiubo Li static inline size_t get_block_offset_user(struct tcmu_dev *dev, 571141685a3SXiubo Li int dbi, int remaining) 57226418649SSheng Yang { 573141685a3SXiubo Li return dev->data_off + dbi * DATA_BLOCK_SIZE + 57426418649SSheng Yang DATA_BLOCK_SIZE - remaining; 57526418649SSheng Yang } 57626418649SSheng Yang 577daf78c30SXiubo Li static inline size_t iov_tail(struct iovec *iov) 57826418649SSheng Yang { 57926418649SSheng Yang return (size_t)iov->iov_base + iov->iov_len; 58026418649SSheng Yang } 58126418649SSheng Yang 5821a1fc0b8SMike Christie static void scatter_data_area(struct tcmu_dev *udev, 583141685a3SXiubo Li struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg, 584141685a3SXiubo Li unsigned int data_nents, struct iovec **iov, 585141685a3SXiubo Li int *iov_cnt, bool copy_data) 586f97ec7dbSIlias Tsitsimpis { 587141685a3SXiubo Li int i, dbi; 58826418649SSheng Yang int block_remaining = 0; 589141685a3SXiubo Li void *from, *to = NULL; 590141685a3SXiubo Li size_t copy_bytes, to_offset, offset; 591f97ec7dbSIlias Tsitsimpis struct scatterlist *sg; 592b6df4b79SXiubo Li struct page *page; 593f97ec7dbSIlias Tsitsimpis 594f97ec7dbSIlias Tsitsimpis for_each_sg(data_sg, sg, data_nents, i) { 59526418649SSheng Yang int sg_remaining = sg->length; 596f97ec7dbSIlias Tsitsimpis from = kmap_atomic(sg_page(sg)) + sg->offset; 59726418649SSheng Yang while (sg_remaining > 0) { 59826418649SSheng Yang if (block_remaining == 0) { 599b6df4b79SXiubo Li if (to) 600b6df4b79SXiubo Li kunmap_atomic(to); 601b6df4b79SXiubo Li 60226418649SSheng Yang block_remaining = DATA_BLOCK_SIZE; 603b6df4b79SXiubo Li dbi = tcmu_cmd_get_dbi(tcmu_cmd); 604b6df4b79SXiubo Li page = tcmu_get_block_page(udev, dbi); 605b6df4b79SXiubo Li to = kmap_atomic(page); 606141685a3SXiubo Li } 607141685a3SXiubo Li 6083e609135SXiubo Li /* 6093e609135SXiubo Li * Covert to virtual offset of the ring data area. 
6103e609135SXiubo Li */ 611141685a3SXiubo Li to_offset = get_block_offset_user(udev, dbi, 61226418649SSheng Yang block_remaining); 613141685a3SXiubo Li 6143e609135SXiubo Li /* 6153e609135SXiubo Li * The following code will gather and map the blocks 6163e609135SXiubo Li * to the same iovec when the blocks are all next to 6173e609135SXiubo Li * each other. 6183e609135SXiubo Li */ 6193e609135SXiubo Li copy_bytes = min_t(size_t, sg_remaining, 6203e609135SXiubo Li block_remaining); 62126418649SSheng Yang if (*iov_cnt != 0 && 622daf78c30SXiubo Li to_offset == iov_tail(*iov)) { 6233e609135SXiubo Li /* 6243e609135SXiubo Li * Will append to the current iovec, because 6253e609135SXiubo Li * the current block page is next to the 6263e609135SXiubo Li * previous one. 6273e609135SXiubo Li */ 628f1dbd087SSheng Yang (*iov)->iov_len += copy_bytes; 62926418649SSheng Yang } else { 6303e609135SXiubo Li /* 6313e609135SXiubo Li * Will allocate a new iovec because we are 6323e609135SXiubo Li * first time here or the current block page 6333e609135SXiubo Li * is not next to the previous one. 
6343e609135SXiubo Li */ 6353e609135SXiubo Li new_iov(iov, iov_cnt); 63626418649SSheng Yang (*iov)->iov_base = (void __user *)to_offset; 637f97ec7dbSIlias Tsitsimpis (*iov)->iov_len = copy_bytes; 63826418649SSheng Yang } 6393e609135SXiubo Li 640f97ec7dbSIlias Tsitsimpis if (copy_data) { 641c542942cSXiubo Li offset = DATA_BLOCK_SIZE - block_remaining; 642c542942cSXiubo Li memcpy(to + offset, 643c542942cSXiubo Li from + sg->length - sg_remaining, 64426418649SSheng Yang copy_bytes); 645f97ec7dbSIlias Tsitsimpis tcmu_flush_dcache_range(to, copy_bytes); 646f97ec7dbSIlias Tsitsimpis } 6473e609135SXiubo Li 64826418649SSheng Yang sg_remaining -= copy_bytes; 64926418649SSheng Yang block_remaining -= copy_bytes; 650f97ec7dbSIlias Tsitsimpis } 651e2e21bd8SSagi Grimberg kunmap_atomic(from - sg->offset); 652f97ec7dbSIlias Tsitsimpis } 6533e609135SXiubo Li 654b6df4b79SXiubo Li if (to) 655b6df4b79SXiubo Li kunmap_atomic(to); 6560c28481fSSheng Yang } 6570c28481fSSheng Yang 658a5d68ba8SXiubo Li static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, 659a5d68ba8SXiubo Li bool bidi) 660f97ec7dbSIlias Tsitsimpis { 661a5d68ba8SXiubo Li struct se_cmd *se_cmd = cmd->se_cmd; 662141685a3SXiubo Li int i, dbi; 66326418649SSheng Yang int block_remaining = 0; 664b6df4b79SXiubo Li void *from = NULL, *to; 665141685a3SXiubo Li size_t copy_bytes, offset; 666a5d68ba8SXiubo Li struct scatterlist *sg, *data_sg; 667b6df4b79SXiubo Li struct page *page; 668a5d68ba8SXiubo Li unsigned int data_nents; 669141685a3SXiubo Li uint32_t count = 0; 670a5d68ba8SXiubo Li 671a5d68ba8SXiubo Li if (!bidi) { 672a5d68ba8SXiubo Li data_sg = se_cmd->t_data_sg; 673a5d68ba8SXiubo Li data_nents = se_cmd->t_data_nents; 674a5d68ba8SXiubo Li } else { 675a5d68ba8SXiubo Li 676a5d68ba8SXiubo Li /* 677a5d68ba8SXiubo Li * For bidi case, the first count blocks are for Data-Out 678a5d68ba8SXiubo Li * buffer blocks, and before gathering the Data-In buffer 679a5d68ba8SXiubo Li * the Data-Out buffer blocks should be 
discarded. 680a5d68ba8SXiubo Li */ 681a5d68ba8SXiubo Li count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE); 682a5d68ba8SXiubo Li 683a5d68ba8SXiubo Li data_sg = se_cmd->t_bidi_data_sg; 684a5d68ba8SXiubo Li data_nents = se_cmd->t_bidi_data_nents; 685a5d68ba8SXiubo Li } 686f97ec7dbSIlias Tsitsimpis 687141685a3SXiubo Li tcmu_cmd_set_dbi_cur(cmd, count); 688141685a3SXiubo Li 689f97ec7dbSIlias Tsitsimpis for_each_sg(data_sg, sg, data_nents, i) { 69026418649SSheng Yang int sg_remaining = sg->length; 691f97ec7dbSIlias Tsitsimpis to = kmap_atomic(sg_page(sg)) + sg->offset; 69226418649SSheng Yang while (sg_remaining > 0) { 69326418649SSheng Yang if (block_remaining == 0) { 694b6df4b79SXiubo Li if (from) 695b6df4b79SXiubo Li kunmap_atomic(from); 696b6df4b79SXiubo Li 69726418649SSheng Yang block_remaining = DATA_BLOCK_SIZE; 698141685a3SXiubo Li dbi = tcmu_cmd_get_dbi(cmd); 699b6df4b79SXiubo Li page = tcmu_get_block_page(udev, dbi); 700b6df4b79SXiubo Li from = kmap_atomic(page); 70126418649SSheng Yang } 70226418649SSheng Yang copy_bytes = min_t(size_t, sg_remaining, 70326418649SSheng Yang block_remaining); 704141685a3SXiubo Li offset = DATA_BLOCK_SIZE - block_remaining; 705f97ec7dbSIlias Tsitsimpis tcmu_flush_dcache_range(from, copy_bytes); 706c542942cSXiubo Li memcpy(to + sg->length - sg_remaining, from + offset, 70726418649SSheng Yang copy_bytes); 708f97ec7dbSIlias Tsitsimpis 70926418649SSheng Yang sg_remaining -= copy_bytes; 71026418649SSheng Yang block_remaining -= copy_bytes; 711f97ec7dbSIlias Tsitsimpis } 712e2e21bd8SSagi Grimberg kunmap_atomic(to - sg->offset); 713f97ec7dbSIlias Tsitsimpis } 714b6df4b79SXiubo Li if (from) 715b6df4b79SXiubo Li kunmap_atomic(from); 716f97ec7dbSIlias Tsitsimpis } 717f97ec7dbSIlias Tsitsimpis 718b6df4b79SXiubo Li static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh) 71926418649SSheng Yang { 7203c0f26ffSMike Christie return thresh - bitmap_weight(bitmap, thresh); 72126418649SSheng Yang } 72226418649SSheng Yang 
/*
 * We can't queue a command until we have space available on the cmd ring *and*
 * space available on the data area.
 *
 * Called with ring lock held.
 */
static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
		size_t cmd_size, size_t data_needed)
{
	struct tcmu_mailbox *mb = udev->mb_addr;
	/* Round the data requirement up to whole DATA_BLOCK_SIZE blocks. */
	uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
				/ DATA_BLOCK_SIZE;
	size_t space, cmd_needed;
	u32 cmd_head;

	/* mb is shared with userspace; re-read it before trusting cmd_head. */
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */

	/*
	 * If cmd end-of-ring space is too small then we need space for a NOP plus
	 * original cmd - cmds are internally contiguous.
	 */
	if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
		cmd_needed = cmd_size;
	else
		cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);

	space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
	if (space < cmd_needed) {
		pr_debug("no cmd space: %u %u %u\n", cmd_head,
		       udev->cmdr_last_cleaned, udev->cmdr_size);
		return false;
	}

	/* try to check and get the data blocks as needed */
	space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
	if ((space * DATA_BLOCK_SIZE) < data_needed) {
		/*
		 * Not enough free blocks below the current threshold; see
		 * if growing the threshold toward max_blocks would cover
		 * the request.
		 */
		unsigned long blocks_left =
				(udev->max_blocks - udev->dbi_thresh) + space;

		if (blocks_left < blocks_needed) {
			pr_debug("no data space: only %lu available, but ask for %zu\n",
					blocks_left * DATA_BLOCK_SIZE,
					data_needed);
			return false;
		}

		/* Grow the usable window, capped at the device maximum. */
		udev->dbi_thresh += blocks_needed;
		if (udev->dbi_thresh > udev->max_blocks)
			udev->dbi_thresh = udev->max_blocks;
	}

	/* Reserve the blocks for this command (may still fail). */
	return tcmu_get_empty_blocks(udev, cmd);
}

/*
 * Size of the fixed part of a ring entry able to hold iov_cnt iovecs,
 * but never smaller than a full tcmu_cmd_entry.
 */
static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
{
	return max(offsetof(struct tcmu_cmd_entry, req.iov[iov_cnt]),
			sizeof(struct tcmu_cmd_entry));
}

785fe25cc34SXiubo Li static inline size_t tcmu_cmd_get_cmd_size(struct tcmu_cmd *tcmu_cmd, 786fe25cc34SXiubo Li size_t base_command_size) 787fe25cc34SXiubo Li { 788fe25cc34SXiubo Li struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 789fe25cc34SXiubo Li size_t command_size; 790fe25cc34SXiubo Li 791fe25cc34SXiubo Li command_size = base_command_size + 792fe25cc34SXiubo Li round_up(scsi_command_size(se_cmd->t_task_cdb), 793fe25cc34SXiubo Li TCMU_OP_ALIGN_SIZE); 794fe25cc34SXiubo Li 795fe25cc34SXiubo Li WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1)); 796fe25cc34SXiubo Li 797fe25cc34SXiubo Li return command_size; 798fe25cc34SXiubo Li } 799fe25cc34SXiubo Li 8009103575aSMike Christie static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo, 8019103575aSMike Christie struct timer_list *timer) 8020d44374cSMike Christie { 8030d44374cSMike Christie struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 8040d44374cSMike Christie int cmd_id; 8050d44374cSMike Christie 8060d44374cSMike Christie if (tcmu_cmd->cmd_id) 8079103575aSMike Christie goto setup_timer; 8080d44374cSMike Christie 8090d44374cSMike Christie cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT); 8100d44374cSMike Christie if (cmd_id < 0) { 8110d44374cSMike Christie pr_err("tcmu: Could not allocate cmd id.\n"); 8120d44374cSMike Christie return cmd_id; 8130d44374cSMike Christie } 8140d44374cSMike Christie tcmu_cmd->cmd_id = cmd_id; 8150d44374cSMike Christie 816af1dd7ffSMike Christie pr_debug("allocated cmd %u for dev %s tmo %lu\n", tcmu_cmd->cmd_id, 817af1dd7ffSMike Christie udev->name, tmo / MSEC_PER_SEC); 8180d44374cSMike Christie 8199103575aSMike Christie setup_timer: 8209103575aSMike Christie if (!tmo) 8219103575aSMike Christie return 0; 8229103575aSMike Christie 8230d44374cSMike Christie tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo)); 8249103575aSMike Christie mod_timer(timer, tcmu_cmd->deadline); 8250d44374cSMike Christie return 0; 8260d44374cSMike Christie } 
8270d44374cSMike Christie 828af1dd7ffSMike Christie static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd) 829af1dd7ffSMike Christie { 830af1dd7ffSMike Christie struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 8319103575aSMike Christie unsigned int tmo; 832af1dd7ffSMike Christie int ret; 833af1dd7ffSMike Christie 8349103575aSMike Christie /* 8359103575aSMike Christie * For backwards compat if qfull_time_out is not set use 8369103575aSMike Christie * cmd_time_out and if that's not set use the default time out. 8379103575aSMike Christie */ 8389103575aSMike Christie if (!udev->qfull_time_out) 8399103575aSMike Christie return -ETIMEDOUT; 8409103575aSMike Christie else if (udev->qfull_time_out > 0) 8419103575aSMike Christie tmo = udev->qfull_time_out; 8429103575aSMike Christie else if (udev->cmd_time_out) 8439103575aSMike Christie tmo = udev->cmd_time_out; 8449103575aSMike Christie else 8459103575aSMike Christie tmo = TCMU_TIME_OUT; 8469103575aSMike Christie 8479103575aSMike Christie ret = tcmu_setup_cmd_timer(tcmu_cmd, tmo, &udev->qfull_timer); 848af1dd7ffSMike Christie if (ret) 849af1dd7ffSMike Christie return ret; 850af1dd7ffSMike Christie 851af1dd7ffSMike Christie list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue); 852af1dd7ffSMike Christie pr_debug("adding cmd %u on dev %s to ring space wait queue\n", 853af1dd7ffSMike Christie tcmu_cmd->cmd_id, udev->name); 854af1dd7ffSMike Christie return 0; 855af1dd7ffSMike Christie } 856af1dd7ffSMike Christie 8576fd0ce79SMike Christie /** 8586fd0ce79SMike Christie * queue_cmd_ring - queue cmd to ring or internally 8596fd0ce79SMike Christie * @tcmu_cmd: cmd to queue 8606fd0ce79SMike Christie * @scsi_err: TCM error code if failure (-1) returned. 8616fd0ce79SMike Christie * 8626fd0ce79SMike Christie * Returns: 8636fd0ce79SMike Christie * -1 we cannot queue internally or to the ring. 8646fd0ce79SMike Christie * 0 success 865af1dd7ffSMike Christie * 1 internally queued to wait for ring memory to free. 
8666fd0ce79SMike Christie */ 8676fd0ce79SMike Christie static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err) 8687c9e7a6fSAndy Grover { 8697c9e7a6fSAndy Grover struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; 8707c9e7a6fSAndy Grover struct se_cmd *se_cmd = tcmu_cmd->se_cmd; 8717c9e7a6fSAndy Grover size_t base_command_size, command_size; 8727c9e7a6fSAndy Grover struct tcmu_mailbox *mb; 8737c9e7a6fSAndy Grover struct tcmu_cmd_entry *entry; 8747c9e7a6fSAndy Grover struct iovec *iov; 875141685a3SXiubo Li int iov_cnt, ret; 8767c9e7a6fSAndy Grover uint32_t cmd_head; 8777c9e7a6fSAndy Grover uint64_t cdb_off; 878f97ec7dbSIlias Tsitsimpis bool copy_to_data_area; 879ab22d260SXiubo Li size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd); 8807c9e7a6fSAndy Grover 8816fd0ce79SMike Christie *scsi_err = TCM_NO_SENSE; 8826fd0ce79SMike Christie 883892782caSMike Christie if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) { 884892782caSMike Christie *scsi_err = TCM_LUN_BUSY; 885892782caSMike Christie return -1; 886892782caSMike Christie } 887892782caSMike Christie 8886fd0ce79SMike Christie if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) { 8896fd0ce79SMike Christie *scsi_err = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 8906fd0ce79SMike Christie return -1; 8916fd0ce79SMike Christie } 8927c9e7a6fSAndy Grover 8937c9e7a6fSAndy Grover /* 8947c9e7a6fSAndy Grover * Must be a certain minimum size for response sense info, but 8957c9e7a6fSAndy Grover * also may be larger if the iov array is large. 8967c9e7a6fSAndy Grover * 897fe25cc34SXiubo Li * We prepare as many iovs as possbile for potential uses here, 898fe25cc34SXiubo Li * because it's expensive to tell how many regions are freed in 899fe25cc34SXiubo Li * the bitmap & global data pool, as the size calculated here 900fe25cc34SXiubo Li * will only be used to do the checks. 901fe25cc34SXiubo Li * 902fe25cc34SXiubo Li * The size will be recalculated later as actually needed to save 903fe25cc34SXiubo Li * cmd area memories. 
9047c9e7a6fSAndy Grover */ 905fe25cc34SXiubo Li base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt); 906fe25cc34SXiubo Li command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); 9077c9e7a6fSAndy Grover 908af1dd7ffSMike Christie if (!list_empty(&udev->cmdr_queue)) 909af1dd7ffSMike Christie goto queue; 9107c9e7a6fSAndy Grover 9117c9e7a6fSAndy Grover mb = udev->mb_addr; 9127c9e7a6fSAndy Grover cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 913554617b2SAndy Grover if ((command_size > (udev->cmdr_size / 2)) || 914554617b2SAndy Grover data_length > udev->data_size) { 915554617b2SAndy Grover pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu " 9163d9b9555SAndy Grover "cmd ring/data area\n", command_size, data_length, 9177c9e7a6fSAndy Grover udev->cmdr_size, udev->data_size); 9186fd0ce79SMike Christie *scsi_err = TCM_INVALID_CDB_FIELD; 9196fd0ce79SMike Christie return -1; 920554617b2SAndy Grover } 9217c9e7a6fSAndy Grover 922af1dd7ffSMike Christie if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) { 923810b8153SMike Christie /* 924810b8153SMike Christie * Don't leave commands partially setup because the unmap 925810b8153SMike Christie * thread might need the blocks to make forward progress. 
926810b8153SMike Christie */ 927810b8153SMike Christie tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur); 928810b8153SMike Christie tcmu_cmd_reset_dbi_cur(tcmu_cmd); 929af1dd7ffSMike Christie goto queue; 9307c9e7a6fSAndy Grover } 9317c9e7a6fSAndy Grover 932f56574a2SAndy Grover /* Insert a PAD if end-of-ring space is too small */ 933f56574a2SAndy Grover if (head_to_end(cmd_head, udev->cmdr_size) < command_size) { 934f56574a2SAndy Grover size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); 935f56574a2SAndy Grover 9367c9e7a6fSAndy Grover entry = (void *) mb + CMDR_OFF + cmd_head; 9370ad46af8SAndy Grover tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD); 9380ad46af8SAndy Grover tcmu_hdr_set_len(&entry->hdr.len_op, pad_size); 9390ad46af8SAndy Grover entry->hdr.cmd_id = 0; /* not used for PAD */ 9400ad46af8SAndy Grover entry->hdr.kflags = 0; 9410ad46af8SAndy Grover entry->hdr.uflags = 0; 9429d62bc0eSXiubo Li tcmu_flush_dcache_range(entry, sizeof(*entry)); 9437c9e7a6fSAndy Grover 9447c9e7a6fSAndy Grover UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); 9459d62bc0eSXiubo Li tcmu_flush_dcache_range(mb, sizeof(*mb)); 9467c9e7a6fSAndy Grover 9477c9e7a6fSAndy Grover cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ 9487c9e7a6fSAndy Grover WARN_ON(cmd_head != 0); 9497c9e7a6fSAndy Grover } 9507c9e7a6fSAndy Grover 9517c9e7a6fSAndy Grover entry = (void *) mb + CMDR_OFF + cmd_head; 952b3743c71SXiubo Li memset(entry, 0, command_size); 9530ad46af8SAndy Grover tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); 9547c9e7a6fSAndy Grover 9553d9b9555SAndy Grover /* Handle allocating space from the data area */ 956b6df4b79SXiubo Li tcmu_cmd_reset_dbi_cur(tcmu_cmd); 9577c9e7a6fSAndy Grover iov = &entry->req.iov[0]; 958f97ec7dbSIlias Tsitsimpis iov_cnt = 0; 959e4648b01SIlias Tsitsimpis copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE 960e4648b01SIlias Tsitsimpis || se_cmd->se_cmd_flags & SCF_BIDI); 9611a1fc0b8SMike Christie scatter_data_area(udev, tcmu_cmd, 
se_cmd->t_data_sg, 962b6df4b79SXiubo Li se_cmd->t_data_nents, &iov, &iov_cnt, 963b6df4b79SXiubo Li copy_to_data_area); 9647c9e7a6fSAndy Grover entry->req.iov_cnt = iov_cnt; 9657c9e7a6fSAndy Grover 966e4648b01SIlias Tsitsimpis /* Handle BIDI commands */ 967e4648b01SIlias Tsitsimpis iov_cnt = 0; 968b3743c71SXiubo Li if (se_cmd->se_cmd_flags & SCF_BIDI) { 969ab22d260SXiubo Li iov++; 9701a1fc0b8SMike Christie scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg, 9711a1fc0b8SMike Christie se_cmd->t_bidi_data_nents, &iov, &iov_cnt, 9721a1fc0b8SMike Christie false); 973ab22d260SXiubo Li } 974b3743c71SXiubo Li entry->req.iov_bidi_cnt = iov_cnt; 97526418649SSheng Yang 9769103575aSMike Christie ret = tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, 9779103575aSMike Christie &udev->cmd_timer); 9780d44374cSMike Christie if (ret) { 9790d44374cSMike Christie tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt); 98097488c73SDan Carpenter mutex_unlock(&udev->cmdr_lock); 9816fd0ce79SMike Christie 9826fd0ce79SMike Christie *scsi_err = TCM_OUT_OF_RESOURCES; 9836fd0ce79SMike Christie return -1; 9840d44374cSMike Christie } 9850d44374cSMike Christie entry->hdr.cmd_id = tcmu_cmd->cmd_id; 9860d44374cSMike Christie 987fe25cc34SXiubo Li /* 988fe25cc34SXiubo Li * Recalaulate the command's base size and size according 989fe25cc34SXiubo Li * to the actual needs 990fe25cc34SXiubo Li */ 991fe25cc34SXiubo Li base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt + 992fe25cc34SXiubo Li entry->req.iov_bidi_cnt); 993fe25cc34SXiubo Li command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size); 994fe25cc34SXiubo Li 995fe25cc34SXiubo Li tcmu_hdr_set_len(&entry->hdr.len_op, command_size); 996fe25cc34SXiubo Li 9977c9e7a6fSAndy Grover /* All offsets relative to mb_addr, not start of entry! 
*/ 9987c9e7a6fSAndy Grover cdb_off = CMDR_OFF + cmd_head + base_command_size; 9997c9e7a6fSAndy Grover memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb)); 10007c9e7a6fSAndy Grover entry->req.cdb_off = cdb_off; 10017c9e7a6fSAndy Grover tcmu_flush_dcache_range(entry, sizeof(*entry)); 10027c9e7a6fSAndy Grover 10037c9e7a6fSAndy Grover UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size); 10047c9e7a6fSAndy Grover tcmu_flush_dcache_range(mb, sizeof(*mb)); 10057c9e7a6fSAndy Grover 10067c9e7a6fSAndy Grover /* TODO: only if FLUSH and FUA? */ 10077c9e7a6fSAndy Grover uio_event_notify(&udev->uio_info); 10087c9e7a6fSAndy Grover 10096fd0ce79SMike Christie return 0; 1010af1dd7ffSMike Christie 1011af1dd7ffSMike Christie queue: 1012af1dd7ffSMike Christie if (add_to_cmdr_queue(tcmu_cmd)) { 1013af1dd7ffSMike Christie *scsi_err = TCM_OUT_OF_RESOURCES; 1014af1dd7ffSMike Christie return -1; 1015af1dd7ffSMike Christie } 1016af1dd7ffSMike Christie 1017af1dd7ffSMike Christie return 1; 10187c9e7a6fSAndy Grover } 10197c9e7a6fSAndy Grover 102002eb924fSAndy Grover static sense_reason_t 102102eb924fSAndy Grover tcmu_queue_cmd(struct se_cmd *se_cmd) 10227c9e7a6fSAndy Grover { 1023af1dd7ffSMike Christie struct se_device *se_dev = se_cmd->se_dev; 1024af1dd7ffSMike Christie struct tcmu_dev *udev = TCMU_DEV(se_dev); 10257c9e7a6fSAndy Grover struct tcmu_cmd *tcmu_cmd; 10266fd0ce79SMike Christie sense_reason_t scsi_ret; 1027af1dd7ffSMike Christie int ret; 10287c9e7a6fSAndy Grover 10297c9e7a6fSAndy Grover tcmu_cmd = tcmu_alloc_cmd(se_cmd); 10307c9e7a6fSAndy Grover if (!tcmu_cmd) 103102eb924fSAndy Grover return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 10327c9e7a6fSAndy Grover 1033af1dd7ffSMike Christie mutex_lock(&udev->cmdr_lock); 1034af1dd7ffSMike Christie ret = queue_cmd_ring(tcmu_cmd, &scsi_ret); 1035af1dd7ffSMike Christie mutex_unlock(&udev->cmdr_lock); 1036af1dd7ffSMike Christie if (ret < 0) 1037141685a3SXiubo Li tcmu_free_cmd(tcmu_cmd); 10386fd0ce79SMike 
	return scsi_ret;
}

/*
 * Complete one ring entry: copy back Data-In (or sense data), hand the
 * result to LIO and release the command's data blocks.
 */
static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
{
	struct se_cmd *se_cmd = cmd->se_cmd;
	struct tcmu_dev *udev = cmd->tcmu_dev;

	/*
	 * cmd has been completed already from timeout, just reclaim
	 * data area space and free cmd
	 */
	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		goto out;

	tcmu_cmd_reset_dbi_cur(cmd);

	if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
		/* Userspace did not understand the command. */
		pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
			cmd->se_cmd);
		entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
	} else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
		transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
	} else if (se_cmd->se_cmd_flags & SCF_BIDI) {
		/* Get Data-In buffer before clean up */
		gather_data_area(udev, cmd, true);
	} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		gather_data_area(udev, cmd, false);
	} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
		/* TODO: */
	} else if (se_cmd->data_direction != DMA_NONE) {
		pr_warn("TCMU: data direction was %d!\n",
			se_cmd->data_direction);
	}

	target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);

out:
	cmd->se_cmd = NULL;
	tcmu_cmd_free_data(cmd, cmd->dbi_cnt);
	tcmu_free_cmd(cmd);
}

/*
 * Drain completed entries from the ring tail. Returns the number of
 * entries handled. Presumably called with cmdr_lock held (see
 * tcmu_irqcontrol) — confirm for any new caller.
 */
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
	struct tcmu_mailbox *mb;
	int handled = 0;

	if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
		pr_err("ring broken, not handling completions\n");
		return 0;
	}

	mb = udev->mb_addr;
	/* mb->cmd_tail is advanced by userspace; re-read it. */
	tcmu_flush_dcache_range(mb, sizeof(*mb));

	while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {

		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
		struct tcmu_cmd *cmd;

		tcmu_flush_dcache_range(entry, sizeof(*entry));

		/* PAD entries carry no command; just step over them. */
		if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
			UPDATE_HEAD(udev->cmdr_last_cleaned,
				    tcmu_hdr_get_len(entry->hdr.len_op),
				    udev->cmdr_size);
			continue;
		}
		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

		cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
		if (!cmd) {
			/* Userspace returned an id we never issued. */
			pr_err("cmd_id %u not found, ring is broken\n",
			       entry->hdr.cmd_id);
			set_bit(TCMU_DEV_BIT_BROKEN,
				&udev->flags);
			break;
		}

		tcmu_handle_completion(cmd, entry);

		UPDATE_HEAD(udev->cmdr_last_cleaned,
			    tcmu_hdr_get_len(entry->hdr.len_op),
			    udev->cmdr_size);

		handled++;
	}

	if (mb->cmd_tail == mb->cmd_head) {
		/* no more pending commands */
		del_timer(&udev->cmd_timer);

		if (list_empty(&udev->cmdr_queue)) {
			/*
			 * no more pending or waiting commands so try to
			 * reclaim blocks if needed.
			 */
			if (atomic_read(&global_db_count) >
			    tcmu_global_max_blocks)
				schedule_delayed_work(&tcmu_unmap_work, 0);
		}
	}

	return handled;
}

/*
 * Per-command timeout check; the (id, ptr, data) signature matches an
 * idr iterator callback — presumably run via idr_for_each over
 * udev->commands (caller not visible here; confirm).
 */
static int tcmu_check_expired_cmd(int id, void *p, void *data)
{
	struct tcmu_cmd *cmd = p;
	struct tcmu_dev *udev = cmd->tcmu_dev;
	u8 scsi_status;
	struct se_cmd *se_cmd;
	bool is_running;

	if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
		return 0;

	if (!time_after(jiffies, cmd->deadline))
		return 0;

	/*
	 * A command waiting for ring space sits on cmdr_queue; one that
	 * made it to the ring had its queue entry list_del_init'ed, so an
	 * empty node means it is inflight.
	 */
	is_running = list_empty(&cmd->cmdr_queue_entry);
	se_cmd = cmd->se_cmd;

	if (is_running) {
		/*
		 * If cmd_time_out is disabled but qfull is set deadline
		 * will only reflect the qfull timeout. Ignore it.
		 */
		if (!udev->cmd_time_out)
			return 0;

		set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
		/*
		 * target_complete_cmd will translate this to LUN COMM FAILURE
		 */
		scsi_status = SAM_STAT_CHECK_CONDITION;
	} else {
		/* Never reached userspace: unwind it completely now. */
		list_del_init(&cmd->cmdr_queue_entry);

		idr_remove(&udev->commands, id);
		tcmu_free_cmd(cmd);
		scsi_status = SAM_STAT_TASK_SET_FULL;
	}

	pr_debug("Timing out cmd %u on dev %s that is %s.\n",
		 id, udev->name, is_running ?
		 "inflight" : "queued");

	target_complete_cmd(se_cmd, scsi_status);
	return 0;
}

/*
 * A device timer fired: put the device on the global timed-out list and
 * kick the unmap worker, which does the actual expiry handling.
 */
static void tcmu_device_timedout(struct tcmu_dev *udev)
{
	spin_lock(&timed_out_udevs_lock);
	/* Only add once; an empty entry means we are not yet listed. */
	if (list_empty(&udev->timedout_entry))
		list_add_tail(&udev->timedout_entry, &timed_out_udevs);
	spin_unlock(&timed_out_udevs_lock);

	schedule_delayed_work(&tcmu_unmap_work, 0);
}

/* Timer callback for inflight-command (cmd_time_out) expiry. */
static void tcmu_cmd_timedout(struct timer_list *t)
{
	struct tcmu_dev *udev = from_timer(udev, t, cmd_timer);

	pr_debug("%s cmd timeout has expired\n", udev->name);
	tcmu_device_timedout(udev);
}

/* Timer callback for queue-full (qfull_time_out) expiry. */
static void tcmu_qfull_timedout(struct timer_list *t)
{
	struct tcmu_dev *udev = from_timer(udev, t, qfull_timer);

	pr_debug("%s qfull timeout has expired\n", udev->name);
	tcmu_device_timedout(udev);
}

/* Allocate per-HBA private data. Returns 0 or -ENOMEM. */
static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct tcmu_hba *tcmu_hba;

	tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
	if (!tcmu_hba)
		return -ENOMEM;

	tcmu_hba->host_id = host_id;
	hba->hba_ptr = tcmu_hba;

	return 0;
}

/* Release what tcmu_attach_hba allocated. */
static void tcmu_detach_hba(struct se_hba *hba)
{
	kfree(hba->hba_ptr);
	hba->hba_ptr = NULL;
}

/*
 * Allocate and initialize a tcmu device. Returns the embedded se_device
 * or NULL on allocation failure. Freeing is by kref elsewhere (note the
 * kref_init below).
 */
static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
{
	struct tcmu_dev *udev;

	udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
	if (!udev)
		return NULL;
	kref_init(&udev->kref);

	udev->name = kstrdup(name, GFP_KERNEL);
	if (!udev->name) {
		kfree(udev);
		return NULL;
	}

	udev->hba = hba;
	udev->cmd_time_out = TCMU_TIME_OUT;
	/* Negative qfull_time_out selects the compat fallbacks. */
	udev->qfull_time_out = -1;

	udev->max_blocks = DATA_BLOCK_BITS_DEF;
	mutex_init(&udev->cmdr_lock);

	INIT_LIST_HEAD(&udev->timedout_entry);
	INIT_LIST_HEAD(&udev->cmdr_queue);
	idr_init(&udev->commands);

	timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
	timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);

	init_waitqueue_head(&udev->nl_cmd_wq);
	spin_lock_init(&udev->nl_cmd_lock);

	INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);

	return &udev->se_dev;
}

/*
 * Retry (or, when @fail is set, fail) every command waiting for ring
 * space. Returns true when the wait queue was fully drained. Caller
 * holds cmdr_lock (see tcmu_irqcontrol).
 */
static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
{
	struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
	LIST_HEAD(cmds);
	bool drained = true;
	sense_reason_t scsi_ret;
	int ret;

	if (list_empty(&udev->cmdr_queue))
		return true;

	pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);

	/* Take the whole queue; whatever cannot run is spliced back below. */
	list_splice_init(&udev->cmdr_queue, &cmds);

	list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) {
		list_del_init(&tcmu_cmd->cmdr_queue_entry);

		pr_debug("removing cmd %u on dev %s from queue\n",
			 tcmu_cmd->cmd_id, udev->name);

		if (fail) {
			idr_remove(&udev->commands, tcmu_cmd->cmd_id);
			/*
			 * We were not able to even start the command, so
			 * fail with busy to allow a retry in case runner
			 * was only temporarily down. If the device is being
			 * removed then LIO core will do the right thing and
			 * fail the retry.
			 */
			target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY);
			tcmu_free_cmd(tcmu_cmd);
			continue;
		}

		ret = queue_cmd_ring(tcmu_cmd, &scsi_ret);
		if (ret < 0) {
			pr_debug("cmd %u on dev %s failed with %u\n",
				 tcmu_cmd->cmd_id, udev->name, scsi_ret);

			idr_remove(&udev->commands, tcmu_cmd->cmd_id);
			/*
			 * Ignore scsi_ret for now. target_complete_cmd
			 * drops it.
			 */
			target_complete_cmd(tcmu_cmd->se_cmd,
					    SAM_STAT_CHECK_CONDITION);
			tcmu_free_cmd(tcmu_cmd);
		} else if (ret > 0) {
			pr_debug("ran out of space during cmdr queue run\n");
			/*
			 * cmd was requeued, so just put all cmds back in
			 * the queue
			 */
			list_splice_tail(&cmds, &udev->cmdr_queue);
			drained = false;
			goto done;
		}
	}
	if (list_empty(&udev->cmdr_queue))
		del_timer(&udev->qfull_timer);
done:
	return drained;
}

/*
 * UIO irqcontrol hook: userspace writes to the uio fd to tell us it
 * produced completions; reap them and restart any waiting commands.
 */
static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
{
	struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

mutex_lock(&udev->cmdr_lock); 1342af1dd7ffSMike Christie tcmu_handle_completions(udev); 1343892782caSMike Christie run_cmdr_queue(udev, false); 1344af1dd7ffSMike Christie mutex_unlock(&udev->cmdr_lock); 13457c9e7a6fSAndy Grover 13467c9e7a6fSAndy Grover return 0; 13477c9e7a6fSAndy Grover } 13487c9e7a6fSAndy Grover 13497c9e7a6fSAndy Grover /* 13507c9e7a6fSAndy Grover * mmap code from uio.c. Copied here because we want to hook mmap() 13517c9e7a6fSAndy Grover * and this stuff must come along. 13527c9e7a6fSAndy Grover */ 13537c9e7a6fSAndy Grover static int tcmu_find_mem_index(struct vm_area_struct *vma) 13547c9e7a6fSAndy Grover { 13557c9e7a6fSAndy Grover struct tcmu_dev *udev = vma->vm_private_data; 13567c9e7a6fSAndy Grover struct uio_info *info = &udev->uio_info; 13577c9e7a6fSAndy Grover 13587c9e7a6fSAndy Grover if (vma->vm_pgoff < MAX_UIO_MAPS) { 13597c9e7a6fSAndy Grover if (info->mem[vma->vm_pgoff].size == 0) 13607c9e7a6fSAndy Grover return -1; 13617c9e7a6fSAndy Grover return (int)vma->vm_pgoff; 13627c9e7a6fSAndy Grover } 13637c9e7a6fSAndy Grover return -1; 13647c9e7a6fSAndy Grover } 13657c9e7a6fSAndy Grover 1366b6df4b79SXiubo Li static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi) 1367b6df4b79SXiubo Li { 1368b6df4b79SXiubo Li struct page *page; 1369b6df4b79SXiubo Li 1370b6df4b79SXiubo Li mutex_lock(&udev->cmdr_lock); 1371b6df4b79SXiubo Li page = tcmu_get_block_page(udev, dbi); 1372b6df4b79SXiubo Li if (likely(page)) { 1373b6df4b79SXiubo Li mutex_unlock(&udev->cmdr_lock); 1374b6df4b79SXiubo Li return page; 1375b6df4b79SXiubo Li } 1376b6df4b79SXiubo Li 1377b6df4b79SXiubo Li /* 1378c1c390baSMike Christie * Userspace messed up and passed in a address not in the 1379c1c390baSMike Christie * data iov passed to it. 
1380b6df4b79SXiubo Li */ 1381c1c390baSMike Christie pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n", 1382c1c390baSMike Christie dbi, udev->name); 1383c1c390baSMike Christie page = NULL; 1384b6df4b79SXiubo Li mutex_unlock(&udev->cmdr_lock); 1385b6df4b79SXiubo Li 1386b6df4b79SXiubo Li return page; 1387b6df4b79SXiubo Li } 1388b6df4b79SXiubo Li 138969589c9bSSouptick Joarder static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf) 13907c9e7a6fSAndy Grover { 139111bac800SDave Jiang struct tcmu_dev *udev = vmf->vma->vm_private_data; 13927c9e7a6fSAndy Grover struct uio_info *info = &udev->uio_info; 13937c9e7a6fSAndy Grover struct page *page; 13947c9e7a6fSAndy Grover unsigned long offset; 13957c9e7a6fSAndy Grover void *addr; 13967c9e7a6fSAndy Grover 139711bac800SDave Jiang int mi = tcmu_find_mem_index(vmf->vma); 13987c9e7a6fSAndy Grover if (mi < 0) 13997c9e7a6fSAndy Grover return VM_FAULT_SIGBUS; 14007c9e7a6fSAndy Grover 14017c9e7a6fSAndy Grover /* 14027c9e7a6fSAndy Grover * We need to subtract mi because userspace uses offset = N*PAGE_SIZE 14037c9e7a6fSAndy Grover * to use mem[N]. 
14047c9e7a6fSAndy Grover */ 14057c9e7a6fSAndy Grover offset = (vmf->pgoff - mi) << PAGE_SHIFT; 14067c9e7a6fSAndy Grover 1407141685a3SXiubo Li if (offset < udev->data_off) { 1408141685a3SXiubo Li /* For the vmalloc()ed cmd area pages */ 14097c9e7a6fSAndy Grover addr = (void *)(unsigned long)info->mem[mi].addr + offset; 14107c9e7a6fSAndy Grover page = vmalloc_to_page(addr); 1411141685a3SXiubo Li } else { 1412141685a3SXiubo Li uint32_t dbi; 1413141685a3SXiubo Li 1414b6df4b79SXiubo Li /* For the dynamically growing data area pages */ 1415141685a3SXiubo Li dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE; 1416b6df4b79SXiubo Li page = tcmu_try_get_block_page(udev, dbi); 1417b6df4b79SXiubo Li if (!page) 1418c1c390baSMike Christie return VM_FAULT_SIGBUS; 1419141685a3SXiubo Li } 1420141685a3SXiubo Li 14217c9e7a6fSAndy Grover get_page(page); 14227c9e7a6fSAndy Grover vmf->page = page; 14237c9e7a6fSAndy Grover return 0; 14247c9e7a6fSAndy Grover } 14257c9e7a6fSAndy Grover 14267c9e7a6fSAndy Grover static const struct vm_operations_struct tcmu_vm_ops = { 14277c9e7a6fSAndy Grover .fault = tcmu_vma_fault, 14287c9e7a6fSAndy Grover }; 14297c9e7a6fSAndy Grover 14307c9e7a6fSAndy Grover static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) 14317c9e7a6fSAndy Grover { 14327c9e7a6fSAndy Grover struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 14337c9e7a6fSAndy Grover 14347c9e7a6fSAndy Grover vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; 14357c9e7a6fSAndy Grover vma->vm_ops = &tcmu_vm_ops; 14367c9e7a6fSAndy Grover 14377c9e7a6fSAndy Grover vma->vm_private_data = udev; 14387c9e7a6fSAndy Grover 14397c9e7a6fSAndy Grover /* Ensure the mmap is exactly the right size */ 144080eb8761SMike Christie if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT)) 14417c9e7a6fSAndy Grover return -EINVAL; 14427c9e7a6fSAndy Grover 14437c9e7a6fSAndy Grover return 0; 14447c9e7a6fSAndy Grover } 14457c9e7a6fSAndy Grover 14467c9e7a6fSAndy Grover static int tcmu_open(struct 
uio_info *info, struct inode *inode) 14477c9e7a6fSAndy Grover { 14487c9e7a6fSAndy Grover struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); 14497c9e7a6fSAndy Grover 14507c9e7a6fSAndy Grover /* O_EXCL not supported for char devs, so fake it? */ 14517c9e7a6fSAndy Grover if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags)) 14527c9e7a6fSAndy Grover return -EBUSY; 14537c9e7a6fSAndy Grover 1454b6df4b79SXiubo Li udev->inode = inode; 14559260695dSMike Christie kref_get(&udev->kref); 1456b6df4b79SXiubo Li 14577c9e7a6fSAndy Grover pr_debug("open\n"); 14587c9e7a6fSAndy Grover 14597c9e7a6fSAndy Grover return 0; 14607c9e7a6fSAndy Grover } 14617c9e7a6fSAndy Grover 1462f3cdbe39SMike Christie static void tcmu_dev_call_rcu(struct rcu_head *p) 1463f3cdbe39SMike Christie { 1464f3cdbe39SMike Christie struct se_device *dev = container_of(p, struct se_device, rcu_head); 1465f3cdbe39SMike Christie struct tcmu_dev *udev = TCMU_DEV(dev); 1466f3cdbe39SMike Christie 1467f3cdbe39SMike Christie kfree(udev->uio_info.name); 1468f3cdbe39SMike Christie kfree(udev->name); 1469f3cdbe39SMike Christie kfree(udev); 1470f3cdbe39SMike Christie } 1471f3cdbe39SMike Christie 1472c22adc0bSXiubo Li static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd) 1473c22adc0bSXiubo Li { 1474c22adc0bSXiubo Li if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 1475c22adc0bSXiubo Li kmem_cache_free(tcmu_cmd_cache, cmd); 1476c22adc0bSXiubo Li return 0; 1477c22adc0bSXiubo Li } 1478c22adc0bSXiubo Li return -EINVAL; 1479c22adc0bSXiubo Li } 1480c22adc0bSXiubo Li 1481bf99ec13SMike Christie static void tcmu_blocks_release(struct radix_tree_root *blocks, 1482bf99ec13SMike Christie int start, int end) 1483c22adc0bSXiubo Li { 1484c22adc0bSXiubo Li int i; 1485c22adc0bSXiubo Li struct page *page; 1486c22adc0bSXiubo Li 1487bf99ec13SMike Christie for (i = start; i < end; i++) { 1488bf99ec13SMike Christie page = radix_tree_delete(blocks, i); 1489c22adc0bSXiubo Li if (page) { 1490c22adc0bSXiubo Li 
__free_page(page); 1491c22adc0bSXiubo Li atomic_dec(&global_db_count); 1492c22adc0bSXiubo Li } 1493c22adc0bSXiubo Li } 1494c22adc0bSXiubo Li } 1495c22adc0bSXiubo Li 1496f3cdbe39SMike Christie static void tcmu_dev_kref_release(struct kref *kref) 1497f3cdbe39SMike Christie { 1498f3cdbe39SMike Christie struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); 1499f3cdbe39SMike Christie struct se_device *dev = &udev->se_dev; 1500c22adc0bSXiubo Li struct tcmu_cmd *cmd; 1501c22adc0bSXiubo Li bool all_expired = true; 1502c22adc0bSXiubo Li int i; 1503c22adc0bSXiubo Li 1504c22adc0bSXiubo Li vfree(udev->mb_addr); 1505c22adc0bSXiubo Li udev->mb_addr = NULL; 1506c22adc0bSXiubo Li 1507488ebe4cSMike Christie spin_lock_bh(&timed_out_udevs_lock); 1508488ebe4cSMike Christie if (!list_empty(&udev->timedout_entry)) 1509488ebe4cSMike Christie list_del(&udev->timedout_entry); 1510488ebe4cSMike Christie spin_unlock_bh(&timed_out_udevs_lock); 1511488ebe4cSMike Christie 1512c22adc0bSXiubo Li /* Upper layer should drain all requests before calling this */ 15136fddcb77SMike Christie mutex_lock(&udev->cmdr_lock); 1514c22adc0bSXiubo Li idr_for_each_entry(&udev->commands, cmd, i) { 1515c22adc0bSXiubo Li if (tcmu_check_and_free_pending_cmd(cmd) != 0) 1516c22adc0bSXiubo Li all_expired = false; 1517c22adc0bSXiubo Li } 1518c22adc0bSXiubo Li idr_destroy(&udev->commands); 1519c22adc0bSXiubo Li WARN_ON(!all_expired); 1520c22adc0bSXiubo Li 1521bf99ec13SMike Christie tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1); 152280eb8761SMike Christie kfree(udev->data_bitmap); 1523bf99ec13SMike Christie mutex_unlock(&udev->cmdr_lock); 1524f3cdbe39SMike Christie 1525f3cdbe39SMike Christie call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); 1526f3cdbe39SMike Christie } 1527f3cdbe39SMike Christie 15287c9e7a6fSAndy Grover static int tcmu_release(struct uio_info *info, struct inode *inode) 15297c9e7a6fSAndy Grover { 15307c9e7a6fSAndy Grover struct tcmu_dev *udev = container_of(info, struct tcmu_dev, 
uio_info); 15317c9e7a6fSAndy Grover 15327c9e7a6fSAndy Grover clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); 15337c9e7a6fSAndy Grover 15347c9e7a6fSAndy Grover pr_debug("close\n"); 15359260695dSMike Christie /* release ref from open */ 1536f3cdbe39SMike Christie kref_put(&udev->kref, tcmu_dev_kref_release); 15377c9e7a6fSAndy Grover return 0; 15387c9e7a6fSAndy Grover } 15397c9e7a6fSAndy Grover 1540b3af66e2SMike Christie static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) 1541b3af66e2SMike Christie { 1542b3af66e2SMike Christie struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 1543b3af66e2SMike Christie 1544b3af66e2SMike Christie if (!tcmu_kern_cmd_reply_supported) 1545b3af66e2SMike Christie return; 1546b849b456SKenjiro Nakayama 1547b849b456SKenjiro Nakayama if (udev->nl_reply_supported <= 0) 1548b849b456SKenjiro Nakayama return; 1549b849b456SKenjiro Nakayama 1550b3af66e2SMike Christie relock: 1551b3af66e2SMike Christie spin_lock(&udev->nl_cmd_lock); 1552b3af66e2SMike Christie 1553b3af66e2SMike Christie if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { 1554b3af66e2SMike Christie spin_unlock(&udev->nl_cmd_lock); 1555b3af66e2SMike Christie pr_debug("sleeping for open nl cmd\n"); 1556b3af66e2SMike Christie wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC)); 1557b3af66e2SMike Christie goto relock; 1558b3af66e2SMike Christie } 1559b3af66e2SMike Christie 1560b3af66e2SMike Christie memset(nl_cmd, 0, sizeof(*nl_cmd)); 1561b3af66e2SMike Christie nl_cmd->cmd = cmd; 1562b3af66e2SMike Christie init_completion(&nl_cmd->complete); 1563b3af66e2SMike Christie 1564b3af66e2SMike Christie spin_unlock(&udev->nl_cmd_lock); 1565b3af66e2SMike Christie } 1566b3af66e2SMike Christie 1567b3af66e2SMike Christie static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) 1568b3af66e2SMike Christie { 1569b3af66e2SMike Christie struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; 1570b3af66e2SMike Christie int ret; 1571b3af66e2SMike Christie DEFINE_WAIT(__wait); 1572b3af66e2SMike Christie 
1573b3af66e2SMike Christie if (!tcmu_kern_cmd_reply_supported) 1574b3af66e2SMike Christie return 0; 1575b3af66e2SMike Christie 1576b849b456SKenjiro Nakayama if (udev->nl_reply_supported <= 0) 1577b849b456SKenjiro Nakayama return 0; 1578b849b456SKenjiro Nakayama 1579b3af66e2SMike Christie pr_debug("sleeping for nl reply\n"); 1580b3af66e2SMike Christie wait_for_completion(&nl_cmd->complete); 1581b3af66e2SMike Christie 1582b3af66e2SMike Christie spin_lock(&udev->nl_cmd_lock); 1583b3af66e2SMike Christie nl_cmd->cmd = TCMU_CMD_UNSPEC; 1584b3af66e2SMike Christie ret = nl_cmd->status; 1585b3af66e2SMike Christie nl_cmd->status = 0; 1586b3af66e2SMike Christie spin_unlock(&udev->nl_cmd_lock); 1587b3af66e2SMike Christie 1588b3af66e2SMike Christie wake_up_all(&udev->nl_cmd_wq); 1589b3af66e2SMike Christie 159085fae482SLuis de Bethencourt return ret; 1591b3af66e2SMike Christie } 1592b3af66e2SMike Christie 15930e5aee39SZhu Lingshan static int tcmu_netlink_event_init(struct tcmu_dev *udev, 15940e5aee39SZhu Lingshan enum tcmu_genl_cmd cmd, 15950e5aee39SZhu Lingshan struct sk_buff **buf, void **hdr) 15967c9e7a6fSAndy Grover { 15977c9e7a6fSAndy Grover struct sk_buff *skb; 15987c9e7a6fSAndy Grover void *msg_header; 15996e14eab9SNicholas Bellinger int ret = -ENOMEM; 16007c9e7a6fSAndy Grover 16017c9e7a6fSAndy Grover skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); 16027c9e7a6fSAndy Grover if (!skb) 16036e14eab9SNicholas Bellinger return ret; 16047c9e7a6fSAndy Grover 16057c9e7a6fSAndy Grover msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd); 16066e14eab9SNicholas Bellinger if (!msg_header) 16076e14eab9SNicholas Bellinger goto free_skb; 16087c9e7a6fSAndy Grover 1609b3af66e2SMike Christie ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name); 16106e14eab9SNicholas Bellinger if (ret < 0) 16116e14eab9SNicholas Bellinger goto free_skb; 16127c9e7a6fSAndy Grover 1613b3af66e2SMike Christie ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor); 
1614b3af66e2SMike Christie if (ret < 0) 1615b3af66e2SMike Christie goto free_skb; 1616b3af66e2SMike Christie 1617b3af66e2SMike Christie ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index); 16186e14eab9SNicholas Bellinger if (ret < 0) 16196e14eab9SNicholas Bellinger goto free_skb; 16207c9e7a6fSAndy Grover 16210e5aee39SZhu Lingshan *buf = skb; 16220e5aee39SZhu Lingshan *hdr = msg_header; 16230e5aee39SZhu Lingshan return ret; 16240e5aee39SZhu Lingshan 16250e5aee39SZhu Lingshan free_skb: 16260e5aee39SZhu Lingshan nlmsg_free(skb); 16270e5aee39SZhu Lingshan return ret; 16282d76443eSMike Christie } 16292d76443eSMike Christie 16300e5aee39SZhu Lingshan static int tcmu_netlink_event_send(struct tcmu_dev *udev, 16310e5aee39SZhu Lingshan enum tcmu_genl_cmd cmd, 16320e5aee39SZhu Lingshan struct sk_buff **buf, void **hdr) 16330e5aee39SZhu Lingshan { 16340e5aee39SZhu Lingshan int ret = 0; 16350e5aee39SZhu Lingshan struct sk_buff *skb = *buf; 16360e5aee39SZhu Lingshan void *msg_header = *hdr; 16378a45885cSBryant G. 
Ly 1638053c095aSJohannes Berg genlmsg_end(skb, msg_header); 16397c9e7a6fSAndy Grover 1640b3af66e2SMike Christie tcmu_init_genl_cmd_reply(udev, cmd); 1641b3af66e2SMike Christie 164220c08b36SSheng Yang ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0, 16437c9e7a6fSAndy Grover TCMU_MCGRP_CONFIG, GFP_KERNEL); 16447c9e7a6fSAndy Grover /* We don't care if no one is listening */ 16457c9e7a6fSAndy Grover if (ret == -ESRCH) 16467c9e7a6fSAndy Grover ret = 0; 1647b3af66e2SMike Christie if (!ret) 1648b3af66e2SMike Christie ret = tcmu_wait_genl_cmd_reply(udev); 16490e5aee39SZhu Lingshan return ret; 16500e5aee39SZhu Lingshan } 16517c9e7a6fSAndy Grover 1652e0c240acSZhu Lingshan static int tcmu_send_dev_add_event(struct tcmu_dev *udev) 1653e0c240acSZhu Lingshan { 1654e0c240acSZhu Lingshan struct sk_buff *skb = NULL; 1655e0c240acSZhu Lingshan void *msg_header = NULL; 1656e0c240acSZhu Lingshan int ret = 0; 1657e0c240acSZhu Lingshan 1658e0c240acSZhu Lingshan ret = tcmu_netlink_event_init(udev, TCMU_CMD_ADDED_DEVICE, &skb, 1659e0c240acSZhu Lingshan &msg_header); 1660e0c240acSZhu Lingshan if (ret < 0) 16617c9e7a6fSAndy Grover return ret; 1662e0c240acSZhu Lingshan return tcmu_netlink_event_send(udev, TCMU_CMD_ADDED_DEVICE, &skb, 1663e0c240acSZhu Lingshan &msg_header); 1664e0c240acSZhu Lingshan 1665e0c240acSZhu Lingshan } 1666e0c240acSZhu Lingshan 1667f892bd8eSZhu Lingshan static int tcmu_send_dev_remove_event(struct tcmu_dev *udev) 1668f892bd8eSZhu Lingshan { 1669f892bd8eSZhu Lingshan struct sk_buff *skb = NULL; 1670f892bd8eSZhu Lingshan void *msg_header = NULL; 1671f892bd8eSZhu Lingshan int ret = 0; 1672f892bd8eSZhu Lingshan 1673f892bd8eSZhu Lingshan ret = tcmu_netlink_event_init(udev, TCMU_CMD_REMOVED_DEVICE, 1674f892bd8eSZhu Lingshan &skb, &msg_header); 1675f892bd8eSZhu Lingshan if (ret < 0) 16766e14eab9SNicholas Bellinger return ret; 1677f892bd8eSZhu Lingshan return tcmu_netlink_event_send(udev, TCMU_CMD_REMOVED_DEVICE, 1678f892bd8eSZhu Lingshan &skb, &msg_header); 
16797c9e7a6fSAndy Grover } 16807c9e7a6fSAndy Grover 1681de8c5221SBryant G. Ly static int tcmu_update_uio_info(struct tcmu_dev *udev) 16827c9e7a6fSAndy Grover { 16837c9e7a6fSAndy Grover struct tcmu_hba *hba = udev->hba->hba_ptr; 16847c9e7a6fSAndy Grover struct uio_info *info; 1685de8c5221SBryant G. Ly size_t size, used; 16867c9e7a6fSAndy Grover char *str; 16877c9e7a6fSAndy Grover 16887c9e7a6fSAndy Grover info = &udev->uio_info; 16897c9e7a6fSAndy Grover size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name, 16907c9e7a6fSAndy Grover udev->dev_config); 16917c9e7a6fSAndy Grover size += 1; /* for \0 */ 16927c9e7a6fSAndy Grover str = kmalloc(size, GFP_KERNEL); 16937c9e7a6fSAndy Grover if (!str) 16947c9e7a6fSAndy Grover return -ENOMEM; 16957c9e7a6fSAndy Grover 16967c9e7a6fSAndy Grover used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name); 16977c9e7a6fSAndy Grover if (udev->dev_config[0]) 16987c9e7a6fSAndy Grover snprintf(str + used, size - used, "/%s", udev->dev_config); 16997c9e7a6fSAndy Grover 1700ededd039SBryant G. Ly /* If the old string exists, free it */ 1701ededd039SBryant G. Ly kfree(info->name); 17027c9e7a6fSAndy Grover info->name = str; 17037c9e7a6fSAndy Grover 1704de8c5221SBryant G. Ly return 0; 1705de8c5221SBryant G. Ly } 1706de8c5221SBryant G. Ly 1707de8c5221SBryant G. Ly static int tcmu_configure_device(struct se_device *dev) 1708de8c5221SBryant G. Ly { 1709de8c5221SBryant G. Ly struct tcmu_dev *udev = TCMU_DEV(dev); 1710de8c5221SBryant G. Ly struct uio_info *info; 1711de8c5221SBryant G. Ly struct tcmu_mailbox *mb; 1712de8c5221SBryant G. Ly int ret = 0; 1713de8c5221SBryant G. Ly 1714de8c5221SBryant G. Ly ret = tcmu_update_uio_info(udev); 1715de8c5221SBryant G. Ly if (ret) 1716de8c5221SBryant G. Ly return ret; 1717de8c5221SBryant G. Ly 1718de8c5221SBryant G. Ly info = &udev->uio_info; 1719de8c5221SBryant G. 
Ly 17206396bb22SKees Cook udev->data_bitmap = kcalloc(BITS_TO_LONGS(udev->max_blocks), 17216396bb22SKees Cook sizeof(unsigned long), 17226396bb22SKees Cook GFP_KERNEL); 1723a24e7917SWei Yongjun if (!udev->data_bitmap) { 1724a24e7917SWei Yongjun ret = -ENOMEM; 172580eb8761SMike Christie goto err_bitmap_alloc; 1726a24e7917SWei Yongjun } 172780eb8761SMike Christie 1728141685a3SXiubo Li udev->mb_addr = vzalloc(CMDR_SIZE); 17297c9e7a6fSAndy Grover if (!udev->mb_addr) { 17307c9e7a6fSAndy Grover ret = -ENOMEM; 17317c9e7a6fSAndy Grover goto err_vzalloc; 17327c9e7a6fSAndy Grover } 17337c9e7a6fSAndy Grover 17347c9e7a6fSAndy Grover /* mailbox fits in first part of CMDR space */ 17357c9e7a6fSAndy Grover udev->cmdr_size = CMDR_SIZE - CMDR_OFF; 17367c9e7a6fSAndy Grover udev->data_off = CMDR_SIZE; 173780eb8761SMike Christie udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE; 1738b6df4b79SXiubo Li udev->dbi_thresh = 0; /* Default in Idle state */ 17397c9e7a6fSAndy Grover 1740141685a3SXiubo Li /* Initialise the mailbox of the ring buffer */ 17417c9e7a6fSAndy Grover mb = udev->mb_addr; 17420ad46af8SAndy Grover mb->version = TCMU_MAILBOX_VERSION; 174332c76de3SSheng Yang mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC; 17447c9e7a6fSAndy Grover mb->cmdr_off = CMDR_OFF; 17457c9e7a6fSAndy Grover mb->cmdr_size = udev->cmdr_size; 17467c9e7a6fSAndy Grover 17477c9e7a6fSAndy Grover WARN_ON(!PAGE_ALIGNED(udev->data_off)); 17487c9e7a6fSAndy Grover WARN_ON(udev->data_size % PAGE_SIZE); 174926418649SSheng Yang WARN_ON(udev->data_size % DATA_BLOCK_SIZE); 17507c9e7a6fSAndy Grover 1751ac64a2ceSDavid Disseldorp info->version = __stringify(TCMU_MAILBOX_VERSION); 17527c9e7a6fSAndy Grover 17537c9e7a6fSAndy Grover info->mem[0].name = "tcm-user command & data buffer"; 17540633e123SArnd Bergmann info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr; 175580eb8761SMike Christie info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE; 1756141685a3SXiubo Li info->mem[0].memtype = UIO_MEM_NONE; 
17577c9e7a6fSAndy Grover 17587c9e7a6fSAndy Grover info->irqcontrol = tcmu_irqcontrol; 17597c9e7a6fSAndy Grover info->irq = UIO_IRQ_CUSTOM; 17607c9e7a6fSAndy Grover 17617c9e7a6fSAndy Grover info->mmap = tcmu_mmap; 17627c9e7a6fSAndy Grover info->open = tcmu_open; 17637c9e7a6fSAndy Grover info->release = tcmu_release; 17647c9e7a6fSAndy Grover 17657c9e7a6fSAndy Grover ret = uio_register_device(tcmu_root_device, info); 17667c9e7a6fSAndy Grover if (ret) 17677c9e7a6fSAndy Grover goto err_register; 17687c9e7a6fSAndy Grover 176981ee28deSSheng Yang /* User can set hw_block_size before enable the device */ 177081ee28deSSheng Yang if (dev->dev_attrib.hw_block_size == 0) 17717c9e7a6fSAndy Grover dev->dev_attrib.hw_block_size = 512; 177281ee28deSSheng Yang /* Other attributes can be configured in userspace */ 17733abaa2bfSMike Christie if (!dev->dev_attrib.hw_max_sectors) 17747c9e7a6fSAndy Grover dev->dev_attrib.hw_max_sectors = 128; 17759a8bb606SBryant G. Ly if (!dev->dev_attrib.emulate_write_cache) 17769a8bb606SBryant G. Ly dev->dev_attrib.emulate_write_cache = 0; 17777c9e7a6fSAndy Grover dev->dev_attrib.hw_queue_depth = 128; 17787c9e7a6fSAndy Grover 1779b849b456SKenjiro Nakayama /* If user didn't explicitly disable netlink reply support, use 1780b849b456SKenjiro Nakayama * module scope setting. 1781b849b456SKenjiro Nakayama */ 1782b849b456SKenjiro Nakayama if (udev->nl_reply_supported >= 0) 1783b849b456SKenjiro Nakayama udev->nl_reply_supported = tcmu_kern_cmd_reply_supported; 1784b849b456SKenjiro Nakayama 1785f3cdbe39SMike Christie /* 1786f3cdbe39SMike Christie * Get a ref incase userspace does a close on the uio device before 1787f3cdbe39SMike Christie * LIO has initiated tcmu_free_device. 
1788f3cdbe39SMike Christie */ 1789f3cdbe39SMike Christie kref_get(&udev->kref); 1790f3cdbe39SMike Christie 1791e0c240acSZhu Lingshan ret = tcmu_send_dev_add_event(udev); 17927c9e7a6fSAndy Grover if (ret) 17937c9e7a6fSAndy Grover goto err_netlink; 17947c9e7a6fSAndy Grover 1795b6df4b79SXiubo Li mutex_lock(&root_udev_mutex); 1796b6df4b79SXiubo Li list_add(&udev->node, &root_udev); 1797b6df4b79SXiubo Li mutex_unlock(&root_udev_mutex); 1798b6df4b79SXiubo Li 17997c9e7a6fSAndy Grover return 0; 18007c9e7a6fSAndy Grover 18017c9e7a6fSAndy Grover err_netlink: 1802f3cdbe39SMike Christie kref_put(&udev->kref, tcmu_dev_kref_release); 18037c9e7a6fSAndy Grover uio_unregister_device(&udev->uio_info); 18047c9e7a6fSAndy Grover err_register: 18057c9e7a6fSAndy Grover vfree(udev->mb_addr); 1806c22adc0bSXiubo Li udev->mb_addr = NULL; 18077c9e7a6fSAndy Grover err_vzalloc: 180880eb8761SMike Christie kfree(udev->data_bitmap); 180980eb8761SMike Christie udev->data_bitmap = NULL; 181080eb8761SMike Christie err_bitmap_alloc: 18117c9e7a6fSAndy Grover kfree(info->name); 1812f3cdbe39SMike Christie info->name = NULL; 18137c9e7a6fSAndy Grover 18147c9e7a6fSAndy Grover return ret; 18157c9e7a6fSAndy Grover } 18167c9e7a6fSAndy Grover 1817972c7f16SMike Christie static bool tcmu_dev_configured(struct tcmu_dev *udev) 1818972c7f16SMike Christie { 1819972c7f16SMike Christie return udev->uio_info.uio_dev ? 
true : false; 1820972c7f16SMike Christie } 1821972c7f16SMike Christie 18227c9e7a6fSAndy Grover static void tcmu_free_device(struct se_device *dev) 18237c9e7a6fSAndy Grover { 18247c9e7a6fSAndy Grover struct tcmu_dev *udev = TCMU_DEV(dev); 182592634706SMike Christie 182692634706SMike Christie /* release ref from init */ 182792634706SMike Christie kref_put(&udev->kref, tcmu_dev_kref_release); 182892634706SMike Christie } 182992634706SMike Christie 183092634706SMike Christie static void tcmu_destroy_device(struct se_device *dev) 183192634706SMike Christie { 183292634706SMike Christie struct tcmu_dev *udev = TCMU_DEV(dev); 18337c9e7a6fSAndy Grover 18349103575aSMike Christie del_timer_sync(&udev->cmd_timer); 18359103575aSMike Christie del_timer_sync(&udev->qfull_timer); 18367c9e7a6fSAndy Grover 1837b6df4b79SXiubo Li mutex_lock(&root_udev_mutex); 1838b6df4b79SXiubo Li list_del(&udev->node); 1839b6df4b79SXiubo Li mutex_unlock(&root_udev_mutex); 1840b6df4b79SXiubo Li 1841f892bd8eSZhu Lingshan tcmu_send_dev_remove_event(udev); 18427c9e7a6fSAndy Grover 18437c9e7a6fSAndy Grover uio_unregister_device(&udev->uio_info); 18449260695dSMike Christie 18459260695dSMike Christie /* release ref from configure */ 18469260695dSMike Christie kref_put(&udev->kref, tcmu_dev_kref_release); 18477c9e7a6fSAndy Grover } 18487c9e7a6fSAndy Grover 1849892782caSMike Christie static void tcmu_unblock_dev(struct tcmu_dev *udev) 1850892782caSMike Christie { 1851892782caSMike Christie mutex_lock(&udev->cmdr_lock); 1852892782caSMike Christie clear_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags); 1853892782caSMike Christie mutex_unlock(&udev->cmdr_lock); 1854892782caSMike Christie } 1855892782caSMike Christie 1856892782caSMike Christie static void tcmu_block_dev(struct tcmu_dev *udev) 1857892782caSMike Christie { 1858892782caSMike Christie mutex_lock(&udev->cmdr_lock); 1859892782caSMike Christie 1860892782caSMike Christie if (test_and_set_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags)) 1861892782caSMike Christie goto 
unlock; 1862892782caSMike Christie 1863892782caSMike Christie /* complete IO that has executed successfully */ 1864892782caSMike Christie tcmu_handle_completions(udev); 1865892782caSMike Christie /* fail IO waiting to be queued */ 1866892782caSMike Christie run_cmdr_queue(udev, true); 1867892782caSMike Christie 1868892782caSMike Christie unlock: 1869892782caSMike Christie mutex_unlock(&udev->cmdr_lock); 1870892782caSMike Christie } 1871892782caSMike Christie 1872892782caSMike Christie static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) 1873892782caSMike Christie { 1874892782caSMike Christie struct tcmu_mailbox *mb; 1875892782caSMike Christie struct tcmu_cmd *cmd; 1876892782caSMike Christie int i; 1877892782caSMike Christie 1878892782caSMike Christie mutex_lock(&udev->cmdr_lock); 1879892782caSMike Christie 1880892782caSMike Christie idr_for_each_entry(&udev->commands, cmd, i) { 1881892782caSMike Christie if (!list_empty(&cmd->cmdr_queue_entry)) 1882892782caSMike Christie continue; 1883892782caSMike Christie 1884892782caSMike Christie pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n", 1885892782caSMike Christie cmd->cmd_id, udev->name, 1886892782caSMike Christie test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)); 1887892782caSMike Christie 1888892782caSMike Christie idr_remove(&udev->commands, i); 1889892782caSMike Christie if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { 1890892782caSMike Christie if (err_level == 1) { 1891892782caSMike Christie /* 1892892782caSMike Christie * Userspace was not able to start the 1893892782caSMike Christie * command or it is retryable. 
1894892782caSMike Christie */ 1895892782caSMike Christie target_complete_cmd(cmd->se_cmd, SAM_STAT_BUSY); 1896892782caSMike Christie } else { 1897892782caSMike Christie /* hard failure */ 1898892782caSMike Christie target_complete_cmd(cmd->se_cmd, 1899892782caSMike Christie SAM_STAT_CHECK_CONDITION); 1900892782caSMike Christie } 1901892782caSMike Christie } 1902892782caSMike Christie tcmu_cmd_free_data(cmd, cmd->dbi_cnt); 1903892782caSMike Christie tcmu_free_cmd(cmd); 1904892782caSMike Christie } 1905892782caSMike Christie 1906892782caSMike Christie mb = udev->mb_addr; 1907892782caSMike Christie tcmu_flush_dcache_range(mb, sizeof(*mb)); 1908892782caSMike Christie pr_debug("mb last %u head %u tail %u\n", udev->cmdr_last_cleaned, 1909892782caSMike Christie mb->cmd_tail, mb->cmd_head); 1910892782caSMike Christie 1911892782caSMike Christie udev->cmdr_last_cleaned = 0; 1912892782caSMike Christie mb->cmd_tail = 0; 1913892782caSMike Christie mb->cmd_head = 0; 1914892782caSMike Christie tcmu_flush_dcache_range(mb, sizeof(*mb)); 1915892782caSMike Christie 1916892782caSMike Christie del_timer(&udev->cmd_timer); 1917892782caSMike Christie 1918892782caSMike Christie mutex_unlock(&udev->cmdr_lock); 1919892782caSMike Christie } 1920892782caSMike Christie 19217c9e7a6fSAndy Grover enum { 19223abaa2bfSMike Christie Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors, 192380eb8761SMike Christie Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err, 19247c9e7a6fSAndy Grover }; 19257c9e7a6fSAndy Grover 19267c9e7a6fSAndy Grover static match_table_t tokens = { 19277c9e7a6fSAndy Grover {Opt_dev_config, "dev_config=%s"}, 19287c9e7a6fSAndy Grover {Opt_dev_size, "dev_size=%u"}, 19299c1cd1b6SAndy Grover {Opt_hw_block_size, "hw_block_size=%u"}, 19303abaa2bfSMike Christie {Opt_hw_max_sectors, "hw_max_sectors=%u"}, 1931b849b456SKenjiro Nakayama {Opt_nl_reply_supported, "nl_reply_supported=%d"}, 193280eb8761SMike Christie {Opt_max_data_area_mb, "max_data_area_mb=%u"}, 
19337c9e7a6fSAndy Grover {Opt_err, NULL} 19347c9e7a6fSAndy Grover }; 19357c9e7a6fSAndy Grover 19363abaa2bfSMike Christie static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib) 19373abaa2bfSMike Christie { 19383abaa2bfSMike Christie unsigned long tmp_ul; 19393abaa2bfSMike Christie char *arg_p; 19403abaa2bfSMike Christie int ret; 19413abaa2bfSMike Christie 19423abaa2bfSMike Christie arg_p = match_strdup(arg); 19433abaa2bfSMike Christie if (!arg_p) 19443abaa2bfSMike Christie return -ENOMEM; 19453abaa2bfSMike Christie 19463abaa2bfSMike Christie ret = kstrtoul(arg_p, 0, &tmp_ul); 19473abaa2bfSMike Christie kfree(arg_p); 19483abaa2bfSMike Christie if (ret < 0) { 19493abaa2bfSMike Christie pr_err("kstrtoul() failed for dev attrib\n"); 19503abaa2bfSMike Christie return ret; 19513abaa2bfSMike Christie } 19523abaa2bfSMike Christie if (!tmp_ul) { 19533abaa2bfSMike Christie pr_err("dev attrib must be nonzero\n"); 19543abaa2bfSMike Christie return -EINVAL; 19553abaa2bfSMike Christie } 19563abaa2bfSMike Christie *dev_attrib = tmp_ul; 19573abaa2bfSMike Christie return 0; 19583abaa2bfSMike Christie } 19593abaa2bfSMike Christie 19607c9e7a6fSAndy Grover static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, 19617c9e7a6fSAndy Grover const char *page, ssize_t count) 19627c9e7a6fSAndy Grover { 19637c9e7a6fSAndy Grover struct tcmu_dev *udev = TCMU_DEV(dev); 19647c9e7a6fSAndy Grover char *orig, *ptr, *opts, *arg_p; 19657c9e7a6fSAndy Grover substring_t args[MAX_OPT_ARGS]; 196680eb8761SMike Christie int ret = 0, token, tmpval; 19677c9e7a6fSAndy Grover 19687c9e7a6fSAndy Grover opts = kstrdup(page, GFP_KERNEL); 19697c9e7a6fSAndy Grover if (!opts) 19707c9e7a6fSAndy Grover return -ENOMEM; 19717c9e7a6fSAndy Grover 19727c9e7a6fSAndy Grover orig = opts; 19737c9e7a6fSAndy Grover 19747c9e7a6fSAndy Grover while ((ptr = strsep(&opts, ",\n")) != NULL) { 19757c9e7a6fSAndy Grover if (!*ptr) 19767c9e7a6fSAndy Grover continue; 19777c9e7a6fSAndy Grover 19787c9e7a6fSAndy Grover token 
= match_token(ptr, tokens, args); 19797c9e7a6fSAndy Grover switch (token) { 19807c9e7a6fSAndy Grover case Opt_dev_config: 19817c9e7a6fSAndy Grover if (match_strlcpy(udev->dev_config, &args[0], 19827c9e7a6fSAndy Grover TCMU_CONFIG_LEN) == 0) { 19837c9e7a6fSAndy Grover ret = -EINVAL; 19847c9e7a6fSAndy Grover break; 19857c9e7a6fSAndy Grover } 19867c9e7a6fSAndy Grover pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config); 19877c9e7a6fSAndy Grover break; 19887c9e7a6fSAndy Grover case Opt_dev_size: 19897c9e7a6fSAndy Grover arg_p = match_strdup(&args[0]); 19907c9e7a6fSAndy Grover if (!arg_p) { 19917c9e7a6fSAndy Grover ret = -ENOMEM; 19927c9e7a6fSAndy Grover break; 19937c9e7a6fSAndy Grover } 19947c9e7a6fSAndy Grover ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size); 19957c9e7a6fSAndy Grover kfree(arg_p); 19967c9e7a6fSAndy Grover if (ret < 0) 19977c9e7a6fSAndy Grover pr_err("kstrtoul() failed for dev_size=\n"); 19987c9e7a6fSAndy Grover break; 19999c1cd1b6SAndy Grover case Opt_hw_block_size: 20003abaa2bfSMike Christie ret = tcmu_set_dev_attrib(&args[0], 20013abaa2bfSMike Christie &(dev->dev_attrib.hw_block_size)); 20029c1cd1b6SAndy Grover break; 20033abaa2bfSMike Christie case Opt_hw_max_sectors: 20043abaa2bfSMike Christie ret = tcmu_set_dev_attrib(&args[0], 20053abaa2bfSMike Christie &(dev->dev_attrib.hw_max_sectors)); 20069c1cd1b6SAndy Grover break; 2007b849b456SKenjiro Nakayama case Opt_nl_reply_supported: 2008b849b456SKenjiro Nakayama arg_p = match_strdup(&args[0]); 2009b849b456SKenjiro Nakayama if (!arg_p) { 2010b849b456SKenjiro Nakayama ret = -ENOMEM; 2011b849b456SKenjiro Nakayama break; 2012b849b456SKenjiro Nakayama } 201316b93277SDan Carpenter ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported); 2014b849b456SKenjiro Nakayama kfree(arg_p); 2015b849b456SKenjiro Nakayama if (ret < 0) 201616b93277SDan Carpenter pr_err("kstrtoint() failed for nl_reply_supported=\n"); 2017b849b456SKenjiro Nakayama break; 201880eb8761SMike Christie case Opt_max_data_area_mb: 
201980eb8761SMike Christie if (dev->export_count) { 202080eb8761SMike Christie pr_err("Unable to set max_data_area_mb while exports exist\n"); 202180eb8761SMike Christie ret = -EINVAL; 202280eb8761SMike Christie break; 202380eb8761SMike Christie } 202480eb8761SMike Christie 202580eb8761SMike Christie arg_p = match_strdup(&args[0]); 202680eb8761SMike Christie if (!arg_p) { 202780eb8761SMike Christie ret = -ENOMEM; 202880eb8761SMike Christie break; 202980eb8761SMike Christie } 203080eb8761SMike Christie ret = kstrtoint(arg_p, 0, &tmpval); 203180eb8761SMike Christie kfree(arg_p); 203280eb8761SMike Christie if (ret < 0) { 203380eb8761SMike Christie pr_err("kstrtoint() failed for max_data_area_mb=\n"); 203480eb8761SMike Christie break; 203580eb8761SMike Christie } 203680eb8761SMike Christie 203780eb8761SMike Christie if (tmpval <= 0) { 203880eb8761SMike Christie pr_err("Invalid max_data_area %d\n", tmpval); 203980eb8761SMike Christie ret = -EINVAL; 204080eb8761SMike Christie break; 204180eb8761SMike Christie } 204280eb8761SMike Christie 204380eb8761SMike Christie udev->max_blocks = TCMU_MBS_TO_BLOCKS(tmpval); 204480eb8761SMike Christie if (udev->max_blocks > tcmu_global_max_blocks) { 204580eb8761SMike Christie pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n", 204680eb8761SMike Christie tmpval, 204780eb8761SMike Christie TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks)); 204880eb8761SMike Christie udev->max_blocks = tcmu_global_max_blocks; 204980eb8761SMike Christie } 205080eb8761SMike Christie break; 20517c9e7a6fSAndy Grover default: 20527c9e7a6fSAndy Grover break; 20537c9e7a6fSAndy Grover } 20542579325cSMike Christie 20552579325cSMike Christie if (ret) 20562579325cSMike Christie break; 20577c9e7a6fSAndy Grover } 20587c9e7a6fSAndy Grover 20597c9e7a6fSAndy Grover kfree(orig); 20607c9e7a6fSAndy Grover return (!ret) ? 
count : ret; 20617c9e7a6fSAndy Grover } 20627c9e7a6fSAndy Grover 20637c9e7a6fSAndy Grover static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b) 20647c9e7a6fSAndy Grover { 20657c9e7a6fSAndy Grover struct tcmu_dev *udev = TCMU_DEV(dev); 20667c9e7a6fSAndy Grover ssize_t bl = 0; 20677c9e7a6fSAndy Grover 20687c9e7a6fSAndy Grover bl = sprintf(b + bl, "Config: %s ", 20697c9e7a6fSAndy Grover udev->dev_config[0] ? udev->dev_config : "NULL"); 207080eb8761SMike Christie bl += sprintf(b + bl, "Size: %zu ", udev->dev_size); 207180eb8761SMike Christie bl += sprintf(b + bl, "MaxDataAreaMB: %u\n", 207280eb8761SMike Christie TCMU_BLOCKS_TO_MBS(udev->max_blocks)); 20737c9e7a6fSAndy Grover 20747c9e7a6fSAndy Grover return bl; 20757c9e7a6fSAndy Grover } 20767c9e7a6fSAndy Grover 20777c9e7a6fSAndy Grover static sector_t tcmu_get_blocks(struct se_device *dev) 20787c9e7a6fSAndy Grover { 20797c9e7a6fSAndy Grover struct tcmu_dev *udev = TCMU_DEV(dev); 20807c9e7a6fSAndy Grover 20817c9e7a6fSAndy Grover return div_u64(udev->dev_size - dev->dev_attrib.block_size, 20827c9e7a6fSAndy Grover dev->dev_attrib.block_size); 20837c9e7a6fSAndy Grover } 20847c9e7a6fSAndy Grover 20857c9e7a6fSAndy Grover static sense_reason_t 20867c9e7a6fSAndy Grover tcmu_parse_cdb(struct se_cmd *cmd) 20877c9e7a6fSAndy Grover { 208802eb924fSAndy Grover return passthrough_parse_cdb(cmd, tcmu_queue_cmd); 20899c1cd1b6SAndy Grover } 20909c1cd1b6SAndy Grover 20917d7a7435SNicholas Bellinger static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page) 20927d7a7435SNicholas Bellinger { 20937d7a7435SNicholas Bellinger struct se_dev_attrib *da = container_of(to_config_group(item), 20947d7a7435SNicholas Bellinger struct se_dev_attrib, da_group); 2095b5ab697cSKenjiro Nakayama struct tcmu_dev *udev = TCMU_DEV(da->da_dev); 20967d7a7435SNicholas Bellinger 20977d7a7435SNicholas Bellinger return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); 20987d7a7435SNicholas Bellinger } 
/*
 * configfs store for "cmd_time_out": accepts seconds, stored internally in
 * milliseconds in udev->cmd_time_out.  Rejected while the device is exported,
 * since changing the timeout under active initiators would be unsafe.
 */
static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
				       size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = container_of(da->da_dev,
					struct tcmu_dev, se_dev);
	u32 val;
	int ret;

	if (da->da_dev->export_count) {
		pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
		return -EINVAL;
	}

	ret = kstrtou32(page, 0, &val);
	if (ret < 0)
		return ret;

	/* User supplies seconds; kernel timers below work in msecs. */
	udev->cmd_time_out = val * MSEC_PER_SEC;
	return count;
}
CONFIGFS_ATTR(tcmu_, cmd_time_out);

/*
 * configfs show for "qfull_time_out".  Non-positive values (0 and the -1
 * "block indefinitely" sentinel) are reported raw; positive values are
 * converted back from msecs to the seconds the user originally wrote.
 */
static ssize_t tcmu_qfull_time_out_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%ld\n", udev->qfull_time_out <= 0 ?
			udev->qfull_time_out :
			udev->qfull_time_out / MSEC_PER_SEC);
}

/*
 * configfs store for "qfull_time_out": >= 0 means seconds (stored as msecs),
 * -1 is a special "wait forever" value stored as-is; anything else is invalid.
 */
static ssize_t tcmu_qfull_time_out_store(struct config_item *item,
					 const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	s32 val;
	int ret;

	ret = kstrtos32(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val >= 0) {
		udev->qfull_time_out = val * MSEC_PER_SEC;
	} else if (val == -1) {
		udev->qfull_time_out = val;
	} else {
		printk(KERN_ERR "Invalid qfull timeout value %d\n", val);
		return -EINVAL;
	}
	return count;
}
CONFIGFS_ATTR(tcmu_, qfull_time_out);

/*
 * configfs show (read-only attribute) for "max_data_area_mb": reports the
 * per-device data-area cap, converting the internal block count back to MB.
 */
static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%u\n",
			TCMU_BLOCKS_TO_MBS(udev->max_blocks));
}
CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);

/* configfs show for "dev_config": the userspace handler's config string. */
static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config);
}

/*
 * Notify the userspace daemon (via the tcmu generic netlink family) that the
 * device's config string is being changed.  Returns 0 on success; on failure
 * the skb is freed here and the caller must not commit the new config.
 */
static int tcmu_send_dev_config_event(struct tcmu_dev *udev,
				      const char *reconfig_data)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	ret = nla_put_string(skb, TCMU_ATTR_DEV_CFG, reconfig_data);
	if (ret < 0) {
		nlmsg_free(skb);
		return ret;
	}
	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
				       &skb, &msg_header);
}


/*
 * configfs store for "dev_config".  If the device is already configured,
 * userspace is asked to accept the reconfiguration first; only after the
 * netlink event succeeds is the new string committed and the UIO info
 * refreshed.  Before first configuration the string is simply stored.
 */
static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page,
				     size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	int ret, len;

	len = strlen(page);
	/* Must be non-empty and leave room for the NUL terminator. */
	if (!len || len > TCMU_CONFIG_LEN - 1)
		return -EINVAL;

	/* Check if device has been configured before */
	if (tcmu_dev_configured(udev)) {
		ret = tcmu_send_dev_config_event(udev, page);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
		strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);

		ret = tcmu_update_uio_info(udev);
		if (ret)
			return ret;
		return count;
	}
	strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN);

	return count;
}
CONFIGFS_ATTR(tcmu_, dev_config);

/* configfs show for "dev_size": backstore size in bytes. */
static ssize_t tcmu_dev_size_show(struct config_item *item, char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size);
}
/*
 * Notify userspace via netlink that the device size is being changed.
 * On failure the skb is freed here and the caller must not commit.
 */
static int tcmu_send_dev_size_event(struct tcmu_dev *udev, u64 size)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	ret = nla_put_u64_64bit(skb, TCMU_ATTR_DEV_SIZE,
				size, TCMU_ATTR_PAD);
	if (ret < 0) {
		nlmsg_free(skb);
		return ret;
	}
	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
				       &skb, &msg_header);
}

/*
 * configfs store for "dev_size".  For an already-configured device the
 * userspace handler must acknowledge the resize (netlink) before the new
 * size is committed.
 */
static ssize_t tcmu_dev_size_store(struct config_item *item, const char *page,
				   size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u64 val;
	int ret;

	ret = kstrtou64(page, 0, &val);
	if (ret < 0)
		return ret;

	/* Check if device has been configured before */
	if (tcmu_dev_configured(udev)) {
		ret = tcmu_send_dev_size_event(udev, val);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
	}
	udev->dev_size = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, dev_size);

/* configfs show for "nl_reply_supported": -1/0/1 netlink-reply policy. */
static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
					    char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

	return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
}

/*
 * configfs store for "nl_reply_supported".  Any s8 value is accepted;
 * interpretation of the value happens at netlink-event time.
 */
static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
					     const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
						struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	s8 val;
	int ret;

	ret = kstrtos8(page, 0, &val);
	if (ret < 0)
		return ret;

	udev->nl_reply_supported = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, nl_reply_supported);

/* configfs show for "emulate_write_cache" (stored in the generic attribs). */
static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
					     char *page)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);

	return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache);
}

/*
 * Notify userspace via netlink of a write-cache emulation change.
 * On failure the skb is freed here and the caller must not commit.
 */
static int tcmu_send_emulate_write_cache(struct tcmu_dev *udev, u8 val)
{
	struct sk_buff *skb = NULL;
	void *msg_header = NULL;
	int ret = 0;

	ret = tcmu_netlink_event_init(udev, TCMU_CMD_RECONFIG_DEVICE,
				      &skb, &msg_header);
	if (ret < 0)
		return ret;
	ret = nla_put_u8(skb, TCMU_ATTR_WRITECACHE, val);
	if (ret < 0) {
		nlmsg_free(skb);
		return ret;
	}
	return tcmu_netlink_event_send(udev, TCMU_CMD_RECONFIG_DEVICE,
				       &skb, &msg_header);
}

/*
 * configfs store for "emulate_write_cache".  Like dev_config/dev_size, a
 * configured device requires userspace acknowledgement before commit.
 */
static ssize_t tcmu_emulate_write_cache_store(struct config_item *item,
					      const char *page, size_t count)
{
	struct se_dev_attrib *da = container_of(to_config_group(item),
					struct se_dev_attrib, da_group);
	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
	u8 val;
	int ret;

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	/* Check if device has been configured before */
	if (tcmu_dev_configured(udev)) {
		ret = tcmu_send_emulate_write_cache(udev, val);
		if (ret) {
			pr_err("Unable to reconfigure device\n");
			return ret;
		}
	}

	da->emulate_write_cache = val;
	return count;
}
CONFIGFS_ATTR(tcmu_, emulate_write_cache);

/* Device-action attribute: report whether the device is blocked. */
static ssize_t tcmu_block_dev_show(struct config_item *item, char *page)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);

	if (test_bit(TCMU_DEV_BIT_BLOCKED, &udev->flags))
		return snprintf(page, PAGE_SIZE, "%s\n", "blocked");
	else
		return snprintf(page, PAGE_SIZE, "%s\n", "unblocked");
}

/* Device-action store: write 1 to block the device, 0 to unblock it. */
static ssize_t tcmu_block_dev_store(struct config_item *item, const char *page,
				    size_t count)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	u8 val;
	int ret;

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val > 1) {
		pr_err("Invalid block value %d\n", val);
		return -EINVAL;
	}

	if (!val)
		tcmu_unblock_dev(udev);
	else
		tcmu_block_dev(udev);
	return count;
}
CONFIGFS_ATTR(tcmu_, block_dev);

/*
 * Device-action store (write-only): reset the command ring.  Only the
 * values 1 and 2 are accepted; their meaning is interpreted by
 * tcmu_reset_ring().
 */
static ssize_t tcmu_reset_ring_store(struct config_item *item, const char *page,
				     size_t count)
{
	struct se_device *se_dev = container_of(to_config_group(item),
						struct se_device,
						dev_action_group);
	struct tcmu_dev *udev = TCMU_DEV(se_dev);
	u8 val;
	int ret;

	ret = kstrtou8(page, 0, &val);
	if (ret < 0)
		return ret;

	if (val != 1 && val != 2) {
		pr_err("Invalid reset ring value %d\n", val);
		return -EINVAL;
	}

	tcmu_reset_ring(udev, val);
	return count;
}
CONFIGFS_ATTR_WO(tcmu_, reset_ring);

/* tcmu-specific dev_attrib attributes, appended to the passthrough set
 * at module init (see tcmu_module_init below). */
static struct configfs_attribute *tcmu_attrib_attrs[] = {
	&tcmu_attr_cmd_time_out,
	&tcmu_attr_qfull_time_out,
	&tcmu_attr_max_data_area_mb,
	&tcmu_attr_dev_config,
	&tcmu_attr_dev_size,
	&tcmu_attr_emulate_write_cache,
	&tcmu_attr_nl_reply_supported,
	NULL,
};

/* Combined attribute table built at init: passthrough attrs + the above. */
static struct configfs_attribute **tcmu_attrs;

/* Attributes under the device "action" group (block/unblock, ring reset). */
static struct configfs_attribute *tcmu_action_attrs[] = {
	&tcmu_attr_block_dev,
	&tcmu_attr_reset_ring,
	NULL,
};

/* Backend ops registered with the target core for the "user" backstore. */
static struct target_backend_ops tcmu_ops = {
	.name = "user",
	.owner = THIS_MODULE,
	.transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
	.attach_hba = tcmu_attach_hba,
	.detach_hba = tcmu_detach_hba,
	.alloc_device = tcmu_alloc_device,
	.configure_device = tcmu_configure_device,
	.destroy_device = tcmu_destroy_device,
	.free_device = tcmu_free_device,
	.parse_cdb = tcmu_parse_cdb,
	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
	.show_configfs_dev_params = tcmu_show_configfs_dev_params,
	.get_device_type = sbc_get_device_type,
	.get_blocks = tcmu_get_blocks,
	.tb_dev_action_attrs = tcmu_action_attrs,
};

/*
 * Reclaim data-area blocks from all tcmu devices when the global block
 * count exceeds the limit.  For each device: complete finished commands,
 * then shrink dbi_thresh/dbi_max past the last used bit, unmap the freed
 * range from userspace and release the backing pages.  Re-arms the unmap
 * work if still over the limit afterwards.
 */
static void find_free_blocks(void)
{
	struct tcmu_dev *udev;
	loff_t off;
	u32 start, end, block, total_freed = 0;

	if (atomic_read(&global_db_count) <= tcmu_global_max_blocks)
		return;

	mutex_lock(&root_udev_mutex);
	list_for_each_entry(udev, &root_udev, node) {
		mutex_lock(&udev->cmdr_lock);

		/* Try to complete the finished commands first */
		tcmu_handle_completions(udev);

		/* Skip the udevs in idle */
		if (!udev->dbi_thresh) {
			mutex_unlock(&udev->cmdr_lock);
			continue;
		}

		end = udev->dbi_max + 1;
		block = find_last_bit(udev->data_bitmap, end);
		if (block == udev->dbi_max) {
			/*
			 * The last bit is dbi_max, so it is not possible
			 * reclaim any blocks.
			 */
			mutex_unlock(&udev->cmdr_lock);
			continue;
		} else if (block == end) {
			/* The current udev will goto idle state */
			udev->dbi_thresh = start = 0;
			udev->dbi_max = 0;
		} else {
			udev->dbi_thresh = start = block + 1;
			udev->dbi_max = block;
		}

		/* Here will truncate the data area from off */
		off = udev->data_off + start * DATA_BLOCK_SIZE;
		unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);

		/* Release the block pages */
		tcmu_blocks_release(&udev->data_blocks, start, end);
		mutex_unlock(&udev->cmdr_lock);

		total_freed += end - start;
		pr_debug("Freed %u blocks (total %u) from %s.\n", end - start,
			 total_freed, udev->name);
	}
	mutex_unlock(&root_udev_mutex);

	/* Still over the global limit: retry reclaim after 5 seconds. */
	if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
		schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
}

/*
 * Expire commands on devices that hit a timeout.  The timed-out list is
 * spliced under the bh-safe spinlock, but each device's commands are
 * walked under cmdr_lock, so the spinlock is dropped around that work.
 */
static void check_timedout_devices(void)
{
	struct tcmu_dev *udev, *tmp_dev;
	LIST_HEAD(devs);

	spin_lock_bh(&timed_out_udevs_lock);
	list_splice_init(&timed_out_udevs, &devs);

	list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
		list_del_init(&udev->timedout_entry);
		spin_unlock_bh(&timed_out_udevs_lock);

		mutex_lock(&udev->cmdr_lock);
		idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
		mutex_unlock(&udev->cmdr_lock);

		spin_lock_bh(&timed_out_udevs_lock);
	}

	spin_unlock_bh(&timed_out_udevs_lock);
}

/* Delayed-work handler: expire timed-out commands, then reclaim blocks. */
static void tcmu_unmap_work_fn(struct work_struct *work)
{
	check_timedout_devices();
	find_free_blocks();
}

/*
 * Module init: create the command cache, register the root device and
 * netlink family, build the combined attribute table (passthrough attrs +
 * tcmu attrs + NULL terminator), and register the backend.  Unwinds in
 * reverse order on any failure.
 */
static int __init tcmu_module_init(void)
{
	int ret, i, k, len = 0;

	BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);

	INIT_DELAYED_WORK(&tcmu_unmap_work, tcmu_unmap_work_fn);

	tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
				sizeof(struct tcmu_cmd),
				__alignof__(struct tcmu_cmd),
				0, NULL);
	if (!tcmu_cmd_cache)
		return -ENOMEM;

	tcmu_root_device = root_device_register("tcm_user");
	if (IS_ERR(tcmu_root_device)) {
		ret = PTR_ERR(tcmu_root_device);
		goto out_free_cache;
	}

	ret = genl_register_family(&tcmu_genl_family);
	if (ret < 0) {
		goto out_unreg_device;
	}

	/* Size the combined table: passthrough attrs + tcmu attrs ... */
	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
		len += sizeof(struct configfs_attribute *);
	}
	for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) {
		len += sizeof(struct configfs_attribute *);
	}
	/* ... plus one slot for the NULL terminator (kzalloc zeroes it). */
	len += sizeof(struct configfs_attribute *);

	tcmu_attrs = kzalloc(len, GFP_KERNEL);
	if (!tcmu_attrs) {
		ret = -ENOMEM;
		goto out_unreg_genl;
	}

	for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
		tcmu_attrs[i] = passthrough_attrib_attrs[i];
	}
	for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) {
		tcmu_attrs[i] = tcmu_attrib_attrs[k];
		i++;
	}
	tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;

	ret = transport_backend_register(&tcmu_ops);
	if (ret)
		goto out_attrs;

	return 0;

out_attrs:
	kfree(tcmu_attrs);
out_unreg_genl:
	genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
	root_device_unregister(tcmu_root_device);
out_free_cache:
	kmem_cache_destroy(tcmu_cmd_cache);

	return ret;
}

/* Module exit: tear down in reverse order of tcmu_module_init(). */
static void __exit tcmu_module_exit(void)
{
	cancel_delayed_work_sync(&tcmu_unmap_work);
	target_backend_unregister(&tcmu_ops);
	kfree(tcmu_attrs);
	genl_unregister_family(&tcmu_genl_family);
	root_device_unregister(tcmu_root_device);
	kmem_cache_destroy(tcmu_cmd_cache);
}

MODULE_DESCRIPTION("TCM USER subsystem plugin");
MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
MODULE_LICENSE("GPL");

module_init(tcmu_module_init);
module_exit(tcmu_module_exit);