/*******************************************************************************
 * Filename:  target_core_hba.c
 *
 * This file contains the TCM HBA Transport related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/sock.h>
#include <net/tcp.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"

static LIST_HEAD(backend_list);
static DEFINE_MUTEX(backend_mutex);

static u32 hba_id_counter;

static DEFINE_SPINLOCK(hba_lock);
static LIST_HEAD(hba_list);


int transport_backend_register(const struct target_backend_ops *ops)
{
	struct target_backend *tb, *old;

	tb = kzalloc(sizeof(*tb), GFP_KERNEL);
	if (!tb)
		return -ENOMEM;
	tb->ops = ops;

	mutex_lock(&backend_mutex);
	list_for_each_entry(old, &backend_list, list) {
		if (!strcmp(old->ops->name, ops->name)) {
			pr_err("backend %s already registered.\n", ops->name);
			mutex_unlock(&backend_mutex);
			kfree(tb);
			return -EEXIST;
		}
	}
	target_setup_backend_cits(tb);
	list_add_tail(&tb->list, &backend_list);
	mutex_unlock(&backend_mutex);

	pr_debug("TCM: Registered subsystem plugin: %s struct module: %p\n",
			ops->name, ops->owner);
	return 0;
}
EXPORT_SYMBOL(transport_backend_register);

void target_backend_unregister(const struct target_backend_ops *ops)
{
	struct target_backend *tb;

	mutex_lock(&backend_mutex);
	list_for_each_entry(tb, &backend_list, list) {
		if (tb->ops == ops) {
			list_del(&tb->list);
			mutex_unlock(&backend_mutex);
			/*
			 * Wait for any outstanding backend driver ->rcu_head
			 * callbacks to complete post TBO->free_device() ->
			 * call_rcu(), before allowing backend driver module
			 * unload of target_backend_ops->owner to proceed.
			 */
			rcu_barrier();
			kfree(tb);
			return;
		}
	}
	mutex_unlock(&backend_mutex);
}
EXPORT_SYMBOL(target_backend_unregister);
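/*
 * A minimal sketch of how a backend driver would use the registration API
 * above, modeled loosely on in-tree backends such as target_core_iblock.c.
 * The "example" names and the mostly-empty ops table are hypothetical; a
 * real backend must fill in the mandatory callbacks (attach_hba, detach_hba,
 * alloc_device, configure_device, free_device, parse_cdb, get_blocks, ...)
 * before registering.  Guarded by #if 0 so it is never built.
 */
#if 0
static const struct target_backend_ops example_ops = {
	.name	= "example",
	.owner	= THIS_MODULE,
	/* mandatory callbacks elided in this sketch */
};

static int __init example_module_init(void)
{
	/* Fails with -EEXIST if a backend named "example" already exists. */
	return transport_backend_register(&example_ops);
}

static void __exit example_module_exit(void)
{
	/*
	 * Blocks in rcu_barrier() until outstanding ->free_device() RCU
	 * callbacks have run, making the subsequent module unload safe.
	 */
	target_backend_unregister(&example_ops);
}

module_init(example_module_init);
module_exit(example_module_exit);
#endif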
/*
 * Look up a registered backend by name, taking a reference on its owning
 * module on success so it cannot be unloaded while an HBA uses it.
 */
static struct target_backend *core_get_backend(const char *name)
{
	struct target_backend *tb;

	mutex_lock(&backend_mutex);
	list_for_each_entry(tb, &backend_list, list) {
		if (!strcmp(tb->ops->name, name))
			goto found;
	}
	mutex_unlock(&backend_mutex);
	return NULL;
found:
	if (tb->ops->owner && !try_module_get(tb->ops->owner))
		tb = NULL;
	mutex_unlock(&backend_mutex);
	return tb;
}

struct se_hba *
core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
{
	struct se_hba *hba;
	int ret = 0;

	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
	if (!hba) {
		pr_err("Unable to allocate struct se_hba\n");
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_init(&hba->device_lock);
	mutex_init(&hba->hba_access_mutex);

	hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
	hba->hba_flags |= hba_flags;

	hba->backend = core_get_backend(plugin_name);
	if (!hba->backend) {
		ret = -EINVAL;
		goto out_free_hba;
	}

	ret = hba->backend->ops->attach_hba(hba, plugin_dep_id);
	if (ret < 0)
		goto out_module_put;

	spin_lock(&hba_lock);
	hba->hba_id = hba_id_counter++;
	list_add_tail(&hba->hba_node, &hba_list);
	spin_unlock(&hba_lock);

	pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target"
			" Core\n", hba->hba_id);

	return hba;

out_module_put:
	module_put(hba->backend->ops->owner);
	hba->backend = NULL;
out_free_hba:
	kfree(hba);
	return ERR_PTR(ret);
}

int
core_delete_hba(struct se_hba *hba)
{
	WARN_ON(hba->dev_count);

	hba->backend->ops->detach_hba(hba);

	spin_lock(&hba_lock);
	list_del(&hba->hba_node);
	spin_unlock(&hba_lock);

	pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
			" Core\n", hba->hba_id);

	module_put(hba->backend->ops->owner);

	hba->backend = NULL;
	kfree(hba);
	return 0;
}

/*
 * Devices with more than U32_MAX blocks need 64-bit LBAs in sense data,
 * which only descriptor format sense can carry; the fixed format limits
 * the INFORMATION field to 32 bits.
 */
bool target_sense_desc_format(struct se_device *dev)
{
	return (dev) ? dev->transport->get_blocks(dev) > U32_MAX : false;
}
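/*
 * Illustrative sketch of the HBA lifecycle as driven by the configfs code
 * in target_core_configfs.c: an HBA is allocated against a named backend,
 * used to attach devices, and torn down once its dev_count drops to zero.
 * The plugin name "iblock" and the zero dep_id/flags values are assumptions
 * for illustration only.  Guarded by #if 0 so it is never built.
 */
#if 0
static int example_hba_lifecycle(void)
{
	struct se_hba *hba;

	/* Takes a reference on the backend module via core_get_backend(). */
	hba = core_alloc_hba("iblock", 0, 0);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	/* ... create and configure se_device instances on this HBA ... */

	/* Drops the module reference and frees the se_hba. */
	return core_delete_hba(hba);
}
#endif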