/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	NVME_AEN_CFG_NS_ATTR

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/*
 * Helper macros for building the error result when a Connect command fails
 * with NVME_SC_CONNECT_INVALID_PARAM.  The 16-bit shift sets the IATTR bit
 * to 1, which means that the offending byte offset lies in the data portion
 * of the Connect command rather than in the SQE.
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))

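/*
 * Illustrative sketch (not part of the original header): a Connect handler
 * that finds a bad cntlid in the connect data could use the macro above
 * roughly like this, assuming @req is the nvmet_req for the Connect command:
 *
 *	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 *	return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 *
 * The result then carries the byte offset of the offending field, with the
 * IATTR bit indicating that the offset refers to the connect data.
 */
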
struct nvmet_ns {
	struct list_head dev_link;
	struct percpu_ref ref;
	struct block_device *bdev;
	struct file *file;
	u32 nsid;
	u32 blksize_shift;
	loff_t size;
	u8 nguid[16];
	uuid_t uuid;

	bool buffered_io;
	bool enabled;
	struct nvmet_subsys *subsys;
	const char *device_path;

	struct config_group device_group;
	struct config_group group;

	struct completion disable_done;
	mempool_t *bvec_pool;
	struct kmem_cache *bvec_cache;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

struct nvmet_cq {
	u16 qid;
	u16 size;
};

struct nvmet_sq {
	struct nvmet_ctrl *ctrl;
	struct percpu_ref ref;
	u16 qid;
	u16 size;
	u32 sqhd;
	struct completion free_done;
	struct completion confirm_done;
};

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @subsys_group:	ConfigFS group holding the subsystems exported
 *				on this port.
 * @subsystems:		List of subsystems exported on this port.
 * @referrals_group:	ConfigFS group holding this port's referrals.
 * @referrals:		List of discovery referrals announced on this port.
 * @priv:		Private data for the transport.
 * @enabled:		True while the port is enabled on its transport.
 */
struct nvmet_port {
	struct list_head entry;
	struct nvmf_disc_rsp_page_entry disc_addr;
	struct config_group group;
	struct config_group subsys_group;
	struct list_head subsystems;
	struct config_group referrals_group;
	struct list_head referrals;
	void *priv;
	bool enabled;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

struct nvmet_ctrl {
	struct nvmet_subsys *subsys;
	struct nvmet_cq **cqs;
	struct nvmet_sq **sqs;

	struct mutex lock;
	u64 cap;
	u32 cc;
	u32 csts;

	uuid_t hostid;
	u16 cntlid;
	u32 kato;

	u32 aen_enabled;
	unsigned long aen_masked;
	struct nvmet_req *async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int nr_async_event_cmds;
	struct list_head async_events;
	struct work_struct async_event_work;

	struct list_head subsys_entry;
	struct kref ref;
	struct delayed_work ka_work;
	struct work_struct fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32 *changed_ns_list;
	u32 nr_changed_ns;

	char subsysnqn[NVMF_NQN_FIELD_LEN];
	char hostnqn[NVMF_NQN_FIELD_LEN];
};

struct nvmet_subsys {
	enum nvme_subsys_type type;

	struct mutex lock;
	struct kref ref;

	struct list_head namespaces;
	unsigned int max_nsid;

	struct list_head ctrls;

	struct list_head hosts;
	bool allow_any_host;

	u16 max_qid;

	u64 ver;
	u64 serial;
	char *subsysnqn;

	struct config_group group;

	struct config_group namespaces_group;
	struct config_group allowed_hosts_group;
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group group;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head entry;
	struct nvmet_host *host;
};

struct nvmet_subsys_link {
	struct list_head entry;
	struct nvmet_subsys *subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int sqe_inline_size;
	unsigned int msdbd;
	bool has_keyed_sgls : 1;
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
};

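/*
 * Illustrative sketch (not part of the original header): a transport driver
 * fills in a struct nvmet_fabrics_ops and registers it with the core.  The
 * "foo" names below are placeholders for a hypothetical transport, not a
 * real driver:
 *
 *	static const struct nvmet_fabrics_ops nvmet_foo_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_RDMA,
 *		.msdbd		= 1,
 *		.queue_response	= nvmet_foo_queue_response,
 *		.add_port	= nvmet_foo_add_port,
 *		.remove_port	= nvmet_foo_remove_port,
 *		.delete_ctrl	= nvmet_foo_delete_ctrl,
 *	};
 *
 *	ret = nvmet_register_transport(&nvmet_foo_ops);
 *
 * nvmet_register_transport()/nvmet_unregister_transport() are declared
 * further down in this header.
 */
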
#define NVMET_MAX_INLINE_BIOVEC	8

struct nvmet_req {
	struct nvme_command *cmd;
	struct nvme_completion *rsp;
	struct nvmet_sq *sq;
	struct nvmet_cq *cq;
	struct nvmet_ns *ns;
	struct scatterlist *sg;
	struct bio_vec inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio inline_bio;
		} b;
		struct {
			bool mpool_alloc;
			struct kiocb iocb;
			struct bio_vec *bvec;
			struct work_struct work;
		} f;
	};
	int sg_cnt;
	/* data length as parsed from the command: */
	size_t data_len;
	/* data length as parsed from the SGL descriptor: */
	size_t transfer_len;

	struct nvmet_port *port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;
};

extern struct workqueue_struct *buffered_io_wq;

static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
{
	/* bit 0 of the completion status field is the phase tag */
	req->rsp->status = cpu_to_le16(status << 1);
}

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->rsp->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

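/*
 * Illustrative sketch (not part of the original header): a DMA-capable
 * transport would typically pass nvmet_data_dir() when mapping the
 * request's scatterlist, e.g. with "dev" standing in for the transport's
 * DMA device:
 *
 *	int count = dma_map_sg(dev, req->sg, req->sg_cnt,
 *			       nvmet_data_dir(req));
 *
 * so a host write is mapped DMA_FROM_DEVICE (data flows into target memory)
 * and a host read is mapped DMA_TO_DEVICE.
 */
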
struct nvmet_async_event {
	struct list_head entry;
	u8 event_type;
	u8 event_info;
	u8 log_page;
};

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
void nvmet_req_execute(struct nvmet_req *req);
void nvmet_req_complete(struct nvmet_req *req, u16 status);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);

#define NVMET_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE
#define NVMET_KAS		10
#define NVMET_DISC_KATO		120

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern u64 nvmet_genctr;
extern struct rw_semaphore nvmet_config_sem;

bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);

static inline u32 nvmet_rw_len(struct nvmet_req *req)
{
	/* the length field in the read/write command is a 0's based value */
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

#endif /* _NVMET_H */