/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>

enum {
	/*
	 * Driver internal status code for commands that were cancelled due
	 * to timeouts or controller shutdown.  The value is negative so
	 * that it a) doesn't overlap with the unsigned hardware error codes,
	 * and b) can easily be tested for.
	 */
	NVME_SC_CANCELLED		= -EINTR,
};

extern unsigned char nvme_io_timeout;
#define NVME_IO_TIMEOUT		(nvme_io_timeout * HZ)

extern unsigned char admin_timeout;
#define ADMIN_TIMEOUT		(admin_timeout * HZ)

extern unsigned char shutdown_timeout;
#define SHUTDOWN_TIMEOUT	(shutdown_timeout * HZ)

#define NVME_DEFAULT_KATO	5
#define NVME_KATO_GRACE		10

enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns zeroes on reads to
	 * discarded logical blocks.
	 */
	NVME_QUIRK_DISCARD_ZEROES		= (1 << 2),
};
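
/*
 * Quirks are carried as a bitmask in the quirks field of struct nvme_ctrl
 * (defined below), so callers test for one with a plain bit test.
 * Illustrative sketch only, not the exact core.c handling:
 *
 *	if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
 *		blk_queue_chunk_sectors(ns->queue, ctrl->stripe_size >> 9);
 */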

enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_RECONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};

struct nvme_ctrl {
	enum nvme_ctrl_state state;
	spinlock_t lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct device *dev;
	struct kref kref;
	int instance;
	struct blk_mq_tag_set *tagset;
	struct list_head namespaces;
	struct mutex namespaces_mutex;
	struct device *device;	/* char device */
	struct list_head node;
	struct ida ns_ida;

	char name[12];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u16 cntlid;

	u32 ctrl_config;

	u32 page_size;
	u32 max_hw_sectors;
	u32 stripe_size;
	u16 oncs;
	u16 vid;
	atomic_t abort_limit;
	u8 event_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;

	/* Fabrics only */
	u16 sqsize;
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	struct nvmf_ctrl_options *opts;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN.
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
	struct kref kref;
	int instance;

	u8 eui[8];
	u8 uuid[16];

	unsigned ns_id;
	int lba_shift;
	u16 ms;
	bool ext;
	u8 pi_type;
	int type;
	unsigned long flags;

#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD     1

	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	bool is_fabrics;
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	int (*reset_ctrl)(struct nvme_ctrl *ctrl);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*post_scan)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl, int aer_idx);
	int (*delete_ctrl)(struct nvme_ctrl *ctrl);
	const char *(*get_subsysnqn)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};

static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	/* writing the ASCII string "NVMe" to NSSR triggers the reset */
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}
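
/*
 * Worked example for nvme_block_nr() above: the block layer counts in
 * 512-byte sectors while the namespace counts in logical blocks of
 * 1 << lba_shift bytes.  For a namespace formatted with 4096-byte blocks
 * (lba_shift == 12) the conversion is sector >> 3, so 512-byte sector 16
 * becomes LBA 2.
 */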

static inline unsigned nvme_map_len(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return sizeof(struct nvme_dsm_range);
	else
		return blk_rq_bytes(rq);
}

static inline void nvme_cleanup_cmd(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD)
		kfree(req->completion_data);
}

static inline int nvme_error_status(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_SUCCESS:
		return 0;
	case NVME_SC_CAP_EXCEEDED:
		return -ENOSPC;
	default:
		return -EIO;
	}
}

static inline bool nvme_req_needs_retry(struct request *req, u16 status)
{
	return !(status & NVME_SC_DNR || blk_noretry_request(req)) &&
		(jiffies - req->start_time) < req->timeout;
}

void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_queue_scan(struct nvme_ctrl *ctrl);
void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

#define NVME_NR_AERS	1
void nvme_complete_async_event(struct nvme_ctrl *ctrl,
		struct nvme_completion *cqe);
void nvme_queue_async_events(struct nvme_ctrl *ctrl);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, unsigned int flags, int qid);
void nvme_requeue_req(struct request *req);
int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		struct nvme_completion *cqe, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head, int flags);
int nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen, u32 *result,
		unsigned timeout);
int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
		void __user *ubuffer, unsigned bufflen,
		void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
		u32 *result, unsigned timeout);
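
/*
 * Sketch of the synchronous submission path declared above, modelled on
 * the Identify Controller helper (illustrative only; see core.c for the
 * real implementation and its error handling).  Assumes id points to a
 * kmalloc()ed struct nvme_id_ctrl:
 *
 *	struct nvme_command c = { };
 *	int error;
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = cpu_to_le32(1);
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id,
 *				     sizeof(struct nvme_id_ctrl));
 */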

int nvme_identify_ctrl(struct nvme_ctrl *dev, struct nvme_id_ctrl **id);
int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid,
		struct nvme_id_ns **id);
int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log);
int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid,
		dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
		dma_addr_t dma_addr, u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_start_keep_alive(struct nvme_ctrl *ctrl);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);

struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);

#ifdef CONFIG_NVM
int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
int nvme_nvm_register(struct request_queue *q, char *disk_name);
void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
#else
static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {}

static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	return 0;
}
#endif /* CONFIG_NVM */

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */