/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5
#define NVME_KATO_GRACE		10

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns 0's on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Supports the LightNVM command set if indicated in vs[1].
	 */
	NVME_QUIRK_LIGHTNVM			= (1 << 6),

	/*
	 * Set MEDIUM priority on SQ creation
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),

	/*
	 * Ignore device provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN		= (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES		= (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND		= (1 << 10),
};

/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			retries;
	u8			flags;
	u16			status;
	struct nvme_ctrl	*ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED		= (1 << 0),
	NVME_REQ_USERCMD		= (1 << 1),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->rq_disk)
		return 0;
	return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(req)) + 1;
}
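
/*
 * Usage sketch (illustrative, not part of the original header): because
 * nvme_req() is just blk_mq_rq_to_pdu(), every transport driver must lay
 * out its per-request PDU with struct nvme_request as the first member,
 * as the comment above struct nvme_request requires.  A hypothetical
 * driver would do:
 *
 *	struct my_transport_request {
 *		struct nvme_request	req;		// must be the first member
 *		dma_addr_t		sgl_dma;	// driver-private state follows
 *	};
 *
 * so that nvme_req(rq) and blk_mq_rq_to_pdu(rq) name the same memory.
 * (my_transport_request and sgl_dma are stand-in names.)
 */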

/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300
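
/*
 * Usage sketch (illustrative, not part of the original header): quirks are
 * seeded into ctrl->quirks from the transport's device ID table at probe
 * time and tested as a plain bitmask.  For the device above, the PCIe
 * driver effectively does:
 *
 *	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
 *		msleep(NVME_QUIRK_DELAY_AMOUNT);
 *
 * before it starts polling NVME_REG_CSTS for NVME_CSTS_RDY.
 */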

enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_ADMIN_ONLY,    /* Only admin queue live */
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};

struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};

struct nvme_ctrl {
	bool comp_seen;
	enum nvme_ctrl_state state;
	bool identified;
	spinlock_t lock;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u32 ctrl_config;
	u16 mtfa;
	u32 queue_count;

	u64 cap;
	u32 page_size;
	u32 max_hw_sectors;
	u32 max_segments;
	u16 crdt[3];
	u16 oncs;
	u16 oacs;
	u16 nssa;
	u16 nr_streams;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct nvme_command ka_cmd;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
#endif

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;
	u16 hmmaxd;

	/* Fabrics only */
	u16 sqsize;
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;
};

enum nvme_iopolicy {
	NVME_IOPOLICY_NUMA,
	NVME_IOPOLICY_RR,
};

struct nvme_subsystem {
	int			instance;
	struct device		dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref		ref;
	struct list_head	entry;
	struct mutex		lock;
	struct list_head	ctrls;
	struct list_head	nsheads;
	char			subnqn[NVMF_NQN_SIZE];
	char			serial[20];
	char			model[40];
	char			firmware_rev[8];
	u8			cmic;
	u16			vendor_id;
	u16			awupf;	/* 0's based awupf value */
	struct ida		ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy	iopolicy;
#endif
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8	eui64[8];
	u8	nguid[16];
	uuid_t	uuid;
};

/*
 * Anchor structure for namespaces.  There is one for each namespace in an
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it.  For private namespaces
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head	list;
	struct srcu_struct	srcu;
	struct nvme_subsystem	*subsys;
	unsigned		ns_id;
	struct nvme_ns_ids	ids;
	struct list_head	entry;
	struct kref		ref;
	int			instance;
#ifdef CONFIG_NVME_MULTIPATH
	struct gendisk		*disk;
	struct bio_list		requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;
	struct mutex		lock;
	struct nvme_ns __rcu	*current_path[];
#endif
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct nvm_dev *ndev;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 sgs;
	u32 sws;
	bool ext;
	u8 pi_type;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_DEAD		1
#define NVME_NS_ANA_PENDING	2
	u16 noiob;

	struct nvme_fault_inject fault_inject;

};

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_PCI_P2PDMA		(1 << 2)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}
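
/*
 * Completion-path sketch (illustrative, not part of the original header):
 * bit 0 of the NVMe CQE status field is the phase tag, which is why
 * nvme_end_request() below shifts the status right by one before storing
 * it.  A hypothetical transport would reap a completion roughly like this:
 *
 *	static void my_handle_cqe(struct my_queue *q, struct nvme_completion *cqe)
 *	{
 *		struct request *rq;
 *
 *		rq = blk_mq_tag_to_rq(my_tags(q), cqe->command_id);
 *		nvme_end_request(rq, cqe->status, cqe->result);
 *	}
 *
 * (my_queue and my_tags() are stand-ins for driver-private state.)
 */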

static inline void nvme_end_request(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	blk_mq_complete_request(req);
}

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

void nvme_complete_rq(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags, bool poll);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);

int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
		void *log, size_t size, u64 offset);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct block_device_operations nvme_ns_head_ops;

#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}
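
/*
 * Path-selection sketch (illustrative, not part of the original header):
 * I/O submitted through the multipath head disk picks a path under SRCU,
 * roughly as the real code in drivers/nvme/host/multipath.c does:
 *
 *	int srcu_idx = srcu_read_lock(&head->srcu);
 *	struct nvme_ns *ns = nvme_find_path(head);
 *
 *	if (likely(ns))
 *		;	// remap the bio and submit it on ns->queue
 *	else
 *		;	// park the bio on head->requeue_list
 *	srcu_read_unlock(&head->srcu, srcu_idx);
 */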

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);

static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (head->disk && list_empty(&head->list))
		kblockd_schedule_work(&head->requeue_work);
}

extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;

#else
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
/*
 * Without the multipath code enabled, multiple controllers per subsystem are
 * visible as devices and thus we cannot use the subsystem instance.
 */
static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
				      struct nvme_ctrl *ctrl, int *flags)
{
	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
}

static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
		struct nvme_id_ns *id)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	return false;
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & (1 << 3))
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
#endif /* CONFIG_NVME_MULTIPATH */
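
/*
 * Naming note (illustrative, not part of the original header): with
 * CONFIG_NVME_MULTIPATH enabled the shared head disk is named after the
 * subsystem instance, so one namespace gets one node regardless of how many
 * controllers can reach it.  The non-multipath nvme_set_disk_name() stub
 * above uses ctrl->instance instead, so a namespace visible through two
 * controllers would show up twice, e.g. as "nvme0n1" and "nvme1n1".
 */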

#ifdef CONFIG_NVM
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
extern const struct attribute_group nvme_nvm_attr_group;
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
				    int node)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {}
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
		unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_NVM */

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

#endif /* _NVME_H */