/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>

#include <trace/events/block.h>

extern const struct pr_ops nvme_pr_ops;

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define NVME_ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define NVME_INLINE_SG_CNT		0
#define NVME_INLINE_METADATA_SG_CNT	0
#else
#define NVME_INLINE_SG_CNT		2
#define NVME_INLINE_METADATA_SG_CNT	1
#endif

/*
 * Default to a 4K page size, with the intention to update this
 * path in the future to accommodate architectures with differing
 * kernel and IO page sizes.
 */
#define NVME_CTRL_PAGE_SHIFT	12
#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)
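/*
 * Worked example (illustrative sketch, not used by the driver): with the
 * controller page size fixed at 4K, the number of controller pages - and
 * hence PRP entries for a page-aligned buffer - follows directly from the
 * shift above. The helper name below is hypothetical.
 */
static inline unsigned int nvme_example_npages(size_t len)
{
	/* e.g. len = 16384 -> (16384 + 4095) >> 12 = 4 controller pages */
	return (len + NVME_CTRL_PAGE_SIZE - 1) >> NVME_CTRL_PAGE_SHIFT;
}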
extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns all zeroes on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Set MEDIUM priority on SQ creation
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),

	/*
	 * Ignore device provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN		= (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES		= (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND		= (1 << 10),

	/*
	 * Use only one interrupt vector for all queues
	 */
	NVME_QUIRK_SINGLE_VECTOR		= (1 << 11),

	/*
	 * Use non-standard 128-byte SQEs.
	 */
	NVME_QUIRK_128_BYTES_SQES		= (1 << 12),

	/*
	 * Prevent tag overlap between queues
	 */
	NVME_QUIRK_SHARED_TAGS			= (1 << 13),

	/*
	 * Don't change the value of the temperature threshold feature
	 */
	NVME_QUIRK_NO_TEMP_THRESH_CHANGE	= (1 << 14),

	/*
	 * The controller doesn't handle the Identify Namespace
	 * Identification Descriptor list subcommand despite claiming
	 * NVMe 1.3 compliance.
	 */
	NVME_QUIRK_NO_NS_DESC_LIST		= (1 << 15),

	/*
	 * The controller does not properly handle DMA addresses over
	 * 48 bits.
	 */
	NVME_QUIRK_DMA_ADDRESS_BITS_48		= (1 << 16),

	/*
	 * The controller requires the command_id value be limited, so skip
	 * encoding the generation sequence number.
	 */
	NVME_QUIRK_SKIP_CID_GEN			= (1 << 17),

	/*
	 * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
	 */
	NVME_QUIRK_BOGUS_NID			= (1 << 18),

	/*
	 * No temperature thresholds for channels other than 0 (Composite).
	 */
	NVME_QUIRK_NO_SECONDARY_TEMP_THRESH	= (1 << 19),

	/*
	 * Disables simple suspend/resume path.
	 */
	NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND	= (1 << 20),
};

/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			genctr;
	u8			retries;
	u8			flags;
	u16			status;
#ifdef CONFIG_NVME_MULTIPATH
	unsigned long		start_time;
#endif
	struct nvme_ctrl	*ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED	= (1 << 0),
	NVME_REQ_USERCMD	= (1 << 1),
	NVME_MPATH_IO_STATS	= (1 << 2),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->q->queuedata)
		return 0;

	return req->mq_hctx->queue_num + 1;
}
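/*
 * Example (hedged, derived from the mapping above): a request on a queue
 * without queuedata - the admin queue - reports qid 0, while a request on
 * I/O hardware context N reports qid N + 1, matching the NVMe convention
 * that queue identifier 0 is the admin queue.
 */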
/*
 * The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

/*
 * enum nvme_ctrl_state: Controller state
 *
 * @NVME_CTRL_NEW:		New controller just allocated, initial state
 * @NVME_CTRL_LIVE:		Controller is connected and I/O capable
 * @NVME_CTRL_RESETTING:	Controller is resetting (or scheduled reset)
 * @NVME_CTRL_CONNECTING:	Controller is disconnected, now connecting the
 *				transport
 * @NVME_CTRL_DELETING:		Controller is deleting (or scheduled deletion)
 * @NVME_CTRL_DELETING_NOIO:	Controller is deleting and I/O is not
 *				disabled/failed immediately. This state comes
 *				after all async event processing took place and
 *				before ns removal and the controller deletion
 *				progress
 * @NVME_CTRL_DEAD:		Controller is non-present/unresponsive during
 *				shutdown or removal. In this case we forcibly
 *				kill all inflight I/O as they have no chance to
 *				complete
 */
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};

struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};

enum nvme_ctrl_flags {
	NVME_CTRL_FAILFAST_EXPIRED	= 0,
	NVME_CTRL_ADMIN_Q_STOPPED	= 1,
	NVME_CTRL_STARTED_ONCE		= 2,
	NVME_CTRL_STOPPED		= 3,
	NVME_CTRL_SKIP_ID_CNS_CS	= 4,
	NVME_CTRL_DIRTY_CAPABILITY	= 5,
};
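/*
 * Note the distinction (illustrative): nvme_quirks values are bit masks
 * tested directly against ctrl->quirks, while the nvme_ctrl_flags values
 * above are bit numbers intended for the atomic bitops on ctrl->flags:
 *
 *	if (ctrl->quirks & NVME_QUIRK_NO_APST)		(mask test)
 *		...
 *	if (test_bit(NVME_CTRL_STOPPED, &ctrl->flags))	(bit-number test)
 *		...
 */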
struct nvme_ctrl {
	bool comp_seen;
	bool identified;
	enum nvme_ctrl_state state;
	spinlock_t lock;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct request_queue *fabrics_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
#ifdef CONFIG_NVME_HWMON
	struct device *hwmon_device;
#endif
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;
	wait_queue_head_t state_wq;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u16 mtfa;
	u32 ctrl_config;
	u32 queue_count;

	u64 cap;
	u32 max_hw_sectors;
	u32 max_segments;
	u32 max_integrity_segments;
	u32 max_discard_sectors;
	u32 max_discard_segments;
	u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
	u32 max_zone_append;
#endif
	u16 crdt[3];
	u16 oncs;
	u32 dmrsl;
	u16 oacs;
	u16 sqsize;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u16 wctemp;
	u16 cctemp;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct xarray cels;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct delayed_work failfast_work;
	struct nvme_command ka_cmd;
	unsigned long ka_last_check_time;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
#endif

#ifdef CONFIG_NVME_AUTH
	struct work_struct dhchap_auth_work;
	struct mutex dhchap_auth_mutex;
	struct nvme_dhchap_queue_context *dhchap_ctxs;
	struct nvme_dhchap_key *host_key;
	struct nvme_dhchap_key *ctrl_key;
	u16 transaction;
#endif

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u16 hmmaxd;
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;

	/* Fabrics only */
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	unsigned long flags;
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;

	enum nvme_ctrl_type cntrltype;
	enum nvme_dctype dctype;
};

static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
{
	return READ_ONCE(ctrl->state);
}

enum nvme_iopolicy {
	NVME_IOPOLICY_NUMA,
	NVME_IOPOLICY_RR,
};

struct nvme_subsystem {
	int			instance;
	struct device		dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref		ref;
	struct list_head	entry;
	struct mutex		lock;
	struct list_head	ctrls;
	struct list_head	nsheads;
	char			subnqn[NVMF_NQN_SIZE];
	char			serial[20];
	char			model[40];
	char			firmware_rev[8];
	u8			cmic;
	enum nvme_subsys_type	subtype;
	u16			vendor_id;
	u16			awupf;	/* 0's based awupf value. */
	struct ida		ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy	iopolicy;
#endif
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8	eui64[8];
	u8	nguid[16];
	uuid_t	uuid;
	u8	csi;
};

/*
 * Anchor structure for namespaces.  There is one for each namespace in a
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it.  For private
 * namespaces there is a 1:1 relation to our namespace structures, that is
 * ->list only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head	list;
	struct srcu_struct	srcu;
	struct nvme_subsystem	*subsys;
	unsigned		ns_id;
	struct nvme_ns_ids	ids;
	struct list_head	entry;
	struct kref		ref;
	bool			shared;
	int			instance;
	struct nvme_effects_log *effects;

	struct cdev		cdev;
	struct device		cdev_device;

	struct gendisk		*disk;
#ifdef CONFIG_NVME_MULTIPATH
	struct bio_list		requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;
	struct mutex		lock;
	unsigned long		flags;
#define NVME_NSHEAD_DISK_LIVE	0
	struct nvme_ns __rcu	*current_path[];
#endif
};

static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
{
	return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
}

enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0,	/* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1,	/* support getting generated md */
	NVME_NS_DEAC,	/* DEAC bit in Write Zeroes supported */
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 pi_size;
	u16 sgs;
	u32 sws;
	u8 pi_type;
	u8 guard_type;
#ifdef CONFIG_BLK_DEV_ZONED
	u64 zsze;
#endif
	unsigned long features;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_ANA_PENDING	2
#define NVME_NS_FORCE_RO	3
#define NVME_NS_READY		4

	struct cdev		cdev;
	struct device		cdev_device;

	struct nvme_fault_inject fault_inject;
};

/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == ns->pi_size;
}
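/*
 * Worked example (hedged): for a 512+8 format with Type 1 protection,
 * ms == 8 and pi_size == 8, so the metadata consists solely of the PI
 * tuple and nvme_ns_has_pi() is true.  If the format carries, say, 16
 * bytes of metadata around an 8-byte PI tuple, the metadata is not
 * PI-only and the helper returns false.
 */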
struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_BLOCKING			(1 << 2)

	const struct attribute_group **dev_attr_groups;
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
	void (*print_device_info)(struct nvme_ctrl *ctrl);
	bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
};

/*
 * nvme command_id is constructed as such:
 * | xxxx | xxxxxxxxxxxx |
 *   gen    request tag
 */
#define nvme_genctr_mask(gen)		(gen & 0xf)
#define nvme_cid_install_genctr(gen)	(nvme_genctr_mask(gen) << 12)
#define nvme_genctr_from_cid(cid)	((cid & 0xf000) >> 12)
#define nvme_tag_from_cid(cid)		(cid & 0xfff)

static inline u16 nvme_cid(struct request *rq)
{
	return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
}

static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	u8 genctr = nvme_genctr_from_cid(command_id);
	u16 tag = nvme_tag_from_cid(command_id);
	struct request *rq;

	rq = blk_mq_tag_to_rq(tags, tag);
	if (unlikely(!rq)) {
		pr_err("could not locate request for tag %#x\n",
			tag);
		return NULL;
	}
	if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
		dev_err(nvme_req(rq)->ctrl->device,
			"request %#x genctr mismatch (got %#x expected %#x)\n",
			tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
		return NULL;
	}
	return rq;
}

static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
}
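/*
 * Worked example (illustrative): genctr 0x5 and blk-mq tag 0x1a3 encode
 * to command_id 0x51a3 via nvme_cid(); nvme_genctr_from_cid(0x51a3)
 * recovers 0x5 and nvme_tag_from_cid(0x51a3) recovers 0x1a3.  A stale
 * completion carrying an old generation fails the genctr comparison in
 * nvme_find_rq() instead of completing an unrelated request that reused
 * the same tag.
 */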
/*
 * Return the length of the string without the space padding
 */
static inline int nvme_strlen(char *s, int len)
{
	while (s[len - 1] == ' ')
		len--;
	return len;
}

static inline void nvme_print_device_info(struct nvme_ctrl *ctrl)
{
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (ctrl->ops->print_device_info) {
		ctrl->ops->print_device_info(ctrl);
		return;
	}

	dev_err(ctrl->device,
		"VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id,
		nvme_strlen(subsys->model, sizeof(subsys->model)),
		subsys->model, nvme_strlen(subsys->firmware_rev,
					   sizeof(subsys->firmware_rev)),
		subsys->firmware_rev);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif

bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	int ret;

	if (!ctrl->subsystem)
		return -ENOTTY;
	if (!nvme_wait_reset(ctrl))
		return -EBUSY;

	/* 0x4E564D65 is the ASCII string "NVMe" required to trigger NSSR */
	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
	if (ret)
		return ret;

	return nvme_try_sched_reset(ctrl);
}

/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
{
	return sector >> (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
	return lba << (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert byte length to nvme's 0-based num dwords
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}
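/*
 * Worked examples (illustrative): for a namespace formatted with 4K
 * logical blocks (lba_shift == 12), nvme_sect_to_lba() maps 512B sector
 * 8 to LBA 8 >> 3 == 1, and nvme_lba_to_sect() maps LBA 1 back to sector
 * 8.  For nvme_bytes_to_numd(), a 4096-byte buffer yields
 * (4096 >> 2) - 1 == 1023, the 0-based dword count the spec expects.
 */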
static inline bool nvme_is_ana_error(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		return true;
	default:
		return false;
	}
}

static inline bool nvme_is_path_error(u16 status)
{
	/* check for a status code type of 'path related status' (SCT 0x3) */
	return (status & 0x700) == 0x300;
}

/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if yes do
 * so.  If not let the caller complete the request without an indirect function
 * call.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);
	struct nvme_ctrl *ctrl = rq->ctrl;

	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
		rq->genctr++;

	/* the shift drops the phase tag carried in bit 0 of the CQE status */
	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	return !qid &&
		nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}

void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);

static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
						void (*fn)(struct request *rq))
{
	struct request *req;

	rq_list_for_each(&iob->req_list, req) {
		fn(req);
		nvme_complete_batch_req(req);
	}
	blk_mq_end_request_batch(iob);
}
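/*
 * Usage sketch (hedged; the callback name is hypothetical): a transport
 * that polls completions into an io_comp_batch finishes them in one go,
 * running its own per-request teardown before the batched end_request:
 *
 *	nvme_complete_batch(iob, my_transport_unmap_rq);
 */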
blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended);
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int cmd_size);
void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int nr_maps,
		unsigned int cmd_size);
void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl);
void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl);
void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl);
void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl);
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

static inline enum req_op nvme_req_op(struct nvme_command *cmd)
{
	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}

#define NVME_QID_ANY -1
void nvme_init_request(struct request *req, struct nvme_command *cmd);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live);

static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	if (likely(ctrl->state == NVME_CTRL_LIVE))
		return true;
	if (ctrl->ops->flags & NVME_F_FABRICS &&
	    ctrl->state == NVME_CTRL_DELETING)
		return queue_live;
	return __nvme_check_ready(ctrl, rq, queue_live);
}

/*
 * NSID shall be unique for all shared namespaces, or if at least one of the
 * following conditions is met:
 *   1. Namespace Management is supported by the controller
 *   2. ANA is supported by the controller
 *   3. NVM Sets are supported by the controller
 *
 * Otherwise, private namespaces are not required to report a unique NSID.
 */
static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return head->shared ||
		(ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
		(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
		(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
}

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		int qid, int at_head,
		blk_mq_req_flags_t flags);
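/*
 * Usage sketch (hedged; assumes caller context, buffer management elided):
 * a synchronous Identify Controller command on the admin queue looks like:
 *
 *	struct nvme_command c = { };
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = NVME_ID_CNS_CTRL;
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
 */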
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen,
		u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen,
		u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner);
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
int nvme_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
		struct io_comp_batch *iob, unsigned int poll_flags);
int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags);
int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
extern const struct attribute_group nvme_dev_attrs_group;
extern const struct attribute_group *nvme_subsys_attrs_groups[];
extern const struct attribute_group *nvme_dev_attr_groups[];
extern const struct block_device_operations nvme_bdev_ops;

void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}
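/*
 * Note (hedged): ana_log_buf is only allocated once the controller has
 * advertised ANA support during initialization, so its presence doubles
 * as the "this controller uses ANA" flag tested above.
 */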

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
void nvme_mpath_start_request(struct request *rq);
void nvme_mpath_end_request(struct request *rq);

static inline void nvme_trace_bio_complete(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;

	if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern bool multipath;
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;

#else
#define multipath false
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	return false;
}
static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_request(struct request *rq)
{
}
static inline void nvme_mpath_end_request(struct request *rq)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_revalidate_zones(struct nvme_ns *ns);
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
#ifdef CONFIG_BLK_DEV_ZONED
int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action);
#else
static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action)
{
	return BLK_STS_NOTSUPP;
}

static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
	dev_warn(ns->ctrl->device,
		 "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
	return -EPROTONOSUPPORT;
}
#endif

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
	return 0;
}

static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
}
#endif

static inline void nvme_start_request(struct request *rq)
{
	if (rq->cmd_flags & REQ_NVME_MPATH)
		nvme_mpath_start_request(rq);
	blk_mq_start_request(rq);
}

static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{
	return ctrl->sgls & ((1 << 0) | (1 << 1));
}
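/*
 * Background (hedged): the check above tests bits 1:0 of the Identify
 * Controller SGLS field; a nonzero value there indicates the controller
 * accepts SGLs for I/O commands (01b: supported without alignment
 * requirements, 10b: supported with a dword alignment and granularity
 * requirement).
 */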

#ifdef CONFIG_NVME_AUTH
int __init nvme_init_auth(void);
void __exit nvme_exit_auth(void);
int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_auth_stop(struct nvme_ctrl *ctrl);
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
void nvme_auth_free(struct nvme_ctrl *ctrl);
#else
static inline int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
	return 0;
}
static inline int __init nvme_init_auth(void)
{
	return 0;
}
static inline void __exit nvme_exit_auth(void)
{
}
static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {}
static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
	return -EPROTONOSUPPORT;
}
static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
	return NVME_SC_AUTH_REQUIRED;
}
static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {}
#endif

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			 u8 opcode);
u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode);
int nvme_execute_rq(struct request *rq, bool at_head);
void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
		       struct nvme_command *cmd, int status);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);

static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}

#ifdef CONFIG_NVME_VERBOSE_ERRORS
const unsigned char *nvme_get_error_status_str(u16 status);
const unsigned char *nvme_get_opcode_str(u8 opcode);
const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode);
#else /* CONFIG_NVME_VERBOSE_ERRORS */
static inline const unsigned char *nvme_get_error_status_str(u16 status)
{
	return "I/O Error";
}
static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
{
	return "I/O Cmd";
}
static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
{
	return "Admin Cmd";
}

static inline const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode)
{
	return "Fabrics Cmd";
}
#endif /* CONFIG_NVME_VERBOSE_ERRORS */

static inline const unsigned char *nvme_opcode_str(int qid, u8 opcode, u8 fctype)
{
	if (opcode == nvme_fabrics_command)
		return nvme_get_fabrics_opcode_str(fctype);
	return qid ? nvme_get_opcode_str(opcode) :
		nvme_get_admin_opcode_str(opcode);
}
#endif /* _NVME_H */