/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>

#include <trace/events/block.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define NVME_ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define  NVME_INLINE_SG_CNT  0
#define  NVME_INLINE_METADATA_SG_CNT  0
#else
#define  NVME_INLINE_SG_CNT  2
#define  NVME_INLINE_METADATA_SG_CNT  1
#endif

/*
 * Default to a 4K page size, with the intention to update this
 * path in the future to accommodate architectures with differing
 * kernel and IO page sizes.
 */
#define NVME_CTRL_PAGE_SHIFT	12
#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;
/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns zeroes on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Set MEDIUM priority on SQ creation
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),

	/*
	 * Ignore device provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN		= (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES		= (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND		= (1 << 10),

	/*
	 * Use only one interrupt vector for all queues
	 */
	NVME_QUIRK_SINGLE_VECTOR		= (1 << 11),

	/*
	 * Use non-standard 128-byte SQEs.
	 */
	NVME_QUIRK_128_BYTES_SQES		= (1 << 12),

	/*
	 * Prevent tag overlap between queues
	 */
	NVME_QUIRK_SHARED_TAGS			= (1 << 13),

	/*
	 * Don't change the value of the temperature threshold feature
	 */
	NVME_QUIRK_NO_TEMP_THRESH_CHANGE	= (1 << 14),

	/*
	 * The controller doesn't handle the Identify Namespace
	 * Identification Descriptor list subcommand despite claiming
	 * NVMe 1.3 compliance.
	 */
	NVME_QUIRK_NO_NS_DESC_LIST		= (1 << 15),

	/*
	 * The controller does not properly handle DMA addresses over
	 * 48 bits.
	 */
	NVME_QUIRK_DMA_ADDRESS_BITS_48		= (1 << 16),

	/*
	 * The controller requires the command_id value to be limited, so skip
	 * encoding the generation sequence number.
	 */
	NVME_QUIRK_SKIP_CID_GEN			= (1 << 17),
};

/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			genctr;
	u8			retries;
	u8			flags;
	u16			status;
	struct nvme_ctrl	*ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED		= (1 << 0),
	NVME_REQ_USERCMD		= (1 << 1),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->q->queuedata)
		return 0;

	return req->mq_hctx->queue_num + 1;
}
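/*
 * Example: requests on the admin queue (which has no queuedata) map to
 * qid 0; a request on the first I/O hardware context (queue_num 0) maps
 * to qid 1, matching NVMe's 1-based I/O queue numbering.
 */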
/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

/*
 * enum nvme_ctrl_state: Controller state
 *
 * @NVME_CTRL_NEW:		New controller just allocated, initial state
 * @NVME_CTRL_LIVE:		Controller is connected and I/O capable
 * @NVME_CTRL_RESETTING:	Controller is resetting (or scheduled reset)
 * @NVME_CTRL_CONNECTING:	Controller is disconnected, now connecting the
 *				transport
 * @NVME_CTRL_DELETING:		Controller is deleting (or scheduled deletion)
 * @NVME_CTRL_DELETING_NOIO:	Controller is deleting and I/O is not
 *				disabled/failed immediately. This state comes
 *				after all async event processing took place and
 *				before ns removal and the controller deletion
 *				progress
 * @NVME_CTRL_DEAD:		Controller is non-present/unresponsive during
 *				shutdown or removal. In this case we forcibly
 *				kill all inflight I/O as they have no chance to
 *				complete
 */
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};

struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};
struct nvme_ctrl {
	bool comp_seen;
	enum nvme_ctrl_state state;
	bool identified;
	spinlock_t lock;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct request_queue *fabrics_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
#ifdef CONFIG_NVME_HWMON
	struct device *hwmon_device;
#endif
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;
	wait_queue_head_t state_wq;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u32 ctrl_config;
	u16 mtfa;
	u32 queue_count;

	u64 cap;
	u32 max_hw_sectors;
	u32 max_segments;
	u32 max_integrity_segments;
	u32 max_discard_sectors;
	u32 max_discard_segments;
	u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
	u32 max_zone_append;
#endif
	u16 crdt[3];
	u16 oncs;
	u16 oacs;
	u16 nssa;
	u16 nr_streams;
	u16 sqsize;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u16 wctemp;
	u16 cctemp;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct xarray cels;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct delayed_work failfast_work;
	struct nvme_command ka_cmd;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
#endif

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;
	u16 hmmaxd;

	/* Fabrics only */
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	unsigned long flags;
#define NVME_CTRL_FAILFAST_EXPIRED	0
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;
};
enum nvme_iopolicy {
	NVME_IOPOLICY_NUMA,
	NVME_IOPOLICY_RR,
};

struct nvme_subsystem {
	int			instance;
	struct device		dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref		ref;
	struct list_head	entry;
	struct mutex		lock;
	struct list_head	ctrls;
	struct list_head	nsheads;
	char			subnqn[NVMF_NQN_SIZE];
	char			serial[20];
	char			model[40];
	char			firmware_rev[8];
	u8			cmic;
	u16			vendor_id;
	u16			awupf;	/* 0's based awupf value. */
	struct ida		ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy	iopolicy;
#endif
};
/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8	eui64[8];
	u8	nguid[16];
	uuid_t	uuid;
	u8	csi;
};

/*
 * Anchor structure for namespaces.  There is one for each namespace in an
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained to it.  For private namespaces
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head	list;
	struct srcu_struct	srcu;
	struct nvme_subsystem	*subsys;
	unsigned		ns_id;
	struct nvme_ns_ids	ids;
	struct list_head	entry;
	struct kref		ref;
	bool			shared;
	int			instance;
	struct nvme_effects_log *effects;

	struct cdev		cdev;
	struct device		cdev_device;

	struct gendisk		*disk;
#ifdef CONFIG_NVME_MULTIPATH
	struct bio_list		requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;
	struct mutex		lock;
	unsigned long		flags;
#define NVME_NSHEAD_DISK_LIVE	0
	struct nvme_ns __rcu	*current_path[];
#endif
};

static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
{
	return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
}

enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 sgs;
	u32 sws;
	u8 pi_type;
#ifdef CONFIG_BLK_DEV_ZONED
	u64 zsze;
#endif
	unsigned long features;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_DEAD		1
#define NVME_NS_ANA_PENDING	2
#define NVME_NS_FORCE_RO	3
#define NVME_NS_READY		4

	struct cdev		cdev;
	struct device		cdev_device;

	struct nvme_fault_inject fault_inject;
};
/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}
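/*
 * Example: pi_type != 0 with ms == 8 (exactly the size of a T10 PI
 * tuple) qualifies; a format carrying metadata beyond the PI tuple is
 * not treated as PI-capable by this helper.
 */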
struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_PCI_P2PDMA		(1 << 2)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};

/*
 * nvme command_id is constructed as such:
 * | xxxx | xxxxxxxxxxxx |
 *   gen    request tag
 */
#define nvme_genctr_mask(gen)			(gen & 0xf)
#define nvme_cid_install_genctr(gen)		(nvme_genctr_mask(gen) << 12)
#define nvme_genctr_from_cid(cid)		((cid & 0xf000) >> 12)
#define nvme_tag_from_cid(cid)			(cid & 0xfff)

static inline u16 nvme_cid(struct request *rq)
{
	return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
}
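/*
 * Example: genctr 0x5 with blk-mq tag 0x042 encodes as command_id
 * 0x5042; nvme_genctr_from_cid() and nvme_tag_from_cid() recover 0x5
 * and 0x042 from it.
 */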
static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	u8 genctr = nvme_genctr_from_cid(command_id);
	u16 tag = nvme_tag_from_cid(command_id);
	struct request *rq;

	rq = blk_mq_tag_to_rq(tags, tag);
	if (unlikely(!rq)) {
		pr_err("could not locate request for tag %#x\n", tag);
		return NULL;
	}
	if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
		dev_err(nvme_req(rq)->ctrl->device,
			"request %#x genctr mismatch (got %#x expected %#x)\n",
			tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
		return NULL;
	}
	return rq;
}

static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif
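/*
 * The NSSR magic 0x4E564D65 written below is ASCII "NVMe", the value
 * the specification defines for triggering an NVM subsystem reset.
 */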
static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
{
	return sector >> (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
	return lba << (ns->lba_shift - SECTOR_SHIFT);
}
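/*
 * Example: on a namespace formatted with 4K logical blocks
 * (lba_shift == 12), 512B sector 8 converts to LBA 1 (8 >> 3), and
 * LBA 1 converts back to sector 8 (1 << 3).
 */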
/*
 * Convert byte length to nvme's 0-based num dwords
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}
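/*
 * Example: a 4096-byte buffer is 1024 dwords, encoded as 1023 in the
 * command's zeroes-based number-of-dwords field.
 */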
static inline bool nvme_is_ana_error(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		return true;
	default:
		return false;
	}
}

static inline bool nvme_is_path_error(u16 status)
{
	/* check for a status code type of 'path related status' */
	return (status & 0x700) == 0x300;
}
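/*
 * Example: NVME_SC_ANA_TRANSITION (0x303) has status code type 3h
 * ("path related status"), so it satisfies both helpers above, while
 * NVME_SC_HOST_PATH_ERROR (0x370) is a path error but not an ANA error.
 */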
/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if yes do
 * so.  If not let the caller complete the request without an indirect function
 * call.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	return !qid &&
		nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}

void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);

static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
						void (*fn)(struct request *rq))
{
	struct request *req;

	rq_list_for_each(&iob->req_list, req) {
		fn(req);
		nvme_complete_batch_req(req);
	}
	blk_mq_end_request_batch(iob);
}
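/*
 * A transport driver typically supplies its own per-request teardown as
 * @fn above.  A minimal sketch, assuming a hypothetical driver helper
 * foo_unmap_rq() that unmaps the request's DMA buffers:
 *
 *	static void foo_complete_batch(struct io_comp_batch *iob)
 *	{
 *		nvme_complete_batch(iob, foo_unmap_rq);
 *	}
 */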
blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live);

static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	if (likely(ctrl->state == NVME_CTRL_LIVE))
		return true;
	if (ctrl->ops->flags & NVME_F_FABRICS &&
	    ctrl->state == NVME_CTRL_DELETING)
		return true;
	return __nvme_check_ready(ctrl, rq, queue_live);
}
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen,
		u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen,
		u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner);
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;

struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);

static inline void nvme_trace_bio_complete(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;

	if (req->cmd_flags & REQ_NVME_MPATH)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;

#else
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
static inline bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name,
		int *flags)
{
	return false;
}
static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
		struct nvme_id_ns *id)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	return false;
}
static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_revalidate_zones(struct nvme_ns *ns);
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
#ifdef CONFIG_BLK_DEV_ZONED
int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
				       struct nvme_command *cmnd,
				       enum nvme_zone_mgmt_action action);
#else
static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action)
{
	return BLK_STS_NOTSUPP;
}

static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
	dev_warn(ns->ctrl->device,
		 "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
	return -EPROTONOSUPPORT;
}
#endif

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
	return 0;
}

static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
}
#endif

static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{
	return ctrl->sgls & ((1 << 0) | (1 << 1));
}
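/*
 * Bits 1:0 of the Identify Controller SGLS field form a 2-bit code; any
 * non-zero value indicates the controller supports SGLs for I/O
 * commands, which is what the check above tests.
 */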
u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			 u8 opcode);
int nvme_execute_passthru_rq(struct request *rq);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);

static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}

#endif /* _NVME_H */