/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>

#include <trace/events/block.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define NVME_ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define NVME_INLINE_SG_CNT		0
#define NVME_INLINE_METADATA_SG_CNT	0
#else
#define NVME_INLINE_SG_CNT		2
#define NVME_INLINE_METADATA_SG_CNT	1
#endif

/*
 * Default to a 4K page size, with the intention to update this
 * path in the future to accommodate architectures with differing
 * kernel and IO page sizes.
 */
#define NVME_CTRL_PAGE_SHIFT	12
#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)
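/*
 * Illustrative note: with a shift of 12 the controller page size is
 * 1 << 12 = 4096 bytes, so one such page holds 4096 / 8 = 512 64-bit
 * PRP entries.
 */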
extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

/*
 * List of workarounds for devices that required behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns 0's on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Set MEDIUM priority on SQ creation
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),

	/*
	 * Ignore device provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN		= (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES		= (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND		= (1 << 10),

	/*
	 * Use only one interrupt vector for all queues
	 */
	NVME_QUIRK_SINGLE_VECTOR		= (1 << 11),

	/*
	 * Use non-standard 128-byte SQEs.
	 */
	NVME_QUIRK_128_BYTES_SQES		= (1 << 12),

	/*
	 * Prevent tag overlap between queues
	 */
	NVME_QUIRK_SHARED_TAGS			= (1 << 13),

	/*
	 * Don't change the value of the temperature threshold feature
	 */
	NVME_QUIRK_NO_TEMP_THRESH_CHANGE	= (1 << 14),

	/*
	 * The controller doesn't handle the Identify Namespace
	 * Identification Descriptor list subcommand despite claiming
	 * NVMe 1.3 compliance.
	 */
	NVME_QUIRK_NO_NS_DESC_LIST		= (1 << 15),

	/*
	 * The controller does not properly handle DMA addresses over
	 * 48 bits.
	 */
	NVME_QUIRK_DMA_ADDRESS_BITS_48		= (1 << 16),
	/*
	 * The controller requires the command_id value to be limited, so skip
	 * encoding the generation sequence number.
	 */
	NVME_QUIRK_SKIP_CID_GEN			= (1 << 17),

	/*
	 * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
	 */
	NVME_QUIRK_BOGUS_NID			= (1 << 18),

	/*
	 * No temperature thresholds for channels other than 0 (Composite).
	 */
	NVME_QUIRK_NO_SECONDARY_TEMP_THRESH	= (1 << 19),
};

/*
 * Common request structure for NVMe passthrough. All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			genctr;
	u8			retries;
	u8			flags;
	u16			status;
#ifdef CONFIG_NVME_MULTIPATH
	unsigned long		start_time;
#endif
	struct nvme_ctrl	*ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED		= (1 << 0),
	NVME_REQ_USERCMD		= (1 << 1),
	NVME_MPATH_IO_STATS		= (1 << 2),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->q->queuedata)
		return 0;

	return req->mq_hctx->queue_num + 1;
}

/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300
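/*
 * Illustrative sketch (hypothetical transport): the "first member" rule
 * documented above struct nvme_request is what makes the blk_mq_rq_to_pdu()
 * cast in nvme_req() valid:
 *
 *	struct foo_request {			// hypothetical transport PDU
 *		struct nvme_request req;	// must be first
 *		struct scatterlist *sgl;	// transport-private state
 *	};
 */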
/*
 * enum nvme_ctrl_state: Controller state
 *
 * @NVME_CTRL_NEW:		New controller just allocated, initial state
 * @NVME_CTRL_LIVE:		Controller is connected and I/O capable
 * @NVME_CTRL_RESETTING:	Controller is resetting (or scheduled reset)
 * @NVME_CTRL_CONNECTING:	Controller is disconnected, now connecting the
 *				transport
 * @NVME_CTRL_DELETING:		Controller is deleting (or scheduled deletion)
 * @NVME_CTRL_DELETING_NOIO:	Controller is deleting and I/O is not
 *				disabled/failed immediately. This state comes
 *				after all async event processing took place and
 *				before ns removal and the controller deletion
 *				progress
 * @NVME_CTRL_DEAD:		Controller is non-present/unresponsive during
 *				shutdown or removal. In this case we forcibly
 *				kill all inflight I/O as they have no chance to
 *				complete
 */
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};
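/*
 * Typical lifecycles, for illustration only (nvme_change_ctrl_state() in
 * core.c holds the authoritative transition table):
 *
 *	NEW -> CONNECTING -> LIVE			initial association
 *	LIVE -> RESETTING -> CONNECTING -> LIVE		reset/reconnect
 *	LIVE -> DELETING -> DELETING_NOIO		orderly removal
 *	DELETING -> DEAD				controller unreachable
 */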
struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};

enum nvme_ctrl_flags {
	NVME_CTRL_FAILFAST_EXPIRED	= 0,
	NVME_CTRL_ADMIN_Q_STOPPED	= 1,
	NVME_CTRL_STARTED_ONCE		= 2,
	NVME_CTRL_STOPPED		= 3,
};

struct nvme_ctrl {
	bool comp_seen;
	enum nvme_ctrl_state state;
	bool identified;
	spinlock_t lock;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct request_queue *fabrics_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
#ifdef CONFIG_NVME_HWMON
	struct device *hwmon_device;
#endif
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;
	wait_queue_head_t state_wq;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u32 ctrl_config;
	u16 mtfa;
	u32 queue_count;

	u64 cap;
	u32 max_hw_sectors;
	u32 max_segments;
	u32 max_integrity_segments;
	u32 max_discard_sectors;
	u32 max_discard_segments;
	u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
	u32 max_zone_append;
#endif
	u16 crdt[3];
	u16 oncs;
	u32 dmrsl;
	u16 oacs;
	u16 sqsize;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u16 wctemp;
	u16 cctemp;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct xarray cels;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct delayed_work failfast_work;
	struct nvme_command ka_cmd;
	unsigned long ka_last_check_time;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
#endif

#ifdef CONFIG_NVME_AUTH
	struct work_struct dhchap_auth_work;
	struct mutex dhchap_auth_mutex;
	struct nvme_dhchap_queue_context *dhchap_ctxs;
	struct nvme_dhchap_key *host_key;
	struct nvme_dhchap_key *ctrl_key;
	u16 transaction;
#endif

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;
	u16 hmmaxd;

	/* Fabrics only */
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	unsigned long flags;
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;

	enum nvme_ctrl_type cntrltype;
	enum nvme_dctype dctype;
};

enum nvme_iopolicy {
	NVME_IOPOLICY_NUMA,
	NVME_IOPOLICY_RR,
};
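/*
 * Illustrative note: with NVME_IOPOLICY_NUMA the multipath code prefers the
 * path closest to the submitting NUMA node, while NVME_IOPOLICY_RR
 * round-robins I/O across the usable paths; see nvme_find_path() and
 * nvme_mpath_default_iopolicy() in multipath.c.
 */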
struct nvme_subsystem {
	int instance;
	struct device dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref ref;
	struct list_head entry;
	struct mutex lock;
	struct list_head ctrls;
	struct list_head nsheads;
	char subnqn[NVMF_NQN_SIZE];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u8 cmic;
	enum nvme_subsys_type subtype;
	u16 vendor_id;
	u16 awupf;	/* 0's based awupf value. */
	struct ida ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy iopolicy;
#endif
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8 eui64[8];
	u8 nguid[16];
	uuid_t uuid;
	u8 csi;
};
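/*
 * Note (illustrative): two namespaces are treated as the same only when all
 * of eui64, nguid, uuid and csi match; see nvme_ns_ids_equal() in core.c.
 */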
/*
 * Anchor structure for namespaces. There is one for each namespace in a
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it. For private namespaces
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head list;
	struct srcu_struct srcu;
	struct nvme_subsystem *subsys;
	unsigned ns_id;
	struct nvme_ns_ids ids;
	struct list_head entry;
	struct kref ref;
	bool shared;
	int instance;
	struct nvme_effects_log *effects;

	struct cdev cdev;
	struct device cdev_device;

	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	struct bio_list requeue_list;
	spinlock_t requeue_lock;
	struct work_struct requeue_work;
	struct mutex lock;
	unsigned long flags;
#define NVME_NSHEAD_DISK_LIVE	0
	struct nvme_ns __rcu *current_path[];
#endif
};

static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
{
	return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
}

enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
	NVME_NS_DEAC = 1 << 2, /* DEAC bit in Write Zeroes supported */
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 pi_size;
	u16 sgs;
	u32 sws;
	u8 pi_type;
	u8 guard_type;
#ifdef CONFIG_BLK_DEV_ZONED
	u64 zsze;
#endif
	unsigned long features;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_ANA_PENDING	2
#define NVME_NS_FORCE_RO	3
#define NVME_NS_READY		4

	struct cdev cdev;
	struct device cdev_device;

	struct nvme_fault_inject fault_inject;
};
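/*
 * Note (illustrative): ms is the total per-LBA metadata size while pi_size is
 * the size of the protection information field alone, so the helper below
 * reports PI support only when the metadata carries nothing but PI.
 */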
/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == ns->pi_size;
}

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_BLOCKING			(1 << 2)

	const struct attribute_group **dev_attr_groups;
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
	void (*print_device_info)(struct nvme_ctrl *ctrl);
	bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
};

/*
 * nvme command_id is constructed as such:
 * | xxxx | xxxxxxxxxxxx |
 *   gen    request tag
 */
#define nvme_genctr_mask(gen)		(gen & 0xf)
#define nvme_cid_install_genctr(gen)	(nvme_genctr_mask(gen) << 12)
#define nvme_genctr_from_cid(cid)	((cid & 0xf000) >> 12)
#define nvme_tag_from_cid(cid)		(cid & 0xfff)

static inline u16 nvme_cid(struct request *rq)
{
	return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
}
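/*
 * Worked example (illustrative): with genctr == 5 and rq->tag == 0x2a the
 * command_id is (5 << 12) | 0x2a == 0x502a; decoding gives
 * nvme_genctr_from_cid(0x502a) == 5 and nvme_tag_from_cid(0x502a) == 0x2a.
 */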
static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	u8 genctr = nvme_genctr_from_cid(command_id);
	u16 tag = nvme_tag_from_cid(command_id);
	struct request *rq;

	rq = blk_mq_tag_to_rq(tags, tag);
	if (unlikely(!rq)) {
		pr_err("could not locate request for tag %#x\n",
			tag);
		return NULL;
	}
	if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
		dev_err(nvme_req(rq)->ctrl->device,
			"request %#x genctr mismatch (got %#x expected %#x)\n",
			tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
		return NULL;
	}
	return rq;
}

static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
}

/*
 * Return the length of the string without the space padding
 */
static inline int nvme_strlen(char *s, int len)
{
	while (s[len - 1] == ' ')
		len--;
	return len;
}

static inline void nvme_print_device_info(struct nvme_ctrl *ctrl)
{
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (ctrl->ops->print_device_info) {
		ctrl->ops->print_device_info(ctrl);
		return;
	}

	dev_err(ctrl->device,
		"VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id,
		nvme_strlen(subsys->model, sizeof(subsys->model)),
		subsys->model, nvme_strlen(subsys->firmware_rev,
					   sizeof(subsys->firmware_rev)),
		subsys->firmware_rev);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif

bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	int ret;

	if (!ctrl->subsystem)
		return -ENOTTY;
	if (!nvme_wait_reset(ctrl))
		return -EBUSY;

	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
	if (ret)
		return ret;

	return nvme_try_sched_reset(ctrl);
}
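/*
 * Note (illustrative): the magic value 0x4E564D65 written to NVME_REG_NSSR
 * above is simply "NVMe" in ASCII, the subsystem-reset token defined by the
 * NVMe specification.
 */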
/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
{
	return sector >> (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
	return lba << (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert byte length to nvme's 0-based num dwords
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}
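/*
 * Worked example (illustrative): for a namespace with 4096-byte logical
 * blocks (lba_shift == 12), nvme_sect_to_lba(ns, 8) == 1 and
 * nvme_lba_to_sect(ns, 1) == 8; nvme_bytes_to_numd(4096) == 1023 because the
 * field counts dwords and is 0's based.
 */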
static inline bool nvme_is_ana_error(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		return true;
	default:
		return false;
	}
}

static inline bool nvme_is_path_error(u16 status)
{
	/* check for a status code type of 'path related status' */
	return (status & 0x700) == 0x300;
}

/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if yes do
 * so.  If not let the caller complete the request without an indirect
 * function call.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);
	struct nvme_ctrl *ctrl = rq->ctrl;

	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
		rq->genctr++;

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	return !qid &&
		nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}

void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);

static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
						void (*fn)(struct request *rq))
{
	struct request *req;

	rq_list_for_each(&iob->req_list, req) {
		fn(req);
		nvme_complete_batch_req(req);
	}
	blk_mq_end_request_batch(iob);
}
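/*
 * Illustrative sketch (hypothetical transport code): a driver's poll path
 * collects completed requests in an io_comp_batch and then finishes them in
 * one go, with the callback doing per-request teardown:
 *
 *	static void foo_unmap_rq(struct request *req)	// hypothetical
 *	{
 *		// undo DMA mappings etc. for this request
 *	}
 *
 *	nvme_complete_batch(iob, foo_unmap_rq);
 */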
blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended);
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int cmd_size);
void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int nr_maps,
		unsigned int cmd_size);
void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl);
void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl);
void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl);
void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl);
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

static inline enum req_op nvme_req_op(struct nvme_command *cmd)
{
	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}

#define NVME_QID_ANY -1
void nvme_init_request(struct request *req, struct nvme_command *cmd);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live);

static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	if (likely(ctrl->state == NVME_CTRL_LIVE))
		return true;
	if (ctrl->ops->flags & NVME_F_FABRICS &&
	    ctrl->state == NVME_CTRL_DELETING)
		return queue_live;
	return __nvme_check_ready(ctrl, rq, queue_live);
}
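/*
 * Note (illustrative): while a fabrics association is still being set up
 * (e.g. state NVME_CTRL_CONNECTING), only the commands needed to establish
 * it, such as the Fabrics Connect command, may pass; that policy lives in
 * __nvme_check_ready() in core.c.
 */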
/*
 * NSID shall be unique for all shared namespaces, or if at least one of the
 * following conditions is met:
 *   1. Namespace Management is supported by the controller
 *   2. ANA is supported by the controller
 *   3. NVM Sets are supported by the controller
 *
 * Otherwise, private namespaces are not required to report a unique NSID.
 */
static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return head->shared ||
		(ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
		(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
		(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
}

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		int qid, int at_head,
		blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen,
		u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen,
		u32 *result);
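/*
 * Illustrative use of the synchronous command helpers above (this mirrors
 * what nvme_identify_ctrl() in core.c does; buffer allocation elided):
 *
 *	struct nvme_command c = { };
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = NVME_ID_CNS_CTRL;
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
 */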
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner);
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
int nvme_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
		struct io_comp_batch *iob, unsigned int poll_flags);
int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
		struct io_comp_batch *iob, unsigned int poll_flags);
int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags);
int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
extern const struct attribute_group nvme_dev_attrs_group;

struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
void nvme_mpath_start_request(struct request *rq);
void nvme_mpath_end_request(struct request *rq);

static inline void nvme_trace_bio_complete(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;

	if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}
extern bool multipath;
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;

#else
#define multipath false
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	return false;
}
static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_request(struct request *rq)
{
}
static inline void nvme_mpath_end_request(struct request *rq)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_revalidate_zones(struct nvme_ns *ns);
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
#ifdef CONFIG_BLK_DEV_ZONED
int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
				       struct nvme_command *cmnd,
				       enum nvme_zone_mgmt_action action);
#else
static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action)
{
	return BLK_STS_NOTSUPP;
}

static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
	dev_warn(ns->ctrl->device,
		 "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
	return -EPROTONOSUPPORT;
}
#endif

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}
#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
	return 0;
}

static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
}
#endif

static inline void nvme_start_request(struct request *rq)
{
	if (rq->cmd_flags & REQ_NVME_MPATH)
		nvme_mpath_start_request(rq);
	blk_mq_start_request(rq);
}

static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{
	return ctrl->sgls & ((1 << 0) | (1 << 1));
}
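/*
 * Note (illustrative): bits 1:0 of the Identify Controller SGLS field report
 * SGL support for the NVM command set (00b means no SGL support), which is
 * why the helper above tests only the low two bits.
 */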
#ifdef CONFIG_NVME_AUTH
int __init nvme_init_auth(void);
void __exit nvme_exit_auth(void);
int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_auth_stop(struct nvme_ctrl *ctrl);
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
void nvme_auth_free(struct nvme_ctrl *ctrl);
#else
static inline int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
	return 0;
}
static inline int __init nvme_init_auth(void)
{
	return 0;
}
static inline void __exit nvme_exit_auth(void)
{
}
static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {}
static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
	return -EPROTONOSUPPORT;
}
static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
	return NVME_SC_AUTH_REQUIRED;
}
static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {}
#endif

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			 u8 opcode);
u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode);
int nvme_execute_rq(struct request *rq, bool at_head);
void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
		       struct nvme_command *cmd, int status);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);

static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}

#ifdef CONFIG_NVME_VERBOSE_ERRORS
const unsigned char *nvme_get_error_status_str(u16 status);
const unsigned char *nvme_get_opcode_str(u8 opcode);
const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode);
#else /* CONFIG_NVME_VERBOSE_ERRORS */
static inline const unsigned char *nvme_get_error_status_str(u16 status)
{
	return "I/O Error";
}
static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
{
	return "I/O Cmd";
}
static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
{
	return "Admin Cmd";
}

static inline const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode)
{
	return "Fabrics Cmd";
}
#endif /* CONFIG_NVME_VERBOSE_ERRORS */

static inline const unsigned char *nvme_opcode_str(int qid, u8 opcode, u8 fctype)
{
	if (opcode == nvme_fabrics_command)
		return nvme_get_fabrics_opcode_str(fctype);
	return qid ? nvme_get_opcode_str(opcode) :
		nvme_get_admin_opcode_str(opcode);
}
#endif /* _NVME_H */