/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>

#include <trace/events/block.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define NVME_ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5
#define NVME_KATO_GRACE		10

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define NVME_INLINE_SG_CNT		0
#define NVME_INLINE_METADATA_SG_CNT	0
#else
#define NVME_INLINE_SG_CNT		2
#define NVME_INLINE_METADATA_SG_CNT	1
#endif

/*
 * Default to a 4K page size, with the intention to update this
 * path in the future to accommodate architectures with differing
 * kernel and IO page sizes.
 */
#define NVME_CTRL_PAGE_SHIFT	12
#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)
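/*
 * Worked example (for illustration only): NVME_CTRL_PAGE_SIZE is
 * 1 << 12 = 4096 bytes regardless of the kernel's PAGE_SIZE, so on a
 * 64K-page arm64 kernel a single kernel page covers 64K / 4K = 16
 * controller pages when PRP entries and transfer limits are sized.
 */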
extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns zeroes on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Supports the LightNVM command set if indicated in vs[1].
	 */
	NVME_QUIRK_LIGHTNVM			= (1 << 6),

	/*
	 * Set MEDIUM priority on SQ creation
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),

	/*
	 * Ignore device provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN		= (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES		= (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND		= (1 << 10),

	/*
	 * Use only one interrupt vector for all queues
	 */
	NVME_QUIRK_SINGLE_VECTOR		= (1 << 11),

	/*
	 * Use non-standard 128-byte SQEs.
	 */
	NVME_QUIRK_128_BYTES_SQES		= (1 << 12),

	/*
	 * Prevent tag overlap between queues
	 */
	NVME_QUIRK_SHARED_TAGS			= (1 << 13),

	/*
	 * Don't change the value of the temperature threshold feature
	 */
	NVME_QUIRK_NO_TEMP_THRESH_CHANGE	= (1 << 14),

	/*
	 * The controller doesn't handle the Identify Namespace
	 * Identification Descriptor list subcommand despite claiming
	 * NVMe 1.3 compliance.
	 */
	NVME_QUIRK_NO_NS_DESC_LIST		= (1 << 15),

	/*
	 * The controller does not properly handle DMA addresses over
	 * 48 bits.
	 */
	NVME_QUIRK_DMA_ADDRESS_BITS_48		= (1 << 16),
};
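/*
 * Illustrative use (hypothetical call site): these bits are tested against
 * nvme_ctrl.quirks, which is seeded from the matching PCI ID table entry's
 * driver_data at probe time, e.g.:
 *
 *	if (ctrl->quirks & NVME_QUIRK_NO_DEEPEST_PS)
 *		;	// leave the deepest power state out of the APST table
 */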
/*
 * Common request structure for NVMe passthrough. All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			retries;
	u8			flags;
	u16			status;
	struct nvme_ctrl	*ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED		= (1 << 0),
	NVME_REQ_USERCMD		= (1 << 1),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->q->queuedata)
		return 0;

	return req->mq_hctx->queue_num + 1;
}
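/*
 * Illustrative sketch ("struct demo_transport_request" is a made-up name):
 * each transport embeds struct nvme_request first in its per-request PDU so
 * that nvme_req() above can recover it from any struct request:
 *
 *	struct demo_transport_request {
 *		struct nvme_request	req;	// must stay the first member
 *		void			*priv;	// transport-private state
 *	};
 *
 *	nvme_req(rq)->retries++;	// works for every transport
 */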
/*
 * The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

/*
 * enum nvme_ctrl_state: Controller state
 *
 * @NVME_CTRL_NEW:		New controller just allocated, initial state
 * @NVME_CTRL_LIVE:		Controller is connected and I/O capable
 * @NVME_CTRL_RESETTING:	Controller is resetting (or scheduled reset)
 * @NVME_CTRL_CONNECTING:	Controller is disconnected, now connecting the
 *				transport
 * @NVME_CTRL_DELETING:		Controller is deleting (or scheduled deletion)
 * @NVME_CTRL_DELETING_NOIO:	Controller is deleting and I/O is not
 *				disabled/failed immediately. This state comes
 *				after all async event processing has taken
 *				place and before namespace removal and
 *				controller deletion progress
 * @NVME_CTRL_DEAD:		Controller is non-present/unresponsive during
 *				shutdown or removal. In this case we forcibly
 *				kill all inflight I/O as they have no chance to
 *				complete
 */
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};
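/*
 * Typical transitions, for orientation only (nvme_change_ctrl_state() in
 * core.c holds the authoritative table): bring-up is NEW -> LIVE; a reset
 * walks LIVE -> RESETTING -> CONNECTING -> LIVE; teardown goes through
 * DELETING (optionally DELETING_NOIO), with DEAD reserved for controllers
 * that stopped responding.
 */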
struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};

struct nvme_ctrl {
	bool comp_seen;
	enum nvme_ctrl_state state;
	bool identified;
	spinlock_t lock;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct request_queue *fabrics_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
#ifdef CONFIG_NVME_HWMON
	struct device *hwmon_device;
#endif
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;
	wait_queue_head_t state_wq;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u32 ctrl_config;
	u16 mtfa;
	u32 queue_count;

	u64 cap;
	u32 max_hw_sectors;
	u32 max_segments;
	u32 max_integrity_segments;
#ifdef CONFIG_BLK_DEV_ZONED
	u32 max_zone_append;
#endif
	u16 crdt[3];
	u16 oncs;
	u16 oacs;
	u16 nssa;
	u16 nr_streams;
	u16 sqsize;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u16 wctemp;
	u16 cctemp;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct xarray cels;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct delayed_work failfast_work;
	struct nvme_command ka_cmd;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
#endif

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;
	u16 hmmaxd;
	/* Fabrics only */
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	unsigned long flags;
#define NVME_CTRL_FAILFAST_EXPIRED	0
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;
};

enum nvme_iopolicy {
	NVME_IOPOLICY_NUMA,
	NVME_IOPOLICY_RR,
};

struct nvme_subsystem {
	int			instance;
	struct device		dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref		ref;
	struct list_head	entry;
	struct mutex		lock;
	struct list_head	ctrls;
	struct list_head	nsheads;
	char			subnqn[NVMF_NQN_SIZE];
	char			serial[20];
	char			model[40];
	char			firmware_rev[8];
	u8			cmic;
	u16			vendor_id;
	u16			awupf;	/* 0's based awupf value. */
	struct ida		ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy	iopolicy;
#endif
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8	eui64[8];
	u8	nguid[16];
	uuid_t	uuid;
	u8	csi;
};
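/*
 * Note: these identifiers are populated from Identify data (the EUI-64 and
 * NGUID fields and, on newer devices, the Namespace Identification
 * Descriptor list).  The core compares whole nvme_ns_ids when deciding
 * whether a newly scanned namespace matches an existing nvme_ns_head,
 * which is how a shared namespace seen through two controllers ends up
 * under one head.
 */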
/*
 * Anchor structure for namespaces.  There is one for each namespace in a
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained to it.  For private namespaces
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head	list;
	struct srcu_struct	srcu;
	struct nvme_subsystem	*subsys;
	unsigned		ns_id;
	struct nvme_ns_ids	ids;
	struct list_head	entry;
	struct kref		ref;
	bool			shared;
	int			instance;
	struct nvme_effects_log *effects;
#ifdef CONFIG_NVME_MULTIPATH
	struct gendisk		*disk;
	struct bio_list		requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;
	struct mutex		lock;
	unsigned long		flags;
#define NVME_NSHEAD_DISK_LIVE	0
	struct nvme_ns __rcu	*current_path[];
#endif
};

enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct nvm_dev *ndev;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 sgs;
	u32 sws;
	u8 pi_type;
#ifdef CONFIG_BLK_DEV_ZONED
	u64 zsze;
#endif
	unsigned long features;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_DEAD		1
#define NVME_NS_ANA_PENDING	2
#define NVME_NS_FORCE_RO	3

	struct nvme_fault_inject fault_inject;

};

/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}
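/*
 * Worked example: sizeof(struct t10_pi_tuple) is 8 bytes (16-bit guard tag,
 * 16-bit app tag, 32-bit reference tag), so nvme_ns_has_pi() only accepts
 * formats whose per-block metadata is exactly 8 bytes; larger metadata
 * regions carry user metadata and are not treated as protection
 * information here.
 */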
struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_PCI_P2PDMA		(1 << 2)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	/* writing "NVMe" (0x4E564D65) to NSSR triggers a subsystem reset */
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
{
	return sector >> (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
	return lba << (ns->lba_shift - SECTOR_SHIFT);
}
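/*
 * Worked example: for a namespace formatted with 4K logical blocks,
 * ns->lba_shift is 12 and SECTOR_SHIFT is 9, so both helpers shift by 3:
 * 512B sector 80 maps to LBA 80 >> 3 = 10, and LBA 10 maps back to sector
 * 10 << 3 = 80.  With 512B logical blocks the shift is 0 and sectors and
 * LBAs coincide.
 */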
/*
 * Convert byte length to nvme's 0-based num dwords
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}
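/*
 * Worked example: a 4096-byte buffer is 4096 / 4 = 1024 dwords, encoded
 * 0's based as 1023 (0x3ff), the convention used by NUMD-style fields such
 * as those of Get Log Page.
 */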
static inline bool nvme_is_ana_error(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		return true;
	default:
		return false;
	}
}

static inline bool nvme_is_path_error(u16 status)
{
	/* check for a status code type of 'path related status' */
	return (status & 0x700) == 0x300;
}

/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if yes do
 * so.  If not let the caller complete the request without an indirect function
 * call.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}
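/*
 * Note on the encoding used above: the CQE status word keeps the phase tag
 * in bit 0, so the right shift by one stores the status code in bits 7:0
 * and the status code type (SCT) in bits 10:8 of nvme_req(req)->status.
 * That is why nvme_is_path_error() masks with 0x700 and compares against
 * 0x300 (SCT 3h, "path related status"), and nvme_is_ana_error() masks
 * with 0x7ff to match SCT plus status code.  A transport's completion
 * handler is expected to use this helper roughly as
 *
 *	if (!nvme_try_complete_req(req, cqe->status, cqe->result))
 *		// complete the request directly on this CPU
 *
 * completing directly only when blk-mq did not hand the request off.
 */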
static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
}

void nvme_complete_rq(struct request *req);
blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags, bool poll);
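/*
 * Illustrative call (hypothetical site; "id" is an nvme_id_ctrl buffer):
 *
 *	struct nvme_command c = { };
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = NVME_ID_CNS_CTRL;
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id, sizeof(*id));
 *
 * __nvme_submit_sync_cmd() additionally exposes the result, timeout, queue
 * id, head-insertion, and polling parameters used on the fabrics connect
 * path.
 */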
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen,
		u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		unsigned int dword11, void *buffer, size_t buflen,
		u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);

int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
		struct nvme_ns_head **head, int *srcu_idx);
void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct block_device_operations nvme_ns_head_ops;

#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
blk_qc_t nvme_ns_head_submit_bio(struct bio *bio);

static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (head->disk && list_empty(&head->list))
		kblockd_schedule_work(&head->requeue_work);
}

static inline void nvme_trace_bio_complete(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;

	if (req->cmd_flags & REQ_NVME_MPATH)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;
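/*
 * Flow note: nvme_failover_req() parks a failed request's bios on
 * head->requeue_list, and requeue_work pushes them back through
 * nvme_ns_head_submit_bio(), where nvme_find_path() selects a usable path
 * according to the subsystem iopolicy.  nvme_mpath_check_last_path() above
 * kicks the same work when the last path goes away so parked bios do not
 * hang.
 */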
#else
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
/*
 * Without the multipath code enabled, multiple controllers per subsystem are
 * visible as devices and thus we cannot use the subsystem instance.
 */
static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
				      struct nvme_ctrl *ctrl, int *flags)
{
	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
}

static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
		struct nvme_id_ns *id)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	return false;
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_revalidate_zones(struct nvme_ns *ns);
#ifdef CONFIG_BLK_DEV_ZONED
int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
int nvme_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data);

blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
				       struct nvme_command *cmnd,
				       enum nvme_zone_mgmt_action action);
#else
#define nvme_report_zones NULL

static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action)
{
	return BLK_STS_NOTSUPP;
}

static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
	dev_warn(ns->ctrl->device,
		 "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
	return -EPROTONOSUPPORT;
}
#endif

#ifdef CONFIG_NVM
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
extern const struct attribute_group nvme_nvm_attr_group;
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
				    int node)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {}
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
				 unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_NVM */

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}
#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
	return 0;
}

static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
}
#endif

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			 u8 opcode);
void nvme_execute_passthru_rq(struct request *rq);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);

#endif /* _NVME_H */