/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>

#include <trace/events/block.h>

extern const struct pr_ops nvme_pr_ops;

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define NVME_ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define  NVME_INLINE_SG_CNT  0
#define  NVME_INLINE_METADATA_SG_CNT  0
#else
#define  NVME_INLINE_SG_CNT  2
#define  NVME_INLINE_METADATA_SG_CNT  1
#endif

/*
 * Default to a 4K page size, with the intention to update this
 * path in the future to accommodate architectures with differing
 * kernel and IO page sizes.
 */
#define NVME_CTRL_PAGE_SHIFT	12
#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)

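/*
 * Illustrative sketch, not part of the driver: with the shift fixed at 12,
 * NVME_CTRL_PAGE_SIZE is 4096 bytes regardless of the kernel's PAGE_SIZE,
 * so splitting a transfer into controller pages is plain arithmetic, e.g.:
 *
 *	nr_ctrl_pages = DIV_ROUND_UP(buf_len, NVME_CTRL_PAGE_SIZE);
 *	(buf_len = 16384  ->  4 controller pages)
 */
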
extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns 0's on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Set MEDIUM priority on SQ creation
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),

	/*
	 * Ignore device provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN		= (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES		= (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND		= (1 << 10),

	/*
	 * Use only one interrupt vector for all queues
	 */
	NVME_QUIRK_SINGLE_VECTOR		= (1 << 11),

	/*
	 * Use non-standard 128-byte SQEs.
	 */
	NVME_QUIRK_128_BYTES_SQES		= (1 << 12),

	/*
	 * Prevent tag overlap between queues
	 */
	NVME_QUIRK_SHARED_TAGS			= (1 << 13),

	/*
	 * Don't change the value of the temperature threshold feature
	 */
	NVME_QUIRK_NO_TEMP_THRESH_CHANGE	= (1 << 14),

	/*
	 * The controller doesn't handle the Identify Namespace
	 * Identification Descriptor list subcommand despite claiming
	 * NVMe 1.3 compliance.
	 */
	NVME_QUIRK_NO_NS_DESC_LIST		= (1 << 15),

	/*
	 * The controller does not properly handle DMA addresses over
	 * 48 bits.
	 */
	NVME_QUIRK_DMA_ADDRESS_BITS_48		= (1 << 16),

	/*
	 * The controller requires the command_id value be limited, so skip
	 * encoding the generation sequence number.
	 */
	NVME_QUIRK_SKIP_CID_GEN			= (1 << 17),

	/*
	 * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
	 */
	NVME_QUIRK_BOGUS_NID			= (1 << 18),

	/*
	 * No temperature thresholds for channels other than 0 (Composite).
	 */
	NVME_QUIRK_NO_SECONDARY_TEMP_THRESH	= (1 << 19),

	/*
	 * Disables simple suspend/resume path.
	 */
	NVME_QUIRK_FORCE_NO_SIMPLE_SUSPEND	= (1 << 20),

	/*
	 * MSI (but not MSI-X) interrupts are broken and never fire.
	 */
	NVME_QUIRK_BROKEN_MSI			= (1 << 21),
};
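
/*
 * Illustrative sketch, not part of the driver: quirks are carried in the
 * unsigned long ->quirks field of struct nvme_ctrl and tested with a plain
 * bitwise AND, e.g.:
 *
 *	if (ctrl->quirks & NVME_QUIRK_NO_APST)
 *		(skip APST setup)
 */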

/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			genctr;
	u8			retries;
	u8			flags;
	u16			status;
#ifdef CONFIG_NVME_MULTIPATH
	unsigned long		start_time;
#endif
	struct nvme_ctrl	*ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED		= (1 << 0),
	NVME_REQ_USERCMD		= (1 << 1),
	NVME_MPATH_IO_STATS		= (1 << 2),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->q->queuedata)
		return 0;

	return req->mq_hctx->queue_num + 1;
}

/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

/*
 * enum nvme_ctrl_state: Controller state
 *
 * @NVME_CTRL_NEW:		New controller just allocated, initial state
 * @NVME_CTRL_LIVE:		Controller is connected and I/O capable
 * @NVME_CTRL_RESETTING:	Controller is resetting (or scheduled reset)
 * @NVME_CTRL_CONNECTING:	Controller is disconnected, now connecting the
 *				transport
 * @NVME_CTRL_DELETING:		Controller is deleting (or scheduled deletion)
 * @NVME_CTRL_DELETING_NOIO:	Controller is deleting and I/O is not
 *				disabled/failed immediately. This state comes
 *				after all async event processing has taken
 *				place and before namespace removal and
 *				controller deletion progress
 * @NVME_CTRL_DEAD:		Controller is non-present/unresponsive during
 *				shutdown or removal. In this case we forcibly
 *				kill all inflight I/O as they have no chance to
 *				complete
 */
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};

struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};

enum nvme_ctrl_flags {
	NVME_CTRL_FAILFAST_EXPIRED	= 0,
	NVME_CTRL_ADMIN_Q_STOPPED	= 1,
	NVME_CTRL_STARTED_ONCE		= 2,
	NVME_CTRL_STOPPED		= 3,
	NVME_CTRL_SKIP_ID_CNS_CS	= 4,
	NVME_CTRL_DIRTY_CAPABILITY	= 5,
	NVME_CTRL_FROZEN		= 6,
};

struct nvme_ctrl {
	bool comp_seen;
	bool identified;
	enum nvme_ctrl_state state;
	spinlock_t lock;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct request_queue *fabrics_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
#ifdef CONFIG_NVME_HWMON
	struct device *hwmon_device;
#endif
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;
	wait_queue_head_t state_wq;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u16 mtfa;
	u32 ctrl_config;
	u32 queue_count;

	u64 cap;
	u32 max_hw_sectors;
	u32 max_segments;
	u32 max_integrity_segments;
	u32 max_discard_sectors;
	u32 max_discard_segments;
	u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
	u32 max_zone_append;
#endif
	u16 crdt[3];
	u16 oncs;
	u32 dmrsl;
	u16 oacs;
	u16 sqsize;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u16 wctemp;
	u16 cctemp;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct xarray cels;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct delayed_work failfast_work;
	struct nvme_command ka_cmd;
	unsigned long ka_last_check_time;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
#endif

#ifdef CONFIG_NVME_AUTH
	struct work_struct dhchap_auth_work;
	struct mutex dhchap_auth_mutex;
	struct nvme_dhchap_queue_context *dhchap_ctxs;
	struct nvme_dhchap_key *host_key;
	struct nvme_dhchap_key *ctrl_key;
	u16 transaction;
#endif

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u16 hmmaxd;
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;

	/* Fabrics only */
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	unsigned long flags;
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;

	enum nvme_ctrl_type cntrltype;
	enum nvme_dctype dctype;
};

static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
{
	return READ_ONCE(ctrl->state);
}

enum nvme_iopolicy {
	NVME_IOPOLICY_NUMA,
	NVME_IOPOLICY_RR,
};

struct nvme_subsystem {
	int			instance;
	struct device		dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref		ref;
	struct list_head	entry;
	struct mutex		lock;
	struct list_head	ctrls;
	struct list_head	nsheads;
	char			subnqn[NVMF_NQN_SIZE];
	char			serial[20];
	char			model[40];
	char			firmware_rev[8];
	u8			cmic;
	enum nvme_subsys_type	subtype;
	u16			vendor_id;
	u16			awupf;	/* 0's based awupf value. */
	struct ida		ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy	iopolicy;
#endif
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8	eui64[8];
	u8	nguid[16];
	uuid_t	uuid;
	u8	csi;
};

/*
 * Anchor structure for namespaces.  There is one for each namespace in a
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it.  For private namespaces
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head	list;
	struct srcu_struct	srcu;
	struct nvme_subsystem	*subsys;
	unsigned		ns_id;
	struct nvme_ns_ids	ids;
	struct list_head	entry;
	struct kref		ref;
	bool			shared;
	int			instance;
	struct nvme_effects_log *effects;

	struct cdev		cdev;
	struct device		cdev_device;

	struct gendisk		*disk;
#ifdef CONFIG_NVME_MULTIPATH
	struct bio_list		requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;
	struct mutex		lock;
	unsigned long		flags;
#define NVME_NSHEAD_DISK_LIVE	0
	struct nvme_ns __rcu	*current_path[];
#endif
};

static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
{
	return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
}

enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
	NVME_NS_DEAC,		/* DEAC bit in Write Zeroes supported */
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 pi_size;
	u16 sgs;
	u32 sws;
	u8 pi_type;
	u8 guard_type;
#ifdef CONFIG_BLK_DEV_ZONED
	u64 zsze;
#endif
	unsigned long features;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_ANA_PENDING	2
#define NVME_NS_FORCE_RO	3
#define NVME_NS_READY		4

	struct cdev		cdev;
	struct device		cdev_device;

	struct nvme_fault_inject fault_inject;

};

/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == ns->pi_size;
}

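/*
 * Illustrative sketch, not part of the driver: ->pi_size is the size of one
 * protection information tuple and ->ms the per-block metadata size, so the
 * check above is true only when the metadata consists solely of PI.  E.g.
 * assuming the 16-bit guard format (pi_size = 8):
 *
 *	ms = 8,  pi_type = 1  ->  nvme_ns_has_pi() == true
 *	ms = 16, pi_type = 1  ->  false (extra metadata beside the PI tuple)
 *	ms = 8,  pi_type = 0  ->  false (metadata present, but no PI in use)
 */
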
struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_BLOCKING			(1 << 2)

	const struct attribute_group **dev_attr_groups;
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
	void (*print_device_info)(struct nvme_ctrl *ctrl);
	bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
};

/*
 * nvme command_id is constructed as such:
 * | xxxx | xxxxxxxxxxxx |
 *   gen    request tag
 */
#define nvme_genctr_mask(gen)			(gen & 0xf)
#define nvme_cid_install_genctr(gen)		(nvme_genctr_mask(gen) << 12)
#define nvme_genctr_from_cid(cid)		((cid & 0xf000) >> 12)
#define nvme_tag_from_cid(cid)			(cid & 0xfff)

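/*
 * Illustrative worked example, not part of the driver: for a request with
 * tag 0x02a at generation counter 5, the 16-bit command_id becomes
 *
 *	cid = nvme_cid_install_genctr(5) | 0x02a = 0x502a
 *
 * and decoding recovers both halves:
 *
 *	nvme_genctr_from_cid(0x502a) == 0x5
 *	nvme_tag_from_cid(0x502a)    == 0x02a
 */
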
static inline u16 nvme_cid(struct request *rq)
{
	return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
}

static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	u8 genctr = nvme_genctr_from_cid(command_id);
	u16 tag = nvme_tag_from_cid(command_id);
	struct request *rq;

	rq = blk_mq_tag_to_rq(tags, tag);
	if (unlikely(!rq)) {
		pr_err("could not locate request for tag %#x\n",
			tag);
		return NULL;
	}
	if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
		dev_err(nvme_req(rq)->ctrl->device,
			"request %#x genctr mismatch (got %#x expected %#x)\n",
			tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
		return NULL;
	}
	return rq;
}

static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
}

/*
 * Return the length of the string without the space padding
 */
static inline int nvme_strlen(char *s, int len)
{
	while (s[len - 1] == ' ')
		len--;
	return len;
}

static inline void nvme_print_device_info(struct nvme_ctrl *ctrl)
{
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (ctrl->ops->print_device_info) {
		ctrl->ops->print_device_info(ctrl);
		return;
	}

	dev_err(ctrl->device,
		"VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id,
		nvme_strlen(subsys->model, sizeof(subsys->model)),
		subsys->model, nvme_strlen(subsys->firmware_rev,
					   sizeof(subsys->firmware_rev)),
		subsys->firmware_rev);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif

bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	int ret;

	if (!ctrl->subsystem)
		return -ENOTTY;
	if (!nvme_wait_reset(ctrl))
		return -EBUSY;

	/* 0x4E564D65 is ASCII "NVMe", the value NSSR requires */
	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
	if (ret)
		return ret;

	return nvme_try_sched_reset(ctrl);
}

/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
{
	return sector >> (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
	return lba << (ns->lba_shift - SECTOR_SHIFT);
}

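/*
 * Illustrative worked example, not part of the driver: on a namespace
 * formatted with 4096-byte logical blocks, ->lba_shift is 12 and
 * SECTOR_SHIFT is 9, so both conversions shift by 3 bits:
 *
 *	nvme_sect_to_lba(ns, 80) == 10	(sector 80 * 512 == LBA 10 * 4096)
 *	nvme_lba_to_sect(ns, 10) == 80
 */
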
/*
 * Convert byte length to nvme's 0-based num dwords
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}

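/*
 * Illustrative worked example, not part of the driver: a 4096-byte buffer
 * is 1024 dwords, which NVMe encodes 0-based:
 *
 *	nvme_bytes_to_numd(4096) == (4096 >> 2) - 1 == 1023
 */
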
static inline bool nvme_is_ana_error(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		return true;
	default:
		return false;
	}
}

static inline bool nvme_is_path_error(u16 status)
{
	/* check for a status code type of 'path related status' */
	return (status & 0x700) == 0x300;
}

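/*
 * Illustrative sketch, not part of the driver: bits 10:8 of an NVMe status
 * value carry the status code type, and type 3h is "path related status".
 * For example, assuming NVME_SC_ANA_TRANSITION is 0x303:
 *
 *	nvme_is_path_error(0x303) == true	((0x303 & 0x700) == 0x300)
 *	nvme_is_path_error(0x002) == false	(generic status, type 0h)
 */
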
/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if yes do
 * so.  If not let the caller complete the request without an indirect function
 * call.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);
	struct nvme_ctrl *ctrl = rq->ctrl;

	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
		rq->genctr++;

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	return !qid &&
		nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}

/*
 * Returns true for sink states that can't ever transition back to live.
 */
static inline bool nvme_state_terminal(struct nvme_ctrl *ctrl)
{
	switch (nvme_ctrl_state(ctrl)) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
	case NVME_CTRL_RESETTING:
	case NVME_CTRL_CONNECTING:
		return false;
	case NVME_CTRL_DELETING:
	case NVME_CTRL_DELETING_NOIO:
	case NVME_CTRL_DEAD:
		return true;
	default:
		WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
		return true;
	}
}

void nvme_end_req(struct request *req);
void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);

static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
						void (*fn)(struct request *rq))
{
	struct request *req;

	rq_list_for_each(&iob->req_list, req) {
		fn(req);
		nvme_complete_batch_req(req);
	}
	blk_mq_end_request_batch(iob);
}

blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended);
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int cmd_size);
void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int nr_maps,
		unsigned int cmd_size);
void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl);
void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl);
void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl);
void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl);
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

static inline enum req_op nvme_req_op(struct nvme_command *cmd)
{
	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}

#define NVME_QID_ANY -1
void nvme_init_request(struct request *req, struct nvme_command *cmd);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live);

static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	if (likely(ctrl->state == NVME_CTRL_LIVE))
		return true;
	if (ctrl->ops->flags & NVME_F_FABRICS &&
	    ctrl->state == NVME_CTRL_DELETING)
		return queue_live;
	return __nvme_check_ready(ctrl, rq, queue_live);
}

/*
 * The NSID shall be unique for all shared namespaces, or if at least one of
 * the following conditions is met:
 *   1. Namespace Management is supported by the controller
 *   2. ANA is supported by the controller
 *   3. NVM Sets are supported by the controller
 *
 * Otherwise, private namespaces are not required to report a unique NSID.
 */
static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return head->shared ||
		(ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
		(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
		(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
}

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		int qid, int at_head,
		blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner);
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
int nvme_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
		struct io_comp_batch *iob, unsigned int poll_flags);
int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags);
int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
extern const struct attribute_group nvme_dev_attrs_group;
extern const struct attribute_group *nvme_subsys_attrs_groups[];
extern const struct attribute_group *nvme_dev_attr_groups[];
extern const struct block_device_operations nvme_bdev_ops;

void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
void nvme_mpath_start_request(struct request *rq);
void nvme_mpath_end_request(struct request *rq);

static inline void nvme_trace_bio_complete(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;

	if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern bool multipath;
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;

95532acab31SChristoph Hellwig #else
956b739e137SChristoph Hellwig #define multipath false
nvme_ctrl_use_ana(struct nvme_ctrl * ctrl)9570d0b660fSChristoph Hellwig static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
9580d0b660fSChristoph Hellwig {
9590d0b660fSChristoph Hellwig 	return false;
9600d0b660fSChristoph Hellwig }
nvme_failover_req(struct request * req)9615ddaabe8SChristoph Hellwig static inline void nvme_failover_req(struct request *req)
96232acab31SChristoph Hellwig {
96332acab31SChristoph Hellwig }
nvme_kick_requeue_lists(struct nvme_ctrl * ctrl)96432acab31SChristoph Hellwig static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
96532acab31SChristoph Hellwig {
96632acab31SChristoph Hellwig }
nvme_mpath_alloc_disk(struct nvme_ctrl * ctrl,struct nvme_ns_head * head)96732acab31SChristoph Hellwig static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
96832acab31SChristoph Hellwig 		struct nvme_ns_head *head)
96932acab31SChristoph Hellwig {
97032acab31SChristoph Hellwig 	return 0;
97132acab31SChristoph Hellwig }
nvme_mpath_add_disk(struct nvme_ns * ns,__le32 anagrpid)972c13cf14fSJoel Granados static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
97332acab31SChristoph Hellwig {
97432acab31SChristoph Hellwig }
nvme_mpath_remove_disk(struct nvme_ns_head * head)97532acab31SChristoph Hellwig static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
97632acab31SChristoph Hellwig {
97732acab31SChristoph Hellwig }
nvme_mpath_clear_current_path(struct nvme_ns * ns)9780157ec8dSSagi Grimberg static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
9790157ec8dSSagi Grimberg {
9800157ec8dSSagi Grimberg 	return false;
9810157ec8dSSagi Grimberg }
nvme_mpath_revalidate_paths(struct nvme_ns * ns)982e7d65803SHannes Reinecke static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
983e7d65803SHannes Reinecke {
984e7d65803SHannes Reinecke }
nvme_mpath_clear_ctrl_paths(struct nvme_ctrl * ctrl)9850157ec8dSSagi Grimberg static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
98632acab31SChristoph Hellwig {
98732acab31SChristoph Hellwig }
nvme_mpath_shutdown_disk(struct nvme_ns_head * head)9885396fdacSHannes Reinecke static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
989479a322fSSagi Grimberg {
990479a322fSSagi Grimberg }
nvme_trace_bio_complete(struct request * req)9912b59787aSMax Gurtovoy static inline void nvme_trace_bio_complete(struct request *req)
99235fe0d12SHannes Reinecke {
99335fe0d12SHannes Reinecke }
nvme_mpath_init_ctrl(struct nvme_ctrl * ctrl)9945e1f6899SChristoph Hellwig static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
9955e1f6899SChristoph Hellwig {
9965e1f6899SChristoph Hellwig }
nvme_mpath_init_identify(struct nvme_ctrl * ctrl,struct nvme_id_ctrl * id)9975e1f6899SChristoph Hellwig static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
9980d0b660fSChristoph Hellwig 		struct nvme_id_ctrl *id)
9990d0b660fSChristoph Hellwig {
10002bd64307SKanchan Joshi 	if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
100114a1336eSChristoph Hellwig 		dev_warn(ctrl->device,
100214a1336eSChristoph Hellwig "Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
10030d0b660fSChristoph Hellwig 	return 0;
10040d0b660fSChristoph Hellwig }
nvme_mpath_update(struct nvme_ctrl * ctrl)1005a4a6f3c8SAnton Eidelman static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
1006a4a6f3c8SAnton Eidelman {
1007a4a6f3c8SAnton Eidelman }
10080d0b660fSChristoph Hellwig static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
10090d0b660fSChristoph Hellwig {
10100d0b660fSChristoph Hellwig }
10110d0b660fSChristoph Hellwig static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
10120d0b660fSChristoph Hellwig {
10130d0b660fSChristoph Hellwig }
1014b9156daeSSagi Grimberg static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
1015b9156daeSSagi Grimberg {
1016b9156daeSSagi Grimberg }
1017b9156daeSSagi Grimberg static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
1018b9156daeSSagi Grimberg {
1019b9156daeSSagi Grimberg }
1020b9156daeSSagi Grimberg static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
1021b9156daeSSagi Grimberg {
1022b9156daeSSagi Grimberg }
1023e3d34794SHannes Reinecke static inline void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
1024e3d34794SHannes Reinecke {
1025e3d34794SHannes Reinecke }
1026d4d957b5SSagi Grimberg static inline void nvme_mpath_start_request(struct request *rq)
1027d4d957b5SSagi Grimberg {
1028d4d957b5SSagi Grimberg }
1029d4d957b5SSagi Grimberg static inline void nvme_mpath_end_request(struct request *rq)
1030d4d957b5SSagi Grimberg {
1031d4d957b5SSagi Grimberg }
103232acab31SChristoph Hellwig #endif /* CONFIG_NVME_MULTIPATH */
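/*
 * Note: with CONFIG_NVME_MULTIPATH disabled, the no-op stubs above let the
 * core call the multipath hooks unconditionally; nvme_ctrl_use_ana() and
 * nvme_mpath_clear_current_path() constant-fold to false, so the compiler
 * can discard the multipath branches entirely.
 */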
103332acab31SChristoph Hellwig 
10347fad20ddSChristoph Hellwig int nvme_revalidate_zones(struct nvme_ns *ns);
10358b4fb0f9SChristoph Hellwig int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
10368b4fb0f9SChristoph Hellwig 		unsigned int nr_zones, report_zones_cb cb, void *data);
1037240e6ee2SKeith Busch #ifdef CONFIG_BLK_DEV_ZONED
1038d525c3c0SChristoph Hellwig int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
1039240e6ee2SKeith Busch blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
1040240e6ee2SKeith Busch 				       struct nvme_command *cmnd,
1041240e6ee2SKeith Busch 				       enum nvme_zone_mgmt_action action);
1042240e6ee2SKeith Busch #else
1043240e6ee2SKeith Busch static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
1044240e6ee2SKeith Busch 		struct request *req, struct nvme_command *cmnd,
1045240e6ee2SKeith Busch 		enum nvme_zone_mgmt_action action)
1046240e6ee2SKeith Busch {
1047240e6ee2SKeith Busch 	return BLK_STS_NOTSUPP;
1048240e6ee2SKeith Busch }
1049240e6ee2SKeith Busch 
1050d525c3c0SChristoph Hellwig static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
1051240e6ee2SKeith Busch {
1052240e6ee2SKeith Busch 	dev_warn(ns->ctrl->device,
1053240e6ee2SKeith Busch 		 "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
1054240e6ee2SKeith Busch 	return -EPROTONOSUPPORT;
1055240e6ee2SKeith Busch }
1056240e6ee2SKeith Busch #endif
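/*
 * Note: without CONFIG_BLK_DEV_ZONED, zone management sends fail with
 * BLK_STS_NOTSUPP and nvme_update_zone_info() returns -EPROTONOSUPPORT, so
 * a ZNS namespace cannot be set up and the hint above is logged instead.
 */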
1057240e6ee2SKeith Busch 
105840267efdSSimon A. F. Lund static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
105940267efdSSimon A. F. Lund {
106040267efdSSimon A. F. Lund 	return dev_to_disk(dev)->private_data;
106140267efdSSimon A. F. Lund }
1062ca064085SMatias Bjørling 
1063400b6a7bSGuenter Roeck #ifdef CONFIG_NVME_HWMON
106459e330f8SKeith Busch int nvme_hwmon_init(struct nvme_ctrl *ctrl);
1065ed7770f6SHannes Reinecke void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
1066400b6a7bSGuenter Roeck #else
106759e330f8SKeith Busch static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
106859e330f8SKeith Busch {
106959e330f8SKeith Busch 	return 0;
107059e330f8SKeith Busch }
1071ed7770f6SHannes Reinecke 
1072ed7770f6SHannes Reinecke static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
1073ed7770f6SHannes Reinecke {
1074ed7770f6SHannes Reinecke }
1075400b6a7bSGuenter Roeck #endif
1076400b6a7bSGuenter Roeck 
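/*
 * Starts a request, first hooking multipath I/O accounting: requests tagged
 * REQ_NVME_MPATH were submitted through the subsystem's multipath node, and
 * their statistics are charged to that node before the transport driver
 * starts the request proper.
 */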
10776887fc64SSagi Grimberg static inline void nvme_start_request(struct request *rq)
10786887fc64SSagi Grimberg {
1079d4d957b5SSagi Grimberg 	if (rq->cmd_flags & REQ_NVME_MPATH)
1080d4d957b5SSagi Grimberg 		nvme_mpath_start_request(rq);
10816887fc64SSagi Grimberg 	blk_mq_start_request(rq);
10826887fc64SSagi Grimberg }
10836887fc64SSagi Grimberg 
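/*
 * ctrl->sgls caches the SGLS field of the Identify Controller data; per the
 * NVMe spec, bits 1:0 encode SGL support (01b: supported with no alignment
 * requirement, 10b: supported with dword alignment), so either value makes
 * this helper return true.
 */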
108473eefc27SChaitanya Kulkarni static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
108573eefc27SChaitanya Kulkarni {
108673eefc27SChaitanya Kulkarni 	return ctrl->sgls & ((1 << 0) | (1 << 1));
108773eefc27SChaitanya Kulkarni }
108873eefc27SChaitanya Kulkarni 
1089f50fff73SHannes Reinecke #ifdef CONFIG_NVME_AUTH
1090e481fc0aSSagi Grimberg int __init nvme_init_auth(void);
1091e481fc0aSSagi Grimberg void __exit nvme_exit_auth(void);
1092193a8c7eSSagi Grimberg int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
1093f50fff73SHannes Reinecke void nvme_auth_stop(struct nvme_ctrl *ctrl);
1094f50fff73SHannes Reinecke int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
1095f50fff73SHannes Reinecke int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
1096f50fff73SHannes Reinecke void nvme_auth_free(struct nvme_ctrl *ctrl);
1097f50fff73SHannes Reinecke #else
1098193a8c7eSSagi Grimberg static inline int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
1099193a8c7eSSagi Grimberg {
1100193a8c7eSSagi Grimberg 	return 0;
1101193a8c7eSSagi Grimberg }
1102e481fc0aSSagi Grimberg static inline int __init nvme_init_auth(void)
1103e481fc0aSSagi Grimberg {
1104e481fc0aSSagi Grimberg 	return 0;
1105e481fc0aSSagi Grimberg }
1106e481fc0aSSagi Grimberg static inline void __exit nvme_exit_auth(void)
1107e481fc0aSSagi Grimberg {
1108e481fc0aSSagi Grimberg }
1109f50fff73SHannes Reinecke static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {};
1110f50fff73SHannes Reinecke static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
1111f50fff73SHannes Reinecke {
1112f50fff73SHannes Reinecke 	return -EPROTONOSUPPORT;
1113f50fff73SHannes Reinecke }
1114f50fff73SHannes Reinecke static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
1115f50fff73SHannes Reinecke {
1116f50fff73SHannes Reinecke 	return NVME_SC_AUTH_REQUIRED;
1117f50fff73SHannes Reinecke }
1118f50fff73SHannes Reinecke static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {};
1119f50fff73SHannes Reinecke #endif
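/*
 * Note: the !CONFIG_NVME_AUTH stubs fail authentication attempts cleanly:
 * nvme_auth_negotiate() reports -EPROTONOSUPPORT and nvme_auth_wait()
 * reports NVME_SC_AUTH_REQUIRED, so a connection that requires in-band
 * authentication cannot silently proceed unauthenticated.
 */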
1120f50fff73SHannes Reinecke 
1121df21b6b1SLogan Gunthorpe u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1122df21b6b1SLogan Gunthorpe 			 u8 opcode);
112362281b9eSChristoph Hellwig u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode);
112462281b9eSChristoph Hellwig int nvme_execute_rq(struct request *rq, bool at_head);
112531a59782Smin15.li void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
1126bc8fb906SKeith Busch 		       struct nvme_command *cmd, int status);
1127b2702aaaSChaitanya Kulkarni struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
112824493b8bSLogan Gunthorpe struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
112924493b8bSLogan Gunthorpe void nvme_put_ns(struct nvme_ns *ns);
1130df21b6b1SLogan Gunthorpe 
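/*
 * True when CC.CSS was programmed to 110b (all supported I/O command sets);
 * multiple command set support is a prerequisite for non-NVM command sets
 * such as ZNS.
 */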
113143dc9878SAdam Manzanares static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
113243dc9878SAdam Manzanares {
113343dc9878SAdam Manzanares 	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
113443dc9878SAdam Manzanares }
113543dc9878SAdam Manzanares 
1136bd83fe6fSAlan Adamson #ifdef CONFIG_NVME_VERBOSE_ERRORS
1137bd83fe6fSAlan Adamson const unsigned char *nvme_get_error_status_str(u16 status);
1138bd83fe6fSAlan Adamson const unsigned char *nvme_get_opcode_str(u8 opcode);
1139bd83fe6fSAlan Adamson const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
1140567da14dSAmit Engel const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode);
1141bd83fe6fSAlan Adamson #else /* CONFIG_NVME_VERBOSE_ERRORS */
1142bd83fe6fSAlan Adamson static inline const unsigned char *nvme_get_error_status_str(u16 status)
1143bd83fe6fSAlan Adamson {
1144bd83fe6fSAlan Adamson 	return "I/O Error";
1145bd83fe6fSAlan Adamson }
1146bd83fe6fSAlan Adamson static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
1147bd83fe6fSAlan Adamson {
1148bd83fe6fSAlan Adamson 	return "I/O Cmd";
1149bd83fe6fSAlan Adamson }
1150bd83fe6fSAlan Adamson static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
1151bd83fe6fSAlan Adamson {
1152bd83fe6fSAlan Adamson 	return "Admin Cmd";
1153bd83fe6fSAlan Adamson }
1154567da14dSAmit Engel 
1155567da14dSAmit Engel static inline const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode)
1156567da14dSAmit Engel {
1157567da14dSAmit Engel 	return "Fabrics Cmd";
1158567da14dSAmit Engel }
1159bd83fe6fSAlan Adamson #endif /* CONFIG_NVME_VERBOSE_ERRORS */
1160bd83fe6fSAlan Adamson 
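/*
 * Maps a failed command to a printable name for error logging: fabrics
 * commands are decoded by fctype, qid 0 is the admin queue, and anything
 * else is an I/O command. A caller sketch (hypothetical, for illustration):
 *
 *	dev_warn(ctrl->device, "%s timed out\n",
 *		 nvme_opcode_str(qid, cmd->common.opcode, cmd->fabrics.fctype));
 */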
1161567da14dSAmit Engel static inline const unsigned char *nvme_opcode_str(int qid, u8 opcode, u8 fctype)
1162567da14dSAmit Engel {
1163567da14dSAmit Engel 	if (opcode == nvme_fabrics_command)
1164567da14dSAmit Engel 		return nvme_get_fabrics_opcode_str(fctype);
1165567da14dSAmit Engel 	return qid ? nvme_get_opcode_str(opcode) :
1166567da14dSAmit Engel 		nvme_get_admin_opcode_str(opcode);
1167567da14dSAmit Engel }
116857dacad5SJay Sternberg #endif /* _NVME_H */
1169