xref: /openbmc/linux/drivers/nvme/host/nvme.h (revision dda3248e)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>

#include <trace/events/block.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define NVME_ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5
#define NVME_KATO_GRACE		10

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define  NVME_INLINE_SG_CNT  0
#define  NVME_INLINE_METADATA_SG_CNT  0
#else
#define  NVME_INLINE_SG_CNT  2
#define  NVME_INLINE_METADATA_SG_CNT  1
#endif

/*
 * Default to a 4K page size, with the intention to update this
 * path in the future to accommodate architectures with differing
 * kernel and IO page sizes.
 */
#define NVME_CTRL_PAGE_SHIFT	12
#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)

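/*
 * Illustrative note (not part of the original header): because the
 * controller page size is fixed at 4K here, size-to-page arithmetic is
 * done with NVME_CTRL_PAGE_SHIFT rather than the kernel's PAGE_SHIFT,
 * e.g. a 16KB transfer spans 16384 >> NVME_CTRL_PAGE_SHIFT == 4
 * controller pages even on a 64K-page kernel.
 */
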
extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns 0's on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Supports the LightNVM command set if indicated in vs[1].
	 */
	NVME_QUIRK_LIGHTNVM			= (1 << 6),

	/*
	 * Set MEDIUM priority on SQ creation
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),

	/*
	 * Ignore device provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN		= (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES		= (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND		= (1 << 10),

	/*
	 * Use only one interrupt vector for all queues
	 */
	NVME_QUIRK_SINGLE_VECTOR		= (1 << 11),

	/*
	 * Use non-standard 128-byte SQEs.
	 */
	NVME_QUIRK_128_BYTES_SQES		= (1 << 12),

	/*
	 * Prevent tag overlap between queues
	 */
	NVME_QUIRK_SHARED_TAGS                  = (1 << 13),

	/*
	 * Don't change the value of the temperature threshold feature
	 */
	NVME_QUIRK_NO_TEMP_THRESH_CHANGE	= (1 << 14),

	/*
	 * The controller doesn't handle the Identify Namespace
	 * Identification Descriptor list subcommand despite claiming
	 * NVMe 1.3 compliance.
	 */
	NVME_QUIRK_NO_NS_DESC_LIST		= (1 << 15),
};

/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			retries;
	u8			flags;
	u16			status;
	struct nvme_ctrl	*ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED		= (1 << 0),
	NVME_REQ_USERCMD		= (1 << 1),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->q->queuedata)
		return 0;

	return req->mq_hctx->queue_num + 1;
}

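/*
 * Illustrative note (not part of the original header): the admin queue
 * has no queuedata, so admin commands report qid 0 above, while I/O
 * requests map hardware context N to the 1-based NVMe queue ID N + 1.
 */
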
/*
 * The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

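/*
 * A minimal sketch (not from this header) of how a PCI transport driver
 * ties the quirk above to that device in its device ID table; the
 * sketch_id_table name is hypothetical, the real table lives in the
 * PCIe transport driver:
 *
 *	static const struct pci_device_id sketch_id_table[] = {
 *		{ PCI_DEVICE(0x1c58, 0x0003),
 *			.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 *		{ 0, }
 *	};
 */
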
/*
 * enum nvme_ctrl_state: Controller state
 *
 * @NVME_CTRL_NEW:		New controller just allocated, initial state
 * @NVME_CTRL_LIVE:		Controller is connected and I/O capable
 * @NVME_CTRL_RESETTING:	Controller is resetting (or scheduled reset)
 * @NVME_CTRL_CONNECTING:	Controller is disconnected, now connecting the
 *				transport
 * @NVME_CTRL_DELETING:		Controller is deleting (or scheduled deletion)
 * @NVME_CTRL_DELETING_NOIO:	Controller is deleting and I/O is not
 *				disabled/failed immediately. This state comes
 *				after all async event processing has taken
 *				place and before ns removal and the rest of
 *				controller deletion progresses
 * @NVME_CTRL_DEAD:		Controller is non-present/unresponsive during
 *				shutdown or removal. In this case we forcibly
 *				kill all inflight I/O as they have no chance to
 *				complete
 */
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};

struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};

struct nvme_ctrl {
	bool comp_seen;
	enum nvme_ctrl_state state;
	bool identified;
	spinlock_t lock;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct request_queue *fabrics_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;
	wait_queue_head_t state_wq;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u32 ctrl_config;
	u16 mtfa;
	u32 queue_count;

	u64 cap;
	u32 max_hw_sectors;
	u32 max_segments;
	u32 max_integrity_segments;
#ifdef CONFIG_BLK_DEV_ZONED
	u32 max_zone_append;
#endif
	u16 crdt[3];
	u16 oncs;
	u16 oacs;
	u16 nssa;
	u16 nr_streams;
	u16 sqsize;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u16 wctemp;
	u16 cctemp;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct xarray cels;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct delayed_work failfast_work;
	struct nvme_command ka_cmd;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
#endif

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;
	u16 hmmaxd;

	/* Fabrics only */
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	unsigned long flags;
#define NVME_CTRL_FAILFAST_EXPIRED	0
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;
};

enum nvme_iopolicy {
	NVME_IOPOLICY_NUMA,
	NVME_IOPOLICY_RR,
};

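/*
 * Illustrative note (not part of the original header): with
 * CONFIG_NVME_MULTIPATH the policy is selectable per subsystem at
 * runtime through the subsys_attr_iopolicy sysfs attribute declared
 * further below; NVME_IOPOLICY_NUMA favors the path closest to the
 * submitting NUMA node, NVME_IOPOLICY_RR round-robins across paths.
 */
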
struct nvme_subsystem {
	int			instance;
	struct device		dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref		ref;
	struct list_head	entry;
	struct mutex		lock;
	struct list_head	ctrls;
	struct list_head	nsheads;
	char			subnqn[NVMF_NQN_SIZE];
	char			serial[20];
	char			model[40];
	char			firmware_rev[8];
	u8			cmic;
	u16			vendor_id;
	u16			awupf;	/* 0's based awupf value. */
	struct ida		ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy	iopolicy;
#endif
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8	eui64[8];
	u8	nguid[16];
	uuid_t	uuid;
	u8	csi;
};

/*
 * Anchor structure for namespaces.  There is one for each namespace in a
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it.  For private
 * namespaces there is a 1:1 relation to our namespace structures, that is
 * ->list only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head	list;
	struct srcu_struct	srcu;
	struct nvme_subsystem	*subsys;
	unsigned		ns_id;
	struct nvme_ns_ids	ids;
	struct list_head	entry;
	struct kref		ref;
	bool			shared;
	int			instance;
	struct nvme_effects_log *effects;
#ifdef CONFIG_NVME_MULTIPATH
	struct gendisk		*disk;
	struct bio_list		requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;
	struct mutex		lock;
	unsigned long		flags;
#define NVME_NSHEAD_DISK_LIVE	0
	struct nvme_ns __rcu	*current_path[];
#endif
};

enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct nvm_dev *ndev;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 sgs;
	u32 sws;
	u8 pi_type;
#ifdef CONFIG_BLK_DEV_ZONED
	u64 zsze;
#endif
	unsigned long features;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_DEAD		1
#define NVME_NS_ANA_PENDING	2
#define NVME_NS_FORCE_RO	3

	struct nvme_fault_inject fault_inject;

};

/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple);
}

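/*
 * Illustrative note (not part of the original header): the check above
 * is true only for namespaces whose per-block metadata is exactly one
 * 8-byte T10 DIF tuple; a namespace formatted with, say, 16 bytes of
 * metadata has ms != sizeof(struct t10_pi_tuple) and is treated as
 * opaque metadata rather than protection information.
 */
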
struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_PCI_P2PDMA		(1 << 2)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
};

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

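/*
 * Illustrative note (not part of the original header): 0x4E564D65 is
 * the ASCII string "NVMe" ('N' 'V' 'M' 'e'), the magic value the NVMe
 * specification requires to be written to the NSSR register to trigger
 * a subsystem reset.
 */
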
/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
{
	return sector >> (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
	return lba << (ns->lba_shift - SECTOR_SHIFT);
}

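/*
 * Illustrative note (not part of the original header): for a namespace
 * formatted with 4K logical blocks (lba_shift == 12), sector 8 maps to
 * LBA 8 >> (12 - 9) == 1 above, and LBA 1 maps back to sector
 * 1 << 3 == 8.
 */
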
/*
 * Convert byte length to nvme's 0-based num dwords
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}

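/*
 * Illustrative note (not part of the original header): a 4096-byte
 * buffer yields (4096 >> 2) - 1 == 1023, i.e. 1024 dwords expressed in
 * the spec's 0-based NUMD convention.
 */
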
static inline bool nvme_is_ana_error(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		return true;
	default:
		return false;
	}
}

static inline bool nvme_is_path_error(u16 status)
{
	/* check for a status code type of 'path related status' */
	return (status & 0x700) == 0x300;
}

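/*
 * Illustrative note (not part of the original header): in the shifted
 * status stored in nvme_request, bits 10:8 hold the Status Code Type,
 * and SCT 0x3 is "Path Related Status", so (status & 0x700) == 0x300
 * matches every path error, including the three ANA codes tested above.
 */
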
/*
 * Fill in the status and result information from the CQE, and then figure out
 * if blk-mq will need to use IPI magic to complete the request, and if yes do
 * so.  If not let the caller complete the request without an indirect function
 * call.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}

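/*
 * A minimal sketch (not from this header) of how a transport's CQE
 * handler typically uses the helper above: finish the request directly
 * only when blk-mq did not already complete it remotely. The
 * nvme_sketch_complete_rq() name is hypothetical.
 *
 *	if (!nvme_try_complete_req(req, cqe->status, cqe->result))
 *		nvme_sketch_complete_rq(req);
 */
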
static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
}

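/*
 * Illustrative note (not part of the original header): admin-queue tags
 * at or above NVME_AQ_BLK_MQ_DEPTH are reserved for Asynchronous Event
 * Requests rather than blk-mq requests, so a completion on qid 0 with
 * such a command_id is routed by the transports to
 * nvme_complete_async_event() instead of the normal completion path.
 */
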
void nvme_complete_rq(struct request *req);
blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags, bool poll);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);

int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk,
		struct nvme_ns_head **head, int *srcu_idx);
void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct block_device_operations nvme_ns_head_ops;

#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
blk_qc_t nvme_ns_head_submit_bio(struct bio *bio);

static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (head->disk && list_empty(&head->list))
		kblockd_schedule_work(&head->requeue_work);
}

static inline void nvme_trace_bio_complete(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;

	if (req->cmd_flags & REQ_NVME_MPATH)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;

#else
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
/*
 * Without the multipath code enabled, multiple controllers per subsystem are
 * visible as devices and thus we cannot use the subsystem instance.
 */
static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
				      struct nvme_ctrl *ctrl, int *flags)
{
	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
}

static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
		struct nvme_id_ns *id)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	return false;
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & (1 << 3))
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_revalidate_zones(struct nvme_ns *ns);
#ifdef CONFIG_BLK_DEV_ZONED
int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
int nvme_report_zones(struct gendisk *disk, sector_t sector,
		      unsigned int nr_zones, report_zones_cb cb, void *data);

blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
				       struct nvme_command *cmnd,
				       enum nvme_zone_mgmt_action action);
#else
#define nvme_report_zones NULL

static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action)
{
	return BLK_STS_NOTSUPP;
}

static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
	dev_warn(ns->ctrl->device,
		 "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
	return -EPROTONOSUPPORT;
}
#endif

#ifdef CONFIG_NVM
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
extern const struct attribute_group nvme_nvm_attr_group;
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
				    int node)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {}
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
							unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_NVM */

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
	return 0;
}
#endif

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			 u8 opcode);
void nvme_execute_passthru_rq(struct request *rq);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);

#endif /* _NVME_H */