/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>
#include <linux/wait.h>
#include <linux/t10-pi.h>

#include <trace/events/block.h>

extern const struct pr_ops nvme_pr_ops;

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define NVME_ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define  NVME_INLINE_SG_CNT  0
#define  NVME_INLINE_METADATA_SG_CNT  0
#else
#define  NVME_INLINE_SG_CNT  2
#define  NVME_INLINE_METADATA_SG_CNT  1
#endif

/*
 * Default to a 4K page size, with the intention to update this
 * path in the future to accommodate architectures with differing
 * kernel and IO page sizes.
 */
#define NVME_CTRL_PAGE_SHIFT	12
#define NVME_CTRL_PAGE_SIZE	(1 << NVME_CTRL_PAGE_SHIFT)

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns zeroes on reads from
	 * logical blocks on which Deallocate was called.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before the driver starts checking
	 * device readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Set MEDIUM priority on SQ creation
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),

	/*
	 * Ignore device provided subnqn.
	 */
	NVME_QUIRK_IGNORE_DEV_SUBNQN		= (1 << 8),

	/*
	 * Broken Write Zeroes.
	 */
	NVME_QUIRK_DISABLE_WRITE_ZEROES		= (1 << 9),

	/*
	 * Force simple suspend/resume path.
	 */
	NVME_QUIRK_SIMPLE_SUSPEND		= (1 << 10),

	/*
	 * Use only one interrupt vector for all queues
	 */
	NVME_QUIRK_SINGLE_VECTOR		= (1 << 11),

	/*
	 * Use non-standard 128-byte SQEs.
	 */
	NVME_QUIRK_128_BYTES_SQES		= (1 << 12),

	/*
	 * Prevent tag overlap between queues
	 */
	NVME_QUIRK_SHARED_TAGS			= (1 << 13),

	/*
	 * Don't change the value of the temperature threshold feature
	 */
	NVME_QUIRK_NO_TEMP_THRESH_CHANGE	= (1 << 14),

	/*
	 * The controller doesn't handle the Identify Namespace
	 * Identification Descriptor list subcommand despite claiming
	 * NVMe 1.3 compliance.
	 */
	NVME_QUIRK_NO_NS_DESC_LIST		= (1 << 15),

	/*
	 * The controller does not properly handle DMA addresses over
	 * 48 bits.
	 */
	NVME_QUIRK_DMA_ADDRESS_BITS_48		= (1 << 16),

	/*
	 * The controller requires the command_id value be limited, so skip
	 * encoding the generation sequence number.
	 */
	NVME_QUIRK_SKIP_CID_GEN			= (1 << 17),

	/*
	 * Reports garbage in the namespace identifiers (eui64, nguid, uuid).
	 */
	NVME_QUIRK_BOGUS_NID			= (1 << 18),

	/*
	 * No temperature thresholds for channels other than 0 (Composite).
	 */
	NVME_QUIRK_NO_SECONDARY_TEMP_THRESH	= (1 << 19),
};

/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			genctr;
	u8			retries;
	u8			flags;
	u16			status;
#ifdef CONFIG_NVME_MULTIPATH
	unsigned long		start_time;
#endif
	struct nvme_ctrl	*ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED		= (1 << 0),
	NVME_REQ_USERCMD		= (1 << 1),
	NVME_MPATH_IO_STATS		= (1 << 2),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->q->queuedata)
		return 0;

	return req->mq_hctx->queue_num + 1;
}
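
/*
 * Illustration (editorial, not part of the driver): the admin queue is
 * created without ->queuedata, so its requests report qid 0; a request on
 * the first I/O hardware context (queue_num 0) reports qid 1, matching
 * NVMe's convention of numbering I/O queues from 1.
 */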

/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300

/*
 * enum nvme_ctrl_state: Controller state
 *
 * @NVME_CTRL_NEW:		New controller just allocated, initial state
 * @NVME_CTRL_LIVE:		Controller is connected and I/O capable
 * @NVME_CTRL_RESETTING:	Controller is resetting (or scheduled reset)
 * @NVME_CTRL_CONNECTING:	Controller is disconnected, now connecting the
 *				transport
 * @NVME_CTRL_DELETING:		Controller is deleting (or scheduled deletion)
 * @NVME_CTRL_DELETING_NOIO:	Controller is deleting and I/O is not
 *				disabled/failed immediately. This state is
 *				entered after all async event processing has
 *				taken place and before namespace removal and
 *				controller deletion progress
 * @NVME_CTRL_DEAD:		Controller is non-present/unresponsive during
 *				shutdown or removal. In this case we forcibly
 *				kill all inflight I/O as they have no chance to
 *				complete
 */
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};

struct nvme_fault_inject {
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
#endif
};

enum nvme_ctrl_flags {
	NVME_CTRL_FAILFAST_EXPIRED	= 0,
	NVME_CTRL_ADMIN_Q_STOPPED	= 1,
	NVME_CTRL_STARTED_ONCE		= 2,
	NVME_CTRL_STOPPED		= 3,
	NVME_CTRL_SKIP_ID_CNS_CS	= 4,
};

struct nvme_ctrl {
	bool comp_seen;
	bool identified;
	enum nvme_ctrl_state state;
	spinlock_t lock;
	struct mutex scan_lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct request_queue *fabrics_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
#ifdef CONFIG_NVME_HWMON
	struct device *hwmon_device;
#endif
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;
	wait_queue_head_t state_wq;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u16 mtfa;
	u32 ctrl_config;
	u32 queue_count;

	u64 cap;
	u32 max_hw_sectors;
	u32 max_segments;
	u32 max_integrity_segments;
	u32 max_discard_sectors;
	u32 max_discard_segments;
	u32 max_zeroes_sectors;
#ifdef CONFIG_BLK_DEV_ZONED
	u32 max_zone_append;
#endif
	u16 crdt[3];
	u16 oncs;
	u32 dmrsl;
	u16 oacs;
	u16 sqsize;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u16 wctemp;
	u16 cctemp;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct xarray cels;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct delayed_work failfast_work;
	struct nvme_command ka_cmd;
	unsigned long ka_last_check_time;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
#endif

#ifdef CONFIG_NVME_AUTH
	struct work_struct dhchap_auth_work;
	struct mutex dhchap_auth_mutex;
	struct nvme_dhchap_queue_context *dhchap_ctxs;
	struct nvme_dhchap_key *host_key;
	struct nvme_dhchap_key *ctrl_key;
	u16 transaction;
#endif

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u16 hmmaxd;
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;

	/* Fabrics only */
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	unsigned long flags;
	struct nvmf_ctrl_options *opts;

	struct page *discard_page;
	unsigned long discard_page_busy;

	struct nvme_fault_inject fault_inject;

	enum nvme_ctrl_type cntrltype;
	enum nvme_dctype dctype;
};

enum nvme_iopolicy {
	NVME_IOPOLICY_NUMA,
	NVME_IOPOLICY_RR,
};

struct nvme_subsystem {
	int			instance;
	struct device		dev;
	/*
	 * Because we unregister the device on the last put, we need
	 * a separate refcount.
	 */
	struct kref		ref;
	struct list_head	entry;
	struct mutex		lock;
	struct list_head	ctrls;
	struct list_head	nsheads;
	char			subnqn[NVMF_NQN_SIZE];
	char			serial[20];
	char			model[40];
	char			firmware_rev[8];
	u8			cmic;
	enum nvme_subsys_type	subtype;
	u16			vendor_id;
	u16			awupf;	/* 0's based awupf value. */
	struct ida		ns_ida;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_iopolicy	iopolicy;
#endif
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8	eui64[8];
	u8	nguid[16];
	uuid_t	uuid;
	u8	csi;
};

/*
 * Anchor structure for namespaces.  There is one for each namespace in an
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it.  For private
 * namespaces there is a 1:1 relation to our namespace structures, that is
 * ->list only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head	list;
	struct srcu_struct	srcu;
	struct nvme_subsystem	*subsys;
	unsigned		ns_id;
	struct nvme_ns_ids	ids;
	struct list_head	entry;
	struct kref		ref;
	bool			shared;
	int			instance;
	struct nvme_effects_log *effects;

	struct cdev		cdev;
	struct device		cdev_device;

	struct gendisk		*disk;
#ifdef CONFIG_NVME_MULTIPATH
	struct bio_list		requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;
	struct mutex		lock;
	unsigned long		flags;
#define NVME_NSHEAD_DISK_LIVE	0
	struct nvme_ns __rcu	*current_path[];
#endif
};

static inline bool nvme_ns_head_multipath(struct nvme_ns_head *head)
{
	return IS_ENABLED(CONFIG_NVME_MULTIPATH) && head->disk;
}

enum nvme_ns_features {
	NVME_NS_EXT_LBAS = 1 << 0, /* support extended LBA format */
	NVME_NS_METADATA_SUPPORTED = 1 << 1, /* support getting generated md */
	NVME_NS_DEAC,		/* DEAC bit in Write Zeroes supported */
};

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 pi_size;
	u16 sgs;
	u32 sws;
	u8 pi_type;
	u8 guard_type;
#ifdef CONFIG_BLK_DEV_ZONED
	u64 zsze;
#endif
	unsigned long features;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_ANA_PENDING	2
#define NVME_NS_FORCE_RO	3
#define NVME_NS_READY		4

	struct cdev		cdev;
	struct device		cdev_device;

	struct nvme_fault_inject fault_inject;
};

/* NVMe ns supports metadata actions by the controller (generate/strip) */
static inline bool nvme_ns_has_pi(struct nvme_ns *ns)
{
	return ns->pi_type && ns->ms == ns->pi_size;
}
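
/*
 * Editorial note: the check above holds only when the per-block metadata
 * region is exactly the protection information tuple (ms == pi_size).
 * Formats carrying extra metadata beyond PI are not treated as
 * PI-capable here, since the controller cannot simply generate/strip
 * the whole metadata region on the host's behalf.
 */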

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_BLOCKING			(1 << 2)

	const struct attribute_group **dev_attr_groups;
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
	void (*print_device_info)(struct nvme_ctrl *ctrl);
	bool (*supports_pci_p2pdma)(struct nvme_ctrl *ctrl);
};

/*
 * nvme command_id is constructed as such:
 * | xxxx | xxxxxxxxxxxx |
 *   gen    request tag
 */
#define nvme_genctr_mask(gen)			(gen & 0xf)
#define nvme_cid_install_genctr(gen)		(nvme_genctr_mask(gen) << 12)
#define nvme_genctr_from_cid(cid)		((cid & 0xf000) >> 12)
#define nvme_tag_from_cid(cid)			(cid & 0xfff)
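
/*
 * Worked example (illustrative): with genctr 0x5 and blk-mq tag 0x123,
 * nvme_cid() below yields 0x5123; nvme_genctr_from_cid(0x5123) recovers
 * 0x5 and nvme_tag_from_cid(0x5123) recovers 0x123.  The 4-bit generation
 * counter lets nvme_find_rq() reject completions whose command_id refers
 * to an already-recycled tag.
 */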

static inline u16 nvme_cid(struct request *rq)
{
	return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag;
}

static inline struct request *nvme_find_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	u8 genctr = nvme_genctr_from_cid(command_id);
	u16 tag = nvme_tag_from_cid(command_id);
	struct request *rq;

	rq = blk_mq_tag_to_rq(tags, tag);
	if (unlikely(!rq)) {
		pr_err("could not locate request for tag %#x\n", tag);
		return NULL;
	}
	if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) {
		dev_err(nvme_req(rq)->ctrl->device,
			"request %#x genctr mismatch (got %#x expected %#x)\n",
			tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
		return NULL;
	}
	return rq;
}

static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags,
		u16 command_id)
{
	return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id));
}

/*
 * Return the length of the string without the space padding
 */
static inline int nvme_strlen(char *s, int len)
{
	while (s[len - 1] == ' ')
		len--;
	return len;
}
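
/*
 * Example (illustrative): nvme_strlen("MODEL   ", 8) returns 5.  The
 * helper assumes at least one non-space character, as NVMe identify
 * strings are space-padded ASCII; an all-space buffer would underflow.
 */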

static inline void nvme_print_device_info(struct nvme_ctrl *ctrl)
{
	struct nvme_subsystem *subsys = ctrl->subsys;

	if (ctrl->ops->print_device_info) {
		ctrl->ops->print_device_info(ctrl);
		return;
	}

	dev_err(ctrl->device,
		"VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id,
		nvme_strlen(subsys->model, sizeof(subsys->model)),
		subsys->model, nvme_strlen(subsys->firmware_rev,
					   sizeof(subsys->firmware_rev)),
		subsys->firmware_rev);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
			    const char *dev_name);
void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_fault_inject *fault_inj,
					  const char *dev_name)
{
}
static inline void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inj)
{
}
static inline void nvme_should_fail(struct request *req) {}
#endif

bool nvme_wait_reset(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	int ret;

	if (!ctrl->subsystem)
		return -ENOTTY;
	if (!nvme_wait_reset(ctrl))
		return -EBUSY;

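	/* 0x4E564D65 is ASCII "NVMe", the NSSR value the spec requires */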
	ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
	if (ret)
		return ret;

	return nvme_try_sched_reset(ctrl);
}

/*
 * Convert a 512B sector number to a device logical block number.
 */
static inline u64 nvme_sect_to_lba(struct nvme_ns *ns, sector_t sector)
{
	return sector >> (ns->lba_shift - SECTOR_SHIFT);
}

/*
 * Convert a device logical block number to a 512B sector number.
 */
static inline sector_t nvme_lba_to_sect(struct nvme_ns *ns, u64 lba)
{
	return lba << (ns->lba_shift - SECTOR_SHIFT);
}
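
/*
 * Example (illustrative): on a namespace formatted with 4K logical blocks
 * (lba_shift == 12), sector 8 maps to LBA 8 >> (12 - 9) == 1, and LBA 1
 * maps back to sector 8.  Both helpers assume lba_shift >= SECTOR_SHIFT.
 */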

/*
 * Convert a byte length to NVMe's 0-based number of dwords.
 */
static inline u32 nvme_bytes_to_numd(size_t len)
{
	return (len >> 2) - 1;
}
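
/*
 * Example (illustrative): a 4096-byte buffer is 1024 dwords, encoded
 * 0-based as (4096 >> 2) - 1 == 1023.  len is assumed to be a non-zero
 * multiple of four bytes.
 */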

static inline bool nvme_is_ana_error(u16 status)
{
	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		return true;
	default:
		return false;
	}
}

static inline bool nvme_is_path_error(u16 status)
{
	/* check for a status code type of 'path related status' */
	return (status & 0x700) == 0x300;
}
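
/*
 * Note: in the driver's phase-stripped status representation, bits 10:8
 * hold the Status Code Type; SCT 3h is "path related status".  The ANA
 * codes matched in nvme_is_ana_error() all fall in this type, so they
 * also satisfy nvme_is_path_error().
 */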

/*
 * Fill in the status and result information from the CQE, and then figure
 * out if blk-mq will need to use IPI magic to complete the request, and
 * if so, do it.  If not, let the caller complete the request without an
 * indirect function call.
 */
static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);
	struct nvme_ctrl *ctrl = rq->ctrl;

	if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
		rq->genctr++;

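	/* bit 0 of the CQE status word is the phase tag, so shift it out */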
	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	return blk_mq_complete_request_remote(req);
}

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
{
	return !qid &&
		nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
}
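
/*
 * Note: the driver hands tags below NVME_AQ_BLK_MQ_DEPTH on the admin
 * queue (qid 0) to blk-mq and reserves the command ids at or above it
 * for Asynchronous Event Request commands, so a completion carrying such
 * a tag on qid 0 must be an AEN.
 */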

void nvme_complete_rq(struct request *req);
void nvme_complete_batch_req(struct request *req);

static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
						void (*fn)(struct request *rq))
{
	struct request *req;

	rq_list_for_each(&iob->req_list, req) {
		fn(req);
		nvme_complete_batch_req(req);
	}
	blk_mq_end_request_batch(iob);
}

blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, bool shutdown);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended);
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int cmd_size);
void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl);
int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int nr_maps,
		unsigned int cmd_size);
void nvme_remove_io_tag_set(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_quiesce_io_queues(struct nvme_ctrl *ctrl);
void nvme_unquiesce_io_queues(struct nvme_ctrl *ctrl);
void nvme_quiesce_admin_queue(struct nvme_ctrl *ctrl);
void nvme_unquiesce_admin_queue(struct nvme_ctrl *ctrl);
void nvme_mark_namespaces_dead(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

static inline enum req_op nvme_req_op(struct nvme_command *cmd)
{
	return nvme_is_write(cmd) ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN;
}

#define NVME_QID_ANY -1
void nvme_init_request(struct request *req, struct nvme_command *cmd);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req);
blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl,
		struct request *req);
bool __nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live);

static inline bool nvme_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
		bool queue_live)
{
	if (likely(ctrl->state == NVME_CTRL_LIVE))
		return true;
	if (ctrl->ops->flags & NVME_F_FABRICS &&
	    ctrl->state == NVME_CTRL_DELETING)
		return queue_live;
	return __nvme_check_ready(ctrl, rq, queue_live);
}

/*
 * NSID shall be unique for all shared namespaces, or if at least one of the
 * following conditions is met:
 *   1. Namespace Management is supported by the controller
 *   2. ANA is supported by the controller
 *   3. NVM Sets are supported by the controller
 *
 * Otherwise, private namespaces are not required to report a unique NSID.
 */
static inline bool nvme_is_unique_nsid(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return head->shared ||
		(ctrl->oacs & NVME_CTRL_OACS_NS_MNGT_SUPP) ||
		(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA) ||
		(ctrl->ctratt & NVME_CTRL_CTRATT_NVM_SETS);
}

int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		int qid, int at_head,
		blk_mq_req_flags_t flags);
int nvme_set_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
		      unsigned int dword11, void *buffer, size_t buflen,
		      u32 *result);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
void nvme_queue_scan(struct nvme_ctrl *ctrl);
int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
		void *log, size_t size, u64 offset);
bool nvme_tryget_ns_head(struct nvme_ns_head *head);
void nvme_put_ns_head(struct nvme_ns_head *head);
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
		const struct file_operations *fops, struct module *owner);
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device);
int nvme_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
int nvme_ns_head_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
long nvme_dev_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg);
int nvme_ns_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
		struct io_comp_batch *iob, unsigned int poll_flags);
int nvme_ns_head_chr_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
		struct io_comp_batch *iob, unsigned int poll_flags);
int nvme_ns_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags);
int nvme_ns_head_chr_uring_cmd(struct io_uring_cmd *ioucmd,
		unsigned int issue_flags);
int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo);
int nvme_dev_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct pr_ops nvme_pr_ops;
extern const struct block_device_operations nvme_ns_head_ops;
extern const struct attribute_group nvme_dev_attrs_group;
extern const struct attribute_group *nvme_subsys_attrs_groups[];
extern const struct attribute_group *nvme_dev_attr_groups[];
extern const struct block_device_operations nvme_bdev_ops;

void nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);
#ifdef CONFIG_NVME_MULTIPATH
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return ctrl->ana_log_buf != NULL;
}

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_mpath_update(struct nvme_ctrl *ctrl);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
bool nvme_mpath_clear_current_path(struct nvme_ns *ns);
void nvme_mpath_revalidate_paths(struct nvme_ns *ns);
void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
void nvme_mpath_shutdown_disk(struct nvme_ns_head *head);
void nvme_mpath_start_request(struct request *rq);
void nvme_mpath_end_request(struct request *rq);

static inline void nvme_trace_bio_complete(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;

	if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
		trace_block_bio_complete(ns->head->disk->queue, req->bio);
}

extern bool multipath;
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;

#else
#define multipath false
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	return false;
}
static inline void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_shutdown_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA)
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
{
}
static inline void nvme_mpath_start_request(struct request *rq)
{
}
static inline void nvme_mpath_end_request(struct request *rq)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

int nvme_revalidate_zones(struct nvme_ns *ns);
int nvme_ns_report_zones(struct nvme_ns *ns, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data);
#ifdef CONFIG_BLK_DEV_ZONED
int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf);
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
				       struct nvme_command *cmnd,
				       enum nvme_zone_mgmt_action action);
#else
static inline blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns,
		struct request *req, struct nvme_command *cmnd,
		enum nvme_zone_mgmt_action action)
{
	return BLK_STS_NOTSUPP;
}

static inline int nvme_update_zone_info(struct nvme_ns *ns, unsigned lbaf)
{
	dev_warn(ns->ctrl->device,
		 "Please enable CONFIG_BLK_DEV_ZONED to support ZNS devices\n");
	return -EPROTONOSUPPORT;
}
#endif

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

#ifdef CONFIG_NVME_HWMON
int nvme_hwmon_init(struct nvme_ctrl *ctrl);
void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
#else
static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
{
	return 0;
}

static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
{
}
#endif

static inline void nvme_start_request(struct request *rq)
{
	if (rq->cmd_flags & REQ_NVME_MPATH)
		nvme_mpath_start_request(rq);
	blk_mq_start_request(rq);
}

static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
{
	return ctrl->sgls & ((1 << 0) | (1 << 1));
}
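
/*
 * Note: bits 1:0 of the Identify Controller SGLS field encode SGL
 * support (the two encodings differ in alignment/granularity
 * requirements), so a nonzero value in either bit means the controller
 * can do SGL-based data transfer.
 */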

#ifdef CONFIG_NVME_AUTH
int __init nvme_init_auth(void);
void __exit nvme_exit_auth(void);
int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl);
void nvme_auth_stop(struct nvme_ctrl *ctrl);
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid);
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid);
void nvme_auth_free(struct nvme_ctrl *ctrl);
#else
static inline int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
	return 0;
}
static inline int __init nvme_init_auth(void)
{
	return 0;
}
static inline void __exit nvme_exit_auth(void)
{
}
static inline void nvme_auth_stop(struct nvme_ctrl *ctrl) {}
static inline int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
	return -EPROTONOSUPPORT;
}
static inline int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
	return NVME_SC_AUTH_REQUIRED;
}
static inline void nvme_auth_free(struct nvme_ctrl *ctrl) {}
#endif

u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
			 u8 opcode);
u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode);
int nvme_execute_rq(struct request *rq, bool at_head);
void nvme_passthru_end(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u32 effects,
		       struct nvme_command *cmd, int status);
struct nvme_ctrl *nvme_ctrl_from_file(struct file *file);
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);
void nvme_put_ns(struct nvme_ns *ns);

static inline bool nvme_multi_css(struct nvme_ctrl *ctrl)
{
	return (ctrl->ctrl_config & NVME_CC_CSS_MASK) == NVME_CC_CSS_CSI;
}

#ifdef CONFIG_NVME_VERBOSE_ERRORS
const unsigned char *nvme_get_error_status_str(u16 status);
const unsigned char *nvme_get_opcode_str(u8 opcode);
const unsigned char *nvme_get_admin_opcode_str(u8 opcode);
const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode);
#else /* CONFIG_NVME_VERBOSE_ERRORS */
static inline const unsigned char *nvme_get_error_status_str(u16 status)
{
	return "I/O Error";
}
static inline const unsigned char *nvme_get_opcode_str(u8 opcode)
{
	return "I/O Cmd";
}
static inline const unsigned char *nvme_get_admin_opcode_str(u8 opcode)
{
	return "Admin Cmd";
}

static inline const unsigned char *nvme_get_fabrics_opcode_str(u8 opcode)
{
	return "Fabrics Cmd";
}
#endif /* CONFIG_NVME_VERBOSE_ERRORS */

static inline const unsigned char *nvme_opcode_str(int qid, u8 opcode, u8 fctype)
{
	if (opcode == nvme_fabrics_command)
		return nvme_get_fabrics_opcode_str(fctype);
	return qid ? nvme_get_opcode_str(opcode) :
		nvme_get_admin_opcode_str(opcode);
}
#endif /* _NVME_H */