/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>

#define NVMET_DEFAULT_VS		NVME_VS(1, 3, 0)

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"
#define NVMET_MN_MAX_SIZE		40
#define NVMET_SN_MAX_SIZE		20

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)
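
/*
 * NVMET_AEN_CFG_ALL is intended as the "mask" argument to
 * nvmet_set_feat_async_event() below; configuration bits outside the
 * mask passed there are expected to be rejected when the host sets the
 * Asynchronous Event Configuration feature.
 */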

/*
 * Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16-bit shift sets the IATTR bit to 1, which means the offending
 * offset starts in the data section of the Connect command.
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
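
/*
 * For example, IPO_IATTR_CONNECT_DATA(cntlid) yields the byte offset of
 * the cntlid field within struct nvmf_connect_data with bit 16 (IATTR)
 * set, flagging the error as lying in the Connect data rather than the
 * SQE, while IPO_IATTR_CONNECT_SQE(qid) yields the offset of qid within
 * the Connect command itself with IATTR left clear.
 */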

struct nvmet_ns {
	struct percpu_ref	ref;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;
	struct kmem_cache	*bvec_cache;

	int			use_p2pmem;
	struct pci_dev		*p2p_dev;
	int			pi_type;
	int			metadata_size;
	u8			csi;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}
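
/*
 * Note: nvmet_ns_dev() returns NULL for file-backed namespaces (ns->file
 * set, ns->bdev NULL), so callers must tolerate a NULL device.
 */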

struct nvmet_cq {
	u16			qid;
	u16			size;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *				information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *				for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	const struct nvmet_fabrics_ops	*tr_ops;
	bool				pi_enable;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;

	bool			reset_tbkas;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;

	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
	bool			pi_support;
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct xarray		namespaces;
	unsigned int		nr_namespaces;
	u32			max_nsid;
	u16			cntlid_min;
	u16			cntlid_max;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;

	u16			max_qid;

	u64			ver;
	char			serial[NVMET_SN_MAX_SIZE];
	bool			subsys_discovered;
	char			*subsysnqn;
	bool			pi_support;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

	char			*model_number;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
	unsigned int		admin_timeout;
	unsigned int		io_timeout;
	unsigned int		clear_ids;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

#ifdef CONFIG_BLK_DEV_ZONED
	u8			zasl;
#endif /* CONFIG_BLK_DEV_ZONED */
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	unsigned int flags;
#define NVMF_KEYED_SGLS			(1 << 0)
#define NVMF_METADATA_SUPPORTED		(1 << 1)
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
	u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};

#define NVMET_MAX_INLINE_BIOVEC	8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)
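
/*
 * E.g. with a 4K PAGE_SIZE the inline bio_vec array covers up to
 * 8 * 4096 = 32768 bytes before an external scatterlist is needed;
 * see nvmet_use_inline_bvec() below.
 */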

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct scatterlist	*metadata_sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio	inline_bio;
		} b;
		struct {
			bool			mpool_alloc;
			struct kiocb		iocb;
			struct bio_vec		*bvec;
			struct work_struct	work;
		} f;
		struct {
			struct bio		inline_bio;
			struct request		*rq;
			struct work_struct	work;
			bool			use_workqueue;
		} p;
#ifdef CONFIG_BLK_DEV_ZONED
		struct {
			struct bio		inline_bio;
			struct work_struct	zmgmt_work;
		} z;
#endif /* CONFIG_BLK_DEV_ZONED */
	};
	int			sg_cnt;
	int			metadata_sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;
	size_t			metadata_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
};

extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}
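
/*
 * AEN masking protocol: nvmet_aen_bit_disabled() returns true, suppressing
 * the event, if the host has not enabled the bit or if an event for it is
 * already outstanding; the test_and_set_bit() doubles as the masking step.
 * nvmet_clear_aen_bit() unmasks the bit again once the host reads the
 * corresponding log page without Retain Asynchronous Event (RAE, bit 15
 * of CDW10) set.
 */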

void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);

void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);

#define NVMET_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE

/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to it
 * by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

#define NVMET_KAS		10
#define NVMET_DISC_KATO_MS	120000
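
/*
 * The Keep Alive Support (KAS) field is in 100 millisecond units, so
 * NVMET_KAS == 10 advertises a one second keep-alive granularity;
 * NVMET_DISC_KATO_MS is the 120 second default keep-alive timeout for
 * discovery controllers.
 */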

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req);
void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
void nvmet_bdev_execute_zone_append(struct nvmet_req *req);

static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}
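
/*
 * Example: the NLB field is 0's based, so rw.length == 0 on a namespace
 * with 512-byte blocks (blksize_shift == 9) yields (0 + 1) << 9 == 512
 * bytes of data.
 */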

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}

static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}
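
/*
 * Example: dsm.nr is likewise 0's based, so nr == 0 means a single range
 * and a transfer length of sizeof(struct nvme_dsm_range) == 16 bytes.
 */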

static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
{
	return req->sq->ctrl->subsys;
}

static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
{
	return subsys->type != NVME_NQN_NVME;
}

#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
	return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);

/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
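
/*
 * Examples: to0based(0) and to0based(1) both yield 0, while values above
 * 65536 are clamped, so to0based(100000) yields 65535.
 */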

static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}
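
/*
 * sizeof(struct t10_pi_tuple) is 8 bytes, so PI is only reported when the
 * namespace carries exactly one 8-byte DIF tuple of metadata per block.
 */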

static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
{
	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
}

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}
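
/*
 * Both conversions assume blksize_shift >= SECTOR_SHIFT (9), i.e. a block
 * size of at least 512 bytes. Example: with 4K blocks (blksize_shift ==
 * 12), sector 8 maps to LBA 1 and back.
 */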

static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
{
	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}

static inline void nvmet_req_cns_error_complete(struct nvmet_req *req)
{
	pr_debug("unhandled identify cns %d on qid %d\n",
		 req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
{
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}

#endif /* _NVMET_H */