/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

#include <linux/dma-mapping.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/kref.h>
#include <linux/percpu-refcount.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uuid.h>
#include <linux/nvme.h>
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
#include <linux/radix-tree.h>
#include <linux/t10-pi.h>

#define NVMET_DEFAULT_VS		NVME_VS(1, 3, 0)

#define NVMET_ASYNC_EVENTS		4
#define NVMET_ERROR_LOG_SLOTS		128
#define NVMET_NO_ERROR_LOC		((u16)-1)
#define NVMET_DEFAULT_CTRL_MODEL	"Linux"
#define NVMET_MN_MAX_SIZE		40
#define NVMET_SN_MAX_SIZE		20

/*
 * Supported optional AENs:
 */
#define NVMET_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE)
#define NVMET_DISC_AEN_CFG_OPTIONAL \
	(NVME_AEN_CFG_DISC_CHANGE)

/*
 * Plus mandatory SMART AENs (we'll never send them, but allow enabling them):
 */
#define NVMET_AEN_CFG_ALL \
	(NVME_SMART_CRIT_SPARE | NVME_SMART_CRIT_TEMPERATURE | \
	 NVME_SMART_CRIT_RELIABILITY | NVME_SMART_CRIT_MEDIA | \
	 NVME_SMART_CRIT_VOLATILE_MEMORY | NVMET_AEN_CFG_OPTIONAL)

/*
 * Helper macros for when the NVMe error is NVME_SC_CONNECT_INVALID_PARAM.
 * The 16-bit shift sets the IATTR bit to 1, which means the offending
 * offset starts in the data section of connect().
 */
#define IPO_IATTR_CONNECT_DATA(x)	\
	(cpu_to_le32((1 << 16) | (offsetof(struct nvmf_connect_data, x))))
#define IPO_IATTR_CONNECT_SQE(x)	\
	(cpu_to_le32(offsetof(struct nvmf_connect_command, x)))
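/*
 * Editorial example (not part of the original header): when a connect
 * request carries a bad field, a handler would typically combine the
 * NVME_SC_CONNECT_INVALID_PARAM status with one of the macros above so
 * the host can locate the offending byte offset.  A sketch, assuming a
 * connect-data validation failure on the cntlid field:
 *
 *	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
 *	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
 */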
struct nvmet_ns {
	struct percpu_ref	ref;
	struct block_device	*bdev;
	struct file		*file;
	bool			readonly;
	u32			nsid;
	u32			blksize_shift;
	loff_t			size;
	u8			nguid[16];
	uuid_t			uuid;
	u32			anagrpid;

	bool			buffered_io;
	bool			enabled;
	struct nvmet_subsys	*subsys;
	const char		*device_path;

	struct config_group	device_group;
	struct config_group	group;

	struct completion	disable_done;
	mempool_t		*bvec_pool;

	int			use_p2pmem;
	struct pci_dev		*p2p_dev;
	int			pi_type;
	int			metadata_size;
	u8			csi;
};

static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ns, group);
}

static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
{
	return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
}

struct nvmet_cq {
	u16			qid;
	u16			size;
};

struct nvmet_sq {
	struct nvmet_ctrl	*ctrl;
	struct percpu_ref	ref;
	u16			qid;
	u16			size;
	u32			sqhd;
	bool			sqhd_disabled;
#ifdef CONFIG_NVME_TARGET_AUTH
	struct delayed_work	auth_expired_work;
	bool			authenticated;
	u16			dhchap_tid;
	u16			dhchap_status;
	int			dhchap_step;
	u8			*dhchap_c1;
	u8			*dhchap_c2;
	u32			dhchap_s1;
	u32			dhchap_s2;
	u8			*dhchap_skey;
	int			dhchap_skey_len;
#endif
	struct completion	free_done;
	struct completion	confirm_done;
};

struct nvmet_ana_group {
	struct config_group	group;
	struct nvmet_port	*port;
	u32			grpid;
};

static inline struct nvmet_ana_group *to_ana_group(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_ana_group,
			group);
}

/**
 * struct nvmet_port -	Common structure to keep port
 *			information for the target.
 * @entry:		Entry into referrals or transport list.
 * @disc_addr:		Address information is stored in a format defined
 *			for a discovery log page entry.
 * @group:		ConfigFS group for this element's folder.
 * @priv:		Private data for the transport.
 */
struct nvmet_port {
	struct list_head		entry;
	struct nvmf_disc_rsp_page_entry	disc_addr;
	struct config_group		group;
	struct config_group		subsys_group;
	struct list_head		subsystems;
	struct config_group		referrals_group;
	struct list_head		referrals;
	struct list_head		global_entry;
	struct config_group		ana_groups_group;
	struct nvmet_ana_group		ana_default_group;
	enum nvme_ana_state		*ana_state;
	void				*priv;
	bool				enabled;
	int				inline_data_size;
	const struct nvmet_fabrics_ops	*tr_ops;
	bool				pi_enable;
};

static inline struct nvmet_port *to_nvmet_port(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			group);
}

static inline struct nvmet_port *ana_groups_to_port(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_port,
			ana_groups_group);
}

struct nvmet_ctrl {
	struct nvmet_subsys	*subsys;
	struct nvmet_sq		**sqs;

	bool			reset_tbkas;

	struct mutex		lock;
	u64			cap;
	u32			cc;
	u32			csts;

	uuid_t			hostid;
	u16			cntlid;
	u32			kato;

	struct nvmet_port	*port;

	u32			aen_enabled;
	unsigned long		aen_masked;
	struct nvmet_req	*async_event_cmds[NVMET_ASYNC_EVENTS];
	unsigned int		nr_async_event_cmds;
	struct list_head	async_events;
	struct work_struct	async_event_work;

	struct list_head	subsys_entry;
	struct kref		ref;
	struct delayed_work	ka_work;
	struct work_struct	fatal_err_work;

	const struct nvmet_fabrics_ops *ops;

	__le32			*changed_ns_list;
	u32			nr_changed_ns;

	char			subsysnqn[NVMF_NQN_FIELD_LEN];
	char			hostnqn[NVMF_NQN_FIELD_LEN];

	struct device		*p2p_client;
	struct radix_tree_root	p2p_ns_map;

	spinlock_t		error_lock;
	u64			err_counter;
	struct nvme_error_slot	slots[NVMET_ERROR_LOG_SLOTS];
	bool			pi_support;
#ifdef CONFIG_NVME_TARGET_AUTH
	struct nvme_dhchap_key	*host_key;
	struct nvme_dhchap_key	*ctrl_key;
	u8			shash_id;
	struct crypto_kpp	*dh_tfm;
	u8			dh_gid;
	u8			*dh_key;
	size_t			dh_keysize;
#endif
};

struct nvmet_subsys {
	enum nvme_subsys_type	type;

	struct mutex		lock;
	struct kref		ref;

	struct xarray		namespaces;
	unsigned int		nr_namespaces;
	u32			max_nsid;
	u16			cntlid_min;
	u16			cntlid_max;

	struct list_head	ctrls;

	struct list_head	hosts;
	bool			allow_any_host;

	u16			max_qid;

	u64			ver;
	char			serial[NVMET_SN_MAX_SIZE];
	bool			subsys_discovered;
	char			*subsysnqn;
	bool			pi_support;

	struct config_group	group;

	struct config_group	namespaces_group;
	struct config_group	allowed_hosts_group;

	char			*model_number;
	u32			ieee_oui;

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
	unsigned int		admin_timeout;
	unsigned int		io_timeout;
	unsigned int		clear_ids;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

#ifdef CONFIG_BLK_DEV_ZONED
	u8			zasl;
#endif /* CONFIG_BLK_DEV_ZONED */
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
		struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys,
			namespaces_group);
}

struct nvmet_host {
	struct config_group	group;
	u8			*dhchap_secret;
	u8			*dhchap_ctrl_secret;
	u8			dhchap_key_hash;
	u8			dhchap_ctrl_key_hash;
	u8			dhchap_hash_id;
	u8			dhchap_dhgroup_id;
};

static inline struct nvmet_host *to_host(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_host, group);
}

static inline char *nvmet_host_name(struct nvmet_host *host)
{
	return config_item_name(&host->group.cg_item);
}

struct nvmet_host_link {
	struct list_head	entry;
	struct nvmet_host	*host;
};

struct nvmet_subsys_link {
	struct list_head	entry;
	struct nvmet_subsys	*subsys;
};

struct nvmet_req;
struct nvmet_fabrics_ops {
	struct module *owner;
	unsigned int type;
	unsigned int msdbd;
	unsigned int flags;
#define NVMF_KEYED_SGLS			(1 << 0)
#define NVMF_METADATA_SUPPORTED		(1 << 1)
	void (*queue_response)(struct nvmet_req *req);
	int (*add_port)(struct nvmet_port *port);
	void (*remove_port)(struct nvmet_port *port);
	void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
	void (*disc_traddr)(struct nvmet_req *req,
			struct nvmet_port *port, char *traddr);
	u16 (*install_queue)(struct nvmet_sq *nvme_sq);
	void (*discovery_chg)(struct nvmet_port *port);
	u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
	u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};
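/*
 * Editorial sketch (hypothetical "demo" transport, not part of the
 * original header): a fabrics driver instantiates nvmet_fabrics_ops
 * and registers it with nvmet_register_transport(), declared further
 * below.  All nvmet_demo_* symbols are placeholders:
 *
 *	static const struct nvmet_fabrics_ops nvmet_demo_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_TCP,
 *		.msdbd		= 1,
 *		.queue_response	= nvmet_demo_queue_response,
 *		.add_port	= nvmet_demo_add_port,
 *		.remove_port	= nvmet_demo_remove_port,
 *		.delete_ctrl	= nvmet_demo_delete_ctrl,
 *	};
 *
 *	err = nvmet_register_transport(&nvmet_demo_ops);
 */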
#define NVMET_MAX_INLINE_BIOVEC		8
#define NVMET_MAX_INLINE_DATA_LEN	(NVMET_MAX_INLINE_BIOVEC * PAGE_SIZE)

struct nvmet_req {
	struct nvme_command	*cmd;
	struct nvme_completion	*cqe;
	struct nvmet_sq		*sq;
	struct nvmet_cq		*cq;
	struct nvmet_ns		*ns;
	struct scatterlist	*sg;
	struct scatterlist	*metadata_sg;
	struct bio_vec		inline_bvec[NVMET_MAX_INLINE_BIOVEC];
	union {
		struct {
			struct bio	inline_bio;
		} b;
		struct {
			bool		mpool_alloc;
			struct kiocb	iocb;
			struct bio_vec	*bvec;
			struct work_struct work;
		} f;
		struct {
			struct bio	inline_bio;
			struct request	*rq;
			struct work_struct work;
			bool		use_workqueue;
		} p;
#ifdef CONFIG_BLK_DEV_ZONED
		struct {
			struct bio	inline_bio;
			struct work_struct zmgmt_work;
		} z;
#endif /* CONFIG_BLK_DEV_ZONED */
	};
	int			sg_cnt;
	int			metadata_sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;
	size_t			metadata_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
};

#define NVMET_MAX_MPOOL_BVEC		16
extern struct kmem_cache *nvmet_bvec_cache;
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side.
 */
static inline enum dma_data_direction
nvmet_data_dir(struct nvmet_req *req)
{
	return nvme_is_write(req->cmd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
}

struct nvmet_async_event {
	struct list_head	entry;
	u8			event_type;
	u8			event_info;
	u8			log_page;
};

static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10) & (1 << 15);

	if (!rae)
		clear_bit(bn, &req->sq->ctrl->aen_masked);
}

static inline bool nvmet_aen_bit_disabled(struct nvmet_ctrl *ctrl, u32 bn)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & (1 << bn)))
		return true;
	return test_and_set_bit(bn, &ctrl->aen_masked);
}
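/*
 * Editorial note on the intended flow (an assumption drawn from the
 * helper semantics above, not code copied from this header): before
 * queueing an AEN the core checks nvmet_aen_bit_disabled(), which masks
 * the bit as a side effect so the same event is not re-reported; a
 * later Get Log Page with RAE cleared unmasks it again via
 * nvmet_clear_aen_bit().  Sketch:
 *
 *	if (!nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
 *		nvmet_add_async_event(ctrl, NVME_AER_NOTICE,
 *				NVME_AER_NOTICE_NS_CHANGED,
 *				NVME_LOG_CHANGED_NS);
 *
 *	(and later, in the Changed Namespace List log handler:)
 *	nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
 */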
void nvmet_get_feat_kato(struct nvmet_req *req);
void nvmet_get_feat_async_event(struct nvmet_req *req);
u16 nvmet_set_feat_kato(struct nvmet_req *req);
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_io_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
int nvmet_req_alloc_sgls(struct nvmet_req *req);
void nvmet_req_free_sgls(struct nvmet_req *req);

void nvmet_execute_set_features(struct nvmet_req *req);
void nvmet_execute_get_features(struct nvmet_req *req);
void nvmet_execute_keep_alive(struct nvmet_req *req);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
		u16 size);
void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
		u16 size);
void nvmet_sq_destroy(struct nvmet_sq *sq);
int nvmet_sq_init(struct nvmet_sq *sq);

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl);

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new);
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp);
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
				       const char *hostnqn, u16 cntlid,
				       struct nvmet_req *req);
void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
u16 nvmet_check_ctrl_status(struct nvmet_req *req);

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);

u16 nvmet_req_find_ns(struct nvmet_req *req);
void nvmet_put_namespace(struct nvmet_ns *ns);
int nvmet_ns_enable(struct nvmet_ns *ns);
void nvmet_ns_disable(struct nvmet_ns *ns);
struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_ns_free(struct nvmet_ns *ns);

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port);
void nvmet_port_send_ana_event(struct nvmet_port *port);

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops);
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops);

void nvmet_port_del_ctrls(struct nvmet_port *port,
			  struct nvmet_subsys *subsys);

int nvmet_enable_port(struct nvmet_port *port);
void nvmet_disable_port(struct nvmet_port *port);

void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port);
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len);
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
		size_t len);
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);

u32 nvmet_get_log_page_len(struct nvme_command *cmd);
u64 nvmet_get_log_page_offset(struct nvme_command *cmd);

extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
		struct nvmet_subsys *subsys);
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
		struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page);

#define NVMET_QUEUE_SIZE	1024
#define NVMET_NR_QUEUES		128
#define NVMET_MAX_CMD		NVMET_QUEUE_SIZE

/*
 * Nice round number that makes a list of nsids fit into a page.
 * Should become tunable at some point in the future.
 */
#define NVMET_MAX_NAMESPACES	1024

/*
 * 0 is not a valid ANA group ID, so we start numbering at 1.
 *
 * ANA Group 1 exists without manual intervention, has namespaces assigned to
 * it by default, and is available in an optimized state through all ports.
 */
#define NVMET_MAX_ANAGRPS	128
#define NVMET_DEFAULT_ANA_GRPID	1

#define NVMET_KAS		10
#define NVMET_DISC_KATO_MS	120000

int __init nvmet_init_configfs(void);
void __exit nvmet_exit_configfs(void);

int __init nvmet_init_discovery(void);
void nvmet_exit_discovery(void);

extern struct nvmet_subsys *nvmet_disc_subsys;
extern struct rw_semaphore nvmet_config_sem;

extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
extern u64 nvmet_ana_chgcnt;
extern struct rw_semaphore nvmet_ana_sem;

bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn);

int nvmet_bdev_ns_enable(struct nvmet_ns *ns);
int nvmet_file_ns_enable(struct nvmet_ns *ns);
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
void nvmet_file_ns_revalidate(struct nvmet_ns *ns);
bool nvmet_ns_revalidate(struct nvmet_ns *ns);
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);

bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req);
void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
void nvmet_bdev_execute_zone_append(struct nvmet_req *req);

static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return 0;
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) *
			req->ns->metadata_size;
}
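/*
 * Worked example (editorial note, not from the original source): the
 * NLB field in an I/O command is 0's based, so rw.length == 7 means
 * eight logical blocks.  For a namespace formatted with 512-byte
 * blocks (blksize_shift == 9):
 *
 *	nvmet_rw_data_len() == (7 + 1) << 9 == 4096 bytes
 *
 * and with 8 bytes of metadata per block (metadata_size == 8):
 *
 *	nvmet_rw_metadata_len() == (7 + 1) * 8 == 64 bytes
 */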
static inline u32 nvmet_dsm_len(struct nvmet_req *req)
{
	return (le32_to_cpu(req->cmd->dsm.nr) + 1) *
		sizeof(struct nvme_dsm_range);
}

static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
{
	return req->sq->ctrl->subsys;
}

static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
{
	return subsys->type != NVME_NQN_NVME;
}

#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return subsys->passthru_ctrl;
}
#else /* CONFIG_NVME_TARGET_PASSTHRU */
static inline void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys)
{
}
static inline void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
{
}
static inline u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
	return 0;
}
static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
	return false;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */

static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
	return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}

void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);

u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);

/* Convert a 32-bit number to a 16-bit 0's based number */
static inline __le16 to0based(u32 a)
{
	return cpu_to_le16(max(1U, min(1U << 16, a)) - 1);
}
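/*
 * Worked example (editorial note): to0based() clamps its argument into
 * the valid range of a 16-bit 0's based value before converting:
 *
 *	to0based(1)      == 0
 *	to0based(65536)  == 65535
 *	to0based(0)      == 0      (clamped up to 1 first)
 *	to0based(100000) == 65535  (clamped down to 65536 first)
 */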
static inline bool nvmet_ns_has_pi(struct nvmet_ns *ns)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return false;
	return ns->pi_type && ns->metadata_size == sizeof(struct t10_pi_tuple);
}

static inline __le64 nvmet_sect_to_lba(struct nvmet_ns *ns, sector_t sect)
{
	return cpu_to_le64(sect >> (ns->blksize_shift - SECTOR_SHIFT));
}

static inline sector_t nvmet_lba_to_sect(struct nvmet_ns *ns, __le64 lba)
{
	return le64_to_cpu(lba) << (ns->blksize_shift - SECTOR_SHIFT);
}
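/*
 * Worked example (editorial note): for a namespace formatted with
 * 4096-byte blocks (blksize_shift == 12) and 512-byte kernel sectors
 * (SECTOR_SHIFT == 9), the shift is 12 - 9 = 3, so:
 *
 *	nvmet_sect_to_lba(ns, 8) == 1
 *	nvmet_lba_to_sect(ns, 1) == 8
 */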
static inline bool nvmet_use_inline_bvec(struct nvmet_req *req)
{
	return req->transfer_len <= NVMET_MAX_INLINE_DATA_LEN &&
	       req->sg_cnt <= NVMET_MAX_INLINE_BIOVEC;
}

static inline void nvmet_req_cns_error_complete(struct nvmet_req *req)
{
	pr_debug("unhandled identify cns %d on qid %d\n",
	       req->cmd->identify.cns, req->sq->qid);
	req->error_loc = offsetof(struct nvme_identify, cns);
	nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
}

static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio)
{
	if (bio != &req->b.inline_bio)
		bio_put(bio);
}

#ifdef CONFIG_NVME_TARGET_AUTH
void nvmet_execute_auth_send(struct nvmet_req *req);
void nvmet_execute_auth_receive(struct nvmet_req *req);
int nvmet_auth_set_key(struct nvmet_host *host, const char *secret,
		       bool set_ctrl);
int nvmet_auth_set_host_hash(struct nvmet_host *host, const char *hash);
int nvmet_setup_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_init(struct nvmet_sq *sq);
void nvmet_destroy_auth(struct nvmet_ctrl *ctrl);
void nvmet_auth_sq_free(struct nvmet_sq *sq);
int nvmet_setup_dhgroup(struct nvmet_ctrl *ctrl, u8 dhgroup_id);
bool nvmet_check_auth_status(struct nvmet_req *req);
int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
			 unsigned int hash_len);
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return ctrl->host_key != NULL;
}
int nvmet_auth_ctrl_exponential(struct nvmet_req *req,
				u8 *buf, int buf_size);
int nvmet_auth_ctrl_sesskey(struct nvmet_req *req,
			    u8 *buf, int buf_size);
#else
static inline int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
{
	return 0;
}
static inline void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
}
static inline void nvmet_destroy_auth(struct nvmet_ctrl *ctrl) {}
static inline void nvmet_auth_sq_free(struct nvmet_sq *sq) {}
static inline bool nvmet_check_auth_status(struct nvmet_req *req)
{
	return true;
}
static inline bool nvmet_has_auth(struct nvmet_ctrl *ctrl)
{
	return false;
}
static inline const char *nvmet_dhchap_dhgroup_name(u8 dhgid) { return NULL; }
#endif

#endif /* _NVMET_H */