--- nvmet.h (ab5d0b38c0475d6ff59f1a6ccf7c668b9ec2e0a4)
+++ nvmet.h (aaf2e048af2704da5869f27b508b288f36d5c7b7)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */

#ifndef _NVMET_H
#define _NVMET_H

--- 236 unchanged lines hidden ---

#ifdef CONFIG_NVME_TARGET_PASSTHRU
	struct nvme_ctrl	*passthru_ctrl;
	char			*passthru_ctrl_path;
	struct config_group	passthru_group;
	unsigned int		admin_timeout;
	unsigned int		io_timeout;
#endif /* CONFIG_NVME_TARGET_PASSTHRU */
+
+#ifdef CONFIG_BLK_DEV_ZONED
+	u8			zasl;
+#endif /* CONFIG_BLK_DEV_ZONED */
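The new zasl field in struct nvmet_subsys holds the Zone Append Size Limit that the target advertises for the zoned command set. As a rough illustration of how such a value can be derived, the sketch below (not taken from this header; the demo_* names and the 4K MPSMIN assumption are hypothetical) rescales the backing queue's max zone-append size into the power-of-two, memory-page-sized units that ZASL uses:

#include <linux/blkdev.h>
#include <linux/log2.h>
#include "nvmet.h"

/* Hypothetical: assumes MPSMIN of 4K, i.e. a minimum page shift of 12. */
#define DEMO_MPSMIN_SHIFT	12

/*
 * ZASL is reported as a power of two in units of the minimum memory page
 * size, while the block layer reports the limit in 512-byte sectors, so
 * rescale and take the log2.
 */
static inline u8 demo_zasl(unsigned int zone_append_sects)
{
	return ilog2(zone_append_sects >> (DEMO_MPSMIN_SHIFT - 9));
}

/* Possible use while enabling a zoned namespace: keep the smallest limit. */
static void demo_update_zasl(struct nvmet_ns *ns)
{
	struct request_queue *q = bdev_get_queue(ns->bdev);
	u8 zasl = demo_zasl(queue_max_zone_append_sectors(q));

	if (!ns->subsys->zasl || ns->subsys->zasl > zasl)
		ns->subsys->zasl = zasl;
}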
};

static inline struct nvmet_subsys *to_subsys(struct config_item *item)
{
	return container_of(to_config_group(item), struct nvmet_subsys, group);
}

static inline struct nvmet_subsys *namespaces_to_subsys(
--- 69 unchanged lines hidden ---
			struct work_struct	work;
		} f;
		struct {
			struct bio		inline_bio;
			struct request		*rq;
			struct work_struct	work;
			bool			use_workqueue;
		} p;
+#ifdef CONFIG_BLK_DEV_ZONED
+		struct {
+			struct bio		inline_bio;
+			struct work_struct	zmgmt_work;
+		} z;
+#endif /* CONFIG_BLK_DEV_ZONED */
	};
	int			sg_cnt;
	int			metadata_sg_cnt;
	/* data length as parsed from the SGL descriptor: */
	size_t			transfer_len;
	size_t			metadata_len;

	struct nvmet_port	*port;

	void (*execute)(struct nvmet_req *req);
	const struct nvmet_fabrics_ops *ops;

	struct pci_dev		*p2p_dev;
	struct device		*p2p_client;
	u16			error_loc;
	u64			error_slba;
};

extern struct workqueue_struct *buffered_io_wq;
+extern struct workqueue_struct *zbd_wq;
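The zbd_wq workqueue pairs with the new z.zmgmt_work member of struct nvmet_req: zone management can be pushed out of the fabrics submission path onto a dedicated worker. A minimal sketch of that pattern, with hypothetical demo_* names (the real handlers live in the zoned backend source, not in this header):

#include <linux/workqueue.h>
#include "nvmet.h"

/* Hypothetical worker: performs the (possibly blocking) zone operation. */
static void demo_zmgmt_send_work(struct work_struct *w)
{
	struct nvmet_req *req = container_of(w, struct nvmet_req, z.zmgmt_work);
	u16 status = NVME_SC_SUCCESS;

	/* ... call blkdev_zone_mgmt() on req->ns->bdev and map errors here ... */

	nvmet_req_complete(req, status);
}

/* Hypothetical execute hook: defer the command to the zoned workqueue. */
static void demo_execute_zone_mgmt_send(struct nvmet_req *req)
{
	INIT_WORK(&req->z.zmgmt_work, demo_zmgmt_send_work);
	queue_work(zbd_wq, &req->z.zmgmt_work);
}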

static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
	req->cqe->result.u32 = cpu_to_le32(result);
}

/*
 * NVMe command writes actually are DMA reads for us on the target side.
--- 33 unchanged lines hidden ---
void nvmet_execute_async_event(struct nvmet_req *req);
void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);

u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_file_parse_io_cmd(struct nvmet_req *req);
+u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req);
u16 nvmet_parse_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req);
u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req);

bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
void nvmet_req_uninit(struct nvmet_req *req);
bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len);
--- 111 unchanged lines hidden ---
void nvmet_bdev_ns_disable(struct nvmet_ns *ns);
void nvmet_file_ns_disable(struct nvmet_ns *ns);
u16 nvmet_bdev_flush(struct nvmet_req *req);
u16 nvmet_file_flush(struct nvmet_req *req);
void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid);
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns);
int nvmet_file_ns_revalidate(struct nvmet_ns *ns);
void nvmet_ns_revalidate(struct nvmet_ns *ns);
+u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts);

+bool nvmet_bdev_zns_enable(struct nvmet_ns *ns);
+void nvmet_execute_identify_cns_cs_ctrl(struct nvmet_req *req);
+void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req);
+void nvmet_bdev_execute_zone_mgmt_recv(struct nvmet_req *req);
+void nvmet_bdev_execute_zone_mgmt_send(struct nvmet_req *req);
+void nvmet_bdev_execute_zone_append(struct nvmet_req *req);
+
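nvmet_bdev_zns_parse_io_cmd() is the zoned counterpart of nvmet_bdev_parse_io_cmd(): it presumably installs an execute handler for the three zoned opcodes and hands everything else to the regular block-device parser. A sketch of that shape (illustrative, not the verbatim implementation):

#include "nvmet.h"	/* pulls in linux/nvme.h for the opcodes and status codes */

u16 nvmet_bdev_zns_parse_io_cmd(struct nvmet_req *req)
{
	switch (req->cmd->common.opcode) {
	case nvme_cmd_zone_append:
		req->execute = nvmet_bdev_execute_zone_append;
		return NVME_SC_SUCCESS;
	case nvme_cmd_zone_mgmt_recv:
		req->execute = nvmet_bdev_execute_zone_mgmt_recv;
		return NVME_SC_SUCCESS;
	case nvme_cmd_zone_mgmt_send:
		req->execute = nvmet_bdev_execute_zone_mgmt_send;
		return NVME_SC_SUCCESS;
	default:
		/* Not a zone-specific opcode: use the normal bdev I/O parser. */
		return nvmet_bdev_parse_io_cmd(req);
	}
}

Falling back to nvmet_bdev_parse_io_cmd() keeps read/write/flush handling shared between conventional and zoned namespaces; only the zone-specific opcodes need dedicated handlers.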
static inline u32 nvmet_rw_data_len(struct nvmet_req *req)
{
	return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
			req->ns->blksize_shift;
}

static inline u32 nvmet_rw_metadata_len(struct nvmet_req *req)
{
--- 101 unchanged lines hidden ---