/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5
#define NVME_KATO_GRACE		10

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns 0's on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Supports the LightNVM command set if indicated in vs[1].
	 */
	NVME_QUIRK_LIGHTNVM			= (1 << 6),

	/*
	 * Set MEDIUM priority on SQ creation
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),
};
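/*
 * Illustrative sketch (not part of this header): quirks are consumed by the
 * transport drivers, which test the bit in ctrl->quirks at the relevant
 * point.  For example, the delay quirk above is typically honored before
 * polling for readiness, roughly like:
 *
 *	if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
 *		msleep(NVME_QUIRK_DELAY_AMOUNT);
 *	// ...then poll CSTS.RDY via ctrl->ops->reg_read32()...
 */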
/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			retries;
	u8			flags;
	u16			status;
	struct nvme_ctrl	*ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED		= (1 << 0),
	NVME_REQ_USERCMD		= (1 << 1),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300
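/*
 * Illustrative sketch (not part of this header): because struct nvme_request
 * must be the first member of every driver's request-private data, the core
 * can reach it from any request via nvme_req().  Assuming a hypothetical
 * "struct foo_request" transport type:
 *
 *	struct foo_request {
 *		struct nvme_request	req;	// must be first
 *		// ...transport specific fields...
 *	};
 *
 *	nvme_req(rq)->retries++;	// valid for any NVMe request
 */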
enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_ADMIN_ONLY,    /* Only admin queue live */
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};
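/*
 * Illustrative sketch (not part of this header): controller state changes go
 * through nvme_change_ctrl_state(), which rejects invalid transitions, so a
 * reset handler conceptually does:
 *
 *	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
 *		return;		// someone else owns the transition
 *	// ...tear down and re-establish the queues...
 *	nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE);
 */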
struct nvme_ctrl {
	enum nvme_ctrl_state state;
	bool identified;
	spinlock_t lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct device *dev;
	int instance;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u32 ctrl_config;
	u16 mtfa;
	u32 queue_count;

	u64 cap;
	u32 page_size;
	u32 max_hw_sectors;
	u32 max_segments;
	u16 oncs;
	u16 oacs;
	u16 nssa;
	u16 nr_streams;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u32 oaes;
	u32 aen_result;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct nvme_command ka_cmd;
	struct work_struct fw_act_work;
	unsigned long events;

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;
	u16 hmmaxd;

	/* Fabrics only */
	u16 sqsize;
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	struct nvmf_ctrl_options *opts;
};

struct nvme_subsystem {
	int			instance;
	struct device		dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref		ref;
	struct list_head	entry;
	struct mutex		lock;
	struct list_head	ctrls;
	struct list_head	nsheads;
	char			subnqn[NVMF_NQN_SIZE];
	char			serial[20];
	char			model[40];
	char			firmware_rev[8];
	u8			cmic;
	u16			vendor_id;
	struct ida		ns_ida;
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8	eui64[8];
	u8	nguid[16];
	uuid_t	uuid;
};
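/*
 * Illustrative sketch (not part of this header): two namespaces belong
 * together when all three identifiers in their nvme_ns_ids match.  A minimal
 * comparison, with a hypothetical helper name:
 *
 *	static bool foo_ns_ids_equal(struct nvme_ns_ids *a,
 *			struct nvme_ns_ids *b)
 *	{
 *		return uuid_equal(&a->uuid, &b->uuid) &&
 *			memcmp(a->nguid, b->nguid, sizeof(a->nguid)) == 0 &&
 *			memcmp(a->eui64, b->eui64, sizeof(a->eui64)) == 0;
 *	}
 */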
/*
 * Anchor structure for namespaces.  There is one for each namespace in a
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it.  For private namespaces
 * there is a 1:1 relation to our namespace structures, that is ->list
 * only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
#ifdef CONFIG_NVME_MULTIPATH
	struct gendisk		*disk;
	struct nvme_ns __rcu	*current_path;
	struct bio_list		requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;
#endif
	struct list_head	list;
	struct srcu_struct	srcu;
	struct nvme_subsystem	*subsys;
	unsigned		ns_id;
	struct nvme_ns_ids	ids;
	struct list_head	entry;
	struct kref		ref;
	int			instance;
};

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
struct nvme_fault_inject {
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
};
#endif

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
	struct list_head siblings;
	struct nvm_dev *ndev;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 sgs;
	u32 sws;
	bool ext;
	u8 pi_type;
	unsigned long flags;
#define NVME_NS_REMOVING 0
#define NVME_NS_DEAD     1
	u16 noiob;

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct nvme_fault_inject fault_inject;
#endif

};

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
};
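/*
 * Illustrative sketch (not part of this header): a transport fills in an
 * nvme_ctrl_ops table and hands it to nvme_init_ctrl().  A minimal
 * fabrics-style table with hypothetical foo_* callbacks might look like:
 *
 *	static const struct nvme_ctrl_ops foo_ctrl_ops = {
 *		.name			= "foo",
 *		.module			= THIS_MODULE,
 *		.flags			= NVME_F_FABRICS,
 *		.reg_read32		= nvmf_reg_read32,
 *		.reg_write32		= nvmf_reg_write32,
 *		.reg_read64		= nvmf_reg_read64,
 *		.free_ctrl		= foo_free_ctrl,
 *		.submit_async_event	= foo_submit_async_event,
 *		.delete_ctrl		= foo_delete_ctrl,
 *		.get_address		= nvmf_get_address,
 *	};
 */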
#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_ns *ns);
void nvme_fault_inject_fini(struct nvme_ns *ns);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_ns *ns) {}
static inline void nvme_fault_inject_fini(struct nvme_ns *ns) {}
static inline void nvme_should_fail(struct request *req) {}
#endif

static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}

static inline void nvme_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		kfree(page_address(req->special_vec.bv_page) +
		      req->special_vec.bv_offset);
	}
}

static inline void nvme_end_request(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);

	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	blk_mq_complete_request(req);
}

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}
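/*
 * Illustrative sketch (not part of this header): a transport completion
 * handler passes the CQE status and result straight to nvme_end_request(),
 * e.g. for a "cqe" just read off a completion queue:
 *
 *	struct request *rq = blk_mq_tag_to_rq(tags, cqe->command_id);
 *
 *	nvme_end_request(rq, cqe->status, cqe->result);
 */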
void nvme_complete_rq(struct request *req);
void nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags);
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);

int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
		u8 log_page, void *log, size_t size, u64 offset);

extern const struct attribute_group nvme_ns_id_attr_group;
extern const struct block_device_operations nvme_ns_head_ops;
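/*
 * Illustrative sketch (not part of this header): issuing an admin command
 * synchronously through nvme_submit_sync_cmd(), roughly how an Identify
 * Controller page is read; "id" is a hypothetical buffer of
 * sizeof(struct nvme_id_ctrl) bytes, error handling trimmed:
 *
 *	struct nvme_command c = { };
 *	int error;
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = NVME_ID_CNS_CTRL;
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id,
 *			sizeof(struct nvme_id_ctrl));
 */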
#ifdef CONFIG_NVME_MULTIPATH
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
bool nvme_req_needs_failover(struct request *req, blk_status_t error);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns_head *head);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);

static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (head && ns == rcu_access_pointer(head->current_path))
		rcu_assign_pointer(head->current_path, NULL);
}
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);

static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (head->disk && list_empty(&head->list))
		kblockd_schedule_work(&head->requeue_work);
}
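/*
 * Illustrative sketch (not part of this header): the mpath node looks up a
 * live path under SRCU before dispatching a bio, roughly:
 *
 *	srcu_idx = srcu_read_lock(&head->srcu);
 *	ns = nvme_find_path(head);
 *	if (ns)
 *		// ...remap and submit the bio to ns->queue...
 *	srcu_read_unlock(&head->srcu, srcu_idx);
 */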
#else
/*
 * Without the multipath code enabled, multiple controllers per subsystem are
 * visible as devices and thus we cannot use the subsystem instance.
 */
static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
				      struct nvme_ctrl *ctrl, int *flags)
{
	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
}

static inline void nvme_failover_req(struct request *req)
{
}
static inline bool nvme_req_needs_failover(struct request *req,
					   blk_status_t error)
{
	return false;
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

#ifdef CONFIG_NVM
void nvme_nvm_update_nvm_info(struct nvme_ns *ns);
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
int nvme_nvm_register_sysfs(struct nvme_ns *ns);
void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline void nvme_nvm_update_nvm_info(struct nvme_ns *ns) {};
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
				    int node)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
{
	return 0;
}
static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {};
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
				 unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_NVM */
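/*
 * Illustrative sketch (not part of this header): nvme_get_ns_from_dev()
 * below is intended for sysfs attribute callbacks, where the struct device
 * embedded in the namespace gendisk is passed back to us; "foo_show" is a
 * hypothetical attribute:
 *
 *	static ssize_t foo_show(struct device *dev,
 *			struct device_attribute *attr, char *buf)
 *	{
 *		struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
 *
 *		return sprintf(buf, "%u\n", ns->head->ns_id);
 *	}
 */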
static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */