/*
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#ifndef _NVME_H
#define _NVME_H

#include <linux/nvme.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/kref.h>
#include <linux/blk-mq.h>
#include <linux/lightnvm.h>
#include <linux/sed-opal.h>
#include <linux/fault-inject.h>
#include <linux/rcupdate.h>

extern unsigned int nvme_io_timeout;
#define NVME_IO_TIMEOUT	(nvme_io_timeout * HZ)

extern unsigned int admin_timeout;
#define ADMIN_TIMEOUT	(admin_timeout * HZ)

#define NVME_DEFAULT_KATO	5
#define NVME_KATO_GRACE		10

extern struct workqueue_struct *nvme_wq;
extern struct workqueue_struct *nvme_reset_wq;
extern struct workqueue_struct *nvme_delete_wq;

enum {
	NVME_NS_LBA		= 0,
	NVME_NS_LIGHTNVM	= 1,
};

/*
 * List of workarounds for devices that require behavior not specified in
 * the standard.
 */
enum nvme_quirks {
	/*
	 * Prefers I/O aligned to a stripe size specified in a vendor
	 * specific Identify field.
	 */
	NVME_QUIRK_STRIPE_SIZE			= (1 << 0),

	/*
	 * The controller doesn't handle Identify values other than 0 or 1
	 * correctly.
	 */
	NVME_QUIRK_IDENTIFY_CNS			= (1 << 1),

	/*
	 * The controller deterministically returns 0's on reads to
	 * logical blocks that deallocate was called on.
	 */
	NVME_QUIRK_DEALLOCATE_ZEROES		= (1 << 2),

	/*
	 * The controller needs a delay before it starts checking the device
	 * readiness, which is done by reading the NVME_CSTS_RDY bit.
	 */
	NVME_QUIRK_DELAY_BEFORE_CHK_RDY		= (1 << 3),

	/*
	 * APST should not be used.
	 */
	NVME_QUIRK_NO_APST			= (1 << 4),

	/*
	 * The deepest sleep state should not be used.
	 */
	NVME_QUIRK_NO_DEEPEST_PS		= (1 << 5),

	/*
	 * Supports the LightNVM command set if indicated in vs[1].
	 */
	NVME_QUIRK_LIGHTNVM			= (1 << 6),

	/*
	 * Set MEDIUM priority on SQ creation
	 */
	NVME_QUIRK_MEDIUM_PRIO_SQ		= (1 << 7),
};

/*
 * Common request structure for NVMe passthrough.  All drivers must have
 * this structure as the first member of their request-private data.
 */
struct nvme_request {
	struct nvme_command	*cmd;
	union nvme_result	result;
	u8			retries;
	u8			flags;
	u16			status;
	struct nvme_ctrl	*ctrl;
};

/*
 * Mark a bio as coming in through the mpath node.
 */
#define REQ_NVME_MPATH		REQ_DRV

enum {
	NVME_REQ_CANCELLED		= (1 << 0),
	NVME_REQ_USERCMD		= (1 << 1),
};

static inline struct nvme_request *nvme_req(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}

static inline u16 nvme_req_qid(struct request *req)
{
	if (!req->rq_disk)
		return 0;
	return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(req)) + 1;
}

/* The below value is the specific amount of delay needed before checking
 * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
 * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
 * found empirically.
 */
#define NVME_QUIRK_DELAY_AMOUNT		2300
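
/*
 * Illustrative note (not part of the original header): quirks are normally
 * attached in a transport's device table, e.g. via the driver_data field of
 * a pci_device_id entry.  A minimal sketch for the device named above:
 *
 *	{ PCI_DEVICE(0x1c58, 0x0003),	HGST adapter
 *		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
 *
 * The transport then passes driver_data as the quirks argument of
 * nvme_init_ctrl(), which stores it in ctrl->quirks.
 */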
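
/*
 * Illustrative note (not part of the original header): because struct
 * nvme_request must be the first member of every transport's per-request
 * PDU, nvme_req() above can simply return blk_mq_rq_to_pdu().  A sketch,
 * with hypothetical names:
 *
 *	struct my_transport_iod {
 *		struct nvme_request	req;	must stay first
 *		int			my_state;
 *	};
 *
 * A request's PDU and its nvme_request then share an address, so
 * nvme_req(rq) is a free cast rather than a lookup.
 */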

enum nvme_ctrl_state {
	NVME_CTRL_NEW,
	NVME_CTRL_LIVE,
	NVME_CTRL_ADMIN_ONLY,    /* Only admin queue live */
	NVME_CTRL_RESETTING,
	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};

struct nvme_ctrl {
	bool comp_seen;
	enum nvme_ctrl_state state;
	bool identified;
	spinlock_t lock;
	const struct nvme_ctrl_ops *ops;
	struct request_queue *admin_q;
	struct request_queue *connect_q;
	struct device *dev;
	int instance;
	int numa_node;
	struct blk_mq_tag_set *tagset;
	struct blk_mq_tag_set *admin_tagset;
	struct list_head namespaces;
	struct rw_semaphore namespaces_rwsem;
	struct device ctrl_device;
	struct device *device;	/* char device */
	struct cdev cdev;
	struct work_struct reset_work;
	struct work_struct delete_work;

	struct nvme_subsystem *subsys;
	struct list_head subsys_entry;

	struct opal_dev *opal_dev;

	char name[12];
	u16 cntlid;

	u32 ctrl_config;
	u16 mtfa;
	u32 queue_count;

	u64 cap;
	u32 page_size;
	u32 max_hw_sectors;
	u32 max_segments;
	u16 oncs;
	u16 oacs;
	u16 nssa;
	u16 nr_streams;
	u32 max_namespaces;
	atomic_t abort_limit;
	u8 vwc;
	u32 vs;
	u32 sgls;
	u16 kas;
	u8 npss;
	u8 apsta;
	u32 oaes;
	u32 aen_result;
	u32 ctratt;
	unsigned int shutdown_timeout;
	unsigned int kato;
	bool subsystem;
	unsigned long quirks;
	struct nvme_id_power_state psd[32];
	struct nvme_effects_log *effects;
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
	struct nvme_command ka_cmd;
	struct work_struct fw_act_work;
	unsigned long events;

#ifdef CONFIG_NVME_MULTIPATH
	/* asymmetric namespace access: */
	u8 anacap;
	u8 anatt;
	u32 anagrpmax;
	u32 nanagrpid;
	struct mutex ana_lock;
	struct nvme_ana_rsp_hdr *ana_log_buf;
	size_t ana_log_size;
	struct timer_list anatt_timer;
	struct work_struct ana_work;
#endif

	/* Power saving configuration */
	u64 ps_max_latency_us;
	bool apst_enabled;

	/* PCIe only: */
	u32 hmpre;
	u32 hmmin;
	u32 hmminds;
	u16 hmmaxd;

	/* Fabrics only */
	u16 sqsize;
	u32 ioccsz;
	u32 iorcsz;
	u16 icdoff;
	u16 maxcmd;
	int nr_reconnects;
	struct nvmf_ctrl_options *opts;
};

struct nvme_subsystem {
	int			instance;
	struct device		dev;
	/*
	 * Because we unregister the device on the last put we need
	 * a separate refcount.
	 */
	struct kref		ref;
	struct list_head	entry;
	struct mutex		lock;
	struct list_head	ctrls;
	struct list_head	nsheads;
	char			subnqn[NVMF_NQN_SIZE];
	char			serial[20];
	char			model[40];
	char			firmware_rev[8];
	u8			cmic;
	u16			vendor_id;
	struct ida		ns_ida;
};

/*
 * Container structure for unique namespace identifiers.
 */
struct nvme_ns_ids {
	u8	eui64[8];
	u8	nguid[16];
	uuid_t	uuid;
};

/*
 * Anchor structure for namespaces.  There is one for each namespace in a
 * NVMe subsystem that any of our controllers can see, and the namespace
 * structure for each controller is chained off of it.  For private
 * namespaces there is a 1:1 relation to our namespace structures, that is
 * ->list only ever has a single entry for private namespaces.
 */
struct nvme_ns_head {
	struct list_head	list;
	struct srcu_struct	srcu;
	struct nvme_subsystem	*subsys;
	unsigned		ns_id;
	struct nvme_ns_ids	ids;
	struct list_head	entry;
	struct kref		ref;
	int			instance;
#ifdef CONFIG_NVME_MULTIPATH
	struct gendisk		*disk;
	struct bio_list		requeue_list;
	spinlock_t		requeue_lock;
	struct work_struct	requeue_work;
	struct mutex		lock;
	struct nvme_ns __rcu	*current_path[];
#endif
};

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
struct nvme_fault_inject {
	struct fault_attr attr;
	struct dentry *parent;
	bool dont_retry;	/* DNR, do not retry */
	u16 status;		/* status code */
};
#endif

struct nvme_ns {
	struct list_head list;

	struct nvme_ctrl *ctrl;
	struct request_queue *queue;
	struct gendisk *disk;
#ifdef CONFIG_NVME_MULTIPATH
	enum nvme_ana_state ana_state;
	u32 ana_grpid;
#endif
	struct list_head siblings;
	struct nvm_dev *ndev;
	struct kref kref;
	struct nvme_ns_head *head;

	int lba_shift;
	u16 ms;
	u16 sgs;
	u32 sws;
	bool ext;
	u8 pi_type;
	unsigned long flags;
#define NVME_NS_REMOVING	0
#define NVME_NS_DEAD		1
#define NVME_NS_ANA_PENDING	2
	u16 noiob;

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
	struct nvme_fault_inject fault_inject;
#endif

};

struct nvme_ctrl_ops {
	const char *name;
	struct module *module;
	unsigned int flags;
#define NVME_F_FABRICS			(1 << 0)
#define NVME_F_METADATA_SUPPORTED	(1 << 1)
#define NVME_F_PCI_P2PDMA		(1 << 2)
	int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
	int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
	int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
	void (*free_ctrl)(struct nvme_ctrl *ctrl);
	void (*submit_async_event)(struct nvme_ctrl *ctrl);
	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
};

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
void nvme_fault_inject_init(struct nvme_ns *ns);
void nvme_fault_inject_fini(struct nvme_ns *ns);
void nvme_should_fail(struct request *req);
#else
static inline void nvme_fault_inject_init(struct nvme_ns *ns) {}
static inline void nvme_fault_inject_fini(struct nvme_ns *ns) {}
static inline void nvme_should_fail(struct request *req) {}
#endif

static inline bool nvme_ctrl_ready(struct nvme_ctrl *ctrl)
{
	u32 val = 0;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
		return false;
	return val & NVME_CSTS_RDY;
}

static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
{
	if (!ctrl->subsystem)
		return -ENOTTY;
	/* write the ASCII string "NVMe" to the subsystem reset register */
	return ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
}

static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	/* convert a 512-byte sector number to a namespace LBA */
	return (sector >> (ns->lba_shift - 9));
}

static inline void nvme_end_request(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);

	/* shift out the phase tag bit (bit 0) of the CQE status field */
	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	blk_mq_complete_request(req);
}
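
/*
 * Illustrative note (not part of the original header): each transport
 * supplies one nvme_ctrl_ops table (defined above) when it calls
 * nvme_init_ctrl().  A minimal sketch, loosely modeled on a memory-mapped
 * transport; the helper names are hypothetical:
 *
 *	static const struct nvme_ctrl_ops my_ctrl_ops = {
 *		.name			= "my-transport",
 *		.module			= THIS_MODULE,
 *		.flags			= NVME_F_METADATA_SUPPORTED,
 *		.reg_read32		= my_reg_read32,
 *		.reg_write32		= my_reg_write32,
 *		.reg_read64		= my_reg_read64,
 *		.free_ctrl		= my_free_ctrl,
 *		.submit_async_event	= my_submit_async_event,
 *	};
 *
 * Fabrics transports additionally set NVME_F_FABRICS; core helpers such as
 * nvme_ctrl_ready() above rely only on reg_read32() being implemented.
 */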

static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
{
	get_device(ctrl->device);
}

static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	put_device(ctrl->device);
}

void nvme_complete_rq(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		enum nvme_ctrl_state new_state);
int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap);
int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
void nvme_start_ctrl(struct nvme_ctrl *ctrl);
void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
void nvme_put_ctrl(struct nvme_ctrl *ctrl);
int nvme_init_identify(struct nvme_ctrl *ctrl);

void nvme_remove_namespaces(struct nvme_ctrl *ctrl);

int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
		bool send);

void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		volatile union nvme_result *res);

void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);

#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
		struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
		struct nvme_command *cmd);
int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		void *buf, unsigned bufflen);
int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
		union nvme_result *result, void *buffer, unsigned bufflen,
		unsigned timeout, int qid, int at_head,
		blk_mq_req_flags_t flags);
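
/*
 * Usage sketch (illustrative, not part of the original header):
 * nvme_submit_sync_cmd() is the simple synchronous wrapper, while
 * __nvme_submit_sync_cmd() also exposes the result, timeout, target queue
 * and head-of-queue insertion.  Issuing an Identify Controller command on
 * the admin queue might look like this ("id" is a hypothetical kmalloc'ed
 * buffer of sizeof(struct nvme_id_ctrl) bytes):
 *
 *	struct nvme_command c = { };
 *	int error;
 *
 *	c.identify.opcode = nvme_admin_identify;
 *	c.identify.cns = NVME_ID_CNS_CTRL;
 *	error = nvme_submit_sync_cmd(ctrl->admin_q, &c, id,
 *			sizeof(struct nvme_id_ctrl));
 */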

int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);

int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
		void *log, size_t size, u64 offset);

extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct block_device_operations nvme_ns_head_ops;

#ifdef CONFIG_NVME_MULTIPATH
bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl);
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
void nvme_mpath_remove_disk(struct nvme_ns_head *head);
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
void nvme_mpath_stop(struct nvme_ctrl *ctrl);
void nvme_mpath_clear_current_path(struct nvme_ns *ns);
struct nvme_ns *nvme_find_path(struct nvme_ns_head *head);

static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (head->disk && list_empty(&head->list))
		kblockd_schedule_work(&head->requeue_work);
}

extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;

#else
static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
{
	return false;
}
/*
 * Without the multipath code enabled, multiple controllers per subsystem are
 * visible as devices and thus we cannot use the subsystem instance.
 */
static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
				      struct nvme_ctrl *ctrl, int *flags)
{
	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
}

static inline void nvme_failover_req(struct request *req)
{
}
static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
}
static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
		struct nvme_ns_head *head)
{
	return 0;
}
static inline void nvme_mpath_add_disk(struct nvme_ns *ns,
		struct nvme_id_ns *id)
{
}
static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
}
static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
}
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
		struct nvme_id_ctrl *id)
{
	if (ctrl->subsys->cmic & (1 << 3))
		dev_warn(ctrl->device,
"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
	return 0;
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
}
static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
}
#endif /* CONFIG_NVME_MULTIPATH */

#ifdef CONFIG_NVM
void nvme_nvm_update_nvm_info(struct nvme_ns *ns);
int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
void nvme_nvm_unregister(struct nvme_ns *ns);
extern const struct attribute_group nvme_nvm_attr_group;
int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
#else
static inline void nvme_nvm_update_nvm_info(struct nvme_ns *ns) {};
static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
				    int node)
{
	return 0;
}

static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
							unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_NVM */

static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
{
	return dev_to_disk(dev)->private_data;
}

int __init nvme_core_init(void);
void nvme_core_exit(void);

#endif /* _NVME_H */