15f37396dSChristoph Hellwig // SPDX-License-Identifier: GPL-2.0 257dacad5SJay Sternberg /* 357dacad5SJay Sternberg * NVM Express device driver 457dacad5SJay Sternberg * Copyright (c) 2011-2014, Intel Corporation. 557dacad5SJay Sternberg */ 657dacad5SJay Sternberg 7df4f9bc4SDavid E. Box #include <linux/acpi.h> 8a0a3408eSKeith Busch #include <linux/aer.h> 918119775SKeith Busch #include <linux/async.h> 1057dacad5SJay Sternberg #include <linux/blkdev.h> 1157dacad5SJay Sternberg #include <linux/blk-mq.h> 12dca51e78SChristoph Hellwig #include <linux/blk-mq-pci.h> 13fe45e630SChristoph Hellwig #include <linux/blk-integrity.h> 14ff5350a8SAndy Lutomirski #include <linux/dmi.h> 1557dacad5SJay Sternberg #include <linux/init.h> 1657dacad5SJay Sternberg #include <linux/interrupt.h> 1757dacad5SJay Sternberg #include <linux/io.h> 1899722c8aSChristophe JAILLET #include <linux/kstrtox.h> 19dc90f084SChristoph Hellwig #include <linux/memremap.h> 2057dacad5SJay Sternberg #include <linux/mm.h> 2157dacad5SJay Sternberg #include <linux/module.h> 2277bf25eaSKeith Busch #include <linux/mutex.h> 23d0877473SKeith Busch #include <linux/once.h> 2457dacad5SJay Sternberg #include <linux/pci.h> 25d916b1beSKeith Busch #include <linux/suspend.h> 2657dacad5SJay Sternberg #include <linux/t10-pi.h> 2757dacad5SJay Sternberg #include <linux/types.h> 289cf5c095SLinus Torvalds #include <linux/io-64-nonatomic-lo-hi.h> 2920d3bb92SKlaus Jensen #include <linux/io-64-nonatomic-hi-lo.h> 30a98e58e5SScott Bauer #include <linux/sed-opal.h> 310f238ff5SLogan Gunthorpe #include <linux/pci-p2pdma.h> 3257dacad5SJay Sternberg 33604c01d5Syupeng #include "trace.h" 3457dacad5SJay Sternberg #include "nvme.h" 3557dacad5SJay Sternberg 36c1e0cc7eSBenjamin Herrenschmidt #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes) 378a1d09a6SBenjamin Herrenschmidt #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion)) 3857dacad5SJay Sternberg 3984173423SKeith Busch #define SGES_PER_PAGE (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc)) 40adf68f21SChristoph Hellwig 41943e942eSJens Axboe /* 42943e942eSJens Axboe * These can be higher, but we need to ensure that any command doesn't 43943e942eSJens Axboe * require an sg allocation that needs more than a page of data. 44943e942eSJens Axboe */ 457846c1b5SKeith Busch #define NVME_MAX_KB_SZ 8192 467846c1b5SKeith Busch #define NVME_MAX_SEGS 128 477846c1b5SKeith Busch #define NVME_MAX_NR_ALLOCATIONS 5 48943e942eSJens Axboe 4957dacad5SJay Sternberg static int use_threaded_interrupts; 502e21e445SXin Hao module_param(use_threaded_interrupts, int, 0444); 5157dacad5SJay Sternberg 5257dacad5SJay Sternberg static bool use_cmb_sqes = true; 5369f4eb9fSKeith Busch module_param(use_cmb_sqes, bool, 0444); 5457dacad5SJay Sternberg MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes"); 5557dacad5SJay Sternberg 5687ad72a5SChristoph Hellwig static unsigned int max_host_mem_size_mb = 128; 5787ad72a5SChristoph Hellwig module_param(max_host_mem_size_mb, uint, 0444); 5887ad72a5SChristoph Hellwig MODULE_PARM_DESC(max_host_mem_size_mb, 5987ad72a5SChristoph Hellwig "Maximum Host Memory Buffer (HMB) size per controller (in MiB)"); 6057dacad5SJay Sternberg 61a7a7cbe3SChaitanya Kulkarni static unsigned int sgl_threshold = SZ_32K; 62a7a7cbe3SChaitanya Kulkarni module_param(sgl_threshold, uint, 0644); 63a7a7cbe3SChaitanya Kulkarni MODULE_PARM_DESC(sgl_threshold, 64a7a7cbe3SChaitanya Kulkarni "Use SGLs when average request segment size is larger or equal to " 65a7a7cbe3SChaitanya Kulkarni "this size. 
Use 0 to disable SGLs."); 66a7a7cbe3SChaitanya Kulkarni 6727453b45SSagi Grimberg #define NVME_PCI_MIN_QUEUE_SIZE 2 6827453b45SSagi Grimberg #define NVME_PCI_MAX_QUEUE_SIZE 4095 69b27c1e68Sweiping zhang static int io_queue_depth_set(const char *val, const struct kernel_param *kp); 70b27c1e68Sweiping zhang static const struct kernel_param_ops io_queue_depth_ops = { 71b27c1e68Sweiping zhang .set = io_queue_depth_set, 7261f3b896SChaitanya Kulkarni .get = param_get_uint, 73b27c1e68Sweiping zhang }; 74b27c1e68Sweiping zhang 7561f3b896SChaitanya Kulkarni static unsigned int io_queue_depth = 1024; 76b27c1e68Sweiping zhang module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644); 7727453b45SSagi Grimberg MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2 and < 4096"); 78b27c1e68Sweiping zhang 799c9e76d5SWeiping Zhang static int io_queue_count_set(const char *val, const struct kernel_param *kp) 809c9e76d5SWeiping Zhang { 819c9e76d5SWeiping Zhang unsigned int n; 829c9e76d5SWeiping Zhang int ret; 839c9e76d5SWeiping Zhang 849c9e76d5SWeiping Zhang ret = kstrtouint(val, 10, &n); 859c9e76d5SWeiping Zhang if (ret != 0 || n > num_possible_cpus()) 869c9e76d5SWeiping Zhang return -EINVAL; 879c9e76d5SWeiping Zhang return param_set_uint(val, kp); 889c9e76d5SWeiping Zhang } 899c9e76d5SWeiping Zhang 909c9e76d5SWeiping Zhang static const struct kernel_param_ops io_queue_count_ops = { 919c9e76d5SWeiping Zhang .set = io_queue_count_set, 929c9e76d5SWeiping Zhang .get = param_get_uint, 939c9e76d5SWeiping Zhang }; 949c9e76d5SWeiping Zhang 953f68baf7SKeith Busch static unsigned int write_queues; 969c9e76d5SWeiping Zhang module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644); 973b6592f7SJens Axboe MODULE_PARM_DESC(write_queues, 983b6592f7SJens Axboe "Number of queues to use for writes. If not set, reads and writes " 993b6592f7SJens Axboe "will share a queue set."); 1003b6592f7SJens Axboe 1013f68baf7SKeith Busch static unsigned int poll_queues; 1029c9e76d5SWeiping Zhang module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644); 1034b04cc6aSJens Axboe MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO."); 1044b04cc6aSJens Axboe 105df4f9bc4SDavid E. Box static bool noacpi; 106df4f9bc4SDavid E. Box module_param(noacpi, bool, 0444); 107df4f9bc4SDavid E. Box MODULE_PARM_DESC(noacpi, "disable acpi bios quirks"); 108df4f9bc4SDavid E. Box 1091c63dc66SChristoph Hellwig struct nvme_dev; 1101c63dc66SChristoph Hellwig struct nvme_queue; 11157dacad5SJay Sternberg 112a5cdb68cSKeith Busch static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); 1137d879c90SChristoph Hellwig static void nvme_delete_io_queues(struct nvme_dev *dev); 11457dacad5SJay Sternberg 11557dacad5SJay Sternberg /* 1161c63dc66SChristoph Hellwig * Represents an NVM Express device. Each nvme_dev is a PCI function. 
1171c63dc66SChristoph Hellwig */ 1181c63dc66SChristoph Hellwig struct nvme_dev { 119147b27e4SSagi Grimberg struct nvme_queue *queues; 1201c63dc66SChristoph Hellwig struct blk_mq_tag_set tagset; 1211c63dc66SChristoph Hellwig struct blk_mq_tag_set admin_tagset; 1221c63dc66SChristoph Hellwig u32 __iomem *dbs; 1231c63dc66SChristoph Hellwig struct device *dev; 1241c63dc66SChristoph Hellwig struct dma_pool *prp_page_pool; 1251c63dc66SChristoph Hellwig struct dma_pool *prp_small_pool; 1261c63dc66SChristoph Hellwig unsigned online_queues; 1271c63dc66SChristoph Hellwig unsigned max_qid; 128e20ba6e1SChristoph Hellwig unsigned io_queues[HCTX_MAX_TYPES]; 12922b55601SKeith Busch unsigned int num_vecs; 1307442ddceSJohn Garry u32 q_depth; 131c1e0cc7eSBenjamin Herrenschmidt int io_sqes; 1321c63dc66SChristoph Hellwig u32 db_stride; 1331c63dc66SChristoph Hellwig void __iomem *bar; 13497f6ef64SXu Yu unsigned long bar_mapped_size; 13577bf25eaSKeith Busch struct mutex shutdown_lock; 1361c63dc66SChristoph Hellwig bool subsystem; 1371c63dc66SChristoph Hellwig u64 cmb_size; 1380f238ff5SLogan Gunthorpe bool cmb_use_sqes; 1391c63dc66SChristoph Hellwig u32 cmbsz; 140202021c1SStephen Bates u32 cmbloc; 1411c63dc66SChristoph Hellwig struct nvme_ctrl ctrl; 142d916b1beSKeith Busch u32 last_ps; 143a5df5e79SKeith Busch bool hmb; 14487ad72a5SChristoph Hellwig 145943e942eSJens Axboe mempool_t *iod_mempool; 146943e942eSJens Axboe 14787ad72a5SChristoph Hellwig /* shadow doorbell buffer support: */ 148b5f96cb7SKlaus Jensen __le32 *dbbuf_dbs; 149f9f38e33SHelen Koike dma_addr_t dbbuf_dbs_dma_addr; 150b5f96cb7SKlaus Jensen __le32 *dbbuf_eis; 151f9f38e33SHelen Koike dma_addr_t dbbuf_eis_dma_addr; 15287ad72a5SChristoph Hellwig 15387ad72a5SChristoph Hellwig /* host memory buffer support: */ 15487ad72a5SChristoph Hellwig u64 host_mem_size; 15587ad72a5SChristoph Hellwig u32 nr_host_mem_descs; 1564033f35dSChristoph Hellwig dma_addr_t host_mem_descs_dma; 15787ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *host_mem_descs; 15887ad72a5SChristoph Hellwig void **host_mem_desc_bufs; 1592a5bcfddSWeiping Zhang unsigned int nr_allocated_queues; 1602a5bcfddSWeiping Zhang unsigned int nr_write_queues; 1612a5bcfddSWeiping Zhang unsigned int nr_poll_queues; 16257dacad5SJay Sternberg }; 16357dacad5SJay Sternberg 164b27c1e68Sweiping zhang static int io_queue_depth_set(const char *val, const struct kernel_param *kp) 165b27c1e68Sweiping zhang { 16627453b45SSagi Grimberg return param_set_uint_minmax(val, kp, NVME_PCI_MIN_QUEUE_SIZE, 16727453b45SSagi Grimberg NVME_PCI_MAX_QUEUE_SIZE); 168b27c1e68Sweiping zhang } 169b27c1e68Sweiping zhang 170f9f38e33SHelen Koike static inline unsigned int sq_idx(unsigned int qid, u32 stride) 171f9f38e33SHelen Koike { 172f9f38e33SHelen Koike return qid * 2 * stride; 173f9f38e33SHelen Koike } 174f9f38e33SHelen Koike 175f9f38e33SHelen Koike static inline unsigned int cq_idx(unsigned int qid, u32 stride) 176f9f38e33SHelen Koike { 177f9f38e33SHelen Koike return (qid * 2 + 1) * stride; 178f9f38e33SHelen Koike } 179f9f38e33SHelen Koike 1801c63dc66SChristoph Hellwig static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl) 1811c63dc66SChristoph Hellwig { 1821c63dc66SChristoph Hellwig return container_of(ctrl, struct nvme_dev, ctrl); 1831c63dc66SChristoph Hellwig } 1841c63dc66SChristoph Hellwig 18557dacad5SJay Sternberg /* 18657dacad5SJay Sternberg * An NVM Express queue. Each device has at least two (one for admin 18757dacad5SJay Sternberg * commands and one for I/O commands). 
18857dacad5SJay Sternberg */ 18957dacad5SJay Sternberg struct nvme_queue { 19057dacad5SJay Sternberg struct nvme_dev *dev; 1911ab0cd69SJens Axboe spinlock_t sq_lock; 192c1e0cc7eSBenjamin Herrenschmidt void *sq_cmds; 1933a7afd8eSChristoph Hellwig /* only used for poll queues: */ 1943a7afd8eSChristoph Hellwig spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; 19574943d45SKeith Busch struct nvme_completion *cqes; 19657dacad5SJay Sternberg dma_addr_t sq_dma_addr; 19757dacad5SJay Sternberg dma_addr_t cq_dma_addr; 19857dacad5SJay Sternberg u32 __iomem *q_db; 1997442ddceSJohn Garry u32 q_depth; 2007c349ddeSKeith Busch u16 cq_vector; 20157dacad5SJay Sternberg u16 sq_tail; 20238210800SKeith Busch u16 last_sq_tail; 20357dacad5SJay Sternberg u16 cq_head; 20457dacad5SJay Sternberg u16 qid; 20557dacad5SJay Sternberg u8 cq_phase; 206c1e0cc7eSBenjamin Herrenschmidt u8 sqes; 2074e224106SChristoph Hellwig unsigned long flags; 2084e224106SChristoph Hellwig #define NVMEQ_ENABLED 0 20963223078SChristoph Hellwig #define NVMEQ_SQ_CMB 1 210d1ed6aa1SChristoph Hellwig #define NVMEQ_DELETE_ERROR 2 2117c349ddeSKeith Busch #define NVMEQ_POLLED 3 212b5f96cb7SKlaus Jensen __le32 *dbbuf_sq_db; 213b5f96cb7SKlaus Jensen __le32 *dbbuf_cq_db; 214b5f96cb7SKlaus Jensen __le32 *dbbuf_sq_ei; 215b5f96cb7SKlaus Jensen __le32 *dbbuf_cq_ei; 216d1ed6aa1SChristoph Hellwig struct completion delete_done; 21757dacad5SJay Sternberg }; 21857dacad5SJay Sternberg 2197846c1b5SKeith Busch union nvme_descriptor { 2207846c1b5SKeith Busch struct nvme_sgl_desc *sg_list; 2217846c1b5SKeith Busch __le64 *prp_list; 2227846c1b5SKeith Busch }; 2237846c1b5SKeith Busch 22457dacad5SJay Sternberg /* 2259b048119SChristoph Hellwig * The nvme_iod describes the data in an I/O. 2269b048119SChristoph Hellwig * 2279b048119SChristoph Hellwig * The sg pointer contains the list of PRP/SGL chunk allocations in addition 2289b048119SChristoph Hellwig * to the actual struct scatterlist. 22971bd150cSChristoph Hellwig */ 23071bd150cSChristoph Hellwig struct nvme_iod { 231d49187e9SChristoph Hellwig struct nvme_request req; 232af7fae85SKeith Busch struct nvme_command cmd; 23352da4f3fSKeith Busch bool aborted; 234c372cdd1SKeith Busch s8 nr_allocations; /* PRP list pool allocations. 0 means small 235c372cdd1SKeith Busch pool in use */ 236dff824b2SChristoph Hellwig unsigned int dma_len; /* length of single DMA segment mapping */ 237c4c22c52SKeith Busch dma_addr_t first_dma; 238783b94bdSChristoph Hellwig dma_addr_t meta_dma; 23991fb2b60SLogan Gunthorpe struct sg_table sgt; 2407846c1b5SKeith Busch union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS]; 24157dacad5SJay Sternberg }; 24257dacad5SJay Sternberg 2432a5bcfddSWeiping Zhang static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev) 2443b6592f7SJens Axboe { 2452a5bcfddSWeiping Zhang return dev->nr_allocated_queues * 8 * dev->db_stride; 246f9f38e33SHelen Koike } 247f9f38e33SHelen Koike 24865a54646SChristoph Hellwig static void nvme_dbbuf_dma_alloc(struct nvme_dev *dev) 249f9f38e33SHelen Koike { 2502a5bcfddSWeiping Zhang unsigned int mem_size = nvme_dbbuf_size(dev); 251f9f38e33SHelen Koike 25265a54646SChristoph Hellwig if (!(dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP)) 25365a54646SChristoph Hellwig return; 25465a54646SChristoph Hellwig 25558847f12SKeith Busch if (dev->dbbuf_dbs) { 25658847f12SKeith Busch /* 25758847f12SKeith Busch * Clear the dbbuf memory so the driver doesn't observe stale 25858847f12SKeith Busch * values from the previous instantiation. 
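		 * In particular, stale event-index values could make
		 * nvme_dbbuf_update_and_check_event() skip MMIO doorbell
		 * writes that the newly reset controller still needs.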
25958847f12SKeith Busch */ 26058847f12SKeith Busch memset(dev->dbbuf_dbs, 0, mem_size); 26158847f12SKeith Busch memset(dev->dbbuf_eis, 0, mem_size); 26265a54646SChristoph Hellwig return; 26358847f12SKeith Busch } 264f9f38e33SHelen Koike 265f9f38e33SHelen Koike dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size, 266f9f38e33SHelen Koike &dev->dbbuf_dbs_dma_addr, 267f9f38e33SHelen Koike GFP_KERNEL); 268f9f38e33SHelen Koike if (!dev->dbbuf_dbs) 26965a54646SChristoph Hellwig goto fail; 270f9f38e33SHelen Koike dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size, 271f9f38e33SHelen Koike &dev->dbbuf_eis_dma_addr, 272f9f38e33SHelen Koike GFP_KERNEL); 27365a54646SChristoph Hellwig if (!dev->dbbuf_eis) 27465a54646SChristoph Hellwig goto fail_free_dbbuf_dbs; 27565a54646SChristoph Hellwig return; 276f9f38e33SHelen Koike 27765a54646SChristoph Hellwig fail_free_dbbuf_dbs: 27865a54646SChristoph Hellwig dma_free_coherent(dev->dev, mem_size, dev->dbbuf_dbs, 27965a54646SChristoph Hellwig dev->dbbuf_dbs_dma_addr); 28065a54646SChristoph Hellwig dev->dbbuf_dbs = NULL; 28165a54646SChristoph Hellwig fail: 28265a54646SChristoph Hellwig dev_warn(dev->dev, "unable to allocate dma for dbbuf\n"); 283f9f38e33SHelen Koike } 284f9f38e33SHelen Koike 285f9f38e33SHelen Koike static void nvme_dbbuf_dma_free(struct nvme_dev *dev) 286f9f38e33SHelen Koike { 2872a5bcfddSWeiping Zhang unsigned int mem_size = nvme_dbbuf_size(dev); 288f9f38e33SHelen Koike 289f9f38e33SHelen Koike if (dev->dbbuf_dbs) { 290f9f38e33SHelen Koike dma_free_coherent(dev->dev, mem_size, 291f9f38e33SHelen Koike dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr); 292f9f38e33SHelen Koike dev->dbbuf_dbs = NULL; 293f9f38e33SHelen Koike } 294f9f38e33SHelen Koike if (dev->dbbuf_eis) { 295f9f38e33SHelen Koike dma_free_coherent(dev->dev, mem_size, 296f9f38e33SHelen Koike dev->dbbuf_eis, dev->dbbuf_eis_dma_addr); 297f9f38e33SHelen Koike dev->dbbuf_eis = NULL; 298f9f38e33SHelen Koike } 299f9f38e33SHelen Koike } 300f9f38e33SHelen Koike 301f9f38e33SHelen Koike static void nvme_dbbuf_init(struct nvme_dev *dev, 302f9f38e33SHelen Koike struct nvme_queue *nvmeq, int qid) 303f9f38e33SHelen Koike { 304f9f38e33SHelen Koike if (!dev->dbbuf_dbs || !qid) 305f9f38e33SHelen Koike return; 306f9f38e33SHelen Koike 307f9f38e33SHelen Koike nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)]; 308f9f38e33SHelen Koike nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)]; 309f9f38e33SHelen Koike nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)]; 310f9f38e33SHelen Koike nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)]; 311f9f38e33SHelen Koike } 312f9f38e33SHelen Koike 3130f0d2c87SMinwoo Im static void nvme_dbbuf_free(struct nvme_queue *nvmeq) 3140f0d2c87SMinwoo Im { 3150f0d2c87SMinwoo Im if (!nvmeq->qid) 3160f0d2c87SMinwoo Im return; 3170f0d2c87SMinwoo Im 3180f0d2c87SMinwoo Im nvmeq->dbbuf_sq_db = NULL; 3190f0d2c87SMinwoo Im nvmeq->dbbuf_cq_db = NULL; 3200f0d2c87SMinwoo Im nvmeq->dbbuf_sq_ei = NULL; 3210f0d2c87SMinwoo Im nvmeq->dbbuf_cq_ei = NULL; 3220f0d2c87SMinwoo Im } 3230f0d2c87SMinwoo Im 324f9f38e33SHelen Koike static void nvme_dbbuf_set(struct nvme_dev *dev) 325f9f38e33SHelen Koike { 326f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 3270f0d2c87SMinwoo Im unsigned int i; 328f9f38e33SHelen Koike 329f9f38e33SHelen Koike if (!dev->dbbuf_dbs) 330f9f38e33SHelen Koike return; 331f9f38e33SHelen Koike 332f9f38e33SHelen Koike c.dbbuf.opcode = nvme_admin_dbbuf; 333f9f38e33SHelen Koike c.dbbuf.prp1 = 
cpu_to_le64(dev->dbbuf_dbs_dma_addr); 334f9f38e33SHelen Koike c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); 335f9f38e33SHelen Koike 336f9f38e33SHelen Koike if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { 3379bdcfb10SChristoph Hellwig dev_warn(dev->ctrl.device, "unable to set dbbuf\n"); 338f9f38e33SHelen Koike /* Free memory and continue on */ 339f9f38e33SHelen Koike nvme_dbbuf_dma_free(dev); 3400f0d2c87SMinwoo Im 3410f0d2c87SMinwoo Im for (i = 1; i <= dev->online_queues; i++) 3420f0d2c87SMinwoo Im nvme_dbbuf_free(&dev->queues[i]); 343f9f38e33SHelen Koike } 344f9f38e33SHelen Koike } 345f9f38e33SHelen Koike 346f9f38e33SHelen Koike static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old) 347f9f38e33SHelen Koike { 348f9f38e33SHelen Koike return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old); 349f9f38e33SHelen Koike } 350f9f38e33SHelen Koike 351f9f38e33SHelen Koike /* Update dbbuf and return true if an MMIO is required */ 352b5f96cb7SKlaus Jensen static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db, 353b5f96cb7SKlaus Jensen volatile __le32 *dbbuf_ei) 354f9f38e33SHelen Koike { 355f9f38e33SHelen Koike if (dbbuf_db) { 356b5f96cb7SKlaus Jensen u16 old_value, event_idx; 357f9f38e33SHelen Koike 358f9f38e33SHelen Koike /* 359f9f38e33SHelen Koike * Ensure that the queue is written before updating 360f9f38e33SHelen Koike * the doorbell in memory 361f9f38e33SHelen Koike */ 362f9f38e33SHelen Koike wmb(); 363f9f38e33SHelen Koike 364b5f96cb7SKlaus Jensen old_value = le32_to_cpu(*dbbuf_db); 365b5f96cb7SKlaus Jensen *dbbuf_db = cpu_to_le32(value); 366f9f38e33SHelen Koike 367f1ed3df2SMichal Wnukowski /* 368f1ed3df2SMichal Wnukowski * Ensure that the doorbell is updated before reading the event 369f1ed3df2SMichal Wnukowski * index from memory. The controller needs to provide similar 370f1ed3df2SMichal Wnukowski * ordering to ensure the envent index is updated before reading 371f1ed3df2SMichal Wnukowski * the doorbell. 372f1ed3df2SMichal Wnukowski */ 373f1ed3df2SMichal Wnukowski mb(); 374f1ed3df2SMichal Wnukowski 375b5f96cb7SKlaus Jensen event_idx = le32_to_cpu(*dbbuf_ei); 376b5f96cb7SKlaus Jensen if (!nvme_dbbuf_need_event(event_idx, value, old_value)) 377f9f38e33SHelen Koike return false; 378f9f38e33SHelen Koike } 379f9f38e33SHelen Koike 380f9f38e33SHelen Koike return true; 38157dacad5SJay Sternberg } 38257dacad5SJay Sternberg 38357dacad5SJay Sternberg /* 38457dacad5SJay Sternberg * Will slightly overestimate the number of pages needed. This is OK 38557dacad5SJay Sternberg * as it only leads to a small amount of wasted memory for the lifetime of 38657dacad5SJay Sternberg * the I/O. 
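 * For example, with NVME_MAX_KB_SZ = 8192 and the usual 4096-byte
 * NVME_CTRL_PAGE_SIZE: max_bytes = 8192 * 1024 + 4096 = 8392704 (the extra
 * page covers a non page-aligned start), nprps = 8392704 / 4096 = 2049, and
 * each list page holds 4088 / 8 = 511 usable entries (the last slot chains
 * to the next page), so DIV_ROUND_UP(8 * 2049, 4088) = 5 pages, matching
 * NVME_MAX_NR_ALLOCATIONS above.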
38757dacad5SJay Sternberg */ 388b13c6393SChaitanya Kulkarni static int nvme_pci_npages_prp(void) 38957dacad5SJay Sternberg { 390c89a529eSKeith Busch unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE; 391c89a529eSKeith Busch unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE); 39284173423SKeith Busch return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8); 39357dacad5SJay Sternberg } 39457dacad5SJay Sternberg 39557dacad5SJay Sternberg static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 39657dacad5SJay Sternberg unsigned int hctx_idx) 39757dacad5SJay Sternberg { 3980da7feaaSChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(data); 399147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[0]; 40057dacad5SJay Sternberg 40157dacad5SJay Sternberg WARN_ON(hctx_idx != 0); 40257dacad5SJay Sternberg WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); 40357dacad5SJay Sternberg 40457dacad5SJay Sternberg hctx->driver_data = nvmeq; 40557dacad5SJay Sternberg return 0; 40657dacad5SJay Sternberg } 40757dacad5SJay Sternberg 40857dacad5SJay Sternberg static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 40957dacad5SJay Sternberg unsigned int hctx_idx) 41057dacad5SJay Sternberg { 4110da7feaaSChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(data); 412147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; 41357dacad5SJay Sternberg 41457dacad5SJay Sternberg WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); 41557dacad5SJay Sternberg hctx->driver_data = nvmeq; 41657dacad5SJay Sternberg return 0; 41757dacad5SJay Sternberg } 41857dacad5SJay Sternberg 419e559398fSChristoph Hellwig static int nvme_pci_init_request(struct blk_mq_tag_set *set, 420e559398fSChristoph Hellwig struct request *req, unsigned int hctx_idx, 421e559398fSChristoph Hellwig unsigned int numa_node) 42257dacad5SJay Sternberg { 4230da7feaaSChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(set->driver_data); 424f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 42559e29ce6SSagi Grimberg 42659e29ce6SSagi Grimberg nvme_req(req)->ctrl = &dev->ctrl; 427f4b9e6c9SKeith Busch nvme_req(req)->cmd = &iod->cmd; 42857dacad5SJay Sternberg return 0; 42957dacad5SJay Sternberg } 43057dacad5SJay Sternberg 4313b6592f7SJens Axboe static int queue_irq_offset(struct nvme_dev *dev) 4323b6592f7SJens Axboe { 4333b6592f7SJens Axboe /* if we have more than 1 vec, admin queue offsets us by 1 */ 4343b6592f7SJens Axboe if (dev->num_vecs > 1) 4353b6592f7SJens Axboe return 1; 4363b6592f7SJens Axboe 4373b6592f7SJens Axboe return 0; 4383b6592f7SJens Axboe } 4393b6592f7SJens Axboe 440a4e1d0b7SBart Van Assche static void nvme_pci_map_queues(struct blk_mq_tag_set *set) 441dca51e78SChristoph Hellwig { 4420da7feaaSChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(set->driver_data); 4433b6592f7SJens Axboe int i, qoff, offset; 444dca51e78SChristoph Hellwig 4453b6592f7SJens Axboe offset = queue_irq_offset(dev); 4463b6592f7SJens Axboe for (i = 0, qoff = 0; i < set->nr_maps; i++) { 4473b6592f7SJens Axboe struct blk_mq_queue_map *map = &set->map[i]; 4483b6592f7SJens Axboe 4493b6592f7SJens Axboe map->nr_queues = dev->io_queues[i]; 4503b6592f7SJens Axboe if (!map->nr_queues) { 451e20ba6e1SChristoph Hellwig BUG_ON(i == HCTX_TYPE_DEFAULT); 4527e849dd9SChristoph Hellwig continue; 4533b6592f7SJens Axboe } 4543b6592f7SJens Axboe 4554b04cc6aSJens Axboe /* 4564b04cc6aSJens Axboe * The poll queue(s) doesn't have an IRQ (and hence IRQ 4574b04cc6aSJens Axboe * affinity), so use the 
regular blk-mq cpu mapping 4584b04cc6aSJens Axboe */ 4593b6592f7SJens Axboe map->queue_offset = qoff; 460cb9e0e50SKeith Busch if (i != HCTX_TYPE_POLL && offset) 4613b6592f7SJens Axboe blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset); 4624b04cc6aSJens Axboe else 4634b04cc6aSJens Axboe blk_mq_map_queues(map); 4643b6592f7SJens Axboe qoff += map->nr_queues; 4653b6592f7SJens Axboe offset += map->nr_queues; 4663b6592f7SJens Axboe } 467dca51e78SChristoph Hellwig } 468dca51e78SChristoph Hellwig 46938210800SKeith Busch /* 47038210800SKeith Busch * Write sq tail if we are asked to, or if the next command would wrap. 47138210800SKeith Busch */ 47238210800SKeith Busch static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq) 47304f3eafdSJens Axboe { 47438210800SKeith Busch if (!write_sq) { 47538210800SKeith Busch u16 next_tail = nvmeq->sq_tail + 1; 47638210800SKeith Busch 47738210800SKeith Busch if (next_tail == nvmeq->q_depth) 47838210800SKeith Busch next_tail = 0; 47938210800SKeith Busch if (next_tail != nvmeq->last_sq_tail) 48038210800SKeith Busch return; 48138210800SKeith Busch } 48238210800SKeith Busch 48304f3eafdSJens Axboe if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail, 48404f3eafdSJens Axboe nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei)) 48504f3eafdSJens Axboe writel(nvmeq->sq_tail, nvmeq->q_db); 48638210800SKeith Busch nvmeq->last_sq_tail = nvmeq->sq_tail; 48704f3eafdSJens Axboe } 48804f3eafdSJens Axboe 4893233b94cSJens Axboe static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq, 4903233b94cSJens Axboe struct nvme_command *cmd) 49157dacad5SJay Sternberg { 492c1e0cc7eSBenjamin Herrenschmidt memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes), 4933233b94cSJens Axboe absolute_pointer(cmd), sizeof(*cmd)); 49490ea5ca4SChristoph Hellwig if (++nvmeq->sq_tail == nvmeq->q_depth) 49590ea5ca4SChristoph Hellwig nvmeq->sq_tail = 0; 49604f3eafdSJens Axboe } 49704f3eafdSJens Axboe 49804f3eafdSJens Axboe static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx) 49904f3eafdSJens Axboe { 50004f3eafdSJens Axboe struct nvme_queue *nvmeq = hctx->driver_data; 50104f3eafdSJens Axboe 50204f3eafdSJens Axboe spin_lock(&nvmeq->sq_lock); 50338210800SKeith Busch if (nvmeq->sq_tail != nvmeq->last_sq_tail) 50438210800SKeith Busch nvme_write_sq_db(nvmeq, true); 50590ea5ca4SChristoph Hellwig spin_unlock(&nvmeq->sq_lock); 50657dacad5SJay Sternberg } 50757dacad5SJay Sternberg 508ae582935SKeith Busch static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req, 509ae582935SKeith Busch int nseg) 510955b1b5aSMinwoo Im { 511a53232cbSKeith Busch struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 512955b1b5aSMinwoo Im unsigned int avg_seg_size; 513955b1b5aSMinwoo Im 51420469a37SKeith Busch avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg); 515955b1b5aSMinwoo Im 516253a0b76SChaitanya Kulkarni if (!nvme_ctrl_sgl_supported(&dev->ctrl)) 517955b1b5aSMinwoo Im return false; 518a53232cbSKeith Busch if (!nvmeq->qid) 519955b1b5aSMinwoo Im return false; 520955b1b5aSMinwoo Im if (!sgl_threshold || avg_seg_size < sgl_threshold) 521955b1b5aSMinwoo Im return false; 522955b1b5aSMinwoo Im return true; 523955b1b5aSMinwoo Im } 524955b1b5aSMinwoo Im 5259275c206SChristoph Hellwig static void nvme_free_prps(struct nvme_dev *dev, struct request *req) 52657dacad5SJay Sternberg { 5276c3c05b0SChaitanya Kulkarni const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; 5289275c206SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 5299275c206SChristoph Hellwig dma_addr_t 
dma_addr = iod->first_dma; 53057dacad5SJay Sternberg int i; 53157dacad5SJay Sternberg 532c372cdd1SKeith Busch for (i = 0; i < iod->nr_allocations; i++) { 5337846c1b5SKeith Busch __le64 *prp_list = iod->list[i].prp_list; 5349275c206SChristoph Hellwig dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]); 5359275c206SChristoph Hellwig 5369275c206SChristoph Hellwig dma_pool_free(dev->prp_page_pool, prp_list, dma_addr); 5379275c206SChristoph Hellwig dma_addr = next_dma_addr; 538dff824b2SChristoph Hellwig } 5399275c206SChristoph Hellwig } 5409275c206SChristoph Hellwig 5419275c206SChristoph Hellwig static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) 5429275c206SChristoph Hellwig { 5439275c206SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 5447fe07d14SChristoph Hellwig 5459275c206SChristoph Hellwig if (iod->dma_len) { 5469275c206SChristoph Hellwig dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, 5479275c206SChristoph Hellwig rq_dma_dir(req)); 5489275c206SChristoph Hellwig return; 5499275c206SChristoph Hellwig } 5509275c206SChristoph Hellwig 55191fb2b60SLogan Gunthorpe WARN_ON_ONCE(!iod->sgt.nents); 5529275c206SChristoph Hellwig 55391fb2b60SLogan Gunthorpe dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); 55491fb2b60SLogan Gunthorpe 555c372cdd1SKeith Busch if (iod->nr_allocations == 0) 5567846c1b5SKeith Busch dma_pool_free(dev->prp_small_pool, iod->list[0].sg_list, 5579275c206SChristoph Hellwig iod->first_dma); 5588f0edf45SKeith Busch else if (iod->nr_allocations == 1) 5597846c1b5SKeith Busch dma_pool_free(dev->prp_page_pool, iod->list[0].sg_list, 56001df742dSKeith Busch iod->first_dma); 5619275c206SChristoph Hellwig else 5629275c206SChristoph Hellwig nvme_free_prps(dev, req); 56391fb2b60SLogan Gunthorpe mempool_free(iod->sgt.sgl, dev->iod_mempool); 56457dacad5SJay Sternberg } 56557dacad5SJay Sternberg 566d0877473SKeith Busch static void nvme_print_sgl(struct scatterlist *sgl, int nents) 567d0877473SKeith Busch { 568d0877473SKeith Busch int i; 569d0877473SKeith Busch struct scatterlist *sg; 570d0877473SKeith Busch 571d0877473SKeith Busch for_each_sg(sgl, sg, nents, i) { 572d0877473SKeith Busch dma_addr_t phys = sg_phys(sg); 573d0877473SKeith Busch pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d " 574d0877473SKeith Busch "dma_address:%pad dma_length:%d\n", 575d0877473SKeith Busch i, &phys, sg->offset, sg->length, &sg_dma_address(sg), 576d0877473SKeith Busch sg_dma_len(sg)); 577d0877473SKeith Busch } 578d0877473SKeith Busch } 579d0877473SKeith Busch 580a7a7cbe3SChaitanya Kulkarni static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, 581a7a7cbe3SChaitanya Kulkarni struct request *req, struct nvme_rw_command *cmnd) 58257dacad5SJay Sternberg { 583f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 58457dacad5SJay Sternberg struct dma_pool *pool; 585b131c61dSChristoph Hellwig int length = blk_rq_payload_bytes(req); 58691fb2b60SLogan Gunthorpe struct scatterlist *sg = iod->sgt.sgl; 58757dacad5SJay Sternberg int dma_len = sg_dma_len(sg); 58857dacad5SJay Sternberg u64 dma_addr = sg_dma_address(sg); 5896c3c05b0SChaitanya Kulkarni int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1); 59057dacad5SJay Sternberg __le64 *prp_list; 59157dacad5SJay Sternberg dma_addr_t prp_dma; 59257dacad5SJay Sternberg int nprps, i; 59357dacad5SJay Sternberg 5946c3c05b0SChaitanya Kulkarni length -= (NVME_CTRL_PAGE_SIZE - offset); 5955228b328SJan H. Schönherr if (length <= 0) { 5965228b328SJan H. 
Schönherr iod->first_dma = 0; 597a7a7cbe3SChaitanya Kulkarni goto done; 5985228b328SJan H. Schönherr } 59957dacad5SJay Sternberg 6006c3c05b0SChaitanya Kulkarni dma_len -= (NVME_CTRL_PAGE_SIZE - offset); 60157dacad5SJay Sternberg if (dma_len) { 6026c3c05b0SChaitanya Kulkarni dma_addr += (NVME_CTRL_PAGE_SIZE - offset); 60357dacad5SJay Sternberg } else { 60457dacad5SJay Sternberg sg = sg_next(sg); 60557dacad5SJay Sternberg dma_addr = sg_dma_address(sg); 60657dacad5SJay Sternberg dma_len = sg_dma_len(sg); 60757dacad5SJay Sternberg } 60857dacad5SJay Sternberg 6096c3c05b0SChaitanya Kulkarni if (length <= NVME_CTRL_PAGE_SIZE) { 61057dacad5SJay Sternberg iod->first_dma = dma_addr; 611a7a7cbe3SChaitanya Kulkarni goto done; 61257dacad5SJay Sternberg } 61357dacad5SJay Sternberg 6146c3c05b0SChaitanya Kulkarni nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE); 61557dacad5SJay Sternberg if (nprps <= (256 / 8)) { 61657dacad5SJay Sternberg pool = dev->prp_small_pool; 617c372cdd1SKeith Busch iod->nr_allocations = 0; 61857dacad5SJay Sternberg } else { 61957dacad5SJay Sternberg pool = dev->prp_page_pool; 620c372cdd1SKeith Busch iod->nr_allocations = 1; 62157dacad5SJay Sternberg } 62257dacad5SJay Sternberg 62369d2b571SChristoph Hellwig prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); 62457dacad5SJay Sternberg if (!prp_list) { 625c372cdd1SKeith Busch iod->nr_allocations = -1; 62686eea289SKeith Busch return BLK_STS_RESOURCE; 62757dacad5SJay Sternberg } 6287846c1b5SKeith Busch iod->list[0].prp_list = prp_list; 62957dacad5SJay Sternberg iod->first_dma = prp_dma; 63057dacad5SJay Sternberg i = 0; 63157dacad5SJay Sternberg for (;;) { 6326c3c05b0SChaitanya Kulkarni if (i == NVME_CTRL_PAGE_SIZE >> 3) { 63357dacad5SJay Sternberg __le64 *old_prp_list = prp_list; 63469d2b571SChristoph Hellwig prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); 63557dacad5SJay Sternberg if (!prp_list) 636fa073216SChristoph Hellwig goto free_prps; 6377846c1b5SKeith Busch iod->list[iod->nr_allocations++].prp_list = prp_list; 63857dacad5SJay Sternberg prp_list[0] = old_prp_list[i - 1]; 63957dacad5SJay Sternberg old_prp_list[i - 1] = cpu_to_le64(prp_dma); 64057dacad5SJay Sternberg i = 1; 64157dacad5SJay Sternberg } 64257dacad5SJay Sternberg prp_list[i++] = cpu_to_le64(dma_addr); 6436c3c05b0SChaitanya Kulkarni dma_len -= NVME_CTRL_PAGE_SIZE; 6446c3c05b0SChaitanya Kulkarni dma_addr += NVME_CTRL_PAGE_SIZE; 6456c3c05b0SChaitanya Kulkarni length -= NVME_CTRL_PAGE_SIZE; 64657dacad5SJay Sternberg if (length <= 0) 64757dacad5SJay Sternberg break; 64857dacad5SJay Sternberg if (dma_len > 0) 64957dacad5SJay Sternberg continue; 65086eea289SKeith Busch if (unlikely(dma_len < 0)) 65186eea289SKeith Busch goto bad_sgl; 65257dacad5SJay Sternberg sg = sg_next(sg); 65357dacad5SJay Sternberg dma_addr = sg_dma_address(sg); 65457dacad5SJay Sternberg dma_len = sg_dma_len(sg); 65557dacad5SJay Sternberg } 656a7a7cbe3SChaitanya Kulkarni done: 65791fb2b60SLogan Gunthorpe cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl)); 658a7a7cbe3SChaitanya Kulkarni cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); 65986eea289SKeith Busch return BLK_STS_OK; 660fa073216SChristoph Hellwig free_prps: 661fa073216SChristoph Hellwig nvme_free_prps(dev, req); 662fa073216SChristoph Hellwig return BLK_STS_RESOURCE; 66386eea289SKeith Busch bad_sgl: 66491fb2b60SLogan Gunthorpe WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents), 665d0877473SKeith Busch "Invalid SGL for payload:%d nents:%d\n", 66691fb2b60SLogan Gunthorpe blk_rq_payload_bytes(req), iod->sgt.nents); 
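	/*
	 * A negative residual dma_len means the mapped scatterlist does not
	 * add up to the request payload; fail the command rather than build
	 * a bogus PRP list from it.
	 */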
66786eea289SKeith Busch return BLK_STS_IOERR; 66857dacad5SJay Sternberg } 66957dacad5SJay Sternberg 670a7a7cbe3SChaitanya Kulkarni static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge, 671a7a7cbe3SChaitanya Kulkarni struct scatterlist *sg) 672a7a7cbe3SChaitanya Kulkarni { 673a7a7cbe3SChaitanya Kulkarni sge->addr = cpu_to_le64(sg_dma_address(sg)); 674a7a7cbe3SChaitanya Kulkarni sge->length = cpu_to_le32(sg_dma_len(sg)); 675a7a7cbe3SChaitanya Kulkarni sge->type = NVME_SGL_FMT_DATA_DESC << 4; 676a7a7cbe3SChaitanya Kulkarni } 677a7a7cbe3SChaitanya Kulkarni 678a7a7cbe3SChaitanya Kulkarni static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge, 679a7a7cbe3SChaitanya Kulkarni dma_addr_t dma_addr, int entries) 680a7a7cbe3SChaitanya Kulkarni { 681a7a7cbe3SChaitanya Kulkarni sge->addr = cpu_to_le64(dma_addr); 682a7a7cbe3SChaitanya Kulkarni sge->length = cpu_to_le32(entries * sizeof(*sge)); 683a7a7cbe3SChaitanya Kulkarni sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4; 684a7a7cbe3SChaitanya Kulkarni } 685a7a7cbe3SChaitanya Kulkarni 686a7a7cbe3SChaitanya Kulkarni static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, 68791fb2b60SLogan Gunthorpe struct request *req, struct nvme_rw_command *cmd) 688a7a7cbe3SChaitanya Kulkarni { 689a7a7cbe3SChaitanya Kulkarni struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 690a7a7cbe3SChaitanya Kulkarni struct dma_pool *pool; 691a7a7cbe3SChaitanya Kulkarni struct nvme_sgl_desc *sg_list; 69291fb2b60SLogan Gunthorpe struct scatterlist *sg = iod->sgt.sgl; 69391fb2b60SLogan Gunthorpe unsigned int entries = iod->sgt.nents; 694a7a7cbe3SChaitanya Kulkarni dma_addr_t sgl_dma; 695b0f2853bSChristoph Hellwig int i = 0; 696a7a7cbe3SChaitanya Kulkarni 697a7a7cbe3SChaitanya Kulkarni /* setting the transfer type as SGL */ 698a7a7cbe3SChaitanya Kulkarni cmd->flags = NVME_CMD_SGL_METABUF; 699a7a7cbe3SChaitanya Kulkarni 700b0f2853bSChristoph Hellwig if (entries == 1) { 701a7a7cbe3SChaitanya Kulkarni nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg); 702a7a7cbe3SChaitanya Kulkarni return BLK_STS_OK; 703a7a7cbe3SChaitanya Kulkarni } 704a7a7cbe3SChaitanya Kulkarni 705a7a7cbe3SChaitanya Kulkarni if (entries <= (256 / sizeof(struct nvme_sgl_desc))) { 706a7a7cbe3SChaitanya Kulkarni pool = dev->prp_small_pool; 707c372cdd1SKeith Busch iod->nr_allocations = 0; 708a7a7cbe3SChaitanya Kulkarni } else { 709a7a7cbe3SChaitanya Kulkarni pool = dev->prp_page_pool; 710c372cdd1SKeith Busch iod->nr_allocations = 1; 711a7a7cbe3SChaitanya Kulkarni } 712a7a7cbe3SChaitanya Kulkarni 713a7a7cbe3SChaitanya Kulkarni sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); 714a7a7cbe3SChaitanya Kulkarni if (!sg_list) { 715c372cdd1SKeith Busch iod->nr_allocations = -1; 716a7a7cbe3SChaitanya Kulkarni return BLK_STS_RESOURCE; 717a7a7cbe3SChaitanya Kulkarni } 718a7a7cbe3SChaitanya Kulkarni 7197846c1b5SKeith Busch iod->list[0].sg_list = sg_list; 720a7a7cbe3SChaitanya Kulkarni iod->first_dma = sgl_dma; 721a7a7cbe3SChaitanya Kulkarni 722a7a7cbe3SChaitanya Kulkarni nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries); 723a7a7cbe3SChaitanya Kulkarni do { 724a7a7cbe3SChaitanya Kulkarni nvme_pci_sgl_set_data(&sg_list[i++], sg); 725a7a7cbe3SChaitanya Kulkarni sg = sg_next(sg); 726b0f2853bSChristoph Hellwig } while (--entries > 0); 727a7a7cbe3SChaitanya Kulkarni 728a7a7cbe3SChaitanya Kulkarni return BLK_STS_OK; 729a7a7cbe3SChaitanya Kulkarni } 730a7a7cbe3SChaitanya Kulkarni 731dff824b2SChristoph Hellwig static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev, 732dff824b2SChristoph Hellwig struct request *req, struct 
nvme_rw_command *cmnd, 733dff824b2SChristoph Hellwig struct bio_vec *bv) 734dff824b2SChristoph Hellwig { 735dff824b2SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 7366c3c05b0SChaitanya Kulkarni unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1); 7376c3c05b0SChaitanya Kulkarni unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset; 738dff824b2SChristoph Hellwig 739dff824b2SChristoph Hellwig iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); 740dff824b2SChristoph Hellwig if (dma_mapping_error(dev->dev, iod->first_dma)) 741dff824b2SChristoph Hellwig return BLK_STS_RESOURCE; 742dff824b2SChristoph Hellwig iod->dma_len = bv->bv_len; 743dff824b2SChristoph Hellwig 744dff824b2SChristoph Hellwig cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); 745dff824b2SChristoph Hellwig if (bv->bv_len > first_prp_len) 746dff824b2SChristoph Hellwig cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); 747a56ea614SLei Rao else 748a56ea614SLei Rao cmnd->dptr.prp2 = 0; 749359c1f88SBaolin Wang return BLK_STS_OK; 750dff824b2SChristoph Hellwig } 751dff824b2SChristoph Hellwig 75229791057SChristoph Hellwig static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev, 75329791057SChristoph Hellwig struct request *req, struct nvme_rw_command *cmnd, 75429791057SChristoph Hellwig struct bio_vec *bv) 75529791057SChristoph Hellwig { 75629791057SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 75729791057SChristoph Hellwig 75829791057SChristoph Hellwig iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); 75929791057SChristoph Hellwig if (dma_mapping_error(dev->dev, iod->first_dma)) 76029791057SChristoph Hellwig return BLK_STS_RESOURCE; 76129791057SChristoph Hellwig iod->dma_len = bv->bv_len; 76229791057SChristoph Hellwig 763049bf372SKlaus Birkelund Jensen cmnd->flags = NVME_CMD_SGL_METABUF; 76429791057SChristoph Hellwig cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma); 76529791057SChristoph Hellwig cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len); 76629791057SChristoph Hellwig cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4; 767359c1f88SBaolin Wang return BLK_STS_OK; 76829791057SChristoph Hellwig } 76929791057SChristoph Hellwig 770fc17b653SChristoph Hellwig static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, 771b131c61dSChristoph Hellwig struct nvme_command *cmnd) 77257dacad5SJay Sternberg { 773f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 77470479b71SChristoph Hellwig blk_status_t ret = BLK_STS_RESOURCE; 77591fb2b60SLogan Gunthorpe int rc; 77657dacad5SJay Sternberg 777dff824b2SChristoph Hellwig if (blk_rq_nr_phys_segments(req) == 1) { 778a53232cbSKeith Busch struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 779dff824b2SChristoph Hellwig struct bio_vec bv = req_bvec(req); 780dff824b2SChristoph Hellwig 781dff824b2SChristoph Hellwig if (!is_pci_p2pdma_page(bv.bv_page)) { 7826c3c05b0SChaitanya Kulkarni if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2) 783dff824b2SChristoph Hellwig return nvme_setup_prp_simple(dev, req, 784dff824b2SChristoph Hellwig &cmnd->rw, &bv); 78529791057SChristoph Hellwig 786a53232cbSKeith Busch if (nvmeq->qid && sgl_threshold && 787253a0b76SChaitanya Kulkarni nvme_ctrl_sgl_supported(&dev->ctrl)) 78829791057SChristoph Hellwig return nvme_setup_sgl_simple(dev, req, 78929791057SChristoph Hellwig &cmnd->rw, &bv); 790dff824b2SChristoph Hellwig } 791dff824b2SChristoph Hellwig } 792dff824b2SChristoph Hellwig 793dff824b2SChristoph Hellwig iod->dma_len = 0; 
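	/*
	 * Fall back to the general case: map the whole scatterlist and
	 * describe it with either an SGL or a PRP list below.
	 */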
79491fb2b60SLogan Gunthorpe iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC); 79591fb2b60SLogan Gunthorpe if (!iod->sgt.sgl) 7969b048119SChristoph Hellwig return BLK_STS_RESOURCE; 79791fb2b60SLogan Gunthorpe sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req)); 79891fb2b60SLogan Gunthorpe iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl); 79991fb2b60SLogan Gunthorpe if (!iod->sgt.orig_nents) 800fa073216SChristoph Hellwig goto out_free_sg; 801ba1ca37eSChristoph Hellwig 80291fb2b60SLogan Gunthorpe rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 80391fb2b60SLogan Gunthorpe DMA_ATTR_NO_WARN); 80491fb2b60SLogan Gunthorpe if (rc) { 80591fb2b60SLogan Gunthorpe if (rc == -EREMOTEIO) 80691fb2b60SLogan Gunthorpe ret = BLK_STS_TARGET; 807fa073216SChristoph Hellwig goto out_free_sg; 80891fb2b60SLogan Gunthorpe } 809ba1ca37eSChristoph Hellwig 810*b6c0c237SKeith Busch if (nvme_pci_use_sgls(dev, req, iod->sgt.nents)) 81191fb2b60SLogan Gunthorpe ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw); 812a7a7cbe3SChaitanya Kulkarni else 813a7a7cbe3SChaitanya Kulkarni ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); 8144aedb705SChristoph Hellwig if (ret != BLK_STS_OK) 815fa073216SChristoph Hellwig goto out_unmap_sg; 816fa073216SChristoph Hellwig return BLK_STS_OK; 817fa073216SChristoph Hellwig 818fa073216SChristoph Hellwig out_unmap_sg: 81991fb2b60SLogan Gunthorpe dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0); 820fa073216SChristoph Hellwig out_free_sg: 82191fb2b60SLogan Gunthorpe mempool_free(iod->sgt.sgl, dev->iod_mempool); 822ba1ca37eSChristoph Hellwig return ret; 82357dacad5SJay Sternberg } 82457dacad5SJay Sternberg 8254aedb705SChristoph Hellwig static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req, 8264aedb705SChristoph Hellwig struct nvme_command *cmnd) 8274aedb705SChristoph Hellwig { 8284aedb705SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 8294aedb705SChristoph Hellwig 8304aedb705SChristoph Hellwig iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), 8314aedb705SChristoph Hellwig rq_dma_dir(req), 0); 8324aedb705SChristoph Hellwig if (dma_mapping_error(dev->dev, iod->meta_dma)) 8334aedb705SChristoph Hellwig return BLK_STS_IOERR; 8344aedb705SChristoph Hellwig cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); 835359c1f88SBaolin Wang return BLK_STS_OK; 8364aedb705SChristoph Hellwig } 8374aedb705SChristoph Hellwig 83862451a2bSJens Axboe static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req) 83962451a2bSJens Axboe { 84062451a2bSJens Axboe struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 84162451a2bSJens Axboe blk_status_t ret; 84262451a2bSJens Axboe 84352da4f3fSKeith Busch iod->aborted = false; 844c372cdd1SKeith Busch iod->nr_allocations = -1; 84591fb2b60SLogan Gunthorpe iod->sgt.nents = 0; 84662451a2bSJens Axboe 84762451a2bSJens Axboe ret = nvme_setup_cmd(req->q->queuedata, req); 84862451a2bSJens Axboe if (ret) 84962451a2bSJens Axboe return ret; 85062451a2bSJens Axboe 85162451a2bSJens Axboe if (blk_rq_nr_phys_segments(req)) { 85262451a2bSJens Axboe ret = nvme_map_data(dev, req, &iod->cmd); 85362451a2bSJens Axboe if (ret) 85462451a2bSJens Axboe goto out_free_cmd; 85562451a2bSJens Axboe } 85662451a2bSJens Axboe 85762451a2bSJens Axboe if (blk_integrity_rq(req)) { 85862451a2bSJens Axboe ret = nvme_map_metadata(dev, req, &iod->cmd); 85962451a2bSJens Axboe if (ret) 86062451a2bSJens Axboe goto out_unmap_data; 86162451a2bSJens Axboe } 86262451a2bSJens Axboe 8636887fc64SSagi Grimberg 
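	/* Data and metadata are mapped; mark the request as in flight. */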
nvme_start_request(req); 86462451a2bSJens Axboe return BLK_STS_OK; 86562451a2bSJens Axboe out_unmap_data: 86662451a2bSJens Axboe nvme_unmap_data(dev, req); 86762451a2bSJens Axboe out_free_cmd: 86862451a2bSJens Axboe nvme_cleanup_cmd(req); 86962451a2bSJens Axboe return ret; 87062451a2bSJens Axboe } 87162451a2bSJens Axboe 87257dacad5SJay Sternberg /* 87357dacad5SJay Sternberg * NOTE: ns is NULL when called on the admin queue. 87457dacad5SJay Sternberg */ 875fc17b653SChristoph Hellwig static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, 87657dacad5SJay Sternberg const struct blk_mq_queue_data *bd) 87757dacad5SJay Sternberg { 87857dacad5SJay Sternberg struct nvme_queue *nvmeq = hctx->driver_data; 87957dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 88057dacad5SJay Sternberg struct request *req = bd->rq; 8819b048119SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 882ebe6d874SChristoph Hellwig blk_status_t ret; 88357dacad5SJay Sternberg 884d1f06f4aSJens Axboe /* 885d1f06f4aSJens Axboe * We should not need to do this, but we're still using this to 886d1f06f4aSJens Axboe * ensure we can drain requests on a dying queue. 887d1f06f4aSJens Axboe */ 8884e224106SChristoph Hellwig if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) 889d1f06f4aSJens Axboe return BLK_STS_IOERR; 890d1f06f4aSJens Axboe 89162451a2bSJens Axboe if (unlikely(!nvme_check_ready(&dev->ctrl, req, true))) 892d4060d2bSTao Chiu return nvme_fail_nonready_command(&dev->ctrl, req); 893d4060d2bSTao Chiu 89462451a2bSJens Axboe ret = nvme_prep_rq(dev, req); 89562451a2bSJens Axboe if (unlikely(ret)) 896f4800d6dSChristoph Hellwig return ret; 8973233b94cSJens Axboe spin_lock(&nvmeq->sq_lock); 8983233b94cSJens Axboe nvme_sq_copy_cmd(nvmeq, &iod->cmd); 8993233b94cSJens Axboe nvme_write_sq_db(nvmeq, bd->last); 9003233b94cSJens Axboe spin_unlock(&nvmeq->sq_lock); 901fc17b653SChristoph Hellwig return BLK_STS_OK; 90257dacad5SJay Sternberg } 90357dacad5SJay Sternberg 904d62cbcf6SJens Axboe static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist) 905d62cbcf6SJens Axboe { 906d62cbcf6SJens Axboe spin_lock(&nvmeq->sq_lock); 907d62cbcf6SJens Axboe while (!rq_list_empty(*rqlist)) { 908d62cbcf6SJens Axboe struct request *req = rq_list_pop(rqlist); 909d62cbcf6SJens Axboe struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 910d62cbcf6SJens Axboe 911d62cbcf6SJens Axboe nvme_sq_copy_cmd(nvmeq, &iod->cmd); 912d62cbcf6SJens Axboe } 913d62cbcf6SJens Axboe nvme_write_sq_db(nvmeq, true); 914d62cbcf6SJens Axboe spin_unlock(&nvmeq->sq_lock); 915d62cbcf6SJens Axboe } 916d62cbcf6SJens Axboe 917d62cbcf6SJens Axboe static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req) 918d62cbcf6SJens Axboe { 919d62cbcf6SJens Axboe /* 920d62cbcf6SJens Axboe * We should not need to do this, but we're still using this to 921d62cbcf6SJens Axboe * ensure we can drain requests on a dying queue. 
922d62cbcf6SJens Axboe */ 923d62cbcf6SJens Axboe if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) 924d62cbcf6SJens Axboe return false; 925d62cbcf6SJens Axboe if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true))) 926d62cbcf6SJens Axboe return false; 927d62cbcf6SJens Axboe 928d62cbcf6SJens Axboe req->mq_hctx->tags->rqs[req->tag] = req; 929d62cbcf6SJens Axboe return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK; 930d62cbcf6SJens Axboe } 931d62cbcf6SJens Axboe 932d62cbcf6SJens Axboe static void nvme_queue_rqs(struct request **rqlist) 933d62cbcf6SJens Axboe { 9346bfec799SKeith Busch struct request *req, *next, *prev = NULL; 935d62cbcf6SJens Axboe struct request *requeue_list = NULL; 936d62cbcf6SJens Axboe 9376bfec799SKeith Busch rq_list_for_each_safe(rqlist, req, next) { 938d62cbcf6SJens Axboe struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 939d62cbcf6SJens Axboe 940d62cbcf6SJens Axboe if (!nvme_prep_rq_batch(nvmeq, req)) { 941d62cbcf6SJens Axboe /* detach 'req' and add to remainder list */ 9426bfec799SKeith Busch rq_list_move(rqlist, &requeue_list, req, prev); 9436bfec799SKeith Busch 9446bfec799SKeith Busch req = prev; 9456bfec799SKeith Busch if (!req) 9466bfec799SKeith Busch continue; 947d62cbcf6SJens Axboe } 948d62cbcf6SJens Axboe 9496bfec799SKeith Busch if (!next || req->mq_hctx != next->mq_hctx) { 950d62cbcf6SJens Axboe /* detach rest of list, and submit */ 9516bfec799SKeith Busch req->rq_next = NULL; 952d62cbcf6SJens Axboe nvme_submit_cmds(nvmeq, rqlist); 9536bfec799SKeith Busch *rqlist = next; 9546bfec799SKeith Busch prev = NULL; 9556bfec799SKeith Busch } else 9566bfec799SKeith Busch prev = req; 957d62cbcf6SJens Axboe } 958d62cbcf6SJens Axboe 959d62cbcf6SJens Axboe *rqlist = requeue_list; 960d62cbcf6SJens Axboe } 961d62cbcf6SJens Axboe 962c234a653SJens Axboe static __always_inline void nvme_pci_unmap_rq(struct request *req) 963eee417b0SChristoph Hellwig { 964a53232cbSKeith Busch struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 965a53232cbSKeith Busch struct nvme_dev *dev = nvmeq->dev; 966eee417b0SChristoph Hellwig 967a53232cbSKeith Busch if (blk_integrity_rq(req)) { 968a53232cbSKeith Busch struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 969a53232cbSKeith Busch 9704aedb705SChristoph Hellwig dma_unmap_page(dev->dev, iod->meta_dma, 9714aedb705SChristoph Hellwig rq_integrity_vec(req)->bv_len, rq_data_dir(req)); 972a53232cbSKeith Busch } 973a53232cbSKeith Busch 974b15c592dSChristoph Hellwig if (blk_rq_nr_phys_segments(req)) 9754aedb705SChristoph Hellwig nvme_unmap_data(dev, req); 976c234a653SJens Axboe } 977c234a653SJens Axboe 978c234a653SJens Axboe static void nvme_pci_complete_rq(struct request *req) 979c234a653SJens Axboe { 980c234a653SJens Axboe nvme_pci_unmap_rq(req); 98177f02a7aSChristoph Hellwig nvme_complete_rq(req); 98257dacad5SJay Sternberg } 98357dacad5SJay Sternberg 984c234a653SJens Axboe static void nvme_pci_complete_batch(struct io_comp_batch *iob) 985c234a653SJens Axboe { 986c234a653SJens Axboe nvme_complete_batch(iob, nvme_pci_unmap_rq); 987c234a653SJens Axboe } 988c234a653SJens Axboe 989d783e0bdSMarta Rybczynska /* We read the CQE phase first to check if the rest of the entry is valid */ 990750dde44SChristoph Hellwig static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq) 991d783e0bdSMarta Rybczynska { 99274943d45SKeith Busch struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head]; 99374943d45SKeith Busch 99474943d45SKeith Busch return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase; 995d783e0bdSMarta Rybczynska } 
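/*
 * The phase tag is what distinguishes new completions from stale ones: the
 * controller posts CQEs with the current phase, and the driver flips
 * cq_phase each time cq_head wraps (see nvme_update_cq_head() below), so
 * entries left over from the previous pass through the ring no longer match.
 */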
996d783e0bdSMarta Rybczynska 997eb281c82SSagi Grimberg static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq) 99857dacad5SJay Sternberg { 999eb281c82SSagi Grimberg u16 head = nvmeq->cq_head; 100057dacad5SJay Sternberg 1001eb281c82SSagi Grimberg if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, 1002eb281c82SSagi Grimberg nvmeq->dbbuf_cq_ei)) 1003eb281c82SSagi Grimberg writel(head, nvmeq->q_db + nvmeq->dev->db_stride); 1004eb281c82SSagi Grimberg } 1005adf68f21SChristoph Hellwig 1006cfa27356SChristoph Hellwig static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq) 1007cfa27356SChristoph Hellwig { 1008cfa27356SChristoph Hellwig if (!nvmeq->qid) 1009cfa27356SChristoph Hellwig return nvmeq->dev->admin_tagset.tags[0]; 1010cfa27356SChristoph Hellwig return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; 1011cfa27356SChristoph Hellwig } 1012cfa27356SChristoph Hellwig 1013c234a653SJens Axboe static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, 1014c234a653SJens Axboe struct io_comp_batch *iob, u16 idx) 101557dacad5SJay Sternberg { 101674943d45SKeith Busch struct nvme_completion *cqe = &nvmeq->cqes[idx]; 101762df8016SLalithambika Krishnakumar __u16 command_id = READ_ONCE(cqe->command_id); 101857dacad5SJay Sternberg struct request *req; 1019adf68f21SChristoph Hellwig 1020adf68f21SChristoph Hellwig /* 1021adf68f21SChristoph Hellwig * AEN requests are special as they don't time out and can 1022adf68f21SChristoph Hellwig * survive any kind of queue freeze and often don't respond to 1023adf68f21SChristoph Hellwig * aborts. We don't even bother to allocate a struct request 1024adf68f21SChristoph Hellwig * for them but rather special case them here. 1025adf68f21SChristoph Hellwig */ 102662df8016SLalithambika Krishnakumar if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) { 10277bf58533SChristoph Hellwig nvme_complete_async_event(&nvmeq->dev->ctrl, 102883a12fb7SSagi Grimberg cqe->status, &cqe->result); 1029a0fa9647SJens Axboe return; 103057dacad5SJay Sternberg } 103157dacad5SJay Sternberg 1032e7006de6SSagi Grimberg req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id); 103350b7c243SXianting Tian if (unlikely(!req)) { 103450b7c243SXianting Tian dev_warn(nvmeq->dev->ctrl.device, 103550b7c243SXianting Tian "invalid id %d completed on queue %d\n", 103662df8016SLalithambika Krishnakumar command_id, le16_to_cpu(cqe->sq_id)); 103750b7c243SXianting Tian return; 103850b7c243SXianting Tian } 103950b7c243SXianting Tian 1040604c01d5Syupeng trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); 1041c234a653SJens Axboe if (!nvme_try_complete_req(req, cqe->status, cqe->result) && 1042c234a653SJens Axboe !blk_mq_add_to_batch(req, iob, nvme_req(req)->status, 1043c234a653SJens Axboe nvme_pci_complete_batch)) 1044ff029451SChristoph Hellwig nvme_pci_complete_rq(req); 104583a12fb7SSagi Grimberg } 104657dacad5SJay Sternberg 10475cb525c8SJens Axboe static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) 10485cb525c8SJens Axboe { 1049a0aac973SJK Kim u32 tmp = nvmeq->cq_head + 1; 1050a8de6639SAlexey Dobriyan 1051a8de6639SAlexey Dobriyan if (tmp == nvmeq->q_depth) { 1052920d13a8SSagi Grimberg nvmeq->cq_head = 0; 1053e2a366a4SAlexey Dobriyan nvmeq->cq_phase ^= 1; 1054a8de6639SAlexey Dobriyan } else { 1055a8de6639SAlexey Dobriyan nvmeq->cq_head = tmp; 1056920d13a8SSagi Grimberg } 1057a0fa9647SJens Axboe } 1058a0fa9647SJens Axboe 1059c234a653SJens Axboe static inline int nvme_poll_cq(struct nvme_queue *nvmeq, 1060c234a653SJens Axboe struct io_comp_batch *iob) 1061a0fa9647SJens 
Axboe { 10621052b8acSJens Axboe int found = 0; 106383a12fb7SSagi Grimberg 10641052b8acSJens Axboe while (nvme_cqe_pending(nvmeq)) { 10651052b8acSJens Axboe found++; 1066b69e2ef2SKeith Busch /* 1067b69e2ef2SKeith Busch * load-load control dependency between phase and the rest of 1068b69e2ef2SKeith Busch * the cqe requires a full read memory barrier 1069b69e2ef2SKeith Busch */ 1070b69e2ef2SKeith Busch dma_rmb(); 1071c234a653SJens Axboe nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head); 10725cb525c8SJens Axboe nvme_update_cq_head(nvmeq); 107357dacad5SJay Sternberg } 107457dacad5SJay Sternberg 1075324b494cSKeith Busch if (found) 1076eb281c82SSagi Grimberg nvme_ring_cq_doorbell(nvmeq); 10775cb525c8SJens Axboe return found; 107857dacad5SJay Sternberg } 107957dacad5SJay Sternberg 108057dacad5SJay Sternberg static irqreturn_t nvme_irq(int irq, void *data) 108157dacad5SJay Sternberg { 108257dacad5SJay Sternberg struct nvme_queue *nvmeq = data; 10834f502245SJens Axboe DEFINE_IO_COMP_BATCH(iob); 10845cb525c8SJens Axboe 10854f502245SJens Axboe if (nvme_poll_cq(nvmeq, &iob)) { 10864f502245SJens Axboe if (!rq_list_empty(iob.req_list)) 10874f502245SJens Axboe nvme_pci_complete_batch(&iob); 108805fae499SChaitanya Kulkarni return IRQ_HANDLED; 10894f502245SJens Axboe } 109005fae499SChaitanya Kulkarni return IRQ_NONE; 109157dacad5SJay Sternberg } 109257dacad5SJay Sternberg 109357dacad5SJay Sternberg static irqreturn_t nvme_irq_check(int irq, void *data) 109457dacad5SJay Sternberg { 109557dacad5SJay Sternberg struct nvme_queue *nvmeq = data; 10964e523547SBaolin Wang 1097750dde44SChristoph Hellwig if (nvme_cqe_pending(nvmeq)) 109857dacad5SJay Sternberg return IRQ_WAKE_THREAD; 1099d783e0bdSMarta Rybczynska return IRQ_NONE; 110057dacad5SJay Sternberg } 110157dacad5SJay Sternberg 11020b2a8a9fSChristoph Hellwig /* 1103fa059b85SKeith Busch * Poll for completions for any interrupt driven queue 11040b2a8a9fSChristoph Hellwig * Can be called from any context. 
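 * The queue's IRQ is disabled while the CQ is drained, which serializes
 * against nvme_irq() without taking a lock; polled queues have no IRQ
 * vector, hence the WARN_ON_ONCE below.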
11050b2a8a9fSChristoph Hellwig */ 1106fa059b85SKeith Busch static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) 1107a0fa9647SJens Axboe { 11083a7afd8eSChristoph Hellwig struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 1109a0fa9647SJens Axboe 1110fa059b85SKeith Busch WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); 1111fa059b85SKeith Busch 11123a7afd8eSChristoph Hellwig disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); 1113c234a653SJens Axboe nvme_poll_cq(nvmeq, NULL); 11143a7afd8eSChristoph Hellwig enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); 111591a509f8SChristoph Hellwig } 1116442e19b7SSagi Grimberg 11175a72e899SJens Axboe static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) 11187776db1cSKeith Busch { 11197776db1cSKeith Busch struct nvme_queue *nvmeq = hctx->driver_data; 1120dabcefabSJens Axboe bool found; 1121dabcefabSJens Axboe 1122dabcefabSJens Axboe if (!nvme_cqe_pending(nvmeq)) 1123dabcefabSJens Axboe return 0; 1124dabcefabSJens Axboe 11253a7afd8eSChristoph Hellwig spin_lock(&nvmeq->cq_poll_lock); 1126c234a653SJens Axboe found = nvme_poll_cq(nvmeq, iob); 11273a7afd8eSChristoph Hellwig spin_unlock(&nvmeq->cq_poll_lock); 1128dabcefabSJens Axboe 1129dabcefabSJens Axboe return found; 1130dabcefabSJens Axboe } 1131dabcefabSJens Axboe 1132ad22c355SKeith Busch static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) 113357dacad5SJay Sternberg { 1134f866fc42SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 1135147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[0]; 1136f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 113757dacad5SJay Sternberg 113857dacad5SJay Sternberg c.common.opcode = nvme_admin_async_event; 1139ad22c355SKeith Busch c.common.command_id = NVME_AQ_BLK_MQ_DEPTH; 11403233b94cSJens Axboe 11413233b94cSJens Axboe spin_lock(&nvmeq->sq_lock); 11423233b94cSJens Axboe nvme_sq_copy_cmd(nvmeq, &c); 11433233b94cSJens Axboe nvme_write_sq_db(nvmeq, true); 11443233b94cSJens Axboe spin_unlock(&nvmeq->sq_lock); 114557dacad5SJay Sternberg } 114657dacad5SJay Sternberg 114757dacad5SJay Sternberg static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) 114857dacad5SJay Sternberg { 1149f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 115057dacad5SJay Sternberg 115157dacad5SJay Sternberg c.delete_queue.opcode = opcode; 115257dacad5SJay Sternberg c.delete_queue.qid = cpu_to_le16(id); 115357dacad5SJay Sternberg 11541c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 115557dacad5SJay Sternberg } 115657dacad5SJay Sternberg 115757dacad5SJay Sternberg static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, 1158a8e3e0bbSJianchao Wang struct nvme_queue *nvmeq, s16 vector) 115957dacad5SJay Sternberg { 1160f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 11614b04cc6aSJens Axboe int flags = NVME_QUEUE_PHYS_CONTIG; 11624b04cc6aSJens Axboe 11637c349ddeSKeith Busch if (!test_bit(NVMEQ_POLLED, &nvmeq->flags)) 11644b04cc6aSJens Axboe flags |= NVME_CQ_IRQ_ENABLED; 116557dacad5SJay Sternberg 116657dacad5SJay Sternberg /* 116716772ae6SMinwoo Im * Note: we (ab)use the fact that the prp fields survive if no data 116857dacad5SJay Sternberg * is attached to the request. 
116957dacad5SJay Sternberg */ 117057dacad5SJay Sternberg c.create_cq.opcode = nvme_admin_create_cq; 117157dacad5SJay Sternberg c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); 117257dacad5SJay Sternberg c.create_cq.cqid = cpu_to_le16(qid); 117357dacad5SJay Sternberg c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 117457dacad5SJay Sternberg c.create_cq.cq_flags = cpu_to_le16(flags); 1175a8e3e0bbSJianchao Wang c.create_cq.irq_vector = cpu_to_le16(vector); 117657dacad5SJay Sternberg 11771c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 117857dacad5SJay Sternberg } 117957dacad5SJay Sternberg 118057dacad5SJay Sternberg static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, 118157dacad5SJay Sternberg struct nvme_queue *nvmeq) 118257dacad5SJay Sternberg { 11839abd68efSJens Axboe struct nvme_ctrl *ctrl = &dev->ctrl; 1184f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 118581c1cd98SKeith Busch int flags = NVME_QUEUE_PHYS_CONTIG; 118657dacad5SJay Sternberg 118757dacad5SJay Sternberg /* 11889abd68efSJens Axboe * Some drives have a bug that auto-enables WRRU if MEDIUM isn't 11899abd68efSJens Axboe * set. Since URGENT priority is zeroes, it makes all queues 11909abd68efSJens Axboe * URGENT. 11919abd68efSJens Axboe */ 11929abd68efSJens Axboe if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ) 11939abd68efSJens Axboe flags |= NVME_SQ_PRIO_MEDIUM; 11949abd68efSJens Axboe 11959abd68efSJens Axboe /* 119616772ae6SMinwoo Im * Note: we (ab)use the fact that the prp fields survive if no data 119757dacad5SJay Sternberg * is attached to the request. 119857dacad5SJay Sternberg */ 119957dacad5SJay Sternberg c.create_sq.opcode = nvme_admin_create_sq; 120057dacad5SJay Sternberg c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); 120157dacad5SJay Sternberg c.create_sq.sqid = cpu_to_le16(qid); 120257dacad5SJay Sternberg c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 120357dacad5SJay Sternberg c.create_sq.sq_flags = cpu_to_le16(flags); 120457dacad5SJay Sternberg c.create_sq.cqid = cpu_to_le16(qid); 120557dacad5SJay Sternberg 12061c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 120757dacad5SJay Sternberg } 120857dacad5SJay Sternberg 120957dacad5SJay Sternberg static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) 121057dacad5SJay Sternberg { 121157dacad5SJay Sternberg return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid); 121257dacad5SJay Sternberg } 121357dacad5SJay Sternberg 121457dacad5SJay Sternberg static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) 121557dacad5SJay Sternberg { 121657dacad5SJay Sternberg return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); 121757dacad5SJay Sternberg } 121857dacad5SJay Sternberg 1219de671d61SJens Axboe static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error) 122057dacad5SJay Sternberg { 1221a53232cbSKeith Busch struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 122257dacad5SJay Sternberg 122327fa9bc5SChristoph Hellwig dev_warn(nvmeq->dev->ctrl.device, 122427fa9bc5SChristoph Hellwig "Abort status: 0x%x", nvme_req(req)->status); 1225e7a2a87dSChristoph Hellwig atomic_inc(&nvmeq->dev->ctrl.abort_limit); 1226e7a2a87dSChristoph Hellwig blk_mq_free_request(req); 1227de671d61SJens Axboe return RQ_END_IO_NONE; 122857dacad5SJay Sternberg } 122957dacad5SJay Sternberg 1230b2a0eb1aSKeith Busch static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) 1231b2a0eb1aSKeith Busch { 1232b2a0eb1aSKeith Busch /* If true, indicates loss of adapter 
communication, possibly by a 1233b2a0eb1aSKeith Busch * NVMe Subsystem reset. 1234b2a0eb1aSKeith Busch */ 1235b2a0eb1aSKeith Busch bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); 1236b2a0eb1aSKeith Busch 1237ad70062cSJianchao Wang /* If there is a reset/reinit ongoing, we shouldn't reset again. */ 1238ad70062cSJianchao Wang switch (dev->ctrl.state) { 1239ad70062cSJianchao Wang case NVME_CTRL_RESETTING: 1240ad6a0a52SMax Gurtovoy case NVME_CTRL_CONNECTING: 1241b2a0eb1aSKeith Busch return false; 1242ad70062cSJianchao Wang default: 1243ad70062cSJianchao Wang break; 1244ad70062cSJianchao Wang } 1245b2a0eb1aSKeith Busch 1246b2a0eb1aSKeith Busch /* We shouldn't reset unless the controller is on fatal error state 1247b2a0eb1aSKeith Busch * _or_ if we lost the communication with it. 1248b2a0eb1aSKeith Busch */ 1249b2a0eb1aSKeith Busch if (!(csts & NVME_CSTS_CFS) && !nssro) 1250b2a0eb1aSKeith Busch return false; 1251b2a0eb1aSKeith Busch 1252b2a0eb1aSKeith Busch return true; 1253b2a0eb1aSKeith Busch } 1254b2a0eb1aSKeith Busch 1255b2a0eb1aSKeith Busch static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) 1256b2a0eb1aSKeith Busch { 1257b2a0eb1aSKeith Busch /* Read a config register to help see what died. */ 1258b2a0eb1aSKeith Busch u16 pci_status; 1259b2a0eb1aSKeith Busch int result; 1260b2a0eb1aSKeith Busch 1261b2a0eb1aSKeith Busch result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, 1262b2a0eb1aSKeith Busch &pci_status); 1263b2a0eb1aSKeith Busch if (result == PCIBIOS_SUCCESSFUL) 1264b2a0eb1aSKeith Busch dev_warn(dev->ctrl.device, 1265b2a0eb1aSKeith Busch "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", 1266b2a0eb1aSKeith Busch csts, pci_status); 1267b2a0eb1aSKeith Busch else 1268b2a0eb1aSKeith Busch dev_warn(dev->ctrl.device, 1269b2a0eb1aSKeith Busch "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", 1270b2a0eb1aSKeith Busch csts, result); 12714641a8e6SKeith Busch 12724641a8e6SKeith Busch if (csts != ~0) 12734641a8e6SKeith Busch return; 12744641a8e6SKeith Busch 12754641a8e6SKeith Busch dev_warn(dev->ctrl.device, 12764641a8e6SKeith Busch "Does your device have a faulty power saving mode enabled?\n"); 12774641a8e6SKeith Busch dev_warn(dev->ctrl.device, 12784641a8e6SKeith Busch "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n"); 1279b2a0eb1aSKeith Busch } 1280b2a0eb1aSKeith Busch 12819bdb4833SJohn Garry static enum blk_eh_timer_return nvme_timeout(struct request *req) 128257dacad5SJay Sternberg { 1283f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 1284a53232cbSKeith Busch struct nvme_queue *nvmeq = req->mq_hctx->driver_data; 128557dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 128657dacad5SJay Sternberg struct request *abort_req; 1287f66e2804SChaitanya Kulkarni struct nvme_command cmd = { }; 1288b2a0eb1aSKeith Busch u32 csts = readl(dev->bar + NVME_REG_CSTS); 1289b2a0eb1aSKeith Busch 1290651438bbSWen Xiong /* If PCI error recovery process is happening, we cannot reset or 1291651438bbSWen Xiong * the recovery mechanism will surely fail. 
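 *
 * Returning BLK_EH_RESET_TIMER here just re-arms the request timer, so
 * the command keeps waiting while the PCI error recovery machinery
 * (e.g. AER) runs, instead of fighting it with a controller reset.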
1292651438bbSWen Xiong */ 1293651438bbSWen Xiong mb(); 1294651438bbSWen Xiong if (pci_channel_offline(to_pci_dev(dev->dev))) 1295651438bbSWen Xiong return BLK_EH_RESET_TIMER; 1296651438bbSWen Xiong 1297b2a0eb1aSKeith Busch /* 1298b2a0eb1aSKeith Busch * Reset immediately if the controller is failed 1299b2a0eb1aSKeith Busch */ 1300b2a0eb1aSKeith Busch if (nvme_should_reset(dev, csts)) { 1301b2a0eb1aSKeith Busch nvme_warn_reset(dev, csts); 1302b2a0eb1aSKeith Busch nvme_dev_disable(dev, false); 1303d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 1304db8c48e4SChristoph Hellwig return BLK_EH_DONE; 1305b2a0eb1aSKeith Busch } 130657dacad5SJay Sternberg 130731c7c7d2SChristoph Hellwig /* 13087776db1cSKeith Busch * Did we miss an interrupt? 13097776db1cSKeith Busch */ 1310fa059b85SKeith Busch if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) 13115a72e899SJens Axboe nvme_poll(req->mq_hctx, NULL); 1312fa059b85SKeith Busch else 1313bf392a5dSKeith Busch nvme_poll_irqdisable(nvmeq); 1314fa059b85SKeith Busch 13151c584208SKeith Busch if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) { 13167776db1cSKeith Busch dev_warn(dev->ctrl.device, 13177776db1cSKeith Busch "I/O %d QID %d timeout, completion polled\n", 13187776db1cSKeith Busch req->tag, nvmeq->qid); 1319db8c48e4SChristoph Hellwig return BLK_EH_DONE; 13207776db1cSKeith Busch } 13217776db1cSKeith Busch 13227776db1cSKeith Busch /* 1323fd634f41SChristoph Hellwig * Shutdown immediately if controller times out while starting. The 1324fd634f41SChristoph Hellwig * reset work will see the pci device disabled when it gets the forced 1325fd634f41SChristoph Hellwig * cancellation error. All outstanding requests are completed on 1326db8c48e4SChristoph Hellwig * shutdown, so we return BLK_EH_DONE. 1327fd634f41SChristoph Hellwig */ 13284244140dSKeith Busch switch (dev->ctrl.state) { 13294244140dSKeith Busch case NVME_CTRL_CONNECTING: 13302036f726SKeith Busch nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 1331df561f66SGustavo A. R. Silva fallthrough; 13322036f726SKeith Busch case NVME_CTRL_DELETING: 1333b9cac43cSKeith Busch dev_warn_ratelimited(dev->ctrl.device, 1334fd634f41SChristoph Hellwig "I/O %d QID %d timeout, disable controller\n", 1335fd634f41SChristoph Hellwig req->tag, nvmeq->qid); 133627fa9bc5SChristoph Hellwig nvme_req(req)->flags |= NVME_REQ_CANCELLED; 13377ad92f65STong Zhang nvme_dev_disable(dev, true); 1338db8c48e4SChristoph Hellwig return BLK_EH_DONE; 133939a9dd81SKeith Busch case NVME_CTRL_RESETTING: 134039a9dd81SKeith Busch return BLK_EH_RESET_TIMER; 13414244140dSKeith Busch default: 13424244140dSKeith Busch break; 1343fd634f41SChristoph Hellwig } 1344fd634f41SChristoph Hellwig 1345fd634f41SChristoph Hellwig /* 1346e1569a16SKeith Busch * Shutdown the controller immediately and schedule a reset if the 1347e1569a16SKeith Busch * command was already aborted once before and still hasn't been 1348e1569a16SKeith Busch * returned to the driver, or if this is the admin queue. 
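 *
 * iod->aborted (set further down once an Abort has been sent) is what
 * tells a repeat timeout apart from a first one, so the escalation order
 * is: poll for a missed completion, then issue an Abort, and only then
 * disable and reset the controller.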
134931c7c7d2SChristoph Hellwig */ 1350f4800d6dSChristoph Hellwig if (!nvmeq->qid || iod->aborted) { 13511b3c47c1SSagi Grimberg dev_warn(dev->ctrl.device, 135257dacad5SJay Sternberg "I/O %d QID %d timeout, reset controller\n", 135357dacad5SJay Sternberg req->tag, nvmeq->qid); 13547ad92f65STong Zhang nvme_req(req)->flags |= NVME_REQ_CANCELLED; 1355a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 1356d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 1357e1569a16SKeith Busch 1358db8c48e4SChristoph Hellwig return BLK_EH_DONE; 135957dacad5SJay Sternberg } 136057dacad5SJay Sternberg 1361e7a2a87dSChristoph Hellwig if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { 1362e7a2a87dSChristoph Hellwig atomic_inc(&dev->ctrl.abort_limit); 1363e7a2a87dSChristoph Hellwig return BLK_EH_RESET_TIMER; 1364e7a2a87dSChristoph Hellwig } 136552da4f3fSKeith Busch iod->aborted = true; 136657dacad5SJay Sternberg 136757dacad5SJay Sternberg cmd.abort.opcode = nvme_admin_abort_cmd; 136885f74acfSKeith Busch cmd.abort.cid = nvme_cid(req); 136957dacad5SJay Sternberg cmd.abort.sqid = cpu_to_le16(nvmeq->qid); 137057dacad5SJay Sternberg 13711b3c47c1SSagi Grimberg dev_warn(nvmeq->dev->ctrl.device, 137286141440SChristoph Hellwig "I/O %d (%s) QID %d timeout, aborting\n", 137386141440SChristoph Hellwig req->tag, 137486141440SChristoph Hellwig nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode), 137586141440SChristoph Hellwig nvmeq->qid); 1376e7a2a87dSChristoph Hellwig 1377e559398fSChristoph Hellwig abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd), 137839dfe844SChaitanya Kulkarni BLK_MQ_REQ_NOWAIT); 13796bf25d16SChristoph Hellwig if (IS_ERR(abort_req)) { 13806bf25d16SChristoph Hellwig atomic_inc(&dev->ctrl.abort_limit); 138131c7c7d2SChristoph Hellwig return BLK_EH_RESET_TIMER; 138257dacad5SJay Sternberg } 1383e559398fSChristoph Hellwig nvme_init_request(abort_req, &cmd); 138457dacad5SJay Sternberg 1385e2e53086SChristoph Hellwig abort_req->end_io = abort_endio; 1386e7a2a87dSChristoph Hellwig abort_req->end_io_data = NULL; 1387e2e53086SChristoph Hellwig blk_execute_rq_nowait(abort_req, false); 138857dacad5SJay Sternberg 138957dacad5SJay Sternberg /* 139057dacad5SJay Sternberg * The aborted req will be completed on receiving the abort req. 139157dacad5SJay Sternberg * We enable the timer again. If hit twice, it'll cause a device reset, 139257dacad5SJay Sternberg * as the device then is in a faulty state. 
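 *
 * Abort is best-effort and the controller may ignore it; outstanding
 * aborts are therefore throttled through ctrl.abort_limit (derived from
 * the controller's advertised Abort Command Limit), and the failure
 * paths above simply fall back to re-arming the timer.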
139357dacad5SJay Sternberg */ 139457dacad5SJay Sternberg return BLK_EH_RESET_TIMER; 139557dacad5SJay Sternberg } 139657dacad5SJay Sternberg 139757dacad5SJay Sternberg static void nvme_free_queue(struct nvme_queue *nvmeq) 139857dacad5SJay Sternberg { 13998a1d09a6SBenjamin Herrenschmidt dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq), 140057dacad5SJay Sternberg (void *)nvmeq->cqes, nvmeq->cq_dma_addr); 140163223078SChristoph Hellwig if (!nvmeq->sq_cmds) 140263223078SChristoph Hellwig return; 14030f238ff5SLogan Gunthorpe 140463223078SChristoph Hellwig if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { 140588a041f4SKeith Busch pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev), 14068a1d09a6SBenjamin Herrenschmidt nvmeq->sq_cmds, SQ_SIZE(nvmeq)); 140763223078SChristoph Hellwig } else { 14088a1d09a6SBenjamin Herrenschmidt dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq), 140963223078SChristoph Hellwig nvmeq->sq_cmds, nvmeq->sq_dma_addr); 14100f238ff5SLogan Gunthorpe } 141157dacad5SJay Sternberg } 141257dacad5SJay Sternberg 141357dacad5SJay Sternberg static void nvme_free_queues(struct nvme_dev *dev, int lowest) 141457dacad5SJay Sternberg { 141557dacad5SJay Sternberg int i; 141657dacad5SJay Sternberg 1417d858e5f0SSagi Grimberg for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { 1418d858e5f0SSagi Grimberg dev->ctrl.queue_count--; 1419147b27e4SSagi Grimberg nvme_free_queue(&dev->queues[i]); 142057dacad5SJay Sternberg } 142157dacad5SJay Sternberg } 142257dacad5SJay Sternberg 142310981f23SChristoph Hellwig static void nvme_suspend_queue(struct nvme_dev *dev, unsigned int qid) 142457dacad5SJay Sternberg { 142510981f23SChristoph Hellwig struct nvme_queue *nvmeq = &dev->queues[qid]; 142610981f23SChristoph Hellwig 14274e224106SChristoph Hellwig if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) 142810981f23SChristoph Hellwig return; 142957dacad5SJay Sternberg 14304e224106SChristoph Hellwig /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */ 1431d1f06f4aSJens Axboe mb(); 143257dacad5SJay Sternberg 14334e224106SChristoph Hellwig nvmeq->dev->online_queues--; 14341c63dc66SChristoph Hellwig if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) 14359f27bd70SChristoph Hellwig nvme_quiesce_admin_queue(&nvmeq->dev->ctrl); 14367c349ddeSKeith Busch if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags)) 143710981f23SChristoph Hellwig pci_free_irq(to_pci_dev(dev->dev), nvmeq->cq_vector, nvmeq); 143857dacad5SJay Sternberg } 143957dacad5SJay Sternberg 14408fae268bSKeith Busch static void nvme_suspend_io_queues(struct nvme_dev *dev) 14418fae268bSKeith Busch { 14428fae268bSKeith Busch int i; 14438fae268bSKeith Busch 14448fae268bSKeith Busch for (i = dev->ctrl.queue_count - 1; i > 0; i--) 144510981f23SChristoph Hellwig nvme_suspend_queue(dev, i); 144657dacad5SJay Sternberg } 144757dacad5SJay Sternberg 1448fa46c6fbSKeith Busch /* 1449fa46c6fbSKeith Busch * Called only on a device that has been disabled and after all other threads 14509210c075SDongli Zhang * that can check this device's completion queues have synced, except 14519210c075SDongli Zhang * nvme_poll(). This is the last chance for the driver to see a natural 14529210c075SDongli Zhang * completion before nvme_cancel_request() terminates all incomplete requests. 
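 *
 * cq_poll_lock is still taken per queue below because nvme_poll() may
 * run concurrently from the polled-I/O path; interrupt-driven queues
 * need no extra care here since their vectors have already been freed.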
1453fa46c6fbSKeith Busch */ 1454fa46c6fbSKeith Busch static void nvme_reap_pending_cqes(struct nvme_dev *dev) 1455fa46c6fbSKeith Busch { 1456fa46c6fbSKeith Busch int i; 1457fa46c6fbSKeith Busch 14589210c075SDongli Zhang for (i = dev->ctrl.queue_count - 1; i > 0; i--) { 14599210c075SDongli Zhang spin_lock(&dev->queues[i].cq_poll_lock); 1460c234a653SJens Axboe nvme_poll_cq(&dev->queues[i], NULL); 14619210c075SDongli Zhang spin_unlock(&dev->queues[i].cq_poll_lock); 14629210c075SDongli Zhang } 1463fa46c6fbSKeith Busch } 1464fa46c6fbSKeith Busch 146557dacad5SJay Sternberg static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, 146657dacad5SJay Sternberg int entry_size) 146757dacad5SJay Sternberg { 146857dacad5SJay Sternberg int q_depth = dev->q_depth; 14695fd4ce1bSChristoph Hellwig unsigned q_size_aligned = roundup(q_depth * entry_size, 14706c3c05b0SChaitanya Kulkarni NVME_CTRL_PAGE_SIZE); 147157dacad5SJay Sternberg 147257dacad5SJay Sternberg if (q_size_aligned * nr_io_queues > dev->cmb_size) { 147357dacad5SJay Sternberg u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); 14744e523547SBaolin Wang 14756c3c05b0SChaitanya Kulkarni mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE); 147657dacad5SJay Sternberg q_depth = div_u64(mem_per_q, entry_size); 147757dacad5SJay Sternberg 147857dacad5SJay Sternberg /* 147957dacad5SJay Sternberg * Ensure the reduced q_depth is above some threshold where it 148057dacad5SJay Sternberg * would be better to map queues in system memory with the 148157dacad5SJay Sternberg * original depth 148257dacad5SJay Sternberg */ 148357dacad5SJay Sternberg if (q_depth < 64) 148457dacad5SJay Sternberg return -ENOMEM; 148557dacad5SJay Sternberg } 148657dacad5SJay Sternberg 148757dacad5SJay Sternberg return q_depth; 148857dacad5SJay Sternberg } 148957dacad5SJay Sternberg 149057dacad5SJay Sternberg static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, 14918a1d09a6SBenjamin Herrenschmidt int qid) 149257dacad5SJay Sternberg { 14930f238ff5SLogan Gunthorpe struct pci_dev *pdev = to_pci_dev(dev->dev); 1494815c6704SKeith Busch 14950f238ff5SLogan Gunthorpe if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { 14968a1d09a6SBenjamin Herrenschmidt nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq)); 1497bfac8e9fSAlan Mikhak if (nvmeq->sq_cmds) { 14980f238ff5SLogan Gunthorpe nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, 14990f238ff5SLogan Gunthorpe nvmeq->sq_cmds); 150063223078SChristoph Hellwig if (nvmeq->sq_dma_addr) { 150163223078SChristoph Hellwig set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); 150263223078SChristoph Hellwig return 0; 150363223078SChristoph Hellwig } 1504bfac8e9fSAlan Mikhak 15058a1d09a6SBenjamin Herrenschmidt pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq)); 1506bfac8e9fSAlan Mikhak } 15070f238ff5SLogan Gunthorpe } 15080f238ff5SLogan Gunthorpe 15098a1d09a6SBenjamin Herrenschmidt nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq), 151057dacad5SJay Sternberg &nvmeq->sq_dma_addr, GFP_KERNEL); 151157dacad5SJay Sternberg if (!nvmeq->sq_cmds) 151257dacad5SJay Sternberg return -ENOMEM; 151357dacad5SJay Sternberg return 0; 151457dacad5SJay Sternberg } 151557dacad5SJay Sternberg 1516a6ff7262SKeith Busch static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) 151757dacad5SJay Sternberg { 1518147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[qid]; 151957dacad5SJay Sternberg 152062314e40SKeith Busch if (dev->ctrl.queue_count > qid) 152162314e40SKeith Busch return 0; 152257dacad5SJay Sternberg 
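	/*
	 * Per-queue SQE size: the admin queue always uses the standard
	 * 64-byte entries (NVME_ADM_SQES is their log2), while io_sqes
	 * may be larger on controllers with the 128-byte SQE quirk
	 * (handled elsewhere in this driver).
	 */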
1523c1e0cc7eSBenjamin Herrenschmidt nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES; 15248a1d09a6SBenjamin Herrenschmidt nvmeq->q_depth = depth; 15258a1d09a6SBenjamin Herrenschmidt nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), 152657dacad5SJay Sternberg &nvmeq->cq_dma_addr, GFP_KERNEL); 152757dacad5SJay Sternberg if (!nvmeq->cqes) 152857dacad5SJay Sternberg goto free_nvmeq; 152957dacad5SJay Sternberg 15308a1d09a6SBenjamin Herrenschmidt if (nvme_alloc_sq_cmds(dev, nvmeq, qid)) 153157dacad5SJay Sternberg goto free_cqdma; 153257dacad5SJay Sternberg 153357dacad5SJay Sternberg nvmeq->dev = dev; 15341ab0cd69SJens Axboe spin_lock_init(&nvmeq->sq_lock); 15353a7afd8eSChristoph Hellwig spin_lock_init(&nvmeq->cq_poll_lock); 153657dacad5SJay Sternberg nvmeq->cq_head = 0; 153757dacad5SJay Sternberg nvmeq->cq_phase = 1; 153857dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 153957dacad5SJay Sternberg nvmeq->qid = qid; 1540d858e5f0SSagi Grimberg dev->ctrl.queue_count++; 154157dacad5SJay Sternberg 1542147b27e4SSagi Grimberg return 0; 154357dacad5SJay Sternberg 154457dacad5SJay Sternberg free_cqdma: 15458a1d09a6SBenjamin Herrenschmidt dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, 154657dacad5SJay Sternberg nvmeq->cq_dma_addr); 154757dacad5SJay Sternberg free_nvmeq: 1548147b27e4SSagi Grimberg return -ENOMEM; 154957dacad5SJay Sternberg } 155057dacad5SJay Sternberg 1551dca51e78SChristoph Hellwig static int queue_request_irq(struct nvme_queue *nvmeq) 155257dacad5SJay Sternberg { 15530ff199cbSChristoph Hellwig struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 15540ff199cbSChristoph Hellwig int nr = nvmeq->dev->ctrl.instance; 15550ff199cbSChristoph Hellwig 15560ff199cbSChristoph Hellwig if (use_threaded_interrupts) { 15570ff199cbSChristoph Hellwig return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, 15580ff199cbSChristoph Hellwig nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 15590ff199cbSChristoph Hellwig } else { 15600ff199cbSChristoph Hellwig return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, 15610ff199cbSChristoph Hellwig NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 15620ff199cbSChristoph Hellwig } 156357dacad5SJay Sternberg } 156457dacad5SJay Sternberg 156557dacad5SJay Sternberg static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) 156657dacad5SJay Sternberg { 156757dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 156857dacad5SJay Sternberg 156957dacad5SJay Sternberg nvmeq->sq_tail = 0; 157038210800SKeith Busch nvmeq->last_sq_tail = 0; 157157dacad5SJay Sternberg nvmeq->cq_head = 0; 157257dacad5SJay Sternberg nvmeq->cq_phase = 1; 157357dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 15748a1d09a6SBenjamin Herrenschmidt memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); 1575f9f38e33SHelen Koike nvme_dbbuf_init(dev, nvmeq, qid); 157657dacad5SJay Sternberg dev->online_queues++; 15773a7afd8eSChristoph Hellwig wmb(); /* ensure the first interrupt sees the initialization */ 157857dacad5SJay Sternberg } 157957dacad5SJay Sternberg 1580e4b9852aSCasey Chen /* 1581e4b9852aSCasey Chen * Try getting shutdown_lock while setting up IO queues. 1582e4b9852aSCasey Chen */ 1583e4b9852aSCasey Chen static int nvme_setup_io_queues_trylock(struct nvme_dev *dev) 1584e4b9852aSCasey Chen { 1585e4b9852aSCasey Chen /* 1586e4b9852aSCasey Chen * Give up if the lock is being held by nvme_dev_disable. 
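 *
 * nvme_dev_disable() holds shutdown_lock while it tears the queues down;
 * rather than block behind it, queue setup just backs off with -ENODEV,
 * since the controller is on its way down anyway.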
1587e4b9852aSCasey Chen */ 1588e4b9852aSCasey Chen if (!mutex_trylock(&dev->shutdown_lock)) 1589e4b9852aSCasey Chen return -ENODEV; 1590e4b9852aSCasey Chen 1591e4b9852aSCasey Chen /* 1592e4b9852aSCasey Chen * Controller is in wrong state, fail early. 1593e4b9852aSCasey Chen */ 1594e4b9852aSCasey Chen if (dev->ctrl.state != NVME_CTRL_CONNECTING) { 1595e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 1596e4b9852aSCasey Chen return -ENODEV; 1597e4b9852aSCasey Chen } 1598e4b9852aSCasey Chen 1599e4b9852aSCasey Chen return 0; 1600e4b9852aSCasey Chen } 1601e4b9852aSCasey Chen 16024b04cc6aSJens Axboe static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) 160357dacad5SJay Sternberg { 160457dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 160557dacad5SJay Sternberg int result; 16067c349ddeSKeith Busch u16 vector = 0; 160757dacad5SJay Sternberg 1608d1ed6aa1SChristoph Hellwig clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 1609d1ed6aa1SChristoph Hellwig 161022b55601SKeith Busch /* 161122b55601SKeith Busch * A queue's vector matches the queue identifier unless the controller 161222b55601SKeith Busch * has only one vector available. 161322b55601SKeith Busch */ 16144b04cc6aSJens Axboe if (!polled) 1615a8e3e0bbSJianchao Wang vector = dev->num_vecs == 1 ? 0 : qid; 16164b04cc6aSJens Axboe else 16177c349ddeSKeith Busch set_bit(NVMEQ_POLLED, &nvmeq->flags); 16184b04cc6aSJens Axboe 1619a8e3e0bbSJianchao Wang result = adapter_alloc_cq(dev, qid, nvmeq, vector); 1620ded45505SKeith Busch if (result) 1621ded45505SKeith Busch return result; 162257dacad5SJay Sternberg 162357dacad5SJay Sternberg result = adapter_alloc_sq(dev, qid, nvmeq); 162457dacad5SJay Sternberg if (result < 0) 1625ded45505SKeith Busch return result; 1626c80b36cdSEdmund Nadolski if (result) 162757dacad5SJay Sternberg goto release_cq; 162857dacad5SJay Sternberg 1629a8e3e0bbSJianchao Wang nvmeq->cq_vector = vector; 16304b04cc6aSJens Axboe 1631e4b9852aSCasey Chen result = nvme_setup_io_queues_trylock(dev); 1632e4b9852aSCasey Chen if (result) 1633e4b9852aSCasey Chen return result; 1634e4b9852aSCasey Chen nvme_init_queue(nvmeq, qid); 16357c349ddeSKeith Busch if (!polled) { 1636dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 163757dacad5SJay Sternberg if (result < 0) 163857dacad5SJay Sternberg goto release_sq; 16394b04cc6aSJens Axboe } 164057dacad5SJay Sternberg 16414e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &nvmeq->flags); 1642e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 164357dacad5SJay Sternberg return result; 164457dacad5SJay Sternberg 164557dacad5SJay Sternberg release_sq: 1646f25a2dfcSJianchao Wang dev->online_queues--; 1647e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 164857dacad5SJay Sternberg adapter_delete_sq(dev, qid); 164957dacad5SJay Sternberg release_cq: 165057dacad5SJay Sternberg adapter_delete_cq(dev, qid); 165157dacad5SJay Sternberg return result; 165257dacad5SJay Sternberg } 165357dacad5SJay Sternberg 1654f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_admin_ops = { 165557dacad5SJay Sternberg .queue_rq = nvme_queue_rq, 165677f02a7aSChristoph Hellwig .complete = nvme_pci_complete_rq, 165757dacad5SJay Sternberg .init_hctx = nvme_admin_init_hctx, 1658e559398fSChristoph Hellwig .init_request = nvme_pci_init_request, 165957dacad5SJay Sternberg .timeout = nvme_timeout, 166057dacad5SJay Sternberg }; 166157dacad5SJay Sternberg 1662f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_ops = { 1663376f7ef8SChristoph Hellwig .queue_rq = nvme_queue_rq, 
1664d62cbcf6SJens Axboe .queue_rqs = nvme_queue_rqs, 1665376f7ef8SChristoph Hellwig .complete = nvme_pci_complete_rq, 1666376f7ef8SChristoph Hellwig .commit_rqs = nvme_commit_rqs, 1667376f7ef8SChristoph Hellwig .init_hctx = nvme_init_hctx, 1668e559398fSChristoph Hellwig .init_request = nvme_pci_init_request, 1669376f7ef8SChristoph Hellwig .map_queues = nvme_pci_map_queues, 1670376f7ef8SChristoph Hellwig .timeout = nvme_timeout, 1671c6d962aeSChristoph Hellwig .poll = nvme_poll, 1672dabcefabSJens Axboe }; 1673dabcefabSJens Axboe 167457dacad5SJay Sternberg static void nvme_dev_remove_admin(struct nvme_dev *dev) 167557dacad5SJay Sternberg { 16761c63dc66SChristoph Hellwig if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { 167769d9a99cSKeith Busch /* 167869d9a99cSKeith Busch * If the controller was reset during removal, it's possible 167969d9a99cSKeith Busch * user requests may be waiting on a stopped queue. Start the 168069d9a99cSKeith Busch * queue to flush these to completion. 168169d9a99cSKeith Busch */ 16829f27bd70SChristoph Hellwig nvme_unquiesce_admin_queue(&dev->ctrl); 16830da7feaaSChristoph Hellwig nvme_remove_admin_tag_set(&dev->ctrl); 168457dacad5SJay Sternberg } 168557dacad5SJay Sternberg } 168657dacad5SJay Sternberg 168797f6ef64SXu Yu static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) 168897f6ef64SXu Yu { 168997f6ef64SXu Yu return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); 169097f6ef64SXu Yu } 169197f6ef64SXu Yu 169297f6ef64SXu Yu static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size) 169397f6ef64SXu Yu { 169497f6ef64SXu Yu struct pci_dev *pdev = to_pci_dev(dev->dev); 169597f6ef64SXu Yu 169697f6ef64SXu Yu if (size <= dev->bar_mapped_size) 169797f6ef64SXu Yu return 0; 169897f6ef64SXu Yu if (size > pci_resource_len(pdev, 0)) 169997f6ef64SXu Yu return -ENOMEM; 170097f6ef64SXu Yu if (dev->bar) 170197f6ef64SXu Yu iounmap(dev->bar); 170297f6ef64SXu Yu dev->bar = ioremap(pci_resource_start(pdev, 0), size); 170397f6ef64SXu Yu if (!dev->bar) { 170497f6ef64SXu Yu dev->bar_mapped_size = 0; 170597f6ef64SXu Yu return -ENOMEM; 170697f6ef64SXu Yu } 170797f6ef64SXu Yu dev->bar_mapped_size = size; 170897f6ef64SXu Yu dev->dbs = dev->bar + NVME_REG_DBS; 170997f6ef64SXu Yu 171097f6ef64SXu Yu return 0; 171197f6ef64SXu Yu } 171297f6ef64SXu Yu 171301ad0990SSagi Grimberg static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) 171457dacad5SJay Sternberg { 171557dacad5SJay Sternberg int result; 171657dacad5SJay Sternberg u32 aqa; 171757dacad5SJay Sternberg struct nvme_queue *nvmeq; 171857dacad5SJay Sternberg 171997f6ef64SXu Yu result = nvme_remap_bar(dev, db_bar_size(dev, 0)); 172097f6ef64SXu Yu if (result < 0) 172197f6ef64SXu Yu return result; 172297f6ef64SXu Yu 17238ef2074dSGabriel Krisman Bertazi dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? 172420d0dfe6SSagi Grimberg NVME_CAP_NSSRC(dev->ctrl.cap) : 0; 172557dacad5SJay Sternberg 17267a67cbeaSChristoph Hellwig if (dev->subsystem && 17277a67cbeaSChristoph Hellwig (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) 17287a67cbeaSChristoph Hellwig writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); 172957dacad5SJay Sternberg 1730285b6e9bSChristoph Hellwig /* 1731285b6e9bSChristoph Hellwig * If the device has been passed off to us in an enabled state, just 1732285b6e9bSChristoph Hellwig * clear the enabled bit. 
The spec says we should set the 'shutdown 1733285b6e9bSChristoph Hellwig * notification bits', but doing so may cause the device to complete 1734285b6e9bSChristoph Hellwig * commands to the admin queue ... and we don't know what memory that 1735285b6e9bSChristoph Hellwig * might be pointing at! 1736285b6e9bSChristoph Hellwig */ 1737285b6e9bSChristoph Hellwig result = nvme_disable_ctrl(&dev->ctrl, false); 173857dacad5SJay Sternberg if (result < 0) 173957dacad5SJay Sternberg return result; 174057dacad5SJay Sternberg 1741a6ff7262SKeith Busch result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); 1742147b27e4SSagi Grimberg if (result) 1743147b27e4SSagi Grimberg return result; 174457dacad5SJay Sternberg 1745635333e4SMax Gurtovoy dev->ctrl.numa_node = dev_to_node(dev->dev); 1746635333e4SMax Gurtovoy 1747147b27e4SSagi Grimberg nvmeq = &dev->queues[0]; 174857dacad5SJay Sternberg aqa = nvmeq->q_depth - 1; 174957dacad5SJay Sternberg aqa |= aqa << 16; 175057dacad5SJay Sternberg 17517a67cbeaSChristoph Hellwig writel(aqa, dev->bar + NVME_REG_AQA); 17527a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); 17537a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); 175457dacad5SJay Sternberg 1755c0f2f45bSSagi Grimberg result = nvme_enable_ctrl(&dev->ctrl); 175657dacad5SJay Sternberg if (result) 1757d4875622SKeith Busch return result; 175857dacad5SJay Sternberg 175957dacad5SJay Sternberg nvmeq->cq_vector = 0; 1760161b8be2SKeith Busch nvme_init_queue(nvmeq, 0); 1761dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 176257dacad5SJay Sternberg if (result) { 17637c349ddeSKeith Busch dev->online_queues--; 1764d4875622SKeith Busch return result; 176557dacad5SJay Sternberg } 176657dacad5SJay Sternberg 17674e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &nvmeq->flags); 176857dacad5SJay Sternberg return result; 176957dacad5SJay Sternberg } 177057dacad5SJay Sternberg 1771749941f2SChristoph Hellwig static int nvme_create_io_queues(struct nvme_dev *dev) 177257dacad5SJay Sternberg { 17734b04cc6aSJens Axboe unsigned i, max, rw_queues; 1774749941f2SChristoph Hellwig int ret = 0; 177557dacad5SJay Sternberg 1776d858e5f0SSagi Grimberg for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { 1777a6ff7262SKeith Busch if (nvme_alloc_queue(dev, i, dev->q_depth)) { 1778749941f2SChristoph Hellwig ret = -ENOMEM; 177957dacad5SJay Sternberg break; 1780749941f2SChristoph Hellwig } 1781749941f2SChristoph Hellwig } 178257dacad5SJay Sternberg 1783d858e5f0SSagi Grimberg max = min(dev->max_qid, dev->ctrl.queue_count - 1); 1784e20ba6e1SChristoph Hellwig if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) { 1785e20ba6e1SChristoph Hellwig rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] + 1786e20ba6e1SChristoph Hellwig dev->io_queues[HCTX_TYPE_READ]; 17874b04cc6aSJens Axboe } else { 17884b04cc6aSJens Axboe rw_queues = max; 17894b04cc6aSJens Axboe } 17904b04cc6aSJens Axboe 1791949928c1SKeith Busch for (i = dev->online_queues; i <= max; i++) { 17924b04cc6aSJens Axboe bool polled = i > rw_queues; 17934b04cc6aSJens Axboe 17944b04cc6aSJens Axboe ret = nvme_create_queue(&dev->queues[i], i, polled); 1795d4875622SKeith Busch if (ret) 179657dacad5SJay Sternberg break; 179757dacad5SJay Sternberg } 179857dacad5SJay Sternberg 1799749941f2SChristoph Hellwig /* 1800749941f2SChristoph Hellwig * Ignore failing Create SQ/CQ commands, we can continue with less 18018adb8c14SMinwoo Im * than the desired amount of queues, and even a controller without 18028adb8c14SMinwoo Im * I/O queues can still 
be used to issue admin commands. This might 1803749941f2SChristoph Hellwig * be useful to upgrade a buggy firmware for example. 1804749941f2SChristoph Hellwig */ 1805749941f2SChristoph Hellwig return ret >= 0 ? 0 : ret; 180657dacad5SJay Sternberg } 180757dacad5SJay Sternberg 180888de4598SChristoph Hellwig static u64 nvme_cmb_size_unit(struct nvme_dev *dev) 180957dacad5SJay Sternberg { 181088de4598SChristoph Hellwig u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK; 181188de4598SChristoph Hellwig 181288de4598SChristoph Hellwig return 1ULL << (12 + 4 * szu); 181388de4598SChristoph Hellwig } 181488de4598SChristoph Hellwig 181588de4598SChristoph Hellwig static u32 nvme_cmb_size(struct nvme_dev *dev) 181688de4598SChristoph Hellwig { 181788de4598SChristoph Hellwig return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK; 181888de4598SChristoph Hellwig } 181988de4598SChristoph Hellwig 1820f65efd6dSChristoph Hellwig static void nvme_map_cmb(struct nvme_dev *dev) 182157dacad5SJay Sternberg { 182288de4598SChristoph Hellwig u64 size, offset; 182357dacad5SJay Sternberg resource_size_t bar_size; 182457dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 18258969f1f8SChristoph Hellwig int bar; 182657dacad5SJay Sternberg 18279fe5c59fSKeith Busch if (dev->cmb_size) 18289fe5c59fSKeith Busch return; 18299fe5c59fSKeith Busch 183020d3bb92SKlaus Jensen if (NVME_CAP_CMBS(dev->ctrl.cap)) 183120d3bb92SKlaus Jensen writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC); 183220d3bb92SKlaus Jensen 18337a67cbeaSChristoph Hellwig dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); 1834f65efd6dSChristoph Hellwig if (!dev->cmbsz) 1835f65efd6dSChristoph Hellwig return; 1836202021c1SStephen Bates dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); 183757dacad5SJay Sternberg 183888de4598SChristoph Hellwig size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev); 183988de4598SChristoph Hellwig offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc); 18408969f1f8SChristoph Hellwig bar = NVME_CMB_BIR(dev->cmbloc); 18418969f1f8SChristoph Hellwig bar_size = pci_resource_len(pdev, bar); 184257dacad5SJay Sternberg 184357dacad5SJay Sternberg if (offset > bar_size) 1844f65efd6dSChristoph Hellwig return; 184557dacad5SJay Sternberg 184657dacad5SJay Sternberg /* 184720d3bb92SKlaus Jensen * Tell the controller about the host side address mapping the CMB, 184820d3bb92SKlaus Jensen * and enable CMB decoding for the NVMe 1.4+ scheme: 184920d3bb92SKlaus Jensen */ 185020d3bb92SKlaus Jensen if (NVME_CAP_CMBS(dev->ctrl.cap)) { 185120d3bb92SKlaus Jensen hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE | 185220d3bb92SKlaus Jensen (pci_bus_address(pdev, bar) + offset), 185320d3bb92SKlaus Jensen dev->bar + NVME_REG_CMBMSC); 185420d3bb92SKlaus Jensen } 185520d3bb92SKlaus Jensen 185620d3bb92SKlaus Jensen /* 185757dacad5SJay Sternberg * Controllers may support a CMB size larger than their BAR, 185857dacad5SJay Sternberg * for example, due to being behind a bridge. 
Reduce the CMB to 185957dacad5SJay Sternberg * the reported size of the BAR 186057dacad5SJay Sternberg */ 186157dacad5SJay Sternberg if (size > bar_size - offset) 186257dacad5SJay Sternberg size = bar_size - offset; 186357dacad5SJay Sternberg 18640f238ff5SLogan Gunthorpe if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { 18650f238ff5SLogan Gunthorpe dev_warn(dev->ctrl.device, 18660f238ff5SLogan Gunthorpe "failed to register the CMB\n"); 1867f65efd6dSChristoph Hellwig return; 18680f238ff5SLogan Gunthorpe } 18690f238ff5SLogan Gunthorpe 187057dacad5SJay Sternberg dev->cmb_size = size; 18710f238ff5SLogan Gunthorpe dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); 18720f238ff5SLogan Gunthorpe 18730f238ff5SLogan Gunthorpe if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == 18740f238ff5SLogan Gunthorpe (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) 18750f238ff5SLogan Gunthorpe pci_p2pmem_publish(pdev, true); 187657dacad5SJay Sternberg } 187757dacad5SJay Sternberg 187887ad72a5SChristoph Hellwig static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) 187957dacad5SJay Sternberg { 18806c3c05b0SChaitanya Kulkarni u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT; 18814033f35dSChristoph Hellwig u64 dma_addr = dev->host_mem_descs_dma; 1882f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 188387ad72a5SChristoph Hellwig int ret; 188487ad72a5SChristoph Hellwig 188587ad72a5SChristoph Hellwig c.features.opcode = nvme_admin_set_features; 188687ad72a5SChristoph Hellwig c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); 188787ad72a5SChristoph Hellwig c.features.dword11 = cpu_to_le32(bits); 18886c3c05b0SChaitanya Kulkarni c.features.dword12 = cpu_to_le32(host_mem_size); 188987ad72a5SChristoph Hellwig c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr)); 189087ad72a5SChristoph Hellwig c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr)); 189187ad72a5SChristoph Hellwig c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); 189287ad72a5SChristoph Hellwig 189387ad72a5SChristoph Hellwig ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 189487ad72a5SChristoph Hellwig if (ret) { 189587ad72a5SChristoph Hellwig dev_warn(dev->ctrl.device, 189687ad72a5SChristoph Hellwig "failed to set host mem (err %d, flags %#x).\n", 189787ad72a5SChristoph Hellwig ret, bits); 1898a5df5e79SKeith Busch } else 1899a5df5e79SKeith Busch dev->hmb = bits & NVME_HOST_MEM_ENABLE; 1900a5df5e79SKeith Busch 190187ad72a5SChristoph Hellwig return ret; 190287ad72a5SChristoph Hellwig } 190387ad72a5SChristoph Hellwig 190487ad72a5SChristoph Hellwig static void nvme_free_host_mem(struct nvme_dev *dev) 190587ad72a5SChristoph Hellwig { 190687ad72a5SChristoph Hellwig int i; 190787ad72a5SChristoph Hellwig 190887ad72a5SChristoph Hellwig for (i = 0; i < dev->nr_host_mem_descs; i++) { 190987ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; 19106c3c05b0SChaitanya Kulkarni size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE; 191187ad72a5SChristoph Hellwig 1912cc667f6dSLiviu Dudau dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], 1913cc667f6dSLiviu Dudau le64_to_cpu(desc->addr), 1914cc667f6dSLiviu Dudau DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 191587ad72a5SChristoph Hellwig } 191687ad72a5SChristoph Hellwig 191787ad72a5SChristoph Hellwig kfree(dev->host_mem_desc_bufs); 191887ad72a5SChristoph Hellwig dev->host_mem_desc_bufs = NULL; 19194033f35dSChristoph Hellwig dma_free_coherent(dev->dev, 19204033f35dSChristoph Hellwig dev->nr_host_mem_descs * 
sizeof(*dev->host_mem_descs), 19214033f35dSChristoph Hellwig dev->host_mem_descs, dev->host_mem_descs_dma); 192287ad72a5SChristoph Hellwig dev->host_mem_descs = NULL; 19237e5dd57eSMinwoo Im dev->nr_host_mem_descs = 0; 192487ad72a5SChristoph Hellwig } 192587ad72a5SChristoph Hellwig 192692dc6895SChristoph Hellwig static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, 192792dc6895SChristoph Hellwig u32 chunk_size) 192887ad72a5SChristoph Hellwig { 192987ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *descs; 193092dc6895SChristoph Hellwig u32 max_entries, len; 19314033f35dSChristoph Hellwig dma_addr_t descs_dma; 19322ee0e4edSDan Carpenter int i = 0; 193387ad72a5SChristoph Hellwig void **bufs; 19346fbcde66SMinwoo Im u64 size, tmp; 193587ad72a5SChristoph Hellwig 193687ad72a5SChristoph Hellwig tmp = (preferred + chunk_size - 1); 193787ad72a5SChristoph Hellwig do_div(tmp, chunk_size); 193887ad72a5SChristoph Hellwig max_entries = tmp; 1939044a9df1SChristoph Hellwig 1940044a9df1SChristoph Hellwig if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) 1941044a9df1SChristoph Hellwig max_entries = dev->ctrl.hmmaxd; 1942044a9df1SChristoph Hellwig 1943750afb08SLuis Chamberlain descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), 19444033f35dSChristoph Hellwig &descs_dma, GFP_KERNEL); 194587ad72a5SChristoph Hellwig if (!descs) 194687ad72a5SChristoph Hellwig goto out; 194787ad72a5SChristoph Hellwig 194887ad72a5SChristoph Hellwig bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL); 194987ad72a5SChristoph Hellwig if (!bufs) 195087ad72a5SChristoph Hellwig goto out_free_descs; 195187ad72a5SChristoph Hellwig 1952244a8fe4SMinwoo Im for (size = 0; size < preferred && i < max_entries; size += len) { 195387ad72a5SChristoph Hellwig dma_addr_t dma_addr; 195487ad72a5SChristoph Hellwig 195550cdb7c6SChristoph Hellwig len = min_t(u64, chunk_size, preferred - size); 195687ad72a5SChristoph Hellwig bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, 195787ad72a5SChristoph Hellwig DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 195887ad72a5SChristoph Hellwig if (!bufs[i]) 195987ad72a5SChristoph Hellwig break; 196087ad72a5SChristoph Hellwig 196187ad72a5SChristoph Hellwig descs[i].addr = cpu_to_le64(dma_addr); 19626c3c05b0SChaitanya Kulkarni descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE); 196387ad72a5SChristoph Hellwig i++; 196487ad72a5SChristoph Hellwig } 196587ad72a5SChristoph Hellwig 196692dc6895SChristoph Hellwig if (!size) 196787ad72a5SChristoph Hellwig goto out_free_bufs; 196887ad72a5SChristoph Hellwig 196987ad72a5SChristoph Hellwig dev->nr_host_mem_descs = i; 197087ad72a5SChristoph Hellwig dev->host_mem_size = size; 197187ad72a5SChristoph Hellwig dev->host_mem_descs = descs; 19724033f35dSChristoph Hellwig dev->host_mem_descs_dma = descs_dma; 197387ad72a5SChristoph Hellwig dev->host_mem_desc_bufs = bufs; 197487ad72a5SChristoph Hellwig return 0; 197587ad72a5SChristoph Hellwig 197687ad72a5SChristoph Hellwig out_free_bufs: 197787ad72a5SChristoph Hellwig while (--i >= 0) { 19786c3c05b0SChaitanya Kulkarni size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE; 197987ad72a5SChristoph Hellwig 1980cc667f6dSLiviu Dudau dma_free_attrs(dev->dev, size, bufs[i], 1981cc667f6dSLiviu Dudau le64_to_cpu(descs[i].addr), 1982cc667f6dSLiviu Dudau DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 198387ad72a5SChristoph Hellwig } 198487ad72a5SChristoph Hellwig 198587ad72a5SChristoph Hellwig kfree(bufs); 198687ad72a5SChristoph Hellwig out_free_descs: 19874033f35dSChristoph 
Hellwig dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, 19884033f35dSChristoph Hellwig descs_dma); 198987ad72a5SChristoph Hellwig out: 199087ad72a5SChristoph Hellwig dev->host_mem_descs = NULL; 199187ad72a5SChristoph Hellwig return -ENOMEM; 199287ad72a5SChristoph Hellwig } 199387ad72a5SChristoph Hellwig 199492dc6895SChristoph Hellwig static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) 199592dc6895SChristoph Hellwig { 19969dc54a0dSChaitanya Kulkarni u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES); 19979dc54a0dSChaitanya Kulkarni u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); 19989dc54a0dSChaitanya Kulkarni u64 chunk_size; 199992dc6895SChristoph Hellwig 200092dc6895SChristoph Hellwig /* start big and work our way down */ 20019dc54a0dSChaitanya Kulkarni for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) { 200292dc6895SChristoph Hellwig if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) { 200392dc6895SChristoph Hellwig if (!min || dev->host_mem_size >= min) 200492dc6895SChristoph Hellwig return 0; 200592dc6895SChristoph Hellwig nvme_free_host_mem(dev); 200692dc6895SChristoph Hellwig } 200792dc6895SChristoph Hellwig } 200892dc6895SChristoph Hellwig 200992dc6895SChristoph Hellwig return -ENOMEM; 201092dc6895SChristoph Hellwig } 201192dc6895SChristoph Hellwig 20129620cfbaSChristoph Hellwig static int nvme_setup_host_mem(struct nvme_dev *dev) 201387ad72a5SChristoph Hellwig { 201487ad72a5SChristoph Hellwig u64 max = (u64)max_host_mem_size_mb * SZ_1M; 201587ad72a5SChristoph Hellwig u64 preferred = (u64)dev->ctrl.hmpre * 4096; 201687ad72a5SChristoph Hellwig u64 min = (u64)dev->ctrl.hmmin * 4096; 201787ad72a5SChristoph Hellwig u32 enable_bits = NVME_HOST_MEM_ENABLE; 20186fbcde66SMinwoo Im int ret; 201987ad72a5SChristoph Hellwig 2020acb71e53SChristoph Hellwig if (!dev->ctrl.hmpre) 2021acb71e53SChristoph Hellwig return 0; 2022acb71e53SChristoph Hellwig 202387ad72a5SChristoph Hellwig preferred = min(preferred, max); 202487ad72a5SChristoph Hellwig if (min > max) { 202587ad72a5SChristoph Hellwig dev_warn(dev->ctrl.device, 202687ad72a5SChristoph Hellwig "min host memory (%lld MiB) above limit (%d MiB).\n", 202787ad72a5SChristoph Hellwig min >> ilog2(SZ_1M), max_host_mem_size_mb); 202887ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 20299620cfbaSChristoph Hellwig return 0; 203087ad72a5SChristoph Hellwig } 203187ad72a5SChristoph Hellwig 203287ad72a5SChristoph Hellwig /* 203387ad72a5SChristoph Hellwig * If we already have a buffer allocated check if we can reuse it. 
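 *
 * Reuse is signalled with NVME_HOST_MEM_RETURN in the Set Features
 * command, i.e. the Memory Return bit from the spec, telling the
 * controller that the descriptor list covers memory it was already
 * given; otherwise the old buffer is freed and reallocated below.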
203487ad72a5SChristoph Hellwig */ 203587ad72a5SChristoph Hellwig if (dev->host_mem_descs) { 203687ad72a5SChristoph Hellwig if (dev->host_mem_size >= min) 203787ad72a5SChristoph Hellwig enable_bits |= NVME_HOST_MEM_RETURN; 203887ad72a5SChristoph Hellwig else 203987ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 204087ad72a5SChristoph Hellwig } 204187ad72a5SChristoph Hellwig 204287ad72a5SChristoph Hellwig if (!dev->host_mem_descs) { 204392dc6895SChristoph Hellwig if (nvme_alloc_host_mem(dev, min, preferred)) { 204492dc6895SChristoph Hellwig dev_warn(dev->ctrl.device, 204592dc6895SChristoph Hellwig "failed to allocate host memory buffer.\n"); 20469620cfbaSChristoph Hellwig return 0; /* controller must work without HMB */ 204787ad72a5SChristoph Hellwig } 204887ad72a5SChristoph Hellwig 204992dc6895SChristoph Hellwig dev_info(dev->ctrl.device, 205092dc6895SChristoph Hellwig "allocated %lld MiB host memory buffer.\n", 205192dc6895SChristoph Hellwig dev->host_mem_size >> ilog2(SZ_1M)); 205292dc6895SChristoph Hellwig } 205392dc6895SChristoph Hellwig 20549620cfbaSChristoph Hellwig ret = nvme_set_host_mem(dev, enable_bits); 20559620cfbaSChristoph Hellwig if (ret) 205687ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 20579620cfbaSChristoph Hellwig return ret; 205857dacad5SJay Sternberg } 205957dacad5SJay Sternberg 20600521905eSKeith Busch static ssize_t cmb_show(struct device *dev, struct device_attribute *attr, 20610521905eSKeith Busch char *buf) 20620521905eSKeith Busch { 20630521905eSKeith Busch struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 20640521905eSKeith Busch 20650521905eSKeith Busch return sysfs_emit(buf, "cmbloc : x%08x\ncmbsz : x%08x\n", 20660521905eSKeith Busch ndev->cmbloc, ndev->cmbsz); 20670521905eSKeith Busch } 20680521905eSKeith Busch static DEVICE_ATTR_RO(cmb); 20690521905eSKeith Busch 20701751e97aSKeith Busch static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr, 20711751e97aSKeith Busch char *buf) 20721751e97aSKeith Busch { 20731751e97aSKeith Busch struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 20741751e97aSKeith Busch 20751751e97aSKeith Busch return sysfs_emit(buf, "%u\n", ndev->cmbloc); 20761751e97aSKeith Busch } 20771751e97aSKeith Busch static DEVICE_ATTR_RO(cmbloc); 20781751e97aSKeith Busch 20791751e97aSKeith Busch static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr, 20801751e97aSKeith Busch char *buf) 20811751e97aSKeith Busch { 20821751e97aSKeith Busch struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 20831751e97aSKeith Busch 20841751e97aSKeith Busch return sysfs_emit(buf, "%u\n", ndev->cmbsz); 20851751e97aSKeith Busch } 20861751e97aSKeith Busch static DEVICE_ATTR_RO(cmbsz); 20871751e97aSKeith Busch 2088a5df5e79SKeith Busch static ssize_t hmb_show(struct device *dev, struct device_attribute *attr, 2089a5df5e79SKeith Busch char *buf) 2090a5df5e79SKeith Busch { 2091a5df5e79SKeith Busch struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2092a5df5e79SKeith Busch 2093a5df5e79SKeith Busch return sysfs_emit(buf, "%d\n", ndev->hmb); 2094a5df5e79SKeith Busch } 2095a5df5e79SKeith Busch 2096a5df5e79SKeith Busch static ssize_t hmb_store(struct device *dev, struct device_attribute *attr, 2097a5df5e79SKeith Busch const char *buf, size_t count) 2098a5df5e79SKeith Busch { 2099a5df5e79SKeith Busch struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2100a5df5e79SKeith Busch bool new; 2101a5df5e79SKeith Busch int ret; 2102a5df5e79SKeith Busch 210399722c8aSChristophe JAILLET if (kstrtobool(buf, 
&new) < 0) 2104a5df5e79SKeith Busch return -EINVAL; 2105a5df5e79SKeith Busch 2106a5df5e79SKeith Busch if (new == ndev->hmb) 2107a5df5e79SKeith Busch return count; 2108a5df5e79SKeith Busch 2109a5df5e79SKeith Busch if (new) { 2110a5df5e79SKeith Busch ret = nvme_setup_host_mem(ndev); 2111a5df5e79SKeith Busch } else { 2112a5df5e79SKeith Busch ret = nvme_set_host_mem(ndev, 0); 2113a5df5e79SKeith Busch if (!ret) 2114a5df5e79SKeith Busch nvme_free_host_mem(ndev); 2115a5df5e79SKeith Busch } 2116a5df5e79SKeith Busch 2117a5df5e79SKeith Busch if (ret < 0) 2118a5df5e79SKeith Busch return ret; 2119a5df5e79SKeith Busch 2120a5df5e79SKeith Busch return count; 2121a5df5e79SKeith Busch } 2122a5df5e79SKeith Busch static DEVICE_ATTR_RW(hmb); 2123a5df5e79SKeith Busch 21240521905eSKeith Busch static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj, 21250521905eSKeith Busch struct attribute *a, int n) 21260521905eSKeith Busch { 21270521905eSKeith Busch struct nvme_ctrl *ctrl = 21280521905eSKeith Busch dev_get_drvdata(container_of(kobj, struct device, kobj)); 21290521905eSKeith Busch struct nvme_dev *dev = to_nvme_dev(ctrl); 21300521905eSKeith Busch 21311751e97aSKeith Busch if (a == &dev_attr_cmb.attr || 21321751e97aSKeith Busch a == &dev_attr_cmbloc.attr || 21331751e97aSKeith Busch a == &dev_attr_cmbsz.attr) { 21341751e97aSKeith Busch if (!dev->cmbsz) 21350521905eSKeith Busch return 0; 21361751e97aSKeith Busch } 2137a5df5e79SKeith Busch if (a == &dev_attr_hmb.attr && !ctrl->hmpre) 2138a5df5e79SKeith Busch return 0; 2139a5df5e79SKeith Busch 21400521905eSKeith Busch return a->mode; 21410521905eSKeith Busch } 21420521905eSKeith Busch 21430521905eSKeith Busch static struct attribute *nvme_pci_attrs[] = { 21440521905eSKeith Busch &dev_attr_cmb.attr, 21451751e97aSKeith Busch &dev_attr_cmbloc.attr, 21461751e97aSKeith Busch &dev_attr_cmbsz.attr, 2147a5df5e79SKeith Busch &dev_attr_hmb.attr, 21480521905eSKeith Busch NULL, 21490521905eSKeith Busch }; 21500521905eSKeith Busch 215186adbf0cSChristoph Hellwig static const struct attribute_group nvme_pci_dev_attrs_group = { 21520521905eSKeith Busch .attrs = nvme_pci_attrs, 21530521905eSKeith Busch .is_visible = nvme_pci_attrs_are_visible, 21540521905eSKeith Busch }; 21550521905eSKeith Busch 215686adbf0cSChristoph Hellwig static const struct attribute_group *nvme_pci_dev_attr_groups[] = { 215786adbf0cSChristoph Hellwig &nvme_dev_attrs_group, 215886adbf0cSChristoph Hellwig &nvme_pci_dev_attrs_group, 215986adbf0cSChristoph Hellwig NULL, 216086adbf0cSChristoph Hellwig }; 216186adbf0cSChristoph Hellwig 2162612b7286SMing Lei /* 2163612b7286SMing Lei * nirqs is the number of interrupts available for write and read 2164612b7286SMing Lei * queues. The core already reserved an interrupt for the admin queue. 2165612b7286SMing Lei */ 2166612b7286SMing Lei static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs) 21673b6592f7SJens Axboe { 2168612b7286SMing Lei struct nvme_dev *dev = affd->priv; 21692a5bcfddSWeiping Zhang unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues; 2170c45b1fa2SMing Lei 21713b6592f7SJens Axboe /* 2172ee0d96d3SBaolin Wang * If there is no interrupt available for queues, ensure that 2173612b7286SMing Lei * the default queue is set to 1. The affinity set size is 2174612b7286SMing Lei * also set to one, but the irq core ignores it for this case. 2175612b7286SMing Lei * 2176612b7286SMing Lei * If only one interrupt is available or 'write_queue' == 0, combine 2177612b7286SMing Lei * write and read queues. 
2178612b7286SMing Lei * 2179612b7286SMing Lei * If 'write_queues' > 0, ensure it leaves room for at least one read 2180612b7286SMing Lei * queue. 21813b6592f7SJens Axboe */ 2182612b7286SMing Lei if (!nrirqs) { 2183612b7286SMing Lei nrirqs = 1; 2184612b7286SMing Lei nr_read_queues = 0; 21852a5bcfddSWeiping Zhang } else if (nrirqs == 1 || !nr_write_queues) { 2186612b7286SMing Lei nr_read_queues = 0; 21872a5bcfddSWeiping Zhang } else if (nr_write_queues >= nrirqs) { 2188612b7286SMing Lei nr_read_queues = 1; 21893b6592f7SJens Axboe } else { 21902a5bcfddSWeiping Zhang nr_read_queues = nrirqs - nr_write_queues; 21913b6592f7SJens Axboe } 2192612b7286SMing Lei 2193612b7286SMing Lei dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; 2194612b7286SMing Lei affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; 2195612b7286SMing Lei dev->io_queues[HCTX_TYPE_READ] = nr_read_queues; 2196612b7286SMing Lei affd->set_size[HCTX_TYPE_READ] = nr_read_queues; 2197612b7286SMing Lei affd->nr_sets = nr_read_queues ? 2 : 1; 21983b6592f7SJens Axboe } 21993b6592f7SJens Axboe 22006451fe73SJens Axboe static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) 22013b6592f7SJens Axboe { 22023b6592f7SJens Axboe struct pci_dev *pdev = to_pci_dev(dev->dev); 22033b6592f7SJens Axboe struct irq_affinity affd = { 22043b6592f7SJens Axboe .pre_vectors = 1, 2205612b7286SMing Lei .calc_sets = nvme_calc_irq_sets, 2206612b7286SMing Lei .priv = dev, 22073b6592f7SJens Axboe }; 220821cc2f3fSJeffle Xu unsigned int irq_queues, poll_queues; 22096451fe73SJens Axboe 22106451fe73SJens Axboe /* 221121cc2f3fSJeffle Xu * Poll queues don't need interrupts, but we need at least one I/O queue 221221cc2f3fSJeffle Xu * left over for non-polled I/O. 22136451fe73SJens Axboe */ 221421cc2f3fSJeffle Xu poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1); 221521cc2f3fSJeffle Xu dev->io_queues[HCTX_TYPE_POLL] = poll_queues; 22163b6592f7SJens Axboe 221721cc2f3fSJeffle Xu /* 221821cc2f3fSJeffle Xu * Initialize for the single interrupt case, will be updated in 221921cc2f3fSJeffle Xu * nvme_calc_irq_sets(). 222021cc2f3fSJeffle Xu */ 2221612b7286SMing Lei dev->io_queues[HCTX_TYPE_DEFAULT] = 1; 2222612b7286SMing Lei dev->io_queues[HCTX_TYPE_READ] = 0; 22233b6592f7SJens Axboe 222466341331SBenjamin Herrenschmidt /* 222521cc2f3fSJeffle Xu * We need interrupts for the admin queue and each non-polled I/O queue, 222621cc2f3fSJeffle Xu * but some Apple controllers require all queues to use the first 222721cc2f3fSJeffle Xu * vector. 222866341331SBenjamin Herrenschmidt */ 222966341331SBenjamin Herrenschmidt irq_queues = 1; 223021cc2f3fSJeffle Xu if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)) 223121cc2f3fSJeffle Xu irq_queues += (nr_io_queues - poll_queues); 2232612b7286SMing Lei return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, 22333b6592f7SJens Axboe PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd); 22343b6592f7SJens Axboe } 22353b6592f7SJens Axboe 22362a5bcfddSWeiping Zhang static unsigned int nvme_max_io_queues(struct nvme_dev *dev) 22372a5bcfddSWeiping Zhang { 2238e3aef095SNiklas Schnelle /* 2239e3aef095SNiklas Schnelle * If tags are shared with admin queue (Apple bug), then 2240e3aef095SNiklas Schnelle * make sure we only use one IO queue. 
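 *
 * Those controllers keep a single command-id space across the admin and
 * I/O queues, so the driver sticks to one I/O queue and keeps its
 * command ids from colliding with the admin queue's.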
2241e3aef095SNiklas Schnelle */ 2242e3aef095SNiklas Schnelle if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) 2243e3aef095SNiklas Schnelle return 1; 22442a5bcfddSWeiping Zhang return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues; 22452a5bcfddSWeiping Zhang } 22462a5bcfddSWeiping Zhang 224757dacad5SJay Sternberg static int nvme_setup_io_queues(struct nvme_dev *dev) 224857dacad5SJay Sternberg { 2249147b27e4SSagi Grimberg struct nvme_queue *adminq = &dev->queues[0]; 225057dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 22512a5bcfddSWeiping Zhang unsigned int nr_io_queues; 225297f6ef64SXu Yu unsigned long size; 22532a5bcfddSWeiping Zhang int result; 225457dacad5SJay Sternberg 22552a5bcfddSWeiping Zhang /* 22562a5bcfddSWeiping Zhang * Sample the module parameters once at reset time so that we have 22572a5bcfddSWeiping Zhang * stable values to work with. 22582a5bcfddSWeiping Zhang */ 22592a5bcfddSWeiping Zhang dev->nr_write_queues = write_queues; 22602a5bcfddSWeiping Zhang dev->nr_poll_queues = poll_queues; 2261d38e9f04SBenjamin Herrenschmidt 2262ff4e5fbaSNiklas Schnelle nr_io_queues = dev->nr_allocated_queues - 1; 22639a0be7abSChristoph Hellwig result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); 22649a0be7abSChristoph Hellwig if (result < 0) 226557dacad5SJay Sternberg return result; 22669a0be7abSChristoph Hellwig 2267f5fa90dcSChristoph Hellwig if (nr_io_queues == 0) 2268a5229050SKeith Busch return 0; 226957dacad5SJay Sternberg 2270e4b9852aSCasey Chen /* 2271e4b9852aSCasey Chen * Free IRQ resources as soon as NVMEQ_ENABLED bit transitions 2272e4b9852aSCasey Chen * from set to unset. If there is a window to it is truely freed, 2273e4b9852aSCasey Chen * pci_free_irq_vectors() jumping into this window will crash. 2274e4b9852aSCasey Chen * And take lock to avoid racing with pci_free_irq_vectors() in 2275e4b9852aSCasey Chen * nvme_dev_disable() path. 
2276e4b9852aSCasey Chen */ 2277e4b9852aSCasey Chen result = nvme_setup_io_queues_trylock(dev); 2278e4b9852aSCasey Chen if (result) 2279e4b9852aSCasey Chen return result; 2280e4b9852aSCasey Chen if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) 2281e4b9852aSCasey Chen pci_free_irq(pdev, 0, adminq); 22824e224106SChristoph Hellwig 22830f238ff5SLogan Gunthorpe if (dev->cmb_use_sqes) { 228457dacad5SJay Sternberg result = nvme_cmb_qdepth(dev, nr_io_queues, 228557dacad5SJay Sternberg sizeof(struct nvme_command)); 228688d356caSChristoph Hellwig if (result > 0) { 228757dacad5SJay Sternberg dev->q_depth = result; 228888d356caSChristoph Hellwig dev->ctrl.sqsize = result - 1; 228988d356caSChristoph Hellwig } else { 22900f238ff5SLogan Gunthorpe dev->cmb_use_sqes = false; 229157dacad5SJay Sternberg } 229288d356caSChristoph Hellwig } 229357dacad5SJay Sternberg 229457dacad5SJay Sternberg do { 229597f6ef64SXu Yu size = db_bar_size(dev, nr_io_queues); 229697f6ef64SXu Yu result = nvme_remap_bar(dev, size); 229797f6ef64SXu Yu if (!result) 229857dacad5SJay Sternberg break; 2299e4b9852aSCasey Chen if (!--nr_io_queues) { 2300e4b9852aSCasey Chen result = -ENOMEM; 2301e4b9852aSCasey Chen goto out_unlock; 2302e4b9852aSCasey Chen } 230357dacad5SJay Sternberg } while (1); 230457dacad5SJay Sternberg adminq->q_db = dev->dbs; 230557dacad5SJay Sternberg 23068fae268bSKeith Busch retry: 230757dacad5SJay Sternberg /* Deregister the admin queue's interrupt */ 2308e4b9852aSCasey Chen if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) 23090ff199cbSChristoph Hellwig pci_free_irq(pdev, 0, adminq); 231057dacad5SJay Sternberg 231157dacad5SJay Sternberg /* 231257dacad5SJay Sternberg * If we enable msix early due to not intx, disable it again before 231357dacad5SJay Sternberg * setting up the full range we need. 231457dacad5SJay Sternberg */ 2315dca51e78SChristoph Hellwig pci_free_irq_vectors(pdev); 23163b6592f7SJens Axboe 23173b6592f7SJens Axboe result = nvme_setup_irqs(dev, nr_io_queues); 2318e4b9852aSCasey Chen if (result <= 0) { 2319e4b9852aSCasey Chen result = -EIO; 2320e4b9852aSCasey Chen goto out_unlock; 2321e4b9852aSCasey Chen } 23223b6592f7SJens Axboe 232322b55601SKeith Busch dev->num_vecs = result; 23244b04cc6aSJens Axboe result = max(result - 1, 1); 2325e20ba6e1SChristoph Hellwig dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; 232657dacad5SJay Sternberg 232757dacad5SJay Sternberg /* 232857dacad5SJay Sternberg * Should investigate if there's a performance win from allocating 232957dacad5SJay Sternberg * more queues than interrupt vectors; it might allow the submission 233057dacad5SJay Sternberg * path to scale better, even if the receive path is limited by the 233157dacad5SJay Sternberg * number of interrupts. 
233257dacad5SJay Sternberg */ 2333dca51e78SChristoph Hellwig result = queue_request_irq(adminq); 23347c349ddeSKeith Busch if (result) 2335e4b9852aSCasey Chen goto out_unlock; 23364e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &adminq->flags); 2337e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 23388fae268bSKeith Busch 23398fae268bSKeith Busch result = nvme_create_io_queues(dev); 23408fae268bSKeith Busch if (result || dev->online_queues < 2) 23418fae268bSKeith Busch return result; 23428fae268bSKeith Busch 23438fae268bSKeith Busch if (dev->online_queues - 1 < dev->max_qid) { 23448fae268bSKeith Busch nr_io_queues = dev->online_queues - 1; 23457d879c90SChristoph Hellwig nvme_delete_io_queues(dev); 2346e4b9852aSCasey Chen result = nvme_setup_io_queues_trylock(dev); 2347e4b9852aSCasey Chen if (result) 2348e4b9852aSCasey Chen return result; 23498fae268bSKeith Busch nvme_suspend_io_queues(dev); 23508fae268bSKeith Busch goto retry; 23518fae268bSKeith Busch } 23528fae268bSKeith Busch dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", 23538fae268bSKeith Busch dev->io_queues[HCTX_TYPE_DEFAULT], 23548fae268bSKeith Busch dev->io_queues[HCTX_TYPE_READ], 23558fae268bSKeith Busch dev->io_queues[HCTX_TYPE_POLL]); 23568fae268bSKeith Busch return 0; 2357e4b9852aSCasey Chen out_unlock: 2358e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 2359e4b9852aSCasey Chen return result; 236057dacad5SJay Sternberg } 236157dacad5SJay Sternberg 2362de671d61SJens Axboe static enum rq_end_io_ret nvme_del_queue_end(struct request *req, 2363de671d61SJens Axboe blk_status_t error) 2364db3cbfffSKeith Busch { 2365db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 2366db3cbfffSKeith Busch 2367db3cbfffSKeith Busch blk_mq_free_request(req); 2368d1ed6aa1SChristoph Hellwig complete(&nvmeq->delete_done); 2369de671d61SJens Axboe return RQ_END_IO_NONE; 2370db3cbfffSKeith Busch } 2371db3cbfffSKeith Busch 2372de671d61SJens Axboe static enum rq_end_io_ret nvme_del_cq_end(struct request *req, 2373de671d61SJens Axboe blk_status_t error) 2374db3cbfffSKeith Busch { 2375db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 2376db3cbfffSKeith Busch 2377d1ed6aa1SChristoph Hellwig if (error) 2378d1ed6aa1SChristoph Hellwig set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 2379db3cbfffSKeith Busch 2380de671d61SJens Axboe return nvme_del_queue_end(req, error); 2381db3cbfffSKeith Busch } 2382db3cbfffSKeith Busch 2383db3cbfffSKeith Busch static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) 2384db3cbfffSKeith Busch { 2385db3cbfffSKeith Busch struct request_queue *q = nvmeq->dev->ctrl.admin_q; 2386db3cbfffSKeith Busch struct request *req; 2387f66e2804SChaitanya Kulkarni struct nvme_command cmd = { }; 2388db3cbfffSKeith Busch 2389db3cbfffSKeith Busch cmd.delete_queue.opcode = opcode; 2390db3cbfffSKeith Busch cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); 2391db3cbfffSKeith Busch 2392e559398fSChristoph Hellwig req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT); 2393db3cbfffSKeith Busch if (IS_ERR(req)) 2394db3cbfffSKeith Busch return PTR_ERR(req); 2395e559398fSChristoph Hellwig nvme_init_request(req, &cmd); 2396db3cbfffSKeith Busch 2397e2e53086SChristoph Hellwig if (opcode == nvme_admin_delete_cq) 2398e2e53086SChristoph Hellwig req->end_io = nvme_del_cq_end; 2399e2e53086SChristoph Hellwig else 2400e2e53086SChristoph Hellwig req->end_io = nvme_del_queue_end; 2401db3cbfffSKeith Busch req->end_io_data = nvmeq; 2402db3cbfffSKeith Busch 2403d1ed6aa1SChristoph Hellwig 
init_completion(&nvmeq->delete_done); 2404e2e53086SChristoph Hellwig blk_execute_rq_nowait(req, false); 2405db3cbfffSKeith Busch return 0; 2406db3cbfffSKeith Busch } 2407db3cbfffSKeith Busch 24087d879c90SChristoph Hellwig static bool __nvme_delete_io_queues(struct nvme_dev *dev, u8 opcode) 2409db3cbfffSKeith Busch { 24105271edd4SChristoph Hellwig int nr_queues = dev->online_queues - 1, sent = 0; 2411db3cbfffSKeith Busch unsigned long timeout; 2412db3cbfffSKeith Busch 2413db3cbfffSKeith Busch retry: 2414dc96f938SChaitanya Kulkarni timeout = NVME_ADMIN_TIMEOUT; 24155271edd4SChristoph Hellwig while (nr_queues > 0) { 24165271edd4SChristoph Hellwig if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) 2417db3cbfffSKeith Busch break; 24185271edd4SChristoph Hellwig nr_queues--; 24195271edd4SChristoph Hellwig sent++; 24205271edd4SChristoph Hellwig } 2421d1ed6aa1SChristoph Hellwig while (sent) { 2422d1ed6aa1SChristoph Hellwig struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; 2423d1ed6aa1SChristoph Hellwig 2424d1ed6aa1SChristoph Hellwig timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, 24255271edd4SChristoph Hellwig timeout); 2426db3cbfffSKeith Busch if (timeout == 0) 24275271edd4SChristoph Hellwig return false; 2428d1ed6aa1SChristoph Hellwig 2429d1ed6aa1SChristoph Hellwig sent--; 24305271edd4SChristoph Hellwig if (nr_queues) 2431db3cbfffSKeith Busch goto retry; 2432db3cbfffSKeith Busch } 24335271edd4SChristoph Hellwig return true; 2434db3cbfffSKeith Busch } 2435db3cbfffSKeith Busch 24367d879c90SChristoph Hellwig static void nvme_delete_io_queues(struct nvme_dev *dev) 243757dacad5SJay Sternberg { 24387d879c90SChristoph Hellwig if (__nvme_delete_io_queues(dev, nvme_admin_delete_sq)) 24397d879c90SChristoph Hellwig __nvme_delete_io_queues(dev, nvme_admin_delete_cq); 24402b1b7e78SJianchao Wang } 24417d879c90SChristoph Hellwig 24420da7feaaSChristoph Hellwig static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev) 244357dacad5SJay Sternberg { 244457dacad5SJay Sternberg if (dev->io_queues[HCTX_TYPE_POLL]) 24450da7feaaSChristoph Hellwig return 3; 24460da7feaaSChristoph Hellwig if (dev->io_queues[HCTX_TYPE_READ]) 24470da7feaaSChristoph Hellwig return 2; 24480da7feaaSChristoph Hellwig return 1; 244957dacad5SJay Sternberg } 2450949928c1SKeith Busch 24512455a4b7SChristoph Hellwig static void nvme_pci_update_nr_queues(struct nvme_dev *dev) 24522455a4b7SChristoph Hellwig { 24532455a4b7SChristoph Hellwig blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); 24542455a4b7SChristoph Hellwig /* free previously allocated queues that are no longer usable */ 24552455a4b7SChristoph Hellwig nvme_free_queues(dev, dev->online_queues); 245657dacad5SJay Sternberg } 245757dacad5SJay Sternberg 2458b00a726aSKeith Busch static int nvme_pci_enable(struct nvme_dev *dev) 245957dacad5SJay Sternberg { 2460b00a726aSKeith Busch int result = -ENOMEM; 246157dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 24624bdf2603SFilippo Sironi int dma_address_bits = 64; 246357dacad5SJay Sternberg 246457dacad5SJay Sternberg if (pci_enable_device_mem(pdev)) 246557dacad5SJay Sternberg return result; 246657dacad5SJay Sternberg 246757dacad5SJay Sternberg pci_set_master(pdev); 246857dacad5SJay Sternberg 24694bdf2603SFilippo Sironi if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48) 24704bdf2603SFilippo Sironi dma_address_bits = 48; 24714bdf2603SFilippo Sironi if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits))) 247257dacad5SJay Sternberg goto disable; 247357dacad5SJay Sternberg 
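/*
 * Sanity-check that the controller still responds to MMIO before going any
 * further: a register read returning all-ones means the device has dropped
 * off the bus (or the BAR mapping is broken).
 */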
24747a67cbeaSChristoph Hellwig if (readl(dev->bar + NVME_REG_CSTS) == -1) { 247557dacad5SJay Sternberg result = -ENODEV; 2476b00a726aSKeith Busch goto disable; 247757dacad5SJay Sternberg } 247857dacad5SJay Sternberg 247957dacad5SJay Sternberg /* 2480a5229050SKeith Busch * Some devices and/or platforms don't advertise or work with INTx 2481a5229050SKeith Busch * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll 2482a5229050SKeith Busch * adjust this later. 248357dacad5SJay Sternberg */ 2484dca51e78SChristoph Hellwig result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); 2485dca51e78SChristoph Hellwig if (result < 0) 248609113abfSTong Zhang goto disable; 248757dacad5SJay Sternberg 248820d0dfe6SSagi Grimberg dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 24897a67cbeaSChristoph Hellwig 24907442ddceSJohn Garry dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, 2491b27c1e68Sweiping zhang io_queue_depth); 249220d0dfe6SSagi Grimberg dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); 24937a67cbeaSChristoph Hellwig dev->dbs = dev->bar + 4096; 24941f390c1fSStephan Günther 24951f390c1fSStephan Günther /* 249666341331SBenjamin Herrenschmidt * Some Apple controllers require a non-standard SQE size. 249766341331SBenjamin Herrenschmidt * Interestingly they also seem to ignore the CC:IOSQES register 249866341331SBenjamin Herrenschmidt * so we don't bother updating it here. 249966341331SBenjamin Herrenschmidt */ 250066341331SBenjamin Herrenschmidt if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES) 250166341331SBenjamin Herrenschmidt dev->io_sqes = 7; 250266341331SBenjamin Herrenschmidt else 2503c1e0cc7eSBenjamin Herrenschmidt dev->io_sqes = NVME_NVM_IOSQES; 25041f390c1fSStephan Günther 25051f390c1fSStephan Günther /* 25061f390c1fSStephan Günther * Temporary fix for the Apple controller found in the MacBook8,1 and 25071f390c1fSStephan Günther * some MacBook7,1 to avoid controller resets and data loss. 25081f390c1fSStephan Günther */ 25091f390c1fSStephan Günther if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { 25101f390c1fSStephan Günther dev->q_depth = 2; 25119bdcfb10SChristoph Hellwig dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " 25129bdcfb10SChristoph Hellwig "set queue depth=%u to work around controller resets\n", 25131f390c1fSStephan Günther dev->q_depth); 2514d554b5e1SMartin K. Petersen } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && 2515d554b5e1SMartin K. Petersen (pdev->device == 0xa821 || pdev->device == 0xa822) && 251620d0dfe6SSagi Grimberg NVME_CAP_MQES(dev->ctrl.cap) == 0) { 2517d554b5e1SMartin K. Petersen dev->q_depth = 64; 2518d554b5e1SMartin K. Petersen dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " 2519d554b5e1SMartin K. 
Petersen "set queue depth=%u\n", dev->q_depth); 25201f390c1fSStephan Günther } 25211f390c1fSStephan Günther 2522d38e9f04SBenjamin Herrenschmidt /* 2523d38e9f04SBenjamin Herrenschmidt * Controllers with the shared tags quirk need the IO queue to be 2524d38e9f04SBenjamin Herrenschmidt * big enough so that we get 32 tags for the admin queue 2525d38e9f04SBenjamin Herrenschmidt */ 2526d38e9f04SBenjamin Herrenschmidt if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) && 2527d38e9f04SBenjamin Herrenschmidt (dev->q_depth < (NVME_AQ_DEPTH + 2))) { 2528d38e9f04SBenjamin Herrenschmidt dev->q_depth = NVME_AQ_DEPTH + 2; 2529d38e9f04SBenjamin Herrenschmidt dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n", 2530d38e9f04SBenjamin Herrenschmidt dev->q_depth); 2531d38e9f04SBenjamin Herrenschmidt } 253288d356caSChristoph Hellwig dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ 2533d38e9f04SBenjamin Herrenschmidt 2534f65efd6dSChristoph Hellwig nvme_map_cmb(dev); 2535202021c1SStephen Bates 2536a0a3408eSKeith Busch pci_enable_pcie_error_reporting(pdev); 2537a0a3408eSKeith Busch pci_save_state(pdev); 2538a6ee7f19SChristoph Hellwig 253909113abfSTong Zhang result = nvme_pci_configure_admin_queue(dev); 254009113abfSTong Zhang if (result) 254109113abfSTong Zhang goto free_irq; 254209113abfSTong Zhang return result; 254357dacad5SJay Sternberg 254409113abfSTong Zhang free_irq: 254509113abfSTong Zhang pci_free_irq_vectors(pdev); 254657dacad5SJay Sternberg disable: 254757dacad5SJay Sternberg pci_disable_device(pdev); 254857dacad5SJay Sternberg return result; 254957dacad5SJay Sternberg } 255057dacad5SJay Sternberg 255157dacad5SJay Sternberg static void nvme_dev_unmap(struct nvme_dev *dev) 255257dacad5SJay Sternberg { 2553b00a726aSKeith Busch if (dev->bar) 2554b00a726aSKeith Busch iounmap(dev->bar); 2555a1f447b3SJohannes Thumshirn pci_release_mem_regions(to_pci_dev(dev->dev)); 2556b00a726aSKeith Busch } 2557b00a726aSKeith Busch 255868e81ebaSChristoph Hellwig static bool nvme_pci_ctrl_is_dead(struct nvme_dev *dev) 2559b00a726aSKeith Busch { 256057dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 2561081f5e75SKeith Busch u32 csts; 256257dacad5SJay Sternberg 256368e81ebaSChristoph Hellwig if (!pci_is_enabled(pdev) || !pci_device_is_present(pdev)) 256468e81ebaSChristoph Hellwig return true; 256568e81ebaSChristoph Hellwig if (pdev->error_state != pci_channel_io_normal) 256668e81ebaSChristoph Hellwig return true; 256757dacad5SJay Sternberg 256868e81ebaSChristoph Hellwig csts = readl(dev->bar + NVME_REG_CSTS); 256968e81ebaSChristoph Hellwig return (csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY); 2570a0a3408eSKeith Busch } 257157dacad5SJay Sternberg 2572a5cdb68cSKeith Busch static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) 257357dacad5SJay Sternberg { 2574302ad8ccSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 257568e81ebaSChristoph Hellwig bool dead; 257657dacad5SJay Sternberg 257777bf25eaSKeith Busch mutex_lock(&dev->shutdown_lock); 257868e81ebaSChristoph Hellwig dead = nvme_pci_ctrl_is_dead(dev); 2579ebef7368SKeith Busch if (dev->ctrl.state == NVME_CTRL_LIVE || 2580e43269e6SKeith Busch dev->ctrl.state == NVME_CTRL_RESETTING) { 258168e81ebaSChristoph Hellwig if (pci_is_enabled(pdev)) 2582302ad8ccSKeith Busch nvme_start_freeze(&dev->ctrl); 2583302ad8ccSKeith Busch /* 258468e81ebaSChristoph Hellwig * Give the controller a chance to complete all entered requests 258568e81ebaSChristoph Hellwig * if doing a safe shutdown. 
2586302ad8ccSKeith Busch */ 258768e81ebaSChristoph Hellwig if (!dead && shutdown) 2588302ad8ccSKeith Busch nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); 258968e81ebaSChristoph Hellwig } 259087ad72a5SChristoph Hellwig 25919f27bd70SChristoph Hellwig nvme_quiesce_io_queues(&dev->ctrl); 25929a915a5bSJianchao Wang 259364ee0ac0SKeith Busch if (!dead && dev->ctrl.queue_count > 0) { 25947d879c90SChristoph Hellwig nvme_delete_io_queues(dev); 259547d42d22SChristoph Hellwig nvme_disable_ctrl(&dev->ctrl, shutdown); 259647d42d22SChristoph Hellwig nvme_poll_irqdisable(&dev->queues[0]); 259757dacad5SJay Sternberg } 25988fae268bSKeith Busch nvme_suspend_io_queues(dev); 259910981f23SChristoph Hellwig nvme_suspend_queue(dev, 0); 2600c80767f7SChristoph Hellwig pci_free_irq_vectors(pdev); 2601c80767f7SChristoph Hellwig if (pci_is_enabled(pdev)) { 2602c80767f7SChristoph Hellwig pci_disable_pcie_error_reporting(pdev); 2603c80767f7SChristoph Hellwig pci_disable_device(pdev); 2604c80767f7SChristoph Hellwig } 2605fa46c6fbSKeith Busch nvme_reap_pending_cqes(dev); 260657dacad5SJay Sternberg 26071fcfca78SGuixin Liu nvme_cancel_tagset(&dev->ctrl); 26081fcfca78SGuixin Liu nvme_cancel_admin_tagset(&dev->ctrl); 2609302ad8ccSKeith Busch 2610302ad8ccSKeith Busch /* 2611302ad8ccSKeith Busch * The driver will not be starting up queues again if shutting down so 2612302ad8ccSKeith Busch * must flush all entered requests to their failed completion to avoid 2613302ad8ccSKeith Busch * deadlocking blk-mq hot-cpu notifier. 2614302ad8ccSKeith Busch */ 2615c8e9e9b7SKeith Busch if (shutdown) { 26169f27bd70SChristoph Hellwig nvme_unquiesce_io_queues(&dev->ctrl); 2617c8e9e9b7SKeith Busch if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) 26189f27bd70SChristoph Hellwig nvme_unquiesce_admin_queue(&dev->ctrl); 2619c8e9e9b7SKeith Busch } 262077bf25eaSKeith Busch mutex_unlock(&dev->shutdown_lock); 262157dacad5SJay Sternberg } 262257dacad5SJay Sternberg 2623c1ac9a4bSKeith Busch static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) 2624c1ac9a4bSKeith Busch { 2625c1ac9a4bSKeith Busch if (!nvme_wait_reset(&dev->ctrl)) 2626c1ac9a4bSKeith Busch return -EBUSY; 2627c1ac9a4bSKeith Busch nvme_dev_disable(dev, shutdown); 2628c1ac9a4bSKeith Busch return 0; 2629c1ac9a4bSKeith Busch } 2630c1ac9a4bSKeith Busch 263157dacad5SJay Sternberg static int nvme_setup_prp_pools(struct nvme_dev *dev) 263257dacad5SJay Sternberg { 263357dacad5SJay Sternberg dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, 2634c61b82c7SChristoph Hellwig NVME_CTRL_PAGE_SIZE, 2635c61b82c7SChristoph Hellwig NVME_CTRL_PAGE_SIZE, 0); 263657dacad5SJay Sternberg if (!dev->prp_page_pool) 263757dacad5SJay Sternberg return -ENOMEM; 263857dacad5SJay Sternberg 263957dacad5SJay Sternberg /* Optimisation for I/Os between 4k and 128k */ 264057dacad5SJay Sternberg dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, 264157dacad5SJay Sternberg 256, 256, 0); 264257dacad5SJay Sternberg if (!dev->prp_small_pool) { 264357dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 264457dacad5SJay Sternberg return -ENOMEM; 264557dacad5SJay Sternberg } 264657dacad5SJay Sternberg return 0; 264757dacad5SJay Sternberg } 264857dacad5SJay Sternberg 264957dacad5SJay Sternberg static void nvme_release_prp_pools(struct nvme_dev *dev) 265057dacad5SJay Sternberg { 265157dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 265257dacad5SJay Sternberg dma_pool_destroy(dev->prp_small_pool); 265357dacad5SJay Sternberg } 265457dacad5SJay Sternberg 
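/*
 * The single-element mempool allocated below backs the per-command
 * scatterlist array (NVME_MAX_SEGS entries): allocations are attempted from
 * the slab first, and the reserved element only guarantees forward progress
 * under memory pressure.
 */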
2655081a7d95SChristoph Hellwig static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev) 2656081a7d95SChristoph Hellwig { 26577846c1b5SKeith Busch size_t alloc_size = sizeof(struct scatterlist) * NVME_MAX_SEGS; 2658081a7d95SChristoph Hellwig 2659081a7d95SChristoph Hellwig dev->iod_mempool = mempool_create_node(1, 2660081a7d95SChristoph Hellwig mempool_kmalloc, mempool_kfree, 2661081a7d95SChristoph Hellwig (void *)alloc_size, GFP_KERNEL, 2662081a7d95SChristoph Hellwig dev_to_node(dev->dev)); 2663081a7d95SChristoph Hellwig if (!dev->iod_mempool) 2664081a7d95SChristoph Hellwig return -ENOMEM; 2665081a7d95SChristoph Hellwig return 0; 2666081a7d95SChristoph Hellwig } 2667081a7d95SChristoph Hellwig 2668770597ecSKeith Busch static void nvme_free_tagset(struct nvme_dev *dev) 2669770597ecSKeith Busch { 2670770597ecSKeith Busch if (dev->tagset.tags) 26710da7feaaSChristoph Hellwig nvme_remove_io_tag_set(&dev->ctrl); 2672770597ecSKeith Busch dev->ctrl.tagset = NULL; 2673770597ecSKeith Busch } 2674770597ecSKeith Busch 26752e87570bSChristoph Hellwig /* pairs with nvme_pci_alloc_dev */ 26761673f1f0SChristoph Hellwig static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) 267757dacad5SJay Sternberg { 26781673f1f0SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 267957dacad5SJay Sternberg 2680770597ecSKeith Busch nvme_free_tagset(dev); 2681253fd4acSIsrael Rukshin put_device(dev->dev); 2682253fd4acSIsrael Rukshin kfree(dev->queues); 268357dacad5SJay Sternberg kfree(dev); 268457dacad5SJay Sternberg } 268557dacad5SJay Sternberg 2686fd634f41SChristoph Hellwig static void nvme_reset_work(struct work_struct *work) 268757dacad5SJay Sternberg { 2688d86c4d8eSChristoph Hellwig struct nvme_dev *dev = 2689d86c4d8eSChristoph Hellwig container_of(work, struct nvme_dev, ctrl.reset_work); 2690a98e58e5SScott Bauer bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); 2691e71afda4SChaitanya Kulkarni int result; 269257dacad5SJay Sternberg 26937764656bSZhihao Cheng if (dev->ctrl.state != NVME_CTRL_RESETTING) { 26947764656bSZhihao Cheng dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n", 26957764656bSZhihao Cheng dev->ctrl.state); 26968cb9f10bSChristoph Hellwig return; 2697e71afda4SChaitanya Kulkarni } 2698fd634f41SChristoph Hellwig 2699fd634f41SChristoph Hellwig /* 2700fd634f41SChristoph Hellwig * If we're called to reset a live controller first shut it down before 2701fd634f41SChristoph Hellwig * moving on. 2702fd634f41SChristoph Hellwig */ 2703b00a726aSKeith Busch if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 2704a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 2705d6135c3aSKeith Busch nvme_sync_queues(&dev->ctrl); 2706fd634f41SChristoph Hellwig 27075c959d73SKeith Busch mutex_lock(&dev->shutdown_lock); 2708b00a726aSKeith Busch result = nvme_pci_enable(dev); 270957dacad5SJay Sternberg if (result) 27104726bcf3SKeith Busch goto out_unlock; 27119f27bd70SChristoph Hellwig nvme_unquiesce_admin_queue(&dev->ctrl); 27125c959d73SKeith Busch mutex_unlock(&dev->shutdown_lock); 27135c959d73SKeith Busch 27145c959d73SKeith Busch /* 27155c959d73SKeith Busch * Introduce CONNECTING state from nvme-fc/rdma transports to mark the 27165c959d73SKeith Busch * initializing procedure here. 
27175c959d73SKeith Busch */ 27185c959d73SKeith Busch if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { 27195c959d73SKeith Busch dev_warn(dev->ctrl.device, 27205c959d73SKeith Busch "failed to mark controller CONNECTING\n"); 2721cee6c269SMinwoo Im result = -EBUSY; 27225c959d73SKeith Busch goto out; 27235c959d73SKeith Busch } 2724943e942eSJens Axboe 272594cc781fSChristoph Hellwig result = nvme_init_ctrl_finish(&dev->ctrl, was_suspend); 2726ce4541f4SChristoph Hellwig if (result) 2727f58944e2SKeith Busch goto out; 2728ce4541f4SChristoph Hellwig 272965a54646SChristoph Hellwig nvme_dbbuf_dma_alloc(dev); 2730a98e58e5SScott Bauer 27319620cfbaSChristoph Hellwig result = nvme_setup_host_mem(dev); 27329620cfbaSChristoph Hellwig if (result < 0) 27339620cfbaSChristoph Hellwig goto out; 273487ad72a5SChristoph Hellwig 273557dacad5SJay Sternberg result = nvme_setup_io_queues(dev); 273657dacad5SJay Sternberg if (result) 2737f58944e2SKeith Busch goto out; 273857dacad5SJay Sternberg 273921f033f7SKeith Busch /* 27400ffc7e98SChristoph Hellwig * Freeze and update the number of I/O queues as those might have 2741eac3ef26SChristoph Hellwig * changed. If there are no I/O queues left after this reset, keep the 2742eac3ef26SChristoph Hellwig * controller around but remove all namespaces. 274357dacad5SJay Sternberg */ 27440ffc7e98SChristoph Hellwig if (dev->online_queues > 1) { 27459f27bd70SChristoph Hellwig nvme_unquiesce_io_queues(&dev->ctrl); 2746302ad8ccSKeith Busch nvme_wait_freeze(&dev->ctrl); 27472455a4b7SChristoph Hellwig nvme_pci_update_nr_queues(dev); 27482455a4b7SChristoph Hellwig nvme_dbbuf_set(dev); 2749302ad8ccSKeith Busch nvme_unfreeze(&dev->ctrl); 27500ffc7e98SChristoph Hellwig } else { 27510ffc7e98SChristoph Hellwig dev_warn(dev->ctrl.device, "IO queues lost\n"); 2752cd50f9b2SChristoph Hellwig nvme_mark_namespaces_dead(&dev->ctrl); 27539f27bd70SChristoph Hellwig nvme_unquiesce_io_queues(&dev->ctrl); 27540ffc7e98SChristoph Hellwig nvme_remove_namespaces(&dev->ctrl); 27550ffc7e98SChristoph Hellwig nvme_free_tagset(dev); 275657dacad5SJay Sternberg } 275757dacad5SJay Sternberg 27582b1b7e78SJianchao Wang /* 27592b1b7e78SJianchao Wang * If only the admin queue is live, keep it for further investigation 27602b1b7e78SJianchao Wang * or recovery. 27612b1b7e78SJianchao Wang */ 27625d02a5c1SKeith Busch if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { 27632b1b7e78SJianchao Wang dev_warn(dev->ctrl.device, 27645d02a5c1SKeith Busch "failed to mark controller live state\n"); 2765e71afda4SChaitanya Kulkarni result = -ENODEV; 2766bb8d261eSChristoph Hellwig goto out; 2767bb8d261eSChristoph Hellwig } 276892911a55SChristoph Hellwig 2769d09f2b45SSagi Grimberg nvme_start_ctrl(&dev->ctrl); 277057dacad5SJay Sternberg return; 277157dacad5SJay Sternberg 27724726bcf3SKeith Busch out_unlock: 27734726bcf3SKeith Busch mutex_unlock(&dev->shutdown_lock); 277457dacad5SJay Sternberg out: 2775c7c16c5bSChristoph Hellwig /* 2776c7c16c5bSChristoph Hellwig * Set state to deleting now to avoid blocking nvme_wait_reset(), which 2777c7c16c5bSChristoph Hellwig * may be holding this pci_dev's device lock. 
2778c7c16c5bSChristoph Hellwig */ 2779c7c16c5bSChristoph Hellwig dev_warn(dev->ctrl.device, "Disabling device after reset failure: %d\n", 2780c7c16c5bSChristoph Hellwig result); 2781c7c16c5bSChristoph Hellwig nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 2782c7c16c5bSChristoph Hellwig nvme_dev_disable(dev, true); 2783c7c16c5bSChristoph Hellwig nvme_mark_namespaces_dead(&dev->ctrl); 2784c7c16c5bSChristoph Hellwig nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); 278557dacad5SJay Sternberg } 278657dacad5SJay Sternberg 27871c63dc66SChristoph Hellwig static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) 278857dacad5SJay Sternberg { 27891c63dc66SChristoph Hellwig *val = readl(to_nvme_dev(ctrl)->bar + off); 27901c63dc66SChristoph Hellwig return 0; 279157dacad5SJay Sternberg } 27921c63dc66SChristoph Hellwig 27935fd4ce1bSChristoph Hellwig static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) 27945fd4ce1bSChristoph Hellwig { 27955fd4ce1bSChristoph Hellwig writel(val, to_nvme_dev(ctrl)->bar + off); 27965fd4ce1bSChristoph Hellwig return 0; 27975fd4ce1bSChristoph Hellwig } 27985fd4ce1bSChristoph Hellwig 27997fd8930fSChristoph Hellwig static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) 28007fd8930fSChristoph Hellwig { 28013a8ecc93SArd Biesheuvel *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off); 28027fd8930fSChristoph Hellwig return 0; 28037fd8930fSChristoph Hellwig } 28047fd8930fSChristoph Hellwig 280597c12223SKeith Busch static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) 280697c12223SKeith Busch { 280797c12223SKeith Busch struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); 280897c12223SKeith Busch 28092db24e4aSMax Gurtovoy return snprintf(buf, size, "%s\n", dev_name(&pdev->dev)); 281097c12223SKeith Busch } 281197c12223SKeith Busch 28122f0dad17SKeith Busch static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl) 28132f0dad17SKeith Busch { 28142f0dad17SKeith Busch struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); 28152f0dad17SKeith Busch struct nvme_subsystem *subsys = ctrl->subsys; 28162f0dad17SKeith Busch 28172f0dad17SKeith Busch dev_err(ctrl->device, 28182f0dad17SKeith Busch "VID:DID %04x:%04x model:%.*s firmware:%.*s\n", 28192f0dad17SKeith Busch pdev->vendor, pdev->device, 28202f0dad17SKeith Busch nvme_strlen(subsys->model, sizeof(subsys->model)), 28212f0dad17SKeith Busch subsys->model, nvme_strlen(subsys->firmware_rev, 28222f0dad17SKeith Busch sizeof(subsys->firmware_rev)), 28232f0dad17SKeith Busch subsys->firmware_rev); 28242f0dad17SKeith Busch } 28252f0dad17SKeith Busch 28262f859441SLogan Gunthorpe static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl) 28272f859441SLogan Gunthorpe { 28282f859441SLogan Gunthorpe struct nvme_dev *dev = to_nvme_dev(ctrl); 28292f859441SLogan Gunthorpe 28302f859441SLogan Gunthorpe return dma_pci_p2pdma_supported(dev->dev); 28312f859441SLogan Gunthorpe } 28322f859441SLogan Gunthorpe 28331c63dc66SChristoph Hellwig static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { 28341a353d85SMing Lin .name = "pcie", 2835e439bb12SSagi Grimberg .module = THIS_MODULE, 28362f859441SLogan Gunthorpe .flags = NVME_F_METADATA_SUPPORTED, 283786adbf0cSChristoph Hellwig .dev_attr_groups = nvme_pci_dev_attr_groups, 28381c63dc66SChristoph Hellwig .reg_read32 = nvme_pci_reg_read32, 28395fd4ce1bSChristoph Hellwig .reg_write32 = nvme_pci_reg_write32, 28407fd8930fSChristoph Hellwig .reg_read64 = nvme_pci_reg_read64, 28411673f1f0SChristoph Hellwig .free_ctrl = 
nvme_pci_free_ctrl, 2842f866fc42SChristoph Hellwig .submit_async_event = nvme_pci_submit_async_event, 284397c12223SKeith Busch .get_address = nvme_pci_get_address, 28442f0dad17SKeith Busch .print_device_info = nvme_pci_print_device_info, 28452f859441SLogan Gunthorpe .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma, 28461c63dc66SChristoph Hellwig }; 284757dacad5SJay Sternberg 2848b00a726aSKeith Busch static int nvme_dev_map(struct nvme_dev *dev) 2849b00a726aSKeith Busch { 2850b00a726aSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 2851b00a726aSKeith Busch 2852a1f447b3SJohannes Thumshirn if (pci_request_mem_regions(pdev, "nvme")) 2853b00a726aSKeith Busch return -ENODEV; 2854b00a726aSKeith Busch 285597f6ef64SXu Yu if (nvme_remap_bar(dev, NVME_REG_DBS + 4096)) 2856b00a726aSKeith Busch goto release; 2857b00a726aSKeith Busch 2858b00a726aSKeith Busch return 0; 2859b00a726aSKeith Busch release: 2860a1f447b3SJohannes Thumshirn pci_release_mem_regions(pdev); 2861b00a726aSKeith Busch return -ENODEV; 2862b00a726aSKeith Busch } 2863b00a726aSKeith Busch 28648427bbc2SKai-Heng Feng static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) 2865ff5350a8SAndy Lutomirski { 2866ff5350a8SAndy Lutomirski if (pdev->vendor == 0x144d && pdev->device == 0xa802) { 2867ff5350a8SAndy Lutomirski /* 2868ff5350a8SAndy Lutomirski * Several Samsung devices seem to drop off the PCIe bus 2869ff5350a8SAndy Lutomirski * randomly when APST is on and uses the deepest sleep state. 2870ff5350a8SAndy Lutomirski * This has been observed on a Samsung "SM951 NVMe SAMSUNG 2871ff5350a8SAndy Lutomirski * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD 2872ff5350a8SAndy Lutomirski * 950 PRO 256GB", but it seems to be restricted to two Dell 2873ff5350a8SAndy Lutomirski * laptops. 
2874ff5350a8SAndy Lutomirski */ 2875ff5350a8SAndy Lutomirski if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") && 2876ff5350a8SAndy Lutomirski (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || 2877ff5350a8SAndy Lutomirski dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) 2878ff5350a8SAndy Lutomirski return NVME_QUIRK_NO_DEEPEST_PS; 28798427bbc2SKai-Heng Feng } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { 28808427bbc2SKai-Heng Feng /* 28818427bbc2SKai-Heng Feng * Samsung SSD 960 EVO drops off the PCIe bus after system 2882467c77d4SJarosław Janik * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as 2883467c77d4SJarosław Janik * within few minutes after bootup on a Coffee Lake board - 2884467c77d4SJarosław Janik * ASUS PRIME Z370-A 28858427bbc2SKai-Heng Feng */ 28868427bbc2SKai-Heng Feng if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && 2887467c77d4SJarosław Janik (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || 2888467c77d4SJarosław Janik dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) 28898427bbc2SKai-Heng Feng return NVME_QUIRK_NO_APST; 28901fae37acSShyjumon N } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 || 28911fae37acSShyjumon N pdev->device == 0xa808 || pdev->device == 0xa809)) || 28921fae37acSShyjumon N (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) { 28931fae37acSShyjumon N /* 28941fae37acSShyjumon N * Forcing to use host managed nvme power settings for 28951fae37acSShyjumon N * lowest idle power with quick resume latency on 28961fae37acSShyjumon N * Samsung and Toshiba SSDs based on suspend behavior 28971fae37acSShyjumon N * on Coffee Lake board for LENOVO C640 28981fae37acSShyjumon N */ 28991fae37acSShyjumon N if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) && 29001fae37acSShyjumon N dmi_match(DMI_BOARD_NAME, "LNVNB161216")) 29011fae37acSShyjumon N return NVME_QUIRK_SIMPLE_SUSPEND; 2902ff5350a8SAndy Lutomirski } 2903ff5350a8SAndy Lutomirski 2904ff5350a8SAndy Lutomirski return 0; 2905ff5350a8SAndy Lutomirski } 2906ff5350a8SAndy Lutomirski 29072e87570bSChristoph Hellwig static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, 29082e87570bSChristoph Hellwig const struct pci_device_id *id) 290918119775SKeith Busch { 2910ff5350a8SAndy Lutomirski unsigned long quirks = id->driver_data; 29112e87570bSChristoph Hellwig int node = dev_to_node(&pdev->dev); 29122e87570bSChristoph Hellwig struct nvme_dev *dev; 29132e87570bSChristoph Hellwig int ret = -ENOMEM; 291457dacad5SJay Sternberg 291557dacad5SJay Sternberg if (node == NUMA_NO_NODE) 29162fa84351SMasayoshi Mizuma set_dev_node(&pdev->dev, first_memory_node); 291757dacad5SJay Sternberg 291857dacad5SJay Sternberg dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); 291957dacad5SJay Sternberg if (!dev) 29202e87570bSChristoph Hellwig return NULL; 29212e87570bSChristoph Hellwig INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); 29222e87570bSChristoph Hellwig mutex_init(&dev->shutdown_lock); 2923147b27e4SSagi Grimberg 29242a5bcfddSWeiping Zhang dev->nr_write_queues = write_queues; 29252a5bcfddSWeiping Zhang dev->nr_poll_queues = poll_queues; 29262a5bcfddSWeiping Zhang dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1; 29272a5bcfddSWeiping Zhang dev->queues = kcalloc_node(dev->nr_allocated_queues, 29282a5bcfddSWeiping Zhang sizeof(struct nvme_queue), GFP_KERNEL, node); 292957dacad5SJay Sternberg if (!dev->queues) 29302e87570bSChristoph Hellwig goto out_free_dev; 293157dacad5SJay Sternberg 293257dacad5SJay Sternberg dev->dev = get_device(&pdev->dev); 2933f3ca80fcSChristoph Hellwig 29348427bbc2SKai-Heng Feng 
quirks |= check_vendor_combination_bug(pdev); 29352744d7a0SMario Limonciello if (!noacpi && acpi_storage_d3(&pdev->dev)) { 2936df4f9bc4SDavid E. Box /* 2937df4f9bc4SDavid E. Box * Some systems use a bios work around to ask for D3 on 2938df4f9bc4SDavid E. Box * platforms that support kernel managed suspend. 2939df4f9bc4SDavid E. Box */ 2940df4f9bc4SDavid E. Box dev_info(&pdev->dev, 2941df4f9bc4SDavid E. Box "platform quirk: setting simple suspend\n"); 2942df4f9bc4SDavid E. Box quirks |= NVME_QUIRK_SIMPLE_SUSPEND; 2943df4f9bc4SDavid E. Box } 29442e87570bSChristoph Hellwig ret = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, 29452e87570bSChristoph Hellwig quirks); 29462e87570bSChristoph Hellwig if (ret) 29472e87570bSChristoph Hellwig goto out_put_device; 29483f30a79cSChristoph Hellwig 29493f30a79cSChristoph Hellwig dma_set_min_align_mask(&pdev->dev, NVME_CTRL_PAGE_SIZE - 1); 29503f30a79cSChristoph Hellwig dma_set_max_seg_size(&pdev->dev, 0xffffffff); 2951df4f9bc4SDavid E. Box 2952943e942eSJens Axboe /* 29533f30a79cSChristoph Hellwig * Limit the max command size to prevent iod->sg allocations going 29543f30a79cSChristoph Hellwig * over a single page. 2955943e942eSJens Axboe */ 29563f30a79cSChristoph Hellwig dev->ctrl.max_hw_sectors = min_t(u32, 29573f30a79cSChristoph Hellwig NVME_MAX_KB_SZ << 1, dma_max_mapping_size(&pdev->dev) >> 9); 29583f30a79cSChristoph Hellwig dev->ctrl.max_segments = NVME_MAX_SEGS; 2959943e942eSJens Axboe 29603f30a79cSChristoph Hellwig /* 29613f30a79cSChristoph Hellwig * There is no support for SGLs for metadata (yet), so we are limited to 29623f30a79cSChristoph Hellwig * a single integrity segment for the separate metadata pointer. 29633f30a79cSChristoph Hellwig */ 29643f30a79cSChristoph Hellwig dev->ctrl.max_integrity_segments = 1; 29652e87570bSChristoph Hellwig return dev; 29662e87570bSChristoph Hellwig 29672e87570bSChristoph Hellwig out_put_device: 29682e87570bSChristoph Hellwig put_device(dev->dev); 29692e87570bSChristoph Hellwig kfree(dev->queues); 29702e87570bSChristoph Hellwig out_free_dev: 29712e87570bSChristoph Hellwig kfree(dev); 29722e87570bSChristoph Hellwig return ERR_PTR(ret); 2973943e942eSJens Axboe } 2974943e942eSJens Axboe 29752e87570bSChristoph Hellwig static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 29762e87570bSChristoph Hellwig { 29772e87570bSChristoph Hellwig struct nvme_dev *dev; 29782e87570bSChristoph Hellwig int result = -ENOMEM; 29792e87570bSChristoph Hellwig 29802e87570bSChristoph Hellwig dev = nvme_pci_alloc_dev(pdev, id); 29812e87570bSChristoph Hellwig if (!dev) 29822e87570bSChristoph Hellwig return -ENOMEM; 29832e87570bSChristoph Hellwig 29842e87570bSChristoph Hellwig result = nvme_dev_map(dev); 2985b6e44b4cSKeith Busch if (result) 29862e87570bSChristoph Hellwig goto out_uninit_ctrl; 29872e87570bSChristoph Hellwig 29882e87570bSChristoph Hellwig result = nvme_setup_prp_pools(dev); 29892e87570bSChristoph Hellwig if (result) 29902e87570bSChristoph Hellwig goto out_dev_unmap; 299157dacad5SJay Sternberg 2992081a7d95SChristoph Hellwig result = nvme_pci_alloc_iod_mempool(dev); 2993081a7d95SChristoph Hellwig if (result) 29942e87570bSChristoph Hellwig goto out_release_prp_pools; 2995b6e44b4cSKeith Busch 299657dacad5SJay Sternberg dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 299757dacad5SJay Sternberg 2998eac3ef26SChristoph Hellwig result = nvme_pci_enable(dev); 2999eac3ef26SChristoph Hellwig if (result) 3000eac3ef26SChristoph Hellwig goto out_release_iod_mempool; 300157dacad5SJay 
Sternberg 30020da7feaaSChristoph Hellwig result = nvme_alloc_admin_tag_set(&dev->ctrl, &dev->admin_tagset, 30030da7feaaSChristoph Hellwig &nvme_mq_admin_ops, sizeof(struct nvme_iod)); 3004eac3ef26SChristoph Hellwig if (result) 3005eac3ef26SChristoph Hellwig goto out_disable; 3006eac3ef26SChristoph Hellwig 3007eac3ef26SChristoph Hellwig /* 3008eac3ef26SChristoph Hellwig * Mark the controller as connecting before sending admin commands to 3009eac3ef26SChristoph Hellwig * allow the timeout handler to do the right thing. 3010eac3ef26SChristoph Hellwig */ 3011eac3ef26SChristoph Hellwig if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { 3012eac3ef26SChristoph Hellwig dev_warn(dev->ctrl.device, 3013eac3ef26SChristoph Hellwig "failed to mark controller CONNECTING\n"); 3014eac3ef26SChristoph Hellwig result = -EBUSY; 3015eac3ef26SChristoph Hellwig goto out_disable; 3016eac3ef26SChristoph Hellwig } 3017eac3ef26SChristoph Hellwig 3018eac3ef26SChristoph Hellwig result = nvme_init_ctrl_finish(&dev->ctrl, false); 3019eac3ef26SChristoph Hellwig if (result) 3020eac3ef26SChristoph Hellwig goto out_disable; 3021eac3ef26SChristoph Hellwig 3022eac3ef26SChristoph Hellwig nvme_dbbuf_dma_alloc(dev); 3023eac3ef26SChristoph Hellwig 3024eac3ef26SChristoph Hellwig result = nvme_setup_host_mem(dev); 3025eac3ef26SChristoph Hellwig if (result < 0) 3026eac3ef26SChristoph Hellwig goto out_disable; 3027eac3ef26SChristoph Hellwig 3028eac3ef26SChristoph Hellwig result = nvme_setup_io_queues(dev); 3029eac3ef26SChristoph Hellwig if (result) 3030eac3ef26SChristoph Hellwig goto out_disable; 3031eac3ef26SChristoph Hellwig 3032eac3ef26SChristoph Hellwig if (dev->online_queues > 1) { 30330da7feaaSChristoph Hellwig nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops, 30340da7feaaSChristoph Hellwig nvme_pci_nr_maps(dev), sizeof(struct nvme_iod)); 3035eac3ef26SChristoph Hellwig nvme_dbbuf_set(dev); 3036eac3ef26SChristoph Hellwig } 3037eac3ef26SChristoph Hellwig 30380da7feaaSChristoph Hellwig if (!dev->ctrl.tagset) 30390da7feaaSChristoph Hellwig dev_warn(dev->ctrl.device, "IO queues not created\n"); 30400da7feaaSChristoph Hellwig 3041eac3ef26SChristoph Hellwig if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { 3042eac3ef26SChristoph Hellwig dev_warn(dev->ctrl.device, 3043eac3ef26SChristoph Hellwig "failed to mark controller live state\n"); 3044eac3ef26SChristoph Hellwig result = -ENODEV; 3045eac3ef26SChristoph Hellwig goto out_disable; 3046eac3ef26SChristoph Hellwig } 3047eac3ef26SChristoph Hellwig 30482e87570bSChristoph Hellwig pci_set_drvdata(pdev, dev); 304957dacad5SJay Sternberg 3050eac3ef26SChristoph Hellwig nvme_start_ctrl(&dev->ctrl); 3051eac3ef26SChristoph Hellwig nvme_put_ctrl(&dev->ctrl); 30525a5754a4SKeith Busch flush_work(&dev->ctrl.scan_work); 305357dacad5SJay Sternberg return 0; 305457dacad5SJay Sternberg 3055eac3ef26SChristoph Hellwig out_disable: 3056eac3ef26SChristoph Hellwig nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 3057eac3ef26SChristoph Hellwig nvme_dev_disable(dev, true); 3058eac3ef26SChristoph Hellwig nvme_free_host_mem(dev); 3059eac3ef26SChristoph Hellwig nvme_dev_remove_admin(dev); 3060eac3ef26SChristoph Hellwig nvme_dbbuf_dma_free(dev); 3061eac3ef26SChristoph Hellwig nvme_free_queues(dev, 0); 3062eac3ef26SChristoph Hellwig out_release_iod_mempool: 3063b6e44b4cSKeith Busch mempool_destroy(dev->iod_mempool); 30642e87570bSChristoph Hellwig out_release_prp_pools: 306557dacad5SJay Sternberg nvme_release_prp_pools(dev); 30662e87570bSChristoph Hellwig out_dev_unmap: 
306757dacad5SJay Sternberg nvme_dev_unmap(dev); 30682e87570bSChristoph Hellwig out_uninit_ctrl: 30692e87570bSChristoph Hellwig nvme_uninit_ctrl(&dev->ctrl); 307057dacad5SJay Sternberg return result; 307157dacad5SJay Sternberg } 307257dacad5SJay Sternberg 3073775755edSChristoph Hellwig static void nvme_reset_prepare(struct pci_dev *pdev) 307457dacad5SJay Sternberg { 307557dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 3076c1ac9a4bSKeith Busch 3077c1ac9a4bSKeith Busch /* 3078c1ac9a4bSKeith Busch * We don't need to check the return value from waiting for the reset 3079c1ac9a4bSKeith Busch * state as pci_dev device lock is held, making it impossible to race 3080c1ac9a4bSKeith Busch * with ->remove(). 3081c1ac9a4bSKeith Busch */ 3082c1ac9a4bSKeith Busch nvme_disable_prepare_reset(dev, false); 3083c1ac9a4bSKeith Busch nvme_sync_queues(&dev->ctrl); 3084775755edSChristoph Hellwig } 308557dacad5SJay Sternberg 3086775755edSChristoph Hellwig static void nvme_reset_done(struct pci_dev *pdev) 3087775755edSChristoph Hellwig { 3088f263fbb8SLinus Torvalds struct nvme_dev *dev = pci_get_drvdata(pdev); 3089c1ac9a4bSKeith Busch 3090c1ac9a4bSKeith Busch if (!nvme_try_sched_reset(&dev->ctrl)) 3091c1ac9a4bSKeith Busch flush_work(&dev->ctrl.reset_work); 309257dacad5SJay Sternberg } 309357dacad5SJay Sternberg 309457dacad5SJay Sternberg static void nvme_shutdown(struct pci_dev *pdev) 309557dacad5SJay Sternberg { 309657dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 30974e523547SBaolin Wang 3098c1ac9a4bSKeith Busch nvme_disable_prepare_reset(dev, true); 309957dacad5SJay Sternberg } 310057dacad5SJay Sternberg 3101f58944e2SKeith Busch /* 3102f58944e2SKeith Busch * The driver's remove may be called on a device in a partially initialized 3103f58944e2SKeith Busch * state. This function must not have any dependencies on the device state in 3104f58944e2SKeith Busch * order to proceed. 
3105f58944e2SKeith Busch */ 310657dacad5SJay Sternberg static void nvme_remove(struct pci_dev *pdev) 310757dacad5SJay Sternberg { 310857dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 310957dacad5SJay Sternberg 3110bb8d261eSChristoph Hellwig nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 311157dacad5SJay Sternberg pci_set_drvdata(pdev, NULL); 31120ff9d4e1SKeith Busch 31136db28edaSKeith Busch if (!pci_device_is_present(pdev)) { 31140ff9d4e1SKeith Busch nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); 31151d39e692SKeith Busch nvme_dev_disable(dev, true); 31166db28edaSKeith Busch } 31170ff9d4e1SKeith Busch 3118d86c4d8eSChristoph Hellwig flush_work(&dev->ctrl.reset_work); 3119d09f2b45SSagi Grimberg nvme_stop_ctrl(&dev->ctrl); 3120d09f2b45SSagi Grimberg nvme_remove_namespaces(&dev->ctrl); 3121a5cdb68cSKeith Busch nvme_dev_disable(dev, true); 312287ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 312357dacad5SJay Sternberg nvme_dev_remove_admin(dev); 3124c11b7716SChristoph Hellwig nvme_dbbuf_dma_free(dev); 312557dacad5SJay Sternberg nvme_free_queues(dev, 0); 3126c11b7716SChristoph Hellwig mempool_destroy(dev->iod_mempool); 312757dacad5SJay Sternberg nvme_release_prp_pools(dev); 3128b00a726aSKeith Busch nvme_dev_unmap(dev); 3129726612b6SIsrael Rukshin nvme_uninit_ctrl(&dev->ctrl); 313057dacad5SJay Sternberg } 313157dacad5SJay Sternberg 313257dacad5SJay Sternberg #ifdef CONFIG_PM_SLEEP 3133d916b1beSKeith Busch static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps) 3134d916b1beSKeith Busch { 3135d916b1beSKeith Busch return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps); 3136d916b1beSKeith Busch } 3137d916b1beSKeith Busch 3138d916b1beSKeith Busch static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps) 3139d916b1beSKeith Busch { 3140d916b1beSKeith Busch return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL); 3141d916b1beSKeith Busch } 3142d916b1beSKeith Busch 3143d916b1beSKeith Busch static int nvme_resume(struct device *dev) 3144d916b1beSKeith Busch { 3145d916b1beSKeith Busch struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 3146d916b1beSKeith Busch struct nvme_ctrl *ctrl = &ndev->ctrl; 3147d916b1beSKeith Busch 31484eaefe8cSRafael J. Wysocki if (ndev->last_ps == U32_MAX || 3149d916b1beSKeith Busch nvme_set_power_state(ctrl, ndev->last_ps) != 0) 3150e5ad96f3SKeith Busch goto reset; 3151e5ad96f3SKeith Busch if (ctrl->hmpre && nvme_setup_host_mem(ndev)) 3152e5ad96f3SKeith Busch goto reset; 3153e5ad96f3SKeith Busch 3154d916b1beSKeith Busch return 0; 3155e5ad96f3SKeith Busch reset: 3156e5ad96f3SKeith Busch return nvme_try_sched_reset(ctrl); 3157d916b1beSKeith Busch } 3158d916b1beSKeith Busch 315957dacad5SJay Sternberg static int nvme_suspend(struct device *dev) 316057dacad5SJay Sternberg { 316157dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev); 316257dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev); 3163d916b1beSKeith Busch struct nvme_ctrl *ctrl = &ndev->ctrl; 3164d916b1beSKeith Busch int ret = -EBUSY; 3165d916b1beSKeith Busch 31664eaefe8cSRafael J. Wysocki ndev->last_ps = U32_MAX; 31674eaefe8cSRafael J. Wysocki 3168d916b1beSKeith Busch /* 3169d916b1beSKeith Busch * The platform does not remove power for a kernel managed suspend so 3170d916b1beSKeith Busch * use host managed nvme power settings for lowest idle power if 3171d916b1beSKeith Busch * possible. This should have quicker resume latency than a full device 3172d916b1beSKeith Busch * shutdown. 
But if the firmware is involved after the suspend or the 3173d916b1beSKeith Busch * device does not support any non-default power states, shut down the 3174d916b1beSKeith Busch * device fully. 31754eaefe8cSRafael J. Wysocki * 31764eaefe8cSRafael J. Wysocki * If ASPM is not enabled for the device, shut down the device and allow 31774eaefe8cSRafael J. Wysocki * the PCI bus layer to put it into D3 in order to take the PCIe link 31784eaefe8cSRafael J. Wysocki * down, so as to allow the platform to achieve its minimum low-power 31794eaefe8cSRafael J. Wysocki * state (which may not be possible if the link is up). 3180d916b1beSKeith Busch */ 31814eaefe8cSRafael J. Wysocki if (pm_suspend_via_firmware() || !ctrl->npss || 3182cb32de1bSMario Limonciello !pcie_aspm_enabled(pdev) || 3183c1ac9a4bSKeith Busch (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) 3184c1ac9a4bSKeith Busch return nvme_disable_prepare_reset(ndev, true); 3185d916b1beSKeith Busch 3186d916b1beSKeith Busch nvme_start_freeze(ctrl); 3187d916b1beSKeith Busch nvme_wait_freeze(ctrl); 3188d916b1beSKeith Busch nvme_sync_queues(ctrl); 3189d916b1beSKeith Busch 31905d02a5c1SKeith Busch if (ctrl->state != NVME_CTRL_LIVE) 3191d916b1beSKeith Busch goto unfreeze; 3192d916b1beSKeith Busch 3193e5ad96f3SKeith Busch /* 3194e5ad96f3SKeith Busch * Host memory access may not be successful in a system suspend state, 3195e5ad96f3SKeith Busch * but the specification allows the controller to access memory in a 3196e5ad96f3SKeith Busch * non-operational power state. 3197e5ad96f3SKeith Busch */ 3198e5ad96f3SKeith Busch if (ndev->hmb) { 3199e5ad96f3SKeith Busch ret = nvme_set_host_mem(ndev, 0); 3200e5ad96f3SKeith Busch if (ret < 0) 3201e5ad96f3SKeith Busch goto unfreeze; 3202e5ad96f3SKeith Busch } 3203e5ad96f3SKeith Busch 3204d916b1beSKeith Busch ret = nvme_get_power_state(ctrl, &ndev->last_ps); 3205d916b1beSKeith Busch if (ret < 0) 3206d916b1beSKeith Busch goto unfreeze; 3207d916b1beSKeith Busch 32087cbb5c6fSMario Limonciello /* 32097cbb5c6fSMario Limonciello * A saved state prevents pci pm from generically controlling the 32107cbb5c6fSMario Limonciello * device's power. If we're using protocol specific settings, we don't 32117cbb5c6fSMario Limonciello * want pci interfering. 32127cbb5c6fSMario Limonciello */ 32137cbb5c6fSMario Limonciello pci_save_state(pdev); 32147cbb5c6fSMario Limonciello 3215d916b1beSKeith Busch ret = nvme_set_power_state(ctrl, ctrl->npss); 3216d916b1beSKeith Busch if (ret < 0) 3217d916b1beSKeith Busch goto unfreeze; 3218d916b1beSKeith Busch 3219d916b1beSKeith Busch if (ret) { 32207cbb5c6fSMario Limonciello /* discard the saved state */ 32217cbb5c6fSMario Limonciello pci_load_saved_state(pdev, NULL); 32227cbb5c6fSMario Limonciello 3223d916b1beSKeith Busch /* 3224d916b1beSKeith Busch * Clearing npss forces a controller reset on resume. The 322505d3046fSGeert Uytterhoeven * correct value will be rediscovered then. 
3226d916b1beSKeith Busch */ 3227c1ac9a4bSKeith Busch ret = nvme_disable_prepare_reset(ndev, true); 3228d916b1beSKeith Busch ctrl->npss = 0; 3229d916b1beSKeith Busch } 3230d916b1beSKeith Busch unfreeze: 3231d916b1beSKeith Busch nvme_unfreeze(ctrl); 3232d916b1beSKeith Busch return ret; 3233d916b1beSKeith Busch } 3234d916b1beSKeith Busch 3235d916b1beSKeith Busch static int nvme_simple_suspend(struct device *dev) 3236d916b1beSKeith Busch { 3237d916b1beSKeith Busch struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 32384e523547SBaolin Wang 3239c1ac9a4bSKeith Busch return nvme_disable_prepare_reset(ndev, true); 324057dacad5SJay Sternberg } 324157dacad5SJay Sternberg 3242d916b1beSKeith Busch static int nvme_simple_resume(struct device *dev) 324357dacad5SJay Sternberg { 324457dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev); 324557dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev); 324657dacad5SJay Sternberg 3247c1ac9a4bSKeith Busch return nvme_try_sched_reset(&ndev->ctrl); 324857dacad5SJay Sternberg } 324957dacad5SJay Sternberg 325021774222SYueHaibing static const struct dev_pm_ops nvme_dev_pm_ops = { 3251d916b1beSKeith Busch .suspend = nvme_suspend, 3252d916b1beSKeith Busch .resume = nvme_resume, 3253d916b1beSKeith Busch .freeze = nvme_simple_suspend, 3254d916b1beSKeith Busch .thaw = nvme_simple_resume, 3255d916b1beSKeith Busch .poweroff = nvme_simple_suspend, 3256d916b1beSKeith Busch .restore = nvme_simple_resume, 3257d916b1beSKeith Busch }; 3258d916b1beSKeith Busch #endif /* CONFIG_PM_SLEEP */ 325957dacad5SJay Sternberg 3260a0a3408eSKeith Busch static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, 3261a0a3408eSKeith Busch pci_channel_state_t state) 3262a0a3408eSKeith Busch { 3263a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 3264a0a3408eSKeith Busch 3265a0a3408eSKeith Busch /* 3266a0a3408eSKeith Busch * A frozen channel requires a reset. When detected, this method will 3267a0a3408eSKeith Busch * shutdown the controller to quiesce. The controller will be restarted 3268a0a3408eSKeith Busch * after the slot reset through driver's slot_reset callback. 
3269a0a3408eSKeith Busch */ 3270a0a3408eSKeith Busch switch (state) { 3271a0a3408eSKeith Busch case pci_channel_io_normal: 3272a0a3408eSKeith Busch return PCI_ERS_RESULT_CAN_RECOVER; 3273a0a3408eSKeith Busch case pci_channel_io_frozen: 3274d011fb31SKeith Busch dev_warn(dev->ctrl.device, 3275d011fb31SKeith Busch "frozen state error detected, reset controller\n"); 3276a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 3277a0a3408eSKeith Busch return PCI_ERS_RESULT_NEED_RESET; 3278a0a3408eSKeith Busch case pci_channel_io_perm_failure: 3279d011fb31SKeith Busch dev_warn(dev->ctrl.device, 3280d011fb31SKeith Busch "failure state error detected, request disconnect\n"); 3281a0a3408eSKeith Busch return PCI_ERS_RESULT_DISCONNECT; 3282a0a3408eSKeith Busch } 3283a0a3408eSKeith Busch return PCI_ERS_RESULT_NEED_RESET; 3284a0a3408eSKeith Busch } 3285a0a3408eSKeith Busch 3286a0a3408eSKeith Busch static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) 3287a0a3408eSKeith Busch { 3288a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 3289a0a3408eSKeith Busch 32901b3c47c1SSagi Grimberg dev_info(dev->ctrl.device, "restart after slot reset\n"); 3291a0a3408eSKeith Busch pci_restore_state(pdev); 3292d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 3293a0a3408eSKeith Busch return PCI_ERS_RESULT_RECOVERED; 3294a0a3408eSKeith Busch } 3295a0a3408eSKeith Busch 3296a0a3408eSKeith Busch static void nvme_error_resume(struct pci_dev *pdev) 3297a0a3408eSKeith Busch { 329872cd4cc2SKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 329972cd4cc2SKeith Busch 330072cd4cc2SKeith Busch flush_work(&dev->ctrl.reset_work); 3301a0a3408eSKeith Busch } 3302a0a3408eSKeith Busch 330357dacad5SJay Sternberg static const struct pci_error_handlers nvme_err_handler = { 330457dacad5SJay Sternberg .error_detected = nvme_error_detected, 330557dacad5SJay Sternberg .slot_reset = nvme_slot_reset, 330657dacad5SJay Sternberg .resume = nvme_error_resume, 3307775755edSChristoph Hellwig .reset_prepare = nvme_reset_prepare, 3308775755edSChristoph Hellwig .reset_done = nvme_reset_done, 330957dacad5SJay Sternberg }; 331057dacad5SJay Sternberg 331157dacad5SJay Sternberg static const struct pci_device_id nvme_id_table[] = { 3312972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */ 331308095e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 3314e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 3315972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */ 331699466e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 3317e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 3318972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */ 331999466e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 332025e58af4SWu Zheng NVME_QUIRK_DEALLOCATE_ZEROES | 332125e58af4SWu Zheng NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3322972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */ 3323f99cb7afSDavid Wayne Fugate .driver_data = NVME_QUIRK_STRIPE_SIZE | 3324f99cb7afSDavid Wayne Fugate NVME_QUIRK_DEALLOCATE_ZEROES, }, 332550af47d0SAndy Lutomirski { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ 33269abd68efSJens Axboe .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 33276c6aa2f2SAkinobu Mita NVME_QUIRK_MEDIUM_PRIO_SQ | 3328ce4cc313SDavid Milburn NVME_QUIRK_NO_TEMP_THRESH_CHANGE | 3329ce4cc313SDavid Milburn NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 33306299358dSJames Dingwall { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ 
33316299358dSJames Dingwall .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3332540c801cSKeith Busch { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
33337b210e4eSChristoph Hellwig .driver_data = NVME_QUIRK_IDENTIFY_CNS |
333466dd346bSChristoph Hellwig NVME_QUIRK_DISABLE_WRITE_ZEROES |
333566dd346bSChristoph Hellwig NVME_QUIRK_BOGUS_NID, },
333666dd346bSChristoph Hellwig { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
333766dd346bSChristoph Hellwig .driver_data = NVME_QUIRK_BOGUS_NID, },
33385bedd3afSChristoph Hellwig { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
3339c98a8793SKeith Busch .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
3340c98a8793SKeith Busch NVME_QUIRK_BOGUS_NID, },
334110302ae60SMicah Parrish { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
334225e112d3fSJulian Einwag .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
334335e112d3fSJulian Einwag NVME_QUIRK_NO_NS_DESC_LIST, },
334454adc010SGuilherme G. Piccoli { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
334554adc010SGuilherme G. Piccoli .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
33468c97eeccSJeff Lien { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */
33478c97eeccSJeff Lien .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3348015282c9SWenbo Wang { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
3349015282c9SWenbo Wang .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3350d554b5e1SMartin K. Petersen { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
3351d554b5e1SMartin K. Petersen .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3352d554b5e1SMartin K. Petersen { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
33537ee5c78cSGopal Tiwari .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
3354abbb5f59SDmitry Monakhov NVME_QUIRK_DISABLE_WRITE_ZEROES |
33557ee5c78cSGopal Tiwari NVME_QUIRK_IGNORE_DEV_SUBNQN, },
33562cf7a77eSKeith Busch { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */
33572cf7a77eSKeith Busch .driver_data = NVME_QUIRK_BOGUS_NID, },
3358c9e95c39SClaus Stovgaard { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
335973029c9bSKeith Busch .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
336073029c9bSKeith Busch NVME_QUIRK_BOGUS_NID, },
3361d14c2731STina Hsu { PCI_DEVICE(0x1987, 0x5019), /* Phison E19 */
3362d14c2731STina Hsu .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3363d14c2731STina Hsu { PCI_DEVICE(0x1987, 0x5021), /* Phison E21 */
3364d14c2731STina Hsu .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
33656e6a6828SPascal Terjan { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */
33666e6a6828SPascal Terjan .driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
33676e6a6828SPascal Terjan NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3368e1c70d79SLamarque Vieira Souza { PCI_DEVICE(0x1cc1, 0x33f8), /* ADATA IM2P33F8ABR1 1 TB */
3369e1c70d79SLamarque Vieira Souza .driver_data = NVME_QUIRK_BOGUS_NID, },
337008b903b5SMisha Nasledov { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
33711629de0eSPablo Greco .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
33721629de0eSPablo Greco NVME_QUIRK_BOGUS_NID, },
3373f03e42c6SGabriel Craciunescu { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */
3374f03e42c6SGabriel Craciunescu .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
3375f03e42c6SGabriel Craciunescu NVME_QUIRK_IGNORE_DEV_SUBNQN, },
337641f38043SLeo Savernik { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */
337741f38043SLeo Savernik .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN },
3378d5ceb4d1SBean Huo { PCI_DEVICE(0x1344, 0x6001), /* Micron Nitro NVMe */
3379d5ceb4d1SBean Huo .driver_data = NVME_QUIRK_BOGUS_NID, },
33805611ec2bSKai-Heng Feng { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */
33815611ec2bSKai-Heng Feng .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3382c4f01a77SKeith Busch { PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */
3383c4f01a77SKeith Busch .driver_data = NVME_QUIRK_BOGUS_NID, },
338402ca079cSKai-Heng Feng { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */
338502ca079cSKai-Heng Feng .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
338689919929SChaitanya Kulkarni { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */
338789919929SChaitanya Kulkarni .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
338843047e08Srasheed.hsueh { PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */
338943047e08Srasheed.hsueh .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
339043047e08Srasheed.hsueh { PCI_DEVICE(0x144d, 0xa809), /* Samsung MZALQ256HBJD 256G */
339143047e08Srasheed.hsueh .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
339243047e08Srasheed.hsueh { PCI_DEVICE(0x1cc4, 0x6303), /* UMIS RPJTJ512MGE1QDY 512G */
339343047e08Srasheed.hsueh .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
339443047e08Srasheed.hsueh { PCI_DEVICE(0x1cc4, 0x6302), /* UMIS RPJTJ256MGE1QDY 256G */
339543047e08Srasheed.hsueh .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3396dc22c1c0SZoltán Böszörményi { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */
3397dc22c1c0SZoltán Böszörményi .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
3398538e4a8cSThorsten Leemhuis { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */
3399538e4a8cSThorsten Leemhuis .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
3400ac9b57d4SXander Li { PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */
3401ac9b57d4SXander Li .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3402ac9b57d4SXander Li { PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */
3403ac9b57d4SXander Li .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3404ac9b57d4SXander Li { PCI_DEVICE(0x2646, 0x501A), /* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */
3405ac9b57d4SXander Li .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3406ac9b57d4SXander Li { PCI_DEVICE(0x2646, 0x501B), /* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */
3407ac9b57d4SXander Li .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3408ac9b57d4SXander Li { PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
3409ac9b57d4SXander Li .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
34108d6e38f6STiago Dias Ferreira { PCI_DEVICE(0x1f40, 0x5236), /* Netac Technologies Co. NV7000 NVMe SSD */
34118d6e38f6STiago Dias Ferreira .driver_data = NVME_QUIRK_BOGUS_NID, },
341270ce3455SChristoph Hellwig { PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */
341370ce3455SChristoph Hellwig .driver_data = NVME_QUIRK_BOGUS_NID, },
3414a98a945bSChristoph Hellwig { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */
3415a98a945bSChristoph Hellwig .driver_data = NVME_QUIRK_BOGUS_NID, },
3416a98a945bSChristoph Hellwig { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */
3417a98a945bSChristoph Hellwig .driver_data = NVME_QUIRK_BOGUS_NID, },
34183765fad5SStefan Reiter { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */
34193765fad5SStefan Reiter .driver_data = NVME_QUIRK_BOGUS_NID, },
3420f37527a0SDennis P. Kliem { PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */
3421f37527a0SDennis P. Kliem .driver_data = NVME_QUIRK_BOGUS_NID, },
3422d5d3c100SXi Ruoyao { PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */
3423d5d3c100SXi Ruoyao .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
34246b961bceSNing Wang { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */
34256b961bceSNing Wang .driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
3426d6c52fa3STobias Gruetzmacher { PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
3427d6c52fa3STobias Gruetzmacher .driver_data = NVME_QUIRK_BOGUS_NID, },
3428200dccd0SShyamin Ayesh { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
3429200dccd0SShyamin Ayesh .driver_data = NVME_QUIRK_BOGUS_NID, },
343080b26240SAbhijit { PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
343180b26240SAbhijit .driver_data = NVME_QUIRK_BOGUS_NID, },
34324bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
34334bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
34344bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
34354bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
34364bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061),
34374bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
34384bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00),
34394bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
34404bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01),
34414bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
34424bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
34434bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
344498f7b86aSAndy Shevchenko { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
344598f7b86aSAndy Shevchenko .driver_data = NVME_QUIRK_SINGLE_VECTOR },
3446124298bdSDaniel Roschka { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
344766341331SBenjamin Herrenschmidt { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
344866341331SBenjamin Herrenschmidt .driver_data = NVME_QUIRK_SINGLE_VECTOR |
3449d38e9f04SBenjamin Herrenschmidt NVME_QUIRK_128_BYTES_SQES |
3450a2941f6aSKeith Busch NVME_QUIRK_SHARED_TAGS |
3451453116a4SHector Martin NVME_QUIRK_SKIP_CID_GEN |
3452453116a4SHector Martin NVME_QUIRK_IDENTIFY_CNS },
34530b85f59dSAndy Shevchenko { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
345457dacad5SJay Sternberg { 0, }
345557dacad5SJay Sternberg };
345657dacad5SJay Sternberg MODULE_DEVICE_TABLE(pci, nvme_id_table);
345757dacad5SJay Sternberg
345857dacad5SJay Sternberg static struct pci_driver nvme_driver = {
345957dacad5SJay Sternberg .name = "nvme",
346057dacad5SJay Sternberg .id_table = nvme_id_table,
346157dacad5SJay Sternberg .probe = nvme_probe,
346257dacad5SJay Sternberg .remove = nvme_remove,
346357dacad5SJay Sternberg .shutdown = nvme_shutdown,
346457dacad5SJay Sternberg .driver = {
3465eac3ef26SChristoph Hellwig .probe_type = PROBE_PREFER_ASYNCHRONOUS,
3466eac3ef26SChristoph Hellwig #ifdef CONFIG_PM_SLEEP
346757dacad5SJay Sternberg .pm = &nvme_dev_pm_ops,
3468d916b1beSKeith Busch #endif
3469eac3ef26SChristoph Hellwig },
347074d986abSAlexander Duyck .sriov_configure = pci_sriov_configure_simple,
347157dacad5SJay Sternberg .err_handler = &nvme_err_handler,
347257dacad5SJay Sternberg };
347357dacad5SJay Sternberg
347457dacad5SJay Sternberg static int __init nvme_init(void)
347557dacad5SJay Sternberg {
347681101540SChristoph Hellwig BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
347781101540SChristoph Hellwig BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
347881101540SChristoph Hellwig BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
3479612b7286SMing Lei BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
348001df742dSKeith Busch BUILD_BUG_ON(NVME_MAX_SEGS > SGES_PER_PAGE);
34817846c1b5SKeith Busch BUILD_BUG_ON(sizeof(struct scatterlist) * NVME_MAX_SEGS > PAGE_SIZE);
34827846c1b5SKeith Busch BUILD_BUG_ON(nvme_pci_npages_prp() > NVME_MAX_NR_ALLOCATIONS);
348317c33167SKeith Busch
34849a6327d2SSagi Grimberg return pci_register_driver(&nvme_driver);
348557dacad5SJay Sternberg }
348657dacad5SJay Sternberg
348757dacad5SJay Sternberg static void __exit nvme_exit(void)
348857dacad5SJay Sternberg {
348957dacad5SJay Sternberg pci_unregister_driver(&nvme_driver);
349003e0f3a6SMing Lei flush_workqueue(nvme_wq);
349157dacad5SJay Sternberg }
349257dacad5SJay Sternberg
349357dacad5SJay Sternberg MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
349457dacad5SJay Sternberg MODULE_LICENSE("GPL");
349557dacad5SJay Sternberg MODULE_VERSION("1.0");
349657dacad5SJay Sternberg module_init(nvme_init);
349757dacad5SJay Sternberg module_exit(nvme_exit);