// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/blk-integrity.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/once.h>
#include <linux/pci.h>
#include <linux/suspend.h>
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>

#include "trace.h"
#include "nvme.h"

#define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)
#define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))

#define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))

/*
 * These can be higher, but we need to ensure that any command doesn't
 * require an sg allocation that needs more than a page of data.
 */
#define NVME_MAX_KB_SZ	4096
#define NVME_MAX_SEGS	127

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0444);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0444);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static unsigned int max_host_mem_size_mb = 128;
module_param(max_host_mem_size_mb, uint, 0444);
MODULE_PARM_DESC(max_host_mem_size_mb,
	"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");

static unsigned int sgl_threshold = SZ_32K;
module_param(sgl_threshold, uint, 0644);
MODULE_PARM_DESC(sgl_threshold,
	"Use SGLs when average request segment size is larger or equal to "
	"this size. Use 0 to disable SGLs.");
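/*
 * Illustrative usage, not part of the driver logic: these knobs can be set
 * at module load time or on the kernel command line, for example
 *
 *	nvme.use_cmb_sqes=0 nvme.max_host_mem_size_mb=64 nvme.sgl_threshold=0
 *
 * where sgl_threshold=0 disables SGL use entirely (see nvme_pci_use_sgls()).
 */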
#define NVME_PCI_MIN_QUEUE_SIZE 2
#define NVME_PCI_MAX_QUEUE_SIZE 4095
static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
	.set = io_queue_depth_set,
	.get = param_get_uint,
};

static unsigned int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2 and <= 4095");

static int io_queue_count_set(const char *val, const struct kernel_param *kp)
{
	unsigned int n;
	int ret;

	ret = kstrtouint(val, 10, &n);
	if (ret != 0 || n > num_possible_cpus())
		return -EINVAL;
	return param_set_uint(val, kp);
}

static const struct kernel_param_ops io_queue_count_ops = {
	.set = io_queue_count_set,
	.get = param_get_uint,
};

static unsigned int write_queues;
module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644);
MODULE_PARM_DESC(write_queues,
	"Number of queues to use for writes. If not set, reads and writes "
	"will share a queue set.");

static unsigned int poll_queues;
module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");

static bool noacpi;
module_param(noacpi, bool, 0444);
MODULE_PARM_DESC(noacpi, "disable acpi bios quirks");

struct nvme_dev;
struct nvme_queue;

static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);
/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue *queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned online_queues;
	unsigned max_qid;
	unsigned io_queues[HCTX_MAX_TYPES];
	unsigned int num_vecs;
	u32 q_depth;
	int io_sqes;
	u32 db_stride;
	void __iomem *bar;
	unsigned long bar_mapped_size;
	struct work_struct remove_work;
	struct mutex shutdown_lock;
	bool subsystem;
	u64 cmb_size;
	bool cmb_use_sqes;
	u32 cmbsz;
	u32 cmbloc;
	struct nvme_ctrl ctrl;
	u32 last_ps;
	bool hmb;

	mempool_t *iod_mempool;

	/* shadow doorbell buffer support: */
	u32 *dbbuf_dbs;
	dma_addr_t dbbuf_dbs_dma_addr;
	u32 *dbbuf_eis;
	dma_addr_t dbbuf_eis_dma_addr;

	/* host memory buffer support: */
	u64 host_mem_size;
	u32 nr_host_mem_descs;
	dma_addr_t host_mem_descs_dma;
	struct nvme_host_mem_buf_desc *host_mem_descs;
	void **host_mem_desc_bufs;
	unsigned int nr_allocated_queues;
	unsigned int nr_write_queues;
	unsigned int nr_poll_queues;

	bool attrs_added;
};

static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, NVME_PCI_MIN_QUEUE_SIZE,
			NVME_PCI_MAX_QUEUE_SIZE);
}

static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
	return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{
	return (qid * 2 + 1) * stride;
}

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}
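/*
 * Worked example for the index helpers above: the SQ and CQ shadow
 * doorbells of a queue pair occupy adjacent stride-spaced slots.  With
 * db_stride == 1, qid 1 uses dbbuf entry 2 for its SQ tail and entry 3 for
 * its CQ head; a controller reporting a doorbell stride of 4 entries would
 * place them at entries 8 and 12 instead.
 */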
/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	spinlock_t sq_lock;
	void *sq_cmds;
	 /* only used for poll queues: */
	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
	struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u32 q_depth;
	u16 cq_vector;
	u16 sq_tail;
	u16 last_sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 sqes;
	unsigned long flags;
#define NVMEQ_ENABLED		0
#define NVMEQ_SQ_CMB		1
#define NVMEQ_DELETE_ERROR	2
#define NVMEQ_POLLED		3
	u32 *dbbuf_sq_db;
	u32 *dbbuf_cq_db;
	u32 *dbbuf_sq_ei;
	u32 *dbbuf_cq_ei;
	struct completion delete_done;
};

/*
 * The nvme_iod describes the data in an I/O.
 *
 * The sg pointer contains the list of PRP/SGL chunk allocations in addition
 * to the actual struct scatterlist.
 */
struct nvme_iod {
	struct nvme_request req;
	struct nvme_command cmd;
	bool use_sgl;
	bool aborted;
	s8 nr_allocations;	/* PRP list pool allocations. 0 means small
				   pool in use */
	unsigned int dma_len;	/* length of single DMA segment mapping */
	dma_addr_t first_dma;
	dma_addr_t meta_dma;
	struct sg_table sgt;
};

static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
{
	return dev->nr_allocated_queues * 8 * dev->db_stride;
}
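/*
 * Example: each queue pair consumes two u32 doorbell slots (SQ tail and
 * CQ head), i.e. 8 bytes per queue at the default stride of 1, so a
 * controller with 65 allocated queues and db_stride == 1 needs a 520-byte
 * shadow doorbell buffer.
 */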
static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev);

	if (dev->dbbuf_dbs) {
		/*
		 * Clear the dbbuf memory so the driver doesn't observe stale
		 * values from the previous instantiation.
		 */
		memset(dev->dbbuf_dbs, 0, mem_size);
		memset(dev->dbbuf_eis, 0, mem_size);
		return 0;
	}

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		return -ENOMEM;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev);

	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}
}

static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}

static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
{
	if (!nvmeq->qid)
		return;

	nvmeq->dbbuf_sq_db = NULL;
	nvmeq->dbbuf_cq_db = NULL;
	nvmeq->dbbuf_sq_ei = NULL;
	nvmeq->dbbuf_cq_ei = NULL;
}

static void nvme_dbbuf_set(struct nvme_dev *dev)
{
	struct nvme_command c = { };
	unsigned int i;

	if (!dev->dbbuf_dbs)
		return;

	c.dbbuf.opcode = nvme_admin_dbbuf;
	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
		/* Free memory and continue on */
		nvme_dbbuf_dma_free(dev);

		for (i = 1; i <= dev->online_queues; i++)
			nvme_dbbuf_free(&dev->queues[i]);
	}
}

static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}

/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
					      volatile u32 *dbbuf_ei)
{
	if (dbbuf_db) {
		u16 old_value;

		/*
		 * Ensure that the queue is written before updating
		 * the doorbell in memory
		 */
		wmb();

		old_value = *dbbuf_db;
		*dbbuf_db = value;

		/*
		 * Ensure that the doorbell is updated before reading the event
		 * index from memory.  The controller needs to provide similar
		 * ordering to ensure the event index is updated before reading
		 * the doorbell.
		 */
		mb();

		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
			return false;
	}

	return true;
}
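/*
 * Worked example for nvme_dbbuf_need_event(): the unsigned 16-bit
 * subtractions test whether event_idx lies in the window [old, new_idx).
 * With old == 10, new_idx == 12 and event_idx == 11:
 * (u16)(12 - 11 - 1) == 0 is less than (u16)(12 - 10) == 2, so an MMIO
 * doorbell write is required.  The u16 casts keep the comparison correct
 * across wraparound of the 16-bit queue indices.
 */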
/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_pci_npages_prp(void)
{
	unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE,
				      NVME_CTRL_PAGE_SIZE);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

/*
 * Calculates the number of pages needed for the SGL segments. For example a 4k
 * page can accommodate 256 SGL descriptors.
 */
static int nvme_pci_npages_sgl(void)
{
	return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
			PAGE_SIZE);
}

static size_t nvme_pci_iod_alloc_size(void)
{
	size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl());

	return sizeof(__le64 *) * npages +
		sizeof(struct scatterlist) * NVME_MAX_SEGS;
}

static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);

	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_pci_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_dev *dev = set->driver_data;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_req(req)->ctrl = &dev->ctrl;
	nvme_req(req)->cmd = &iod->cmd;
	return 0;
}

static int queue_irq_offset(struct nvme_dev *dev)
{
	/* if we have more than 1 vec, admin queue offsets us by 1 */
	if (dev->num_vecs > 1)
		return 1;

	return 0;
}

static void nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_dev *dev = set->driver_data;
	int i, qoff, offset;

	offset = queue_irq_offset(dev);
	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = dev->io_queues[i];
		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		/*
		 * The poll queue(s) doesn't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL && offset)
			blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
		else
			blk_mq_map_queues(map);
		qoff += map->nr_queues;
		offset += map->nr_queues;
	}
}
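/*
 * Example mapping (illustrative): when the controller has more than one
 * vector, vector 0 is dedicated to the admin queue, so the first I/O queue
 * starts at IRQ offset 1.  A tag set with io_queues[HCTX_TYPE_DEFAULT] == 4
 * and io_queues[HCTX_TYPE_POLL] == 2 maps the default queues by IRQ
 * affinity via blk_mq_pci_map_queues() using vectors 1..4, while the poll
 * queues, which own no vector, fall back to blk_mq_map_queues().
 */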
/*
 * Write sq tail if we are asked to, or if the next command would wrap.
 */
static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
{
	if (!write_sq) {
		u16 next_tail = nvmeq->sq_tail + 1;

		if (next_tail == nvmeq->q_depth)
			next_tail = 0;
		if (next_tail != nvmeq->last_sq_tail)
			return;
	}

	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
		writel(nvmeq->sq_tail, nvmeq->q_db);
	nvmeq->last_sq_tail = nvmeq->sq_tail;
}

static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq,
				    struct nvme_command *cmd)
{
	memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
		absolute_pointer(cmd), sizeof(*cmd));
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
}

static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	spin_lock(&nvmeq->sq_lock);
	if (nvmeq->sq_tail != nvmeq->last_sq_tail)
		nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
}

static void **nvme_pci_iod_list(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	return (void **)(iod->sgt.sgl + blk_rq_nr_phys_segments(req));
}

static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
{
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
	int nseg = blk_rq_nr_phys_segments(req);
	unsigned int avg_seg_size;

	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);

	if (!nvme_ctrl_sgl_supported(&dev->ctrl))
		return false;
	if (!nvmeq->qid)
		return false;
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return false;
	return true;
}
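/*
 * Example: with the default sgl_threshold of 32 KiB, a 128 KiB request
 * scattered into eight segments averages 16 KiB per segment and is mapped
 * with PRPs, while the same payload in two 64 KiB segments crosses the
 * threshold and is mapped with SGLs (provided the controller supports
 * SGLs and this is not the admin queue).
 */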
static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
{
	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	dma_addr_t dma_addr = iod->first_dma;
	int i;

	for (i = 0; i < iod->nr_allocations; i++) {
		__le64 *prp_list = nvme_pci_iod_list(req)[i];
		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);

		dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
		dma_addr = next_dma_addr;
	}
}

static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
{
	const int last_sg = SGES_PER_PAGE - 1;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	dma_addr_t dma_addr = iod->first_dma;
	int i;

	for (i = 0; i < iod->nr_allocations; i++) {
		struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
		dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);

		dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
		dma_addr = next_dma_addr;
	}
}

static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	if (iod->dma_len) {
		dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
			       rq_dma_dir(req));
		return;
	}

	WARN_ON_ONCE(!iod->sgt.nents);

	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);

	if (iod->nr_allocations == 0)
		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
			      iod->first_dma);
	else if (iod->use_sgl)
		nvme_free_sgls(dev, req);
	else
		nvme_free_prps(dev, req);
	mempool_free(iod->sgt.sgl, dev->iod_mempool);
}

static void nvme_print_sgl(struct scatterlist *sgl, int nents)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t phys = sg_phys(sg);
		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
			"dma_address:%pad dma_length:%d\n",
			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
			sg_dma_len(sg));
	}
}
static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = blk_rq_payload_bytes(req);
	struct scatterlist *sg = iod->sgt.sgl;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
	__le64 *prp_list;
	void **list = nvme_pci_iod_list(req);
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (NVME_CTRL_PAGE_SIZE - offset);
	if (length <= 0) {
		iod->first_dma = 0;
		goto done;
	}

	dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= NVME_CTRL_PAGE_SIZE) {
		iod->first_dma = dma_addr;
		goto done;
	}

	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->nr_allocations = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->nr_allocations = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->nr_allocations = -1;
		return BLK_STS_RESOURCE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				goto free_prps;
			list[iod->nr_allocations++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= NVME_CTRL_PAGE_SIZE;
		dma_addr += NVME_CTRL_PAGE_SIZE;
		length -= NVME_CTRL_PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		if (unlikely(dma_len < 0))
			goto bad_sgl;
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}
done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sgt.sgl));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
	return BLK_STS_OK;
free_prps:
	nvme_free_prps(dev, req);
	return BLK_STS_RESOURCE;
bad_sgl:
	WARN(DO_ONCE(nvme_print_sgl, iod->sgt.sgl, iod->sgt.nents),
	     "Invalid SGL for payload:%d nents:%d\n",
	     blk_rq_payload_bytes(req), iod->sgt.nents);
	return BLK_STS_IOERR;
}
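/*
 * Worked example (4 KiB controller pages): a page-aligned 8 KiB request
 * needs no PRP list; prp1 addresses the first page and prp2 the second.
 * A page-aligned 16 KiB request leaves three pages beyond prp1, so a list
 * is allocated from the small pool and prp2 points at a three-entry PRP
 * list describing pages 2 through 4.
 */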
static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
		struct scatterlist *sg)
{
	sge->addr = cpu_to_le64(sg_dma_address(sg));
	sge->length = cpu_to_le32(sg_dma_len(sg));
	sge->type = NVME_SGL_FMT_DATA_DESC << 4;
}

static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
		dma_addr_t dma_addr, int entries)
{
	sge->addr = cpu_to_le64(dma_addr);
	if (entries < SGES_PER_PAGE) {
		sge->length = cpu_to_le32(entries * sizeof(*sge));
		sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
	} else {
		sge->length = cpu_to_le32(PAGE_SIZE);
		sge->type = NVME_SGL_FMT_SEG_DESC << 4;
	}
}

static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	struct nvme_sgl_desc *sg_list;
	struct scatterlist *sg = iod->sgt.sgl;
	unsigned int entries = iod->sgt.nents;
	dma_addr_t sgl_dma;
	int i = 0;

	/* setting the transfer type as SGL */
	cmd->flags = NVME_CMD_SGL_METABUF;

	if (entries == 1) {
		nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
		return BLK_STS_OK;
	}

	if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
		pool = dev->prp_small_pool;
		iod->nr_allocations = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->nr_allocations = 1;
	}

	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list) {
		iod->nr_allocations = -1;
		return BLK_STS_RESOURCE;
	}

	nvme_pci_iod_list(req)[0] = sg_list;
	iod->first_dma = sgl_dma;

	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);

	do {
		if (i == SGES_PER_PAGE) {
			struct nvme_sgl_desc *old_sg_desc = sg_list;
			struct nvme_sgl_desc *link = &old_sg_desc[i - 1];

			sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
			if (!sg_list)
				goto free_sgls;

			i = 0;
			nvme_pci_iod_list(req)[iod->nr_allocations++] = sg_list;
			sg_list[i++] = *link;
			nvme_pci_sgl_set_seg(link, sgl_dma, entries);
		}

		nvme_pci_sgl_set_data(&sg_list[i++], sg);
		sg = sg_next(sg);
	} while (--entries > 0);

	return BLK_STS_OK;
free_sgls:
	nvme_free_sgls(dev, req);
	return BLK_STS_RESOURCE;
}
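/*
 * Example: a single-segment request is described inline, i.e.
 * cmd->dptr.sgl itself becomes the data descriptor and no pool memory is
 * touched.  A three-segment request instead gets a last-segment descriptor
 * in the command pointing at a 48-byte list of three 16-byte data
 * descriptors allocated from the small pool.
 */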
static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
	unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
	if (bv->bv_len > first_prp_len)
		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
	return BLK_STS_OK;
}

static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->flags = NVME_CMD_SGL_METABUF;
	cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
	cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
	cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
	return BLK_STS_OK;
}
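/*
 * Example: these fast paths serve single-segment requests only.  A 4 KiB
 * page-aligned bvec sets just prp1; one that spills into a second
 * controller page (e.g. 8 KiB aligned, or 4 KiB at a 2 KiB offset) also
 * sets prp2.  Anything larger takes the full mapping path in
 * nvme_map_data() below.
 */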
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret = BLK_STS_RESOURCE;
	int rc;

	if (blk_rq_nr_phys_segments(req) == 1) {
		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
		struct bio_vec bv = req_bvec(req);

		if (!is_pci_p2pdma_page(bv.bv_page)) {
			if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
				return nvme_setup_prp_simple(dev, req,
							     &cmnd->rw, &bv);

			if (nvmeq->qid && sgl_threshold &&
			    nvme_ctrl_sgl_supported(&dev->ctrl))
				return nvme_setup_sgl_simple(dev, req,
							     &cmnd->rw, &bv);
		}
	}

	iod->dma_len = 0;
	iod->sgt.sgl = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
	if (!iod->sgt.sgl)
		return BLK_STS_RESOURCE;
	sg_init_table(iod->sgt.sgl, blk_rq_nr_phys_segments(req));
	iod->sgt.orig_nents = blk_rq_map_sg(req->q, req, iod->sgt.sgl);
	if (!iod->sgt.orig_nents)
		goto out_free_sg;

	rc = dma_map_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req),
			     DMA_ATTR_NO_WARN);
	if (rc) {
		if (rc == -EREMOTEIO)
			ret = BLK_STS_TARGET;
		goto out_free_sg;
	}

	iod->use_sgl = nvme_pci_use_sgls(dev, req);
	if (iod->use_sgl)
		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw);
	else
		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
	if (ret != BLK_STS_OK)
		goto out_unmap_sg;
	return BLK_STS_OK;

out_unmap_sg:
	dma_unmap_sgtable(dev->dev, &iod->sgt, rq_dma_dir(req), 0);
out_free_sg:
	mempool_free(iod->sgt.sgl, dev->iod_mempool);
	return ret;
}
static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
			rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->meta_dma))
		return BLK_STS_IOERR;
	cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
	return BLK_STS_OK;
}
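/*
 * Note (illustrative): the integrity payload can be mapped as one bvec
 * here because this driver advertises a single metadata segment to the
 * block layer, so rq_integrity_vec() covers the whole metadata buffer.
 */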
static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	iod->aborted = false;
	iod->nr_allocations = -1;
	iod->sgt.nents = 0;

	ret = nvme_setup_cmd(req->q->queuedata, req);
	if (ret)
		return ret;

	if (blk_rq_nr_phys_segments(req)) {
		ret = nvme_map_data(dev, req, &iod->cmd);
		if (ret)
			goto out_free_cmd;
	}

	if (blk_integrity_rq(req)) {
		ret = nvme_map_metadata(dev, req, &iod->cmd);
		if (ret)
			goto out_unmap_data;
	}

	blk_mq_start_request(req);
	return BLK_STS_OK;
out_unmap_data:
	nvme_unmap_data(dev, req);
out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}

/*
 * NOTE: ns is NULL when called on the admin queue.
 */
static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *req = bd->rq;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret;

	/*
	 * We should not need to do this, but we're still using this to
	 * ensure we can drain requests on a dying queue.
	 */
	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
		return BLK_STS_IOERR;

	if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
		return nvme_fail_nonready_command(&dev->ctrl, req);

	ret = nvme_prep_rq(dev, req);
	if (unlikely(ret))
		return ret;
	spin_lock(&nvmeq->sq_lock);
	nvme_sq_copy_cmd(nvmeq, &iod->cmd);
	nvme_write_sq_db(nvmeq, bd->last);
	spin_unlock(&nvmeq->sq_lock);
	return BLK_STS_OK;
}

static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
{
	spin_lock(&nvmeq->sq_lock);
	while (!rq_list_empty(*rqlist)) {
		struct request *req = rq_list_pop(rqlist);
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

		nvme_sq_copy_cmd(nvmeq, &iod->cmd);
	}
	nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
}
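/*
 * Example: nvme_submit_cmds() copies every queued command into the SQ ring
 * under a single sq_lock acquisition and rings the doorbell once at the
 * end, so a batch of N requests costs one MMIO write instead of N.
 */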
static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
{
	/*
	 * We should not need to do this, but we're still using this to
	 * ensure we can drain requests on a dying queue.
	 */
	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
		return false;
	if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
		return false;

	req->mq_hctx->tags->rqs[req->tag] = req;
	return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
}

static void nvme_queue_rqs(struct request **rqlist)
{
	struct request *req, *next, *prev = NULL;
	struct request *requeue_list = NULL;

	rq_list_for_each_safe(rqlist, req, next) {
		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;

		if (!nvme_prep_rq_batch(nvmeq, req)) {
			/* detach 'req' and add to remainder list */
			rq_list_move(rqlist, &requeue_list, req, prev);

			req = prev;
			if (!req)
				continue;
		}

		if (!next || req->mq_hctx != next->mq_hctx) {
			/* detach rest of list, and submit */
			req->rq_next = NULL;
			nvme_submit_cmds(nvmeq, rqlist);
			*rqlist = next;
			prev = NULL;
		} else
			prev = req;
	}

	*rqlist = requeue_list;
}
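/*
 * Illustrative walk-through of nvme_queue_rqs(): the incoming rqlist is
 * ordered so that requests sharing a hardware context are adjacent.  Each
 * contiguous same-hctx run is detached and submitted as one batch; any
 * request failing nvme_prep_rq_batch() is moved to requeue_list, which is
 * handed back through *rqlist for later resubmission by the block layer.
 */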
static __always_inline void nvme_pci_unmap_rq(struct request *req)
{
	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;

	if (blk_integrity_rq(req)) {
		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

		dma_unmap_page(dev->dev, iod->meta_dma,
			       rq_integrity_vec(req)->bv_len, rq_data_dir(req));
	}

	if (blk_rq_nr_phys_segments(req))
		nvme_unmap_data(dev, req);
}

static void nvme_pci_complete_rq(struct request *req)
{
	nvme_pci_unmap_rq(req);
	nvme_complete_rq(req);
}

static void nvme_pci_complete_batch(struct io_comp_batch *iob)
{
	nvme_complete_batch(iob, nvme_pci_unmap_rq);
}

/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
	struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];

	return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
}
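/*
 * Example of the phase protocol: cq_phase starts at 1 and the CQ memory
 * starts zeroed, so a slot becomes "new" once the controller writes a
 * status word whose low bit is 1.  After the head wraps past q_depth,
 * nvme_update_cq_head() below flips cq_phase to 0 and fresh entries are
 * then recognised by a phase bit of 0; consumed entries never need to be
 * cleared.
 */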
static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
{
	u16 head = nvmeq->cq_head;

	if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
					      nvmeq->dbbuf_cq_ei))
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
}

static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
{
	if (!nvmeq->qid)
		return nvmeq->dev->admin_tagset.tags[0];
	return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
}

static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
				   struct io_comp_batch *iob, u16 idx)
{
	struct nvme_completion *cqe = &nvmeq->cqes[idx];
	__u16 command_id = READ_ONCE(cqe->command_id);
	struct request *req;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
		nvme_complete_async_event(&nvmeq->dev->ctrl,
				cqe->status, &cqe->result);
		return;
	}

	req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id);
	if (unlikely(!req)) {
		dev_warn(nvmeq->dev->ctrl.device,
			"invalid id %d completed on queue %d\n",
			command_id, le16_to_cpu(cqe->sq_id));
		return;
	}

	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
	if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
	    !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
					nvme_pci_complete_batch))
		nvme_pci_complete_rq(req);
}

static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
	u32 tmp = nvmeq->cq_head + 1;

	if (tmp == nvmeq->q_depth) {
		nvmeq->cq_head = 0;
		nvmeq->cq_phase ^= 1;
	} else {
		nvmeq->cq_head = tmp;
	}
}

static inline int nvme_poll_cq(struct nvme_queue *nvmeq,
			       struct io_comp_batch *iob)
{
	int found = 0;

	while (nvme_cqe_pending(nvmeq)) {
		found++;
		/*
		 * load-load control dependency between phase and the rest of
		 * the cqe requires a full read memory barrier
		 */
		dma_rmb();
		nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head);
		nvme_update_cq_head(nvmeq);
	}

	if (found)
		nvme_ring_cq_doorbell(nvmeq);
	return found;
}

static irqreturn_t nvme_irq(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;
	DEFINE_IO_COMP_BATCH(iob);

	if (nvme_poll_cq(nvmeq, &iob)) {
		if (!rq_list_empty(iob.req_list))
			nvme_pci_complete_batch(&iob);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;

	if (nvme_cqe_pending(nvmeq))
		return IRQ_WAKE_THREAD;
	return IRQ_NONE;
}
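/*
 * Note: when use_threaded_interrupts is set, the queue's IRQ is requested
 * elsewhere in this driver with nvme_irq_check() as the hard-irq handler,
 * which merely wakes the irq thread that then runs nvme_irq() to reap the
 * queue; otherwise nvme_irq() itself is the interrupt handler.
 */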
io_comp_batch *iob) 1113a0fa9647SJens Axboe { 11141052b8acSJens Axboe int found = 0; 111583a12fb7SSagi Grimberg 11161052b8acSJens Axboe while (nvme_cqe_pending(nvmeq)) { 11171052b8acSJens Axboe found++; 1118b69e2ef2SKeith Busch /* 1119b69e2ef2SKeith Busch * load-load control dependency between phase and the rest of 1120b69e2ef2SKeith Busch * the cqe requires a full read memory barrier 1121b69e2ef2SKeith Busch */ 1122b69e2ef2SKeith Busch dma_rmb(); 1123c234a653SJens Axboe nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head); 11245cb525c8SJens Axboe nvme_update_cq_head(nvmeq); 112557dacad5SJay Sternberg } 112657dacad5SJay Sternberg 1127324b494cSKeith Busch if (found) 1128eb281c82SSagi Grimberg nvme_ring_cq_doorbell(nvmeq); 11295cb525c8SJens Axboe return found; 113057dacad5SJay Sternberg } 113157dacad5SJay Sternberg 113257dacad5SJay Sternberg static irqreturn_t nvme_irq(int irq, void *data) 113357dacad5SJay Sternberg { 113457dacad5SJay Sternberg struct nvme_queue *nvmeq = data; 11354f502245SJens Axboe DEFINE_IO_COMP_BATCH(iob); 11365cb525c8SJens Axboe 11374f502245SJens Axboe if (nvme_poll_cq(nvmeq, &iob)) { 11384f502245SJens Axboe if (!rq_list_empty(iob.req_list)) 11394f502245SJens Axboe nvme_pci_complete_batch(&iob); 114005fae499SChaitanya Kulkarni return IRQ_HANDLED; 11414f502245SJens Axboe } 114205fae499SChaitanya Kulkarni return IRQ_NONE; 114357dacad5SJay Sternberg } 114457dacad5SJay Sternberg 114557dacad5SJay Sternberg static irqreturn_t nvme_irq_check(int irq, void *data) 114657dacad5SJay Sternberg { 114757dacad5SJay Sternberg struct nvme_queue *nvmeq = data; 11484e523547SBaolin Wang 1149750dde44SChristoph Hellwig if (nvme_cqe_pending(nvmeq)) 115057dacad5SJay Sternberg return IRQ_WAKE_THREAD; 1151d783e0bdSMarta Rybczynska return IRQ_NONE; 115257dacad5SJay Sternberg } 115357dacad5SJay Sternberg 11540b2a8a9fSChristoph Hellwig /* 1155fa059b85SKeith Busch * Poll for completions for any interrupt driven queue 11560b2a8a9fSChristoph Hellwig * Can be called from any context. 
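 * The queue's interrupt is masked (disable_irq) for the duration of the
 * poll, so the hard IRQ handler cannot run concurrently with it.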
11570b2a8a9fSChristoph Hellwig */ 1158fa059b85SKeith Busch static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) 1159a0fa9647SJens Axboe { 11603a7afd8eSChristoph Hellwig struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 1161a0fa9647SJens Axboe 1162fa059b85SKeith Busch WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); 1163fa059b85SKeith Busch 11643a7afd8eSChristoph Hellwig disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); 1165c234a653SJens Axboe nvme_poll_cq(nvmeq, NULL); 11663a7afd8eSChristoph Hellwig enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); 116791a509f8SChristoph Hellwig } 1168442e19b7SSagi Grimberg 11695a72e899SJens Axboe static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) 11707776db1cSKeith Busch { 11717776db1cSKeith Busch struct nvme_queue *nvmeq = hctx->driver_data; 1172dabcefabSJens Axboe bool found; 1173dabcefabSJens Axboe 1174dabcefabSJens Axboe if (!nvme_cqe_pending(nvmeq)) 1175dabcefabSJens Axboe return 0; 1176dabcefabSJens Axboe 11773a7afd8eSChristoph Hellwig spin_lock(&nvmeq->cq_poll_lock); 1178c234a653SJens Axboe found = nvme_poll_cq(nvmeq, iob); 11793a7afd8eSChristoph Hellwig spin_unlock(&nvmeq->cq_poll_lock); 1180dabcefabSJens Axboe 1181dabcefabSJens Axboe return found; 1182dabcefabSJens Axboe } 1183dabcefabSJens Axboe 1184ad22c355SKeith Busch static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) 118557dacad5SJay Sternberg { 1186f866fc42SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 1187147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[0]; 1188f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 118957dacad5SJay Sternberg 119057dacad5SJay Sternberg c.common.opcode = nvme_admin_async_event; 1191ad22c355SKeith Busch c.common.command_id = NVME_AQ_BLK_MQ_DEPTH; 11923233b94cSJens Axboe 11933233b94cSJens Axboe spin_lock(&nvmeq->sq_lock); 11943233b94cSJens Axboe nvme_sq_copy_cmd(nvmeq, &c); 11953233b94cSJens Axboe nvme_write_sq_db(nvmeq, true); 11963233b94cSJens Axboe spin_unlock(&nvmeq->sq_lock); 119757dacad5SJay Sternberg } 119857dacad5SJay Sternberg 119957dacad5SJay Sternberg static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) 120057dacad5SJay Sternberg { 1201f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 120257dacad5SJay Sternberg 120357dacad5SJay Sternberg c.delete_queue.opcode = opcode; 120457dacad5SJay Sternberg c.delete_queue.qid = cpu_to_le16(id); 120557dacad5SJay Sternberg 12061c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 120757dacad5SJay Sternberg } 120857dacad5SJay Sternberg 120957dacad5SJay Sternberg static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, 1210a8e3e0bbSJianchao Wang struct nvme_queue *nvmeq, s16 vector) 121157dacad5SJay Sternberg { 1212f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 12134b04cc6aSJens Axboe int flags = NVME_QUEUE_PHYS_CONTIG; 12144b04cc6aSJens Axboe 12157c349ddeSKeith Busch if (!test_bit(NVMEQ_POLLED, &nvmeq->flags)) 12164b04cc6aSJens Axboe flags |= NVME_CQ_IRQ_ENABLED; 121757dacad5SJay Sternberg 121857dacad5SJay Sternberg /* 121916772ae6SMinwoo Im * Note: we (ab)use the fact that the prp fields survive if no data 122057dacad5SJay Sternberg * is attached to the request. 
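 * Here prp1 carries the DMA address of the completion queue itself
 * rather than pointing at data to be transferred.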
122157dacad5SJay Sternberg 	 */
122257dacad5SJay Sternberg 	c.create_cq.opcode = nvme_admin_create_cq;
122357dacad5SJay Sternberg 	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
122457dacad5SJay Sternberg 	c.create_cq.cqid = cpu_to_le16(qid);
122557dacad5SJay Sternberg 	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
122657dacad5SJay Sternberg 	c.create_cq.cq_flags = cpu_to_le16(flags);
1227a8e3e0bbSJianchao Wang 	c.create_cq.irq_vector = cpu_to_le16(vector);
122857dacad5SJay Sternberg 
12291c63dc66SChristoph Hellwig 	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
123057dacad5SJay Sternberg }
123157dacad5SJay Sternberg 
123257dacad5SJay Sternberg static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
123357dacad5SJay Sternberg 						struct nvme_queue *nvmeq)
123457dacad5SJay Sternberg {
12359abd68efSJens Axboe 	struct nvme_ctrl *ctrl = &dev->ctrl;
1236f66e2804SChaitanya Kulkarni 	struct nvme_command c = { };
123781c1cd98SKeith Busch 	int flags = NVME_QUEUE_PHYS_CONTIG;
123857dacad5SJay Sternberg 
123957dacad5SJay Sternberg 	/*
12409abd68efSJens Axboe 	 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
12419abd68efSJens Axboe 	 * set. Since URGENT priority is zero, it makes all queues
12429abd68efSJens Axboe 	 * URGENT.
12439abd68efSJens Axboe 	 */
12449abd68efSJens Axboe 	if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
12459abd68efSJens Axboe 		flags |= NVME_SQ_PRIO_MEDIUM;
12469abd68efSJens Axboe 
12479abd68efSJens Axboe 	/*
124816772ae6SMinwoo Im 	 * Note: we (ab)use the fact that the prp fields survive if no data
124957dacad5SJay Sternberg 	 * is attached to the request.
125057dacad5SJay Sternberg 	 */
125157dacad5SJay Sternberg 	c.create_sq.opcode = nvme_admin_create_sq;
125257dacad5SJay Sternberg 	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
125357dacad5SJay Sternberg 	c.create_sq.sqid = cpu_to_le16(qid);
125457dacad5SJay Sternberg 	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
125557dacad5SJay Sternberg 	c.create_sq.sq_flags = cpu_to_le16(flags);
125657dacad5SJay Sternberg 	c.create_sq.cqid = cpu_to_le16(qid);
125757dacad5SJay Sternberg 
12581c63dc66SChristoph Hellwig 	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
125957dacad5SJay Sternberg }
126057dacad5SJay Sternberg 
126157dacad5SJay Sternberg static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
126257dacad5SJay Sternberg {
126357dacad5SJay Sternberg 	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
126457dacad5SJay Sternberg }
126557dacad5SJay Sternberg 
126657dacad5SJay Sternberg static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
126757dacad5SJay Sternberg {
126857dacad5SJay Sternberg 	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
126957dacad5SJay Sternberg }
127057dacad5SJay Sternberg 
1271de671d61SJens Axboe static enum rq_end_io_ret abort_endio(struct request *req, blk_status_t error)
127257dacad5SJay Sternberg {
1273a53232cbSKeith Busch 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
127457dacad5SJay Sternberg 
127527fa9bc5SChristoph Hellwig 	dev_warn(nvmeq->dev->ctrl.device,
127627fa9bc5SChristoph Hellwig 		 "Abort status: 0x%x", nvme_req(req)->status);
1277e7a2a87dSChristoph Hellwig 	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
1278e7a2a87dSChristoph Hellwig 	blk_mq_free_request(req);
1279de671d61SJens Axboe 	return RQ_END_IO_NONE;
128057dacad5SJay Sternberg }
128157dacad5SJay Sternberg 
1282b2a0eb1aSKeith Busch static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
1283b2a0eb1aSKeith Busch {
1284b2a0eb1aSKeith Busch 	/* If true, indicates loss of adapter
communication, possibly by an
1285b2a0eb1aSKeith Busch 	 * NVMe Subsystem reset.
1286b2a0eb1aSKeith Busch 	 */
1287b2a0eb1aSKeith Busch 	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
1288b2a0eb1aSKeith Busch 
1289ad70062cSJianchao Wang 	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
1290ad70062cSJianchao Wang 	switch (dev->ctrl.state) {
1291ad70062cSJianchao Wang 	case NVME_CTRL_RESETTING:
1292ad6a0a52SMax Gurtovoy 	case NVME_CTRL_CONNECTING:
1293b2a0eb1aSKeith Busch 		return false;
1294ad70062cSJianchao Wang 	default:
1295ad70062cSJianchao Wang 		break;
1296ad70062cSJianchao Wang 	}
1297b2a0eb1aSKeith Busch 
1298b2a0eb1aSKeith Busch 	/* We shouldn't reset unless the controller is in a fatal error state
1299b2a0eb1aSKeith Busch 	 * _or_ if we lost communication with it.
1300b2a0eb1aSKeith Busch 	 */
1301b2a0eb1aSKeith Busch 	if (!(csts & NVME_CSTS_CFS) && !nssro)
1302b2a0eb1aSKeith Busch 		return false;
1303b2a0eb1aSKeith Busch 
1304b2a0eb1aSKeith Busch 	return true;
1305b2a0eb1aSKeith Busch }
1306b2a0eb1aSKeith Busch 
1307b2a0eb1aSKeith Busch static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
1308b2a0eb1aSKeith Busch {
1309b2a0eb1aSKeith Busch 	/* Read a config register to help see what died. */
1310b2a0eb1aSKeith Busch 	u16 pci_status;
1311b2a0eb1aSKeith Busch 	int result;
1312b2a0eb1aSKeith Busch 
1313b2a0eb1aSKeith Busch 	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
1314b2a0eb1aSKeith Busch 				      &pci_status);
1315b2a0eb1aSKeith Busch 	if (result == PCIBIOS_SUCCESSFUL)
1316b2a0eb1aSKeith Busch 		dev_warn(dev->ctrl.device,
1317b2a0eb1aSKeith Busch 			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
1318b2a0eb1aSKeith Busch 			 csts, pci_status);
1319b2a0eb1aSKeith Busch 	else
1320b2a0eb1aSKeith Busch 		dev_warn(dev->ctrl.device,
1321b2a0eb1aSKeith Busch 			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
1322b2a0eb1aSKeith Busch 			 csts, result);
13234641a8e6SKeith Busch 
13244641a8e6SKeith Busch 	if (csts != ~0)
13254641a8e6SKeith Busch 		return;
13264641a8e6SKeith Busch 
13274641a8e6SKeith Busch 	dev_warn(dev->ctrl.device,
13284641a8e6SKeith Busch 		 "Does your device have a faulty power saving mode enabled?\n");
13294641a8e6SKeith Busch 	dev_warn(dev->ctrl.device,
13304641a8e6SKeith Busch 		 "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n");
1331b2a0eb1aSKeith Busch }
1332b2a0eb1aSKeith Busch 
13339bdb4833SJohn Garry static enum blk_eh_timer_return nvme_timeout(struct request *req)
133457dacad5SJay Sternberg {
1335f4800d6dSChristoph Hellwig 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1336a53232cbSKeith Busch 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
133757dacad5SJay Sternberg 	struct nvme_dev *dev = nvmeq->dev;
133857dacad5SJay Sternberg 	struct request *abort_req;
1339f66e2804SChaitanya Kulkarni 	struct nvme_command cmd = { };
1340b2a0eb1aSKeith Busch 	u32 csts = readl(dev->bar + NVME_REG_CSTS);
1341b2a0eb1aSKeith Busch 
1342651438bbSWen Xiong 	/* If the PCI error recovery process is happening, we cannot reset or
1343651438bbSWen Xiong 	 * the recovery mechanism will surely fail.
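	 * Returning BLK_EH_RESET_TIMER keeps the request alive until the
	 * recovery code has had a chance to run.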
1344651438bbSWen Xiong 	 */
1345651438bbSWen Xiong 	mb();
1346651438bbSWen Xiong 	if (pci_channel_offline(to_pci_dev(dev->dev)))
1347651438bbSWen Xiong 		return BLK_EH_RESET_TIMER;
1348651438bbSWen Xiong 
1349b2a0eb1aSKeith Busch 	/*
1350b2a0eb1aSKeith Busch 	 * Reset immediately if the controller has failed
1351b2a0eb1aSKeith Busch 	 */
1352b2a0eb1aSKeith Busch 	if (nvme_should_reset(dev, csts)) {
1353b2a0eb1aSKeith Busch 		nvme_warn_reset(dev, csts);
1354b2a0eb1aSKeith Busch 		nvme_dev_disable(dev, false);
1355d86c4d8eSChristoph Hellwig 		nvme_reset_ctrl(&dev->ctrl);
1356db8c48e4SChristoph Hellwig 		return BLK_EH_DONE;
1357b2a0eb1aSKeith Busch 	}
135857dacad5SJay Sternberg 
135931c7c7d2SChristoph Hellwig 	/*
13607776db1cSKeith Busch 	 * Did we miss an interrupt?
13617776db1cSKeith Busch 	 */
1362fa059b85SKeith Busch 	if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
13635a72e899SJens Axboe 		nvme_poll(req->mq_hctx, NULL);
1364fa059b85SKeith Busch 	else
1365bf392a5dSKeith Busch 		nvme_poll_irqdisable(nvmeq);
1366fa059b85SKeith Busch 
1367bf392a5dSKeith Busch 	if (blk_mq_request_completed(req)) {
13687776db1cSKeith Busch 		dev_warn(dev->ctrl.device,
13697776db1cSKeith Busch 			 "I/O %d QID %d timeout, completion polled\n",
13707776db1cSKeith Busch 			 req->tag, nvmeq->qid);
1371db8c48e4SChristoph Hellwig 		return BLK_EH_DONE;
13727776db1cSKeith Busch 	}
13737776db1cSKeith Busch 
13747776db1cSKeith Busch 	/*
1375fd634f41SChristoph Hellwig 	 * Shutdown immediately if the controller times out while starting. The
1376fd634f41SChristoph Hellwig 	 * reset work will see the pci device disabled when it gets the forced
1377fd634f41SChristoph Hellwig 	 * cancellation error. All outstanding requests are completed on
1378db8c48e4SChristoph Hellwig 	 * shutdown, so we return BLK_EH_DONE.
1379fd634f41SChristoph Hellwig 	 */
13804244140dSKeith Busch 	switch (dev->ctrl.state) {
13814244140dSKeith Busch 	case NVME_CTRL_CONNECTING:
13822036f726SKeith Busch 		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
1383df561f66SGustavo A. R. Silva 		fallthrough;
13842036f726SKeith Busch 	case NVME_CTRL_DELETING:
1385b9cac43cSKeith Busch 		dev_warn_ratelimited(dev->ctrl.device,
1386fd634f41SChristoph Hellwig 			 "I/O %d QID %d timeout, disable controller\n",
1387fd634f41SChristoph Hellwig 			 req->tag, nvmeq->qid);
138827fa9bc5SChristoph Hellwig 		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
13897ad92f65STong Zhang 		nvme_dev_disable(dev, true);
1390db8c48e4SChristoph Hellwig 		return BLK_EH_DONE;
139139a9dd81SKeith Busch 	case NVME_CTRL_RESETTING:
139239a9dd81SKeith Busch 		return BLK_EH_RESET_TIMER;
13934244140dSKeith Busch 	default:
13944244140dSKeith Busch 		break;
1395fd634f41SChristoph Hellwig 	}
1396fd634f41SChristoph Hellwig 
1397fd634f41SChristoph Hellwig 	/*
1398e1569a16SKeith Busch 	 * Shutdown the controller immediately and schedule a reset if the
1399e1569a16SKeith Busch 	 * command was already aborted once before and still hasn't been
1400e1569a16SKeith Busch 	 * returned to the driver, or if this is the admin queue.
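	 * An abort is itself an admin command, so it cannot be relied on to
	 * recover an unresponsive admin queue.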
140131c7c7d2SChristoph Hellwig */ 1402f4800d6dSChristoph Hellwig if (!nvmeq->qid || iod->aborted) { 14031b3c47c1SSagi Grimberg dev_warn(dev->ctrl.device, 140457dacad5SJay Sternberg "I/O %d QID %d timeout, reset controller\n", 140557dacad5SJay Sternberg req->tag, nvmeq->qid); 14067ad92f65STong Zhang nvme_req(req)->flags |= NVME_REQ_CANCELLED; 1407a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 1408d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 1409e1569a16SKeith Busch 1410db8c48e4SChristoph Hellwig return BLK_EH_DONE; 141157dacad5SJay Sternberg } 141257dacad5SJay Sternberg 1413e7a2a87dSChristoph Hellwig if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { 1414e7a2a87dSChristoph Hellwig atomic_inc(&dev->ctrl.abort_limit); 1415e7a2a87dSChristoph Hellwig return BLK_EH_RESET_TIMER; 1416e7a2a87dSChristoph Hellwig } 141752da4f3fSKeith Busch iod->aborted = true; 141857dacad5SJay Sternberg 141957dacad5SJay Sternberg cmd.abort.opcode = nvme_admin_abort_cmd; 142085f74acfSKeith Busch cmd.abort.cid = nvme_cid(req); 142157dacad5SJay Sternberg cmd.abort.sqid = cpu_to_le16(nvmeq->qid); 142257dacad5SJay Sternberg 14231b3c47c1SSagi Grimberg dev_warn(nvmeq->dev->ctrl.device, 142486141440SChristoph Hellwig "I/O %d (%s) QID %d timeout, aborting\n", 142586141440SChristoph Hellwig req->tag, 142686141440SChristoph Hellwig nvme_get_opcode_str(nvme_req(req)->cmd->common.opcode), 142786141440SChristoph Hellwig nvmeq->qid); 1428e7a2a87dSChristoph Hellwig 1429e559398fSChristoph Hellwig abort_req = blk_mq_alloc_request(dev->ctrl.admin_q, nvme_req_op(&cmd), 143039dfe844SChaitanya Kulkarni BLK_MQ_REQ_NOWAIT); 14316bf25d16SChristoph Hellwig if (IS_ERR(abort_req)) { 14326bf25d16SChristoph Hellwig atomic_inc(&dev->ctrl.abort_limit); 143331c7c7d2SChristoph Hellwig return BLK_EH_RESET_TIMER; 143457dacad5SJay Sternberg } 1435e559398fSChristoph Hellwig nvme_init_request(abort_req, &cmd); 143657dacad5SJay Sternberg 1437e2e53086SChristoph Hellwig abort_req->end_io = abort_endio; 1438e7a2a87dSChristoph Hellwig abort_req->end_io_data = NULL; 1439128126a7SChaitanya Kulkarni abort_req->rq_flags |= RQF_QUIET; 1440e2e53086SChristoph Hellwig blk_execute_rq_nowait(abort_req, false); 144157dacad5SJay Sternberg 144257dacad5SJay Sternberg /* 144357dacad5SJay Sternberg * The aborted req will be completed on receiving the abort req. 144457dacad5SJay Sternberg * We enable the timer again. If hit twice, it'll cause a device reset, 144557dacad5SJay Sternberg * as the device then is in a faulty state. 
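	 * At most one abort is issued per request: iod->aborted, set above,
	 * makes a second timeout escalate straight to a controller reset.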
144657dacad5SJay Sternberg */ 144757dacad5SJay Sternberg return BLK_EH_RESET_TIMER; 144857dacad5SJay Sternberg } 144957dacad5SJay Sternberg 145057dacad5SJay Sternberg static void nvme_free_queue(struct nvme_queue *nvmeq) 145157dacad5SJay Sternberg { 14528a1d09a6SBenjamin Herrenschmidt dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq), 145357dacad5SJay Sternberg (void *)nvmeq->cqes, nvmeq->cq_dma_addr); 145463223078SChristoph Hellwig if (!nvmeq->sq_cmds) 145563223078SChristoph Hellwig return; 14560f238ff5SLogan Gunthorpe 145763223078SChristoph Hellwig if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { 145888a041f4SKeith Busch pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev), 14598a1d09a6SBenjamin Herrenschmidt nvmeq->sq_cmds, SQ_SIZE(nvmeq)); 146063223078SChristoph Hellwig } else { 14618a1d09a6SBenjamin Herrenschmidt dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq), 146263223078SChristoph Hellwig nvmeq->sq_cmds, nvmeq->sq_dma_addr); 14630f238ff5SLogan Gunthorpe } 146457dacad5SJay Sternberg } 146557dacad5SJay Sternberg 146657dacad5SJay Sternberg static void nvme_free_queues(struct nvme_dev *dev, int lowest) 146757dacad5SJay Sternberg { 146857dacad5SJay Sternberg int i; 146957dacad5SJay Sternberg 1470d858e5f0SSagi Grimberg for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { 1471d858e5f0SSagi Grimberg dev->ctrl.queue_count--; 1472147b27e4SSagi Grimberg nvme_free_queue(&dev->queues[i]); 147357dacad5SJay Sternberg } 147457dacad5SJay Sternberg } 147557dacad5SJay Sternberg 147657dacad5SJay Sternberg /** 147757dacad5SJay Sternberg * nvme_suspend_queue - put queue into suspended state 147840581d1aSBart Van Assche * @nvmeq: queue to suspend 147957dacad5SJay Sternberg */ 148057dacad5SJay Sternberg static int nvme_suspend_queue(struct nvme_queue *nvmeq) 148157dacad5SJay Sternberg { 14824e224106SChristoph Hellwig if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) 148357dacad5SJay Sternberg return 1; 148457dacad5SJay Sternberg 14854e224106SChristoph Hellwig /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */ 1486d1f06f4aSJens Axboe mb(); 148757dacad5SJay Sternberg 14884e224106SChristoph Hellwig nvmeq->dev->online_queues--; 14891c63dc66SChristoph Hellwig if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) 14906ca1d902SMing Lei nvme_stop_admin_queue(&nvmeq->dev->ctrl); 14917c349ddeSKeith Busch if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags)) 14924e224106SChristoph Hellwig pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq); 149357dacad5SJay Sternberg return 0; 149457dacad5SJay Sternberg } 149557dacad5SJay Sternberg 14968fae268bSKeith Busch static void nvme_suspend_io_queues(struct nvme_dev *dev) 14978fae268bSKeith Busch { 14988fae268bSKeith Busch int i; 14998fae268bSKeith Busch 15008fae268bSKeith Busch for (i = dev->ctrl.queue_count - 1; i > 0; i--) 15018fae268bSKeith Busch nvme_suspend_queue(&dev->queues[i]); 15028fae268bSKeith Busch } 15038fae268bSKeith Busch 1504a5cdb68cSKeith Busch static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) 150557dacad5SJay Sternberg { 1506147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[0]; 150757dacad5SJay Sternberg 1508a5cdb68cSKeith Busch if (shutdown) 1509a5cdb68cSKeith Busch nvme_shutdown_ctrl(&dev->ctrl); 1510a5cdb68cSKeith Busch else 1511b5b05048SSagi Grimberg nvme_disable_ctrl(&dev->ctrl); 151257dacad5SJay Sternberg 1513bf392a5dSKeith Busch nvme_poll_irqdisable(nvmeq); 151457dacad5SJay Sternberg } 151557dacad5SJay Sternberg 1516fa46c6fbSKeith Busch /* 1517fa46c6fbSKeith Busch * Called only on a device 
that has been disabled and after all other threads 15189210c075SDongli Zhang * that can check this device's completion queues have synced, except 15199210c075SDongli Zhang * nvme_poll(). This is the last chance for the driver to see a natural 15209210c075SDongli Zhang * completion before nvme_cancel_request() terminates all incomplete requests. 1521fa46c6fbSKeith Busch */ 1522fa46c6fbSKeith Busch static void nvme_reap_pending_cqes(struct nvme_dev *dev) 1523fa46c6fbSKeith Busch { 1524fa46c6fbSKeith Busch int i; 1525fa46c6fbSKeith Busch 15269210c075SDongli Zhang for (i = dev->ctrl.queue_count - 1; i > 0; i--) { 15279210c075SDongli Zhang spin_lock(&dev->queues[i].cq_poll_lock); 1528c234a653SJens Axboe nvme_poll_cq(&dev->queues[i], NULL); 15299210c075SDongli Zhang spin_unlock(&dev->queues[i].cq_poll_lock); 15309210c075SDongli Zhang } 1531fa46c6fbSKeith Busch } 1532fa46c6fbSKeith Busch 153357dacad5SJay Sternberg static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, 153457dacad5SJay Sternberg int entry_size) 153557dacad5SJay Sternberg { 153657dacad5SJay Sternberg int q_depth = dev->q_depth; 15375fd4ce1bSChristoph Hellwig unsigned q_size_aligned = roundup(q_depth * entry_size, 15386c3c05b0SChaitanya Kulkarni NVME_CTRL_PAGE_SIZE); 153957dacad5SJay Sternberg 154057dacad5SJay Sternberg if (q_size_aligned * nr_io_queues > dev->cmb_size) { 154157dacad5SJay Sternberg u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); 15424e523547SBaolin Wang 15436c3c05b0SChaitanya Kulkarni mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE); 154457dacad5SJay Sternberg q_depth = div_u64(mem_per_q, entry_size); 154557dacad5SJay Sternberg 154657dacad5SJay Sternberg /* 154757dacad5SJay Sternberg * Ensure the reduced q_depth is above some threshold where it 154857dacad5SJay Sternberg * would be better to map queues in system memory with the 154957dacad5SJay Sternberg * original depth 155057dacad5SJay Sternberg */ 155157dacad5SJay Sternberg if (q_depth < 64) 155257dacad5SJay Sternberg return -ENOMEM; 155357dacad5SJay Sternberg } 155457dacad5SJay Sternberg 155557dacad5SJay Sternberg return q_depth; 155657dacad5SJay Sternberg } 155757dacad5SJay Sternberg 155857dacad5SJay Sternberg static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, 15598a1d09a6SBenjamin Herrenschmidt int qid) 156057dacad5SJay Sternberg { 15610f238ff5SLogan Gunthorpe struct pci_dev *pdev = to_pci_dev(dev->dev); 1562815c6704SKeith Busch 15630f238ff5SLogan Gunthorpe if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { 15648a1d09a6SBenjamin Herrenschmidt nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq)); 1565bfac8e9fSAlan Mikhak if (nvmeq->sq_cmds) { 15660f238ff5SLogan Gunthorpe nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, 15670f238ff5SLogan Gunthorpe nvmeq->sq_cmds); 156863223078SChristoph Hellwig if (nvmeq->sq_dma_addr) { 156963223078SChristoph Hellwig set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); 157063223078SChristoph Hellwig return 0; 157163223078SChristoph Hellwig } 1572bfac8e9fSAlan Mikhak 15738a1d09a6SBenjamin Herrenschmidt pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq)); 1574bfac8e9fSAlan Mikhak } 15750f238ff5SLogan Gunthorpe } 15760f238ff5SLogan Gunthorpe 15778a1d09a6SBenjamin Herrenschmidt nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq), 157857dacad5SJay Sternberg &nvmeq->sq_dma_addr, GFP_KERNEL); 157957dacad5SJay Sternberg if (!nvmeq->sq_cmds) 158057dacad5SJay Sternberg return -ENOMEM; 158157dacad5SJay Sternberg return 0; 158257dacad5SJay Sternberg } 158357dacad5SJay 
Sternberg 1584a6ff7262SKeith Busch static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) 158557dacad5SJay Sternberg { 1586147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[qid]; 158757dacad5SJay Sternberg 158862314e40SKeith Busch if (dev->ctrl.queue_count > qid) 158962314e40SKeith Busch return 0; 159057dacad5SJay Sternberg 1591c1e0cc7eSBenjamin Herrenschmidt nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES; 15928a1d09a6SBenjamin Herrenschmidt nvmeq->q_depth = depth; 15938a1d09a6SBenjamin Herrenschmidt nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), 159457dacad5SJay Sternberg &nvmeq->cq_dma_addr, GFP_KERNEL); 159557dacad5SJay Sternberg if (!nvmeq->cqes) 159657dacad5SJay Sternberg goto free_nvmeq; 159757dacad5SJay Sternberg 15988a1d09a6SBenjamin Herrenschmidt if (nvme_alloc_sq_cmds(dev, nvmeq, qid)) 159957dacad5SJay Sternberg goto free_cqdma; 160057dacad5SJay Sternberg 160157dacad5SJay Sternberg nvmeq->dev = dev; 16021ab0cd69SJens Axboe spin_lock_init(&nvmeq->sq_lock); 16033a7afd8eSChristoph Hellwig spin_lock_init(&nvmeq->cq_poll_lock); 160457dacad5SJay Sternberg nvmeq->cq_head = 0; 160557dacad5SJay Sternberg nvmeq->cq_phase = 1; 160657dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 160757dacad5SJay Sternberg nvmeq->qid = qid; 1608d858e5f0SSagi Grimberg dev->ctrl.queue_count++; 160957dacad5SJay Sternberg 1610147b27e4SSagi Grimberg return 0; 161157dacad5SJay Sternberg 161257dacad5SJay Sternberg free_cqdma: 16138a1d09a6SBenjamin Herrenschmidt dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, 161457dacad5SJay Sternberg nvmeq->cq_dma_addr); 161557dacad5SJay Sternberg free_nvmeq: 1616147b27e4SSagi Grimberg return -ENOMEM; 161757dacad5SJay Sternberg } 161857dacad5SJay Sternberg 1619dca51e78SChristoph Hellwig static int queue_request_irq(struct nvme_queue *nvmeq) 162057dacad5SJay Sternberg { 16210ff199cbSChristoph Hellwig struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 16220ff199cbSChristoph Hellwig int nr = nvmeq->dev->ctrl.instance; 16230ff199cbSChristoph Hellwig 16240ff199cbSChristoph Hellwig if (use_threaded_interrupts) { 16250ff199cbSChristoph Hellwig return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, 16260ff199cbSChristoph Hellwig nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 16270ff199cbSChristoph Hellwig } else { 16280ff199cbSChristoph Hellwig return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, 16290ff199cbSChristoph Hellwig NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 16300ff199cbSChristoph Hellwig } 163157dacad5SJay Sternberg } 163257dacad5SJay Sternberg 163357dacad5SJay Sternberg static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) 163457dacad5SJay Sternberg { 163557dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 163657dacad5SJay Sternberg 163757dacad5SJay Sternberg nvmeq->sq_tail = 0; 163838210800SKeith Busch nvmeq->last_sq_tail = 0; 163957dacad5SJay Sternberg nvmeq->cq_head = 0; 164057dacad5SJay Sternberg nvmeq->cq_phase = 1; 164157dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 16428a1d09a6SBenjamin Herrenschmidt memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); 1643f9f38e33SHelen Koike nvme_dbbuf_init(dev, nvmeq, qid); 164457dacad5SJay Sternberg dev->online_queues++; 16453a7afd8eSChristoph Hellwig wmb(); /* ensure the first interrupt sees the initialization */ 164657dacad5SJay Sternberg } 164757dacad5SJay Sternberg 1648e4b9852aSCasey Chen /* 1649e4b9852aSCasey Chen * Try getting shutdown_lock while setting up IO queues. 
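 * Returns 0 with the lock held on success, or -ENODEV if the lock is
 * unavailable or the controller has already left the CONNECTING state.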
1650e4b9852aSCasey Chen */ 1651e4b9852aSCasey Chen static int nvme_setup_io_queues_trylock(struct nvme_dev *dev) 1652e4b9852aSCasey Chen { 1653e4b9852aSCasey Chen /* 1654e4b9852aSCasey Chen * Give up if the lock is being held by nvme_dev_disable. 1655e4b9852aSCasey Chen */ 1656e4b9852aSCasey Chen if (!mutex_trylock(&dev->shutdown_lock)) 1657e4b9852aSCasey Chen return -ENODEV; 1658e4b9852aSCasey Chen 1659e4b9852aSCasey Chen /* 1660e4b9852aSCasey Chen * Controller is in wrong state, fail early. 1661e4b9852aSCasey Chen */ 1662e4b9852aSCasey Chen if (dev->ctrl.state != NVME_CTRL_CONNECTING) { 1663e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 1664e4b9852aSCasey Chen return -ENODEV; 1665e4b9852aSCasey Chen } 1666e4b9852aSCasey Chen 1667e4b9852aSCasey Chen return 0; 1668e4b9852aSCasey Chen } 1669e4b9852aSCasey Chen 16704b04cc6aSJens Axboe static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) 167157dacad5SJay Sternberg { 167257dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 167357dacad5SJay Sternberg int result; 16747c349ddeSKeith Busch u16 vector = 0; 167557dacad5SJay Sternberg 1676d1ed6aa1SChristoph Hellwig clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 1677d1ed6aa1SChristoph Hellwig 167822b55601SKeith Busch /* 167922b55601SKeith Busch * A queue's vector matches the queue identifier unless the controller 168022b55601SKeith Busch * has only one vector available. 168122b55601SKeith Busch */ 16824b04cc6aSJens Axboe if (!polled) 1683a8e3e0bbSJianchao Wang vector = dev->num_vecs == 1 ? 0 : qid; 16844b04cc6aSJens Axboe else 16857c349ddeSKeith Busch set_bit(NVMEQ_POLLED, &nvmeq->flags); 16864b04cc6aSJens Axboe 1687a8e3e0bbSJianchao Wang result = adapter_alloc_cq(dev, qid, nvmeq, vector); 1688ded45505SKeith Busch if (result) 1689ded45505SKeith Busch return result; 169057dacad5SJay Sternberg 169157dacad5SJay Sternberg result = adapter_alloc_sq(dev, qid, nvmeq); 169257dacad5SJay Sternberg if (result < 0) 1693ded45505SKeith Busch return result; 1694c80b36cdSEdmund Nadolski if (result) 169557dacad5SJay Sternberg goto release_cq; 169657dacad5SJay Sternberg 1697a8e3e0bbSJianchao Wang nvmeq->cq_vector = vector; 16984b04cc6aSJens Axboe 1699e4b9852aSCasey Chen result = nvme_setup_io_queues_trylock(dev); 1700e4b9852aSCasey Chen if (result) 1701e4b9852aSCasey Chen return result; 1702e4b9852aSCasey Chen nvme_init_queue(nvmeq, qid); 17037c349ddeSKeith Busch if (!polled) { 1704dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 170557dacad5SJay Sternberg if (result < 0) 170657dacad5SJay Sternberg goto release_sq; 17074b04cc6aSJens Axboe } 170857dacad5SJay Sternberg 17094e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &nvmeq->flags); 1710e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 171157dacad5SJay Sternberg return result; 171257dacad5SJay Sternberg 171357dacad5SJay Sternberg release_sq: 1714f25a2dfcSJianchao Wang dev->online_queues--; 1715e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 171657dacad5SJay Sternberg adapter_delete_sq(dev, qid); 171757dacad5SJay Sternberg release_cq: 171857dacad5SJay Sternberg adapter_delete_cq(dev, qid); 171957dacad5SJay Sternberg return result; 172057dacad5SJay Sternberg } 172157dacad5SJay Sternberg 1722f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_admin_ops = { 172357dacad5SJay Sternberg .queue_rq = nvme_queue_rq, 172477f02a7aSChristoph Hellwig .complete = nvme_pci_complete_rq, 172557dacad5SJay Sternberg .init_hctx = nvme_admin_init_hctx, 1726e559398fSChristoph Hellwig .init_request = 
nvme_pci_init_request, 172757dacad5SJay Sternberg .timeout = nvme_timeout, 172857dacad5SJay Sternberg }; 172957dacad5SJay Sternberg 1730f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_ops = { 1731376f7ef8SChristoph Hellwig .queue_rq = nvme_queue_rq, 1732d62cbcf6SJens Axboe .queue_rqs = nvme_queue_rqs, 1733376f7ef8SChristoph Hellwig .complete = nvme_pci_complete_rq, 1734376f7ef8SChristoph Hellwig .commit_rqs = nvme_commit_rqs, 1735376f7ef8SChristoph Hellwig .init_hctx = nvme_init_hctx, 1736e559398fSChristoph Hellwig .init_request = nvme_pci_init_request, 1737376f7ef8SChristoph Hellwig .map_queues = nvme_pci_map_queues, 1738376f7ef8SChristoph Hellwig .timeout = nvme_timeout, 1739c6d962aeSChristoph Hellwig .poll = nvme_poll, 1740dabcefabSJens Axboe }; 1741dabcefabSJens Axboe 174257dacad5SJay Sternberg static void nvme_dev_remove_admin(struct nvme_dev *dev) 174357dacad5SJay Sternberg { 17441c63dc66SChristoph Hellwig if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { 174569d9a99cSKeith Busch /* 174669d9a99cSKeith Busch * If the controller was reset during removal, it's possible 174769d9a99cSKeith Busch * user requests may be waiting on a stopped queue. Start the 174869d9a99cSKeith Busch * queue to flush these to completion. 174969d9a99cSKeith Busch */ 17506ca1d902SMing Lei nvme_start_admin_queue(&dev->ctrl); 17516f8191fdSChristoph Hellwig blk_mq_destroy_queue(dev->ctrl.admin_q); 175257dacad5SJay Sternberg blk_mq_free_tag_set(&dev->admin_tagset); 175357dacad5SJay Sternberg } 175457dacad5SJay Sternberg } 175557dacad5SJay Sternberg 1756f91b727cSChristoph Hellwig static int nvme_pci_alloc_admin_tag_set(struct nvme_dev *dev) 175757dacad5SJay Sternberg { 1758f91b727cSChristoph Hellwig struct blk_mq_tag_set *set = &dev->admin_tagset; 1759e3e9d50cSKeith Busch 1760f91b727cSChristoph Hellwig set->ops = &nvme_mq_admin_ops; 1761f91b727cSChristoph Hellwig set->nr_hw_queues = 1; 176257dacad5SJay Sternberg 1763f91b727cSChristoph Hellwig set->queue_depth = NVME_AQ_MQ_TAG_DEPTH; 1764f91b727cSChristoph Hellwig set->timeout = NVME_ADMIN_TIMEOUT; 1765f91b727cSChristoph Hellwig set->numa_node = dev->ctrl.numa_node; 1766f91b727cSChristoph Hellwig set->cmd_size = sizeof(struct nvme_iod); 1767f91b727cSChristoph Hellwig set->flags = BLK_MQ_F_NO_SCHED; 1768f91b727cSChristoph Hellwig set->driver_data = dev; 1769f91b727cSChristoph Hellwig 1770f91b727cSChristoph Hellwig if (blk_mq_alloc_tag_set(set)) 177157dacad5SJay Sternberg return -ENOMEM; 1772f91b727cSChristoph Hellwig dev->ctrl.admin_tagset = set; 177357dacad5SJay Sternberg 1774f91b727cSChristoph Hellwig dev->ctrl.admin_q = blk_mq_init_queue(set); 17751c63dc66SChristoph Hellwig if (IS_ERR(dev->ctrl.admin_q)) { 1776f91b727cSChristoph Hellwig blk_mq_free_tag_set(set); 1777da427611SSmith, Kyle Miller (Nimble Kernel) dev->ctrl.admin_q = NULL; 177857dacad5SJay Sternberg return -ENOMEM; 177957dacad5SJay Sternberg } 178057dacad5SJay Sternberg return 0; 178157dacad5SJay Sternberg } 178257dacad5SJay Sternberg 178397f6ef64SXu Yu static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) 178497f6ef64SXu Yu { 178597f6ef64SXu Yu return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); 178697f6ef64SXu Yu } 178797f6ef64SXu Yu 178897f6ef64SXu Yu static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size) 178997f6ef64SXu Yu { 179097f6ef64SXu Yu struct pci_dev *pdev = to_pci_dev(dev->dev); 179197f6ef64SXu Yu 179297f6ef64SXu Yu if (size <= dev->bar_mapped_size) 179397f6ef64SXu Yu return 0; 179497f6ef64SXu Yu if (size > 
pci_resource_len(pdev, 0)) 179597f6ef64SXu Yu return -ENOMEM; 179697f6ef64SXu Yu if (dev->bar) 179797f6ef64SXu Yu iounmap(dev->bar); 179897f6ef64SXu Yu dev->bar = ioremap(pci_resource_start(pdev, 0), size); 179997f6ef64SXu Yu if (!dev->bar) { 180097f6ef64SXu Yu dev->bar_mapped_size = 0; 180197f6ef64SXu Yu return -ENOMEM; 180297f6ef64SXu Yu } 180397f6ef64SXu Yu dev->bar_mapped_size = size; 180497f6ef64SXu Yu dev->dbs = dev->bar + NVME_REG_DBS; 180597f6ef64SXu Yu 180697f6ef64SXu Yu return 0; 180797f6ef64SXu Yu } 180897f6ef64SXu Yu 180901ad0990SSagi Grimberg static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) 181057dacad5SJay Sternberg { 181157dacad5SJay Sternberg int result; 181257dacad5SJay Sternberg u32 aqa; 181357dacad5SJay Sternberg struct nvme_queue *nvmeq; 181457dacad5SJay Sternberg 181597f6ef64SXu Yu result = nvme_remap_bar(dev, db_bar_size(dev, 0)); 181697f6ef64SXu Yu if (result < 0) 181797f6ef64SXu Yu return result; 181897f6ef64SXu Yu 18198ef2074dSGabriel Krisman Bertazi dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? 182020d0dfe6SSagi Grimberg NVME_CAP_NSSRC(dev->ctrl.cap) : 0; 182157dacad5SJay Sternberg 18227a67cbeaSChristoph Hellwig if (dev->subsystem && 18237a67cbeaSChristoph Hellwig (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) 18247a67cbeaSChristoph Hellwig writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); 182557dacad5SJay Sternberg 1826b5b05048SSagi Grimberg result = nvme_disable_ctrl(&dev->ctrl); 182757dacad5SJay Sternberg if (result < 0) 182857dacad5SJay Sternberg return result; 182957dacad5SJay Sternberg 1830a6ff7262SKeith Busch result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); 1831147b27e4SSagi Grimberg if (result) 1832147b27e4SSagi Grimberg return result; 183357dacad5SJay Sternberg 1834635333e4SMax Gurtovoy dev->ctrl.numa_node = dev_to_node(dev->dev); 1835635333e4SMax Gurtovoy 1836147b27e4SSagi Grimberg nvmeq = &dev->queues[0]; 183757dacad5SJay Sternberg aqa = nvmeq->q_depth - 1; 183857dacad5SJay Sternberg aqa |= aqa << 16; 183957dacad5SJay Sternberg 18407a67cbeaSChristoph Hellwig writel(aqa, dev->bar + NVME_REG_AQA); 18417a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); 18427a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); 184357dacad5SJay Sternberg 1844c0f2f45bSSagi Grimberg result = nvme_enable_ctrl(&dev->ctrl); 184557dacad5SJay Sternberg if (result) 1846d4875622SKeith Busch return result; 184757dacad5SJay Sternberg 184857dacad5SJay Sternberg nvmeq->cq_vector = 0; 1849161b8be2SKeith Busch nvme_init_queue(nvmeq, 0); 1850dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 185157dacad5SJay Sternberg if (result) { 18527c349ddeSKeith Busch dev->online_queues--; 1853d4875622SKeith Busch return result; 185457dacad5SJay Sternberg } 185557dacad5SJay Sternberg 18564e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &nvmeq->flags); 185757dacad5SJay Sternberg return result; 185857dacad5SJay Sternberg } 185957dacad5SJay Sternberg 1860749941f2SChristoph Hellwig static int nvme_create_io_queues(struct nvme_dev *dev) 186157dacad5SJay Sternberg { 18624b04cc6aSJens Axboe unsigned i, max, rw_queues; 1863749941f2SChristoph Hellwig int ret = 0; 186457dacad5SJay Sternberg 1865d858e5f0SSagi Grimberg for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { 1866a6ff7262SKeith Busch if (nvme_alloc_queue(dev, i, dev->q_depth)) { 1867749941f2SChristoph Hellwig ret = -ENOMEM; 186857dacad5SJay Sternberg break; 1869749941f2SChristoph Hellwig } 1870749941f2SChristoph Hellwig } 
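	/* Cap at both the controller's limit and the count actually allocated. */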
187157dacad5SJay Sternberg 
1872d858e5f0SSagi Grimberg 	max = min(dev->max_qid, dev->ctrl.queue_count - 1);
1873e20ba6e1SChristoph Hellwig 	if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) {
1874e20ba6e1SChristoph Hellwig 		rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] +
1875e20ba6e1SChristoph Hellwig 				dev->io_queues[HCTX_TYPE_READ];
18764b04cc6aSJens Axboe 	} else {
18774b04cc6aSJens Axboe 		rw_queues = max;
18784b04cc6aSJens Axboe 	}
18794b04cc6aSJens Axboe 
1880949928c1SKeith Busch 	for (i = dev->online_queues; i <= max; i++) {
18814b04cc6aSJens Axboe 		bool polled = i > rw_queues;
18824b04cc6aSJens Axboe 
18834b04cc6aSJens Axboe 		ret = nvme_create_queue(&dev->queues[i], i, polled);
1884d4875622SKeith Busch 		if (ret)
188557dacad5SJay Sternberg 			break;
188657dacad5SJay Sternberg 	}
188757dacad5SJay Sternberg 
1888749941f2SChristoph Hellwig 	/*
1889749941f2SChristoph Hellwig 	 * Ignore failing Create SQ/CQ commands; we can continue with fewer
18908adb8c14SMinwoo Im 	 * than the desired number of queues, and even a controller without
18918adb8c14SMinwoo Im 	 * I/O queues can still be used to issue admin commands. This might
1892749941f2SChristoph Hellwig 	 * be useful to upgrade a buggy firmware for example.
1893749941f2SChristoph Hellwig 	 */
1894749941f2SChristoph Hellwig 	return ret >= 0 ? 0 : ret;
189557dacad5SJay Sternberg }
189657dacad5SJay Sternberg 
189788de4598SChristoph Hellwig static u64 nvme_cmb_size_unit(struct nvme_dev *dev)
189857dacad5SJay Sternberg {
189988de4598SChristoph Hellwig 	u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK;
190088de4598SChristoph Hellwig 
190188de4598SChristoph Hellwig 	return 1ULL << (12 + 4 * szu);
190288de4598SChristoph Hellwig }
190388de4598SChristoph Hellwig 
190488de4598SChristoph Hellwig static u32 nvme_cmb_size(struct nvme_dev *dev)
190588de4598SChristoph Hellwig {
190688de4598SChristoph Hellwig 	return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK;
190788de4598SChristoph Hellwig }
190888de4598SChristoph Hellwig 
1909f65efd6dSChristoph Hellwig static void nvme_map_cmb(struct nvme_dev *dev)
191057dacad5SJay Sternberg {
191188de4598SChristoph Hellwig 	u64 size, offset;
191257dacad5SJay Sternberg 	resource_size_t bar_size;
191357dacad5SJay Sternberg 	struct pci_dev *pdev = to_pci_dev(dev->dev);
19148969f1f8SChristoph Hellwig 	int bar;
191557dacad5SJay Sternberg 
19169fe5c59fSKeith Busch 	if (dev->cmb_size)
19179fe5c59fSKeith Busch 		return;
19189fe5c59fSKeith Busch 
191920d3bb92SKlaus Jensen 	if (NVME_CAP_CMBS(dev->ctrl.cap))
192020d3bb92SKlaus Jensen 		writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC);
192120d3bb92SKlaus Jensen 
19227a67cbeaSChristoph Hellwig 	dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
1923f65efd6dSChristoph Hellwig 	if (!dev->cmbsz)
1924f65efd6dSChristoph Hellwig 		return;
1925202021c1SStephen Bates 	dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
192657dacad5SJay Sternberg 
192788de4598SChristoph Hellwig 	size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
192888de4598SChristoph Hellwig 	offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
19298969f1f8SChristoph Hellwig 	bar = NVME_CMB_BIR(dev->cmbloc);
19308969f1f8SChristoph Hellwig 	bar_size = pci_resource_len(pdev, bar);
193157dacad5SJay Sternberg 
193257dacad5SJay Sternberg 	if (offset > bar_size)
1933f65efd6dSChristoph Hellwig 		return;
193457dacad5SJay Sternberg 
193557dacad5SJay Sternberg 	/*
193620d3bb92SKlaus Jensen 	 * Tell the controller about the host side address mapping the CMB,
193720d3bb92SKlaus Jensen 	 * and enable CMB decoding for the NVMe 1.4+ scheme:
193820d3bb92SKlaus Jensen 	 */
193920d3bb92SKlaus Jensen if (NVME_CAP_CMBS(dev->ctrl.cap)) { 194020d3bb92SKlaus Jensen hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE | 194120d3bb92SKlaus Jensen (pci_bus_address(pdev, bar) + offset), 194220d3bb92SKlaus Jensen dev->bar + NVME_REG_CMBMSC); 194320d3bb92SKlaus Jensen } 194420d3bb92SKlaus Jensen 194520d3bb92SKlaus Jensen /* 194657dacad5SJay Sternberg * Controllers may support a CMB size larger than their BAR, 194757dacad5SJay Sternberg * for example, due to being behind a bridge. Reduce the CMB to 194857dacad5SJay Sternberg * the reported size of the BAR 194957dacad5SJay Sternberg */ 195057dacad5SJay Sternberg if (size > bar_size - offset) 195157dacad5SJay Sternberg size = bar_size - offset; 195257dacad5SJay Sternberg 19530f238ff5SLogan Gunthorpe if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { 19540f238ff5SLogan Gunthorpe dev_warn(dev->ctrl.device, 19550f238ff5SLogan Gunthorpe "failed to register the CMB\n"); 1956f65efd6dSChristoph Hellwig return; 19570f238ff5SLogan Gunthorpe } 19580f238ff5SLogan Gunthorpe 195957dacad5SJay Sternberg dev->cmb_size = size; 19600f238ff5SLogan Gunthorpe dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); 19610f238ff5SLogan Gunthorpe 19620f238ff5SLogan Gunthorpe if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == 19630f238ff5SLogan Gunthorpe (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) 19640f238ff5SLogan Gunthorpe pci_p2pmem_publish(pdev, true); 196557dacad5SJay Sternberg } 196657dacad5SJay Sternberg 196787ad72a5SChristoph Hellwig static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) 196857dacad5SJay Sternberg { 19696c3c05b0SChaitanya Kulkarni u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT; 19704033f35dSChristoph Hellwig u64 dma_addr = dev->host_mem_descs_dma; 1971f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 197287ad72a5SChristoph Hellwig int ret; 197387ad72a5SChristoph Hellwig 197487ad72a5SChristoph Hellwig c.features.opcode = nvme_admin_set_features; 197587ad72a5SChristoph Hellwig c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); 197687ad72a5SChristoph Hellwig c.features.dword11 = cpu_to_le32(bits); 19776c3c05b0SChaitanya Kulkarni c.features.dword12 = cpu_to_le32(host_mem_size); 197887ad72a5SChristoph Hellwig c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr)); 197987ad72a5SChristoph Hellwig c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr)); 198087ad72a5SChristoph Hellwig c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); 198187ad72a5SChristoph Hellwig 198287ad72a5SChristoph Hellwig ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 198387ad72a5SChristoph Hellwig if (ret) { 198487ad72a5SChristoph Hellwig dev_warn(dev->ctrl.device, 198587ad72a5SChristoph Hellwig "failed to set host mem (err %d, flags %#x).\n", 198687ad72a5SChristoph Hellwig ret, bits); 1987a5df5e79SKeith Busch } else 1988a5df5e79SKeith Busch dev->hmb = bits & NVME_HOST_MEM_ENABLE; 1989a5df5e79SKeith Busch 199087ad72a5SChristoph Hellwig return ret; 199187ad72a5SChristoph Hellwig } 199287ad72a5SChristoph Hellwig 199387ad72a5SChristoph Hellwig static void nvme_free_host_mem(struct nvme_dev *dev) 199487ad72a5SChristoph Hellwig { 199587ad72a5SChristoph Hellwig int i; 199687ad72a5SChristoph Hellwig 199787ad72a5SChristoph Hellwig for (i = 0; i < dev->nr_host_mem_descs; i++) { 199887ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; 19996c3c05b0SChaitanya Kulkarni size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE; 200087ad72a5SChristoph Hellwig 
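		/* desc->size is stored in units of NVME_CTRL_PAGE_SIZE, not bytes. */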
2001cc667f6dSLiviu Dudau dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], 2002cc667f6dSLiviu Dudau le64_to_cpu(desc->addr), 2003cc667f6dSLiviu Dudau DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 200487ad72a5SChristoph Hellwig } 200587ad72a5SChristoph Hellwig 200687ad72a5SChristoph Hellwig kfree(dev->host_mem_desc_bufs); 200787ad72a5SChristoph Hellwig dev->host_mem_desc_bufs = NULL; 20084033f35dSChristoph Hellwig dma_free_coherent(dev->dev, 20094033f35dSChristoph Hellwig dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), 20104033f35dSChristoph Hellwig dev->host_mem_descs, dev->host_mem_descs_dma); 201187ad72a5SChristoph Hellwig dev->host_mem_descs = NULL; 20127e5dd57eSMinwoo Im dev->nr_host_mem_descs = 0; 201387ad72a5SChristoph Hellwig } 201487ad72a5SChristoph Hellwig 201592dc6895SChristoph Hellwig static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, 201692dc6895SChristoph Hellwig u32 chunk_size) 201787ad72a5SChristoph Hellwig { 201887ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *descs; 201992dc6895SChristoph Hellwig u32 max_entries, len; 20204033f35dSChristoph Hellwig dma_addr_t descs_dma; 20212ee0e4edSDan Carpenter int i = 0; 202287ad72a5SChristoph Hellwig void **bufs; 20236fbcde66SMinwoo Im u64 size, tmp; 202487ad72a5SChristoph Hellwig 202587ad72a5SChristoph Hellwig tmp = (preferred + chunk_size - 1); 202687ad72a5SChristoph Hellwig do_div(tmp, chunk_size); 202787ad72a5SChristoph Hellwig max_entries = tmp; 2028044a9df1SChristoph Hellwig 2029044a9df1SChristoph Hellwig if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) 2030044a9df1SChristoph Hellwig max_entries = dev->ctrl.hmmaxd; 2031044a9df1SChristoph Hellwig 2032750afb08SLuis Chamberlain descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), 20334033f35dSChristoph Hellwig &descs_dma, GFP_KERNEL); 203487ad72a5SChristoph Hellwig if (!descs) 203587ad72a5SChristoph Hellwig goto out; 203687ad72a5SChristoph Hellwig 203787ad72a5SChristoph Hellwig bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL); 203887ad72a5SChristoph Hellwig if (!bufs) 203987ad72a5SChristoph Hellwig goto out_free_descs; 204087ad72a5SChristoph Hellwig 2041244a8fe4SMinwoo Im for (size = 0; size < preferred && i < max_entries; size += len) { 204287ad72a5SChristoph Hellwig dma_addr_t dma_addr; 204387ad72a5SChristoph Hellwig 204450cdb7c6SChristoph Hellwig len = min_t(u64, chunk_size, preferred - size); 204587ad72a5SChristoph Hellwig bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, 204687ad72a5SChristoph Hellwig DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 204787ad72a5SChristoph Hellwig if (!bufs[i]) 204887ad72a5SChristoph Hellwig break; 204987ad72a5SChristoph Hellwig 205087ad72a5SChristoph Hellwig descs[i].addr = cpu_to_le64(dma_addr); 20516c3c05b0SChaitanya Kulkarni descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE); 205287ad72a5SChristoph Hellwig i++; 205387ad72a5SChristoph Hellwig } 205487ad72a5SChristoph Hellwig 205592dc6895SChristoph Hellwig if (!size) 205687ad72a5SChristoph Hellwig goto out_free_bufs; 205787ad72a5SChristoph Hellwig 205887ad72a5SChristoph Hellwig dev->nr_host_mem_descs = i; 205987ad72a5SChristoph Hellwig dev->host_mem_size = size; 206087ad72a5SChristoph Hellwig dev->host_mem_descs = descs; 20614033f35dSChristoph Hellwig dev->host_mem_descs_dma = descs_dma; 206287ad72a5SChristoph Hellwig dev->host_mem_desc_bufs = bufs; 206387ad72a5SChristoph Hellwig return 0; 206487ad72a5SChristoph Hellwig 206587ad72a5SChristoph Hellwig out_free_bufs: 206687ad72a5SChristoph Hellwig while 
(--i >= 0) { 20676c3c05b0SChaitanya Kulkarni size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE; 206887ad72a5SChristoph Hellwig 2069cc667f6dSLiviu Dudau dma_free_attrs(dev->dev, size, bufs[i], 2070cc667f6dSLiviu Dudau le64_to_cpu(descs[i].addr), 2071cc667f6dSLiviu Dudau DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 207287ad72a5SChristoph Hellwig } 207387ad72a5SChristoph Hellwig 207487ad72a5SChristoph Hellwig kfree(bufs); 207587ad72a5SChristoph Hellwig out_free_descs: 20764033f35dSChristoph Hellwig dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, 20774033f35dSChristoph Hellwig descs_dma); 207887ad72a5SChristoph Hellwig out: 207987ad72a5SChristoph Hellwig dev->host_mem_descs = NULL; 208087ad72a5SChristoph Hellwig return -ENOMEM; 208187ad72a5SChristoph Hellwig } 208287ad72a5SChristoph Hellwig 208392dc6895SChristoph Hellwig static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) 208492dc6895SChristoph Hellwig { 20859dc54a0dSChaitanya Kulkarni u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES); 20869dc54a0dSChaitanya Kulkarni u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); 20879dc54a0dSChaitanya Kulkarni u64 chunk_size; 208892dc6895SChristoph Hellwig 208992dc6895SChristoph Hellwig /* start big and work our way down */ 20909dc54a0dSChaitanya Kulkarni for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) { 209192dc6895SChristoph Hellwig if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) { 209292dc6895SChristoph Hellwig if (!min || dev->host_mem_size >= min) 209392dc6895SChristoph Hellwig return 0; 209492dc6895SChristoph Hellwig nvme_free_host_mem(dev); 209592dc6895SChristoph Hellwig } 209692dc6895SChristoph Hellwig } 209792dc6895SChristoph Hellwig 209892dc6895SChristoph Hellwig return -ENOMEM; 209992dc6895SChristoph Hellwig } 210092dc6895SChristoph Hellwig 21019620cfbaSChristoph Hellwig static int nvme_setup_host_mem(struct nvme_dev *dev) 210287ad72a5SChristoph Hellwig { 210387ad72a5SChristoph Hellwig u64 max = (u64)max_host_mem_size_mb * SZ_1M; 210487ad72a5SChristoph Hellwig u64 preferred = (u64)dev->ctrl.hmpre * 4096; 210587ad72a5SChristoph Hellwig u64 min = (u64)dev->ctrl.hmmin * 4096; 210687ad72a5SChristoph Hellwig u32 enable_bits = NVME_HOST_MEM_ENABLE; 21076fbcde66SMinwoo Im int ret; 210887ad72a5SChristoph Hellwig 210987ad72a5SChristoph Hellwig preferred = min(preferred, max); 211087ad72a5SChristoph Hellwig if (min > max) { 211187ad72a5SChristoph Hellwig dev_warn(dev->ctrl.device, 211287ad72a5SChristoph Hellwig "min host memory (%lld MiB) above limit (%d MiB).\n", 211387ad72a5SChristoph Hellwig min >> ilog2(SZ_1M), max_host_mem_size_mb); 211487ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 21159620cfbaSChristoph Hellwig return 0; 211687ad72a5SChristoph Hellwig } 211787ad72a5SChristoph Hellwig 211887ad72a5SChristoph Hellwig /* 211987ad72a5SChristoph Hellwig * If we already have a buffer allocated check if we can reuse it. 
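	 * A buffer that is still large enough is handed back to the controller
	 * by setting NVME_HOST_MEM_RETURN below; otherwise it is freed first.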
212087ad72a5SChristoph Hellwig */ 212187ad72a5SChristoph Hellwig if (dev->host_mem_descs) { 212287ad72a5SChristoph Hellwig if (dev->host_mem_size >= min) 212387ad72a5SChristoph Hellwig enable_bits |= NVME_HOST_MEM_RETURN; 212487ad72a5SChristoph Hellwig else 212587ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 212687ad72a5SChristoph Hellwig } 212787ad72a5SChristoph Hellwig 212887ad72a5SChristoph Hellwig if (!dev->host_mem_descs) { 212992dc6895SChristoph Hellwig if (nvme_alloc_host_mem(dev, min, preferred)) { 213092dc6895SChristoph Hellwig dev_warn(dev->ctrl.device, 213192dc6895SChristoph Hellwig "failed to allocate host memory buffer.\n"); 21329620cfbaSChristoph Hellwig return 0; /* controller must work without HMB */ 213387ad72a5SChristoph Hellwig } 213487ad72a5SChristoph Hellwig 213592dc6895SChristoph Hellwig dev_info(dev->ctrl.device, 213692dc6895SChristoph Hellwig "allocated %lld MiB host memory buffer.\n", 213792dc6895SChristoph Hellwig dev->host_mem_size >> ilog2(SZ_1M)); 213892dc6895SChristoph Hellwig } 213992dc6895SChristoph Hellwig 21409620cfbaSChristoph Hellwig ret = nvme_set_host_mem(dev, enable_bits); 21419620cfbaSChristoph Hellwig if (ret) 214287ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 21439620cfbaSChristoph Hellwig return ret; 214457dacad5SJay Sternberg } 214557dacad5SJay Sternberg 21460521905eSKeith Busch static ssize_t cmb_show(struct device *dev, struct device_attribute *attr, 21470521905eSKeith Busch char *buf) 21480521905eSKeith Busch { 21490521905eSKeith Busch struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 21500521905eSKeith Busch 21510521905eSKeith Busch return sysfs_emit(buf, "cmbloc : x%08x\ncmbsz : x%08x\n", 21520521905eSKeith Busch ndev->cmbloc, ndev->cmbsz); 21530521905eSKeith Busch } 21540521905eSKeith Busch static DEVICE_ATTR_RO(cmb); 21550521905eSKeith Busch 21561751e97aSKeith Busch static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr, 21571751e97aSKeith Busch char *buf) 21581751e97aSKeith Busch { 21591751e97aSKeith Busch struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 21601751e97aSKeith Busch 21611751e97aSKeith Busch return sysfs_emit(buf, "%u\n", ndev->cmbloc); 21621751e97aSKeith Busch } 21631751e97aSKeith Busch static DEVICE_ATTR_RO(cmbloc); 21641751e97aSKeith Busch 21651751e97aSKeith Busch static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr, 21661751e97aSKeith Busch char *buf) 21671751e97aSKeith Busch { 21681751e97aSKeith Busch struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 21691751e97aSKeith Busch 21701751e97aSKeith Busch return sysfs_emit(buf, "%u\n", ndev->cmbsz); 21711751e97aSKeith Busch } 21721751e97aSKeith Busch static DEVICE_ATTR_RO(cmbsz); 21731751e97aSKeith Busch 2174a5df5e79SKeith Busch static ssize_t hmb_show(struct device *dev, struct device_attribute *attr, 2175a5df5e79SKeith Busch char *buf) 2176a5df5e79SKeith Busch { 2177a5df5e79SKeith Busch struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2178a5df5e79SKeith Busch 2179a5df5e79SKeith Busch return sysfs_emit(buf, "%d\n", ndev->hmb); 2180a5df5e79SKeith Busch } 2181a5df5e79SKeith Busch 2182a5df5e79SKeith Busch static ssize_t hmb_store(struct device *dev, struct device_attribute *attr, 2183a5df5e79SKeith Busch const char *buf, size_t count) 2184a5df5e79SKeith Busch { 2185a5df5e79SKeith Busch struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 2186a5df5e79SKeith Busch bool new; 2187a5df5e79SKeith Busch int ret; 2188a5df5e79SKeith Busch 2189a5df5e79SKeith Busch if (strtobool(buf, &new) < 0) 
2190a5df5e79SKeith Busch 		return -EINVAL;
2191a5df5e79SKeith Busch 
2192a5df5e79SKeith Busch 	if (new == ndev->hmb)
2193a5df5e79SKeith Busch 		return count;
2194a5df5e79SKeith Busch 
2195a5df5e79SKeith Busch 	if (new) {
2196a5df5e79SKeith Busch 		ret = nvme_setup_host_mem(ndev);
2197a5df5e79SKeith Busch 	} else {
2198a5df5e79SKeith Busch 		ret = nvme_set_host_mem(ndev, 0);
2199a5df5e79SKeith Busch 		if (!ret)
2200a5df5e79SKeith Busch 			nvme_free_host_mem(ndev);
2201a5df5e79SKeith Busch 	}
2202a5df5e79SKeith Busch 
2203a5df5e79SKeith Busch 	if (ret < 0)
2204a5df5e79SKeith Busch 		return ret;
2205a5df5e79SKeith Busch 
2206a5df5e79SKeith Busch 	return count;
2207a5df5e79SKeith Busch }
2208a5df5e79SKeith Busch static DEVICE_ATTR_RW(hmb);
2209a5df5e79SKeith Busch 
22100521905eSKeith Busch static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj,
22110521905eSKeith Busch 		struct attribute *a, int n)
22120521905eSKeith Busch {
22130521905eSKeith Busch 	struct nvme_ctrl *ctrl =
22140521905eSKeith Busch 		dev_get_drvdata(container_of(kobj, struct device, kobj));
22150521905eSKeith Busch 	struct nvme_dev *dev = to_nvme_dev(ctrl);
22160521905eSKeith Busch 
22171751e97aSKeith Busch 	if (a == &dev_attr_cmb.attr ||
22181751e97aSKeith Busch 	    a == &dev_attr_cmbloc.attr ||
22191751e97aSKeith Busch 	    a == &dev_attr_cmbsz.attr) {
22201751e97aSKeith Busch 		if (!dev->cmbsz)
22210521905eSKeith Busch 			return 0;
22221751e97aSKeith Busch 	}
2223a5df5e79SKeith Busch 	if (a == &dev_attr_hmb.attr && !ctrl->hmpre)
2224a5df5e79SKeith Busch 		return 0;
2225a5df5e79SKeith Busch 
22260521905eSKeith Busch 	return a->mode;
22270521905eSKeith Busch }
22280521905eSKeith Busch 
22290521905eSKeith Busch static struct attribute *nvme_pci_attrs[] = {
22300521905eSKeith Busch 	&dev_attr_cmb.attr,
22311751e97aSKeith Busch 	&dev_attr_cmbloc.attr,
22321751e97aSKeith Busch 	&dev_attr_cmbsz.attr,
2233a5df5e79SKeith Busch 	&dev_attr_hmb.attr,
22340521905eSKeith Busch 	NULL,
22350521905eSKeith Busch };
22360521905eSKeith Busch 
22370521905eSKeith Busch static const struct attribute_group nvme_pci_attr_group = {
22380521905eSKeith Busch 	.attrs		= nvme_pci_attrs,
22390521905eSKeith Busch 	.is_visible	= nvme_pci_attrs_are_visible,
22400521905eSKeith Busch };
22410521905eSKeith Busch 
2242612b7286SMing Lei /*
2243612b7286SMing Lei  * nrirqs is the number of interrupts available for write and read
2244612b7286SMing Lei  * queues. The core already reserved an interrupt for the admin queue.
2245612b7286SMing Lei  */
2246612b7286SMing Lei static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
22473b6592f7SJens Axboe {
2248612b7286SMing Lei 	struct nvme_dev *dev = affd->priv;
22492a5bcfddSWeiping Zhang 	unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;
2250c45b1fa2SMing Lei 
22513b6592f7SJens Axboe 	/*
2252ee0d96d3SBaolin Wang 	 * If there is no interrupt available for queues, ensure that
2253612b7286SMing Lei 	 * the default queue is set to 1. The affinity set size is
2254612b7286SMing Lei 	 * also set to one, but the irq core ignores it for this case.
2255612b7286SMing Lei 	 *
2256612b7286SMing Lei 	 * If only one interrupt is available or 'write_queues' == 0, combine
2257612b7286SMing Lei 	 * write and read queues.
2258612b7286SMing Lei 	 *
2259612b7286SMing Lei 	 * If 'write_queues' > 0, ensure it leaves room for at least one read
2260612b7286SMing Lei 	 * queue.
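	 *
	 * Example: with nrirqs == 8 and write_queues == 3, this yields three
	 * default (write) queues and five read queues.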
22613b6592f7SJens Axboe */ 2262612b7286SMing Lei if (!nrirqs) { 2263612b7286SMing Lei nrirqs = 1; 2264612b7286SMing Lei nr_read_queues = 0; 22652a5bcfddSWeiping Zhang } else if (nrirqs == 1 || !nr_write_queues) { 2266612b7286SMing Lei nr_read_queues = 0; 22672a5bcfddSWeiping Zhang } else if (nr_write_queues >= nrirqs) { 2268612b7286SMing Lei nr_read_queues = 1; 22693b6592f7SJens Axboe } else { 22702a5bcfddSWeiping Zhang nr_read_queues = nrirqs - nr_write_queues; 22713b6592f7SJens Axboe } 2272612b7286SMing Lei 2273612b7286SMing Lei dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; 2274612b7286SMing Lei affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; 2275612b7286SMing Lei dev->io_queues[HCTX_TYPE_READ] = nr_read_queues; 2276612b7286SMing Lei affd->set_size[HCTX_TYPE_READ] = nr_read_queues; 2277612b7286SMing Lei affd->nr_sets = nr_read_queues ? 2 : 1; 22783b6592f7SJens Axboe } 22793b6592f7SJens Axboe 22806451fe73SJens Axboe static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) 22813b6592f7SJens Axboe { 22823b6592f7SJens Axboe struct pci_dev *pdev = to_pci_dev(dev->dev); 22833b6592f7SJens Axboe struct irq_affinity affd = { 22843b6592f7SJens Axboe .pre_vectors = 1, 2285612b7286SMing Lei .calc_sets = nvme_calc_irq_sets, 2286612b7286SMing Lei .priv = dev, 22873b6592f7SJens Axboe }; 228821cc2f3fSJeffle Xu unsigned int irq_queues, poll_queues; 22896451fe73SJens Axboe 22906451fe73SJens Axboe /* 229121cc2f3fSJeffle Xu * Poll queues don't need interrupts, but we need at least one I/O queue 229221cc2f3fSJeffle Xu * left over for non-polled I/O. 22936451fe73SJens Axboe */ 229421cc2f3fSJeffle Xu poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1); 229521cc2f3fSJeffle Xu dev->io_queues[HCTX_TYPE_POLL] = poll_queues; 22963b6592f7SJens Axboe 229721cc2f3fSJeffle Xu /* 229821cc2f3fSJeffle Xu * Initialize for the single interrupt case, will be updated in 229921cc2f3fSJeffle Xu * nvme_calc_irq_sets(). 230021cc2f3fSJeffle Xu */ 2301612b7286SMing Lei dev->io_queues[HCTX_TYPE_DEFAULT] = 1; 2302612b7286SMing Lei dev->io_queues[HCTX_TYPE_READ] = 0; 23033b6592f7SJens Axboe 230466341331SBenjamin Herrenschmidt /* 230521cc2f3fSJeffle Xu * We need interrupts for the admin queue and each non-polled I/O queue, 230621cc2f3fSJeffle Xu * but some Apple controllers require all queues to use the first 230721cc2f3fSJeffle Xu * vector. 230866341331SBenjamin Herrenschmidt */ 230966341331SBenjamin Herrenschmidt irq_queues = 1; 231021cc2f3fSJeffle Xu if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)) 231121cc2f3fSJeffle Xu irq_queues += (nr_io_queues - poll_queues); 2312612b7286SMing Lei return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, 23133b6592f7SJens Axboe PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd); 23143b6592f7SJens Axboe } 23153b6592f7SJens Axboe 23168fae268bSKeith Busch static void nvme_disable_io_queues(struct nvme_dev *dev) 23178fae268bSKeith Busch { 23188fae268bSKeith Busch if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq)) 23198fae268bSKeith Busch __nvme_disable_io_queues(dev, nvme_admin_delete_cq); 23208fae268bSKeith Busch } 23218fae268bSKeith Busch 23222a5bcfddSWeiping Zhang static unsigned int nvme_max_io_queues(struct nvme_dev *dev) 23232a5bcfddSWeiping Zhang { 2324e3aef095SNiklas Schnelle /* 2325e3aef095SNiklas Schnelle * If tags are shared with admin queue (Apple bug), then 2326e3aef095SNiklas Schnelle * make sure we only use one IO queue. 
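	 *
	 * Otherwise the ceiling is num_possible_cpus() plus the sampled
	 * write and poll queue counts (annotation, illustrative numbers):
	 * e.g. a 16-CPU system with write_queues=4 and poll_queues=2 may
	 * allocate up to 16 + 4 + 2 = 22 I/O queues.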
2327e3aef095SNiklas Schnelle 	 */
2328e3aef095SNiklas Schnelle 	if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
2329e3aef095SNiklas Schnelle 		return 1;
23302a5bcfddSWeiping Zhang 	return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues;
23312a5bcfddSWeiping Zhang }
23322a5bcfddSWeiping Zhang 
233357dacad5SJay Sternberg static int nvme_setup_io_queues(struct nvme_dev *dev)
233457dacad5SJay Sternberg {
2335147b27e4SSagi Grimberg 	struct nvme_queue *adminq = &dev->queues[0];
233657dacad5SJay Sternberg 	struct pci_dev *pdev = to_pci_dev(dev->dev);
23372a5bcfddSWeiping Zhang 	unsigned int nr_io_queues;
233897f6ef64SXu Yu 	unsigned long size;
23392a5bcfddSWeiping Zhang 	int result;
234057dacad5SJay Sternberg 
23412a5bcfddSWeiping Zhang 	/*
23422a5bcfddSWeiping Zhang 	 * Sample the module parameters once at reset time so that we have
23432a5bcfddSWeiping Zhang 	 * stable values to work with.
23442a5bcfddSWeiping Zhang 	 */
23452a5bcfddSWeiping Zhang 	dev->nr_write_queues = write_queues;
23462a5bcfddSWeiping Zhang 	dev->nr_poll_queues = poll_queues;
2347d38e9f04SBenjamin Herrenschmidt 
2348ff4e5fbaSNiklas Schnelle 	nr_io_queues = dev->nr_allocated_queues - 1;
23499a0be7abSChristoph Hellwig 	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
23509a0be7abSChristoph Hellwig 	if (result < 0)
235157dacad5SJay Sternberg 		return result;
23529a0be7abSChristoph Hellwig 
2353f5fa90dcSChristoph Hellwig 	if (nr_io_queues == 0)
2354a5229050SKeith Busch 		return 0;
235557dacad5SJay Sternberg 
2356e4b9852aSCasey Chen 	/*
2357e4b9852aSCasey Chen 	 * Free IRQ resources as soon as NVMEQ_ENABLED bit transitions
2358e4b9852aSCasey Chen 	 * from set to unset. If there is a window before it is truly freed,
2359e4b9852aSCasey Chen 	 * pci_free_irq_vectors() jumping into this window will crash.
2360e4b9852aSCasey Chen 	 * Take the lock to avoid racing with pci_free_irq_vectors() in
2361e4b9852aSCasey Chen 	 * the nvme_dev_disable() path.
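	 *
	 * Illustrative interleaving this guards against (annotation, not
	 * part of the original comment): the reset path clears NVMEQ_ENABLED
	 * and calls pci_free_irq() while nvme_dev_disable() concurrently
	 * calls pci_free_irq_vectors(), tearing the vector down twice.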
2362e4b9852aSCasey Chen 	 */
2363e4b9852aSCasey Chen 	result = nvme_setup_io_queues_trylock(dev);
2364e4b9852aSCasey Chen 	if (result)
2365e4b9852aSCasey Chen 		return result;
2366e4b9852aSCasey Chen 	if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
2367e4b9852aSCasey Chen 		pci_free_irq(pdev, 0, adminq);
23684e224106SChristoph Hellwig 
23690f238ff5SLogan Gunthorpe 	if (dev->cmb_use_sqes) {
237057dacad5SJay Sternberg 		result = nvme_cmb_qdepth(dev, nr_io_queues,
237157dacad5SJay Sternberg 				sizeof(struct nvme_command));
237257dacad5SJay Sternberg 		if (result > 0)
237357dacad5SJay Sternberg 			dev->q_depth = result;
237457dacad5SJay Sternberg 		else
23750f238ff5SLogan Gunthorpe 			dev->cmb_use_sqes = false;
237657dacad5SJay Sternberg 	}
237757dacad5SJay Sternberg 
237857dacad5SJay Sternberg 	do {
237997f6ef64SXu Yu 		size = db_bar_size(dev, nr_io_queues);
238097f6ef64SXu Yu 		result = nvme_remap_bar(dev, size);
238197f6ef64SXu Yu 		if (!result)
238257dacad5SJay Sternberg 			break;
2383e4b9852aSCasey Chen 		if (!--nr_io_queues) {
2384e4b9852aSCasey Chen 			result = -ENOMEM;
2385e4b9852aSCasey Chen 			goto out_unlock;
2386e4b9852aSCasey Chen 		}
238757dacad5SJay Sternberg 	} while (1);
238857dacad5SJay Sternberg 	adminq->q_db = dev->dbs;
238957dacad5SJay Sternberg 
23908fae268bSKeith Busch  retry:
239157dacad5SJay Sternberg 	/* Deregister the admin queue's interrupt */
2392e4b9852aSCasey Chen 	if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
23930ff199cbSChristoph Hellwig 		pci_free_irq(pdev, 0, adminq);
239457dacad5SJay Sternberg 
239557dacad5SJay Sternberg 	/*
239657dacad5SJay Sternberg 	 * If we enabled MSI-X early because INTx is not available, disable
239757dacad5SJay Sternberg 	 * it again before setting up the full range we need.
239857dacad5SJay Sternberg 	 */
2399dca51e78SChristoph Hellwig 	pci_free_irq_vectors(pdev);
24003b6592f7SJens Axboe 
24013b6592f7SJens Axboe 	result = nvme_setup_irqs(dev, nr_io_queues);
2402e4b9852aSCasey Chen 	if (result <= 0) {
2403e4b9852aSCasey Chen 		result = -EIO;
2404e4b9852aSCasey Chen 		goto out_unlock;
2405e4b9852aSCasey Chen 	}
24063b6592f7SJens Axboe 
240722b55601SKeith Busch 	dev->num_vecs = result;
24084b04cc6aSJens Axboe 	result = max(result - 1, 1);
2409e20ba6e1SChristoph Hellwig 	dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
241057dacad5SJay Sternberg 
241157dacad5SJay Sternberg 	/*
241257dacad5SJay Sternberg 	 * Should investigate if there's a performance win from allocating
241357dacad5SJay Sternberg 	 * more queues than interrupt vectors; it might allow the submission
241457dacad5SJay Sternberg 	 * path to scale better, even if the receive path is limited by the
241557dacad5SJay Sternberg 	 * number of interrupts.
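	 *
	 * (Annotation: as written, the driver gets max(result - 1, 1)
	 * interrupt-driven I/O queues, since one vector is reserved for the
	 * admin queue, and polled queues extend max_qid without consuming
	 * any vectors.)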
241657dacad5SJay Sternberg */ 2417dca51e78SChristoph Hellwig result = queue_request_irq(adminq); 24187c349ddeSKeith Busch if (result) 2419e4b9852aSCasey Chen goto out_unlock; 24204e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &adminq->flags); 2421e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 24228fae268bSKeith Busch 24238fae268bSKeith Busch result = nvme_create_io_queues(dev); 24248fae268bSKeith Busch if (result || dev->online_queues < 2) 24258fae268bSKeith Busch return result; 24268fae268bSKeith Busch 24278fae268bSKeith Busch if (dev->online_queues - 1 < dev->max_qid) { 24288fae268bSKeith Busch nr_io_queues = dev->online_queues - 1; 24298fae268bSKeith Busch nvme_disable_io_queues(dev); 2430e4b9852aSCasey Chen result = nvme_setup_io_queues_trylock(dev); 2431e4b9852aSCasey Chen if (result) 2432e4b9852aSCasey Chen return result; 24338fae268bSKeith Busch nvme_suspend_io_queues(dev); 24348fae268bSKeith Busch goto retry; 24358fae268bSKeith Busch } 24368fae268bSKeith Busch dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", 24378fae268bSKeith Busch dev->io_queues[HCTX_TYPE_DEFAULT], 24388fae268bSKeith Busch dev->io_queues[HCTX_TYPE_READ], 24398fae268bSKeith Busch dev->io_queues[HCTX_TYPE_POLL]); 24408fae268bSKeith Busch return 0; 2441e4b9852aSCasey Chen out_unlock: 2442e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 2443e4b9852aSCasey Chen return result; 244457dacad5SJay Sternberg } 244557dacad5SJay Sternberg 2446de671d61SJens Axboe static enum rq_end_io_ret nvme_del_queue_end(struct request *req, 2447de671d61SJens Axboe blk_status_t error) 2448db3cbfffSKeith Busch { 2449db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 2450db3cbfffSKeith Busch 2451db3cbfffSKeith Busch blk_mq_free_request(req); 2452d1ed6aa1SChristoph Hellwig complete(&nvmeq->delete_done); 2453de671d61SJens Axboe return RQ_END_IO_NONE; 2454db3cbfffSKeith Busch } 2455db3cbfffSKeith Busch 2456de671d61SJens Axboe static enum rq_end_io_ret nvme_del_cq_end(struct request *req, 2457de671d61SJens Axboe blk_status_t error) 2458db3cbfffSKeith Busch { 2459db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 2460db3cbfffSKeith Busch 2461d1ed6aa1SChristoph Hellwig if (error) 2462d1ed6aa1SChristoph Hellwig set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 2463db3cbfffSKeith Busch 2464de671d61SJens Axboe return nvme_del_queue_end(req, error); 2465db3cbfffSKeith Busch } 2466db3cbfffSKeith Busch 2467db3cbfffSKeith Busch static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) 2468db3cbfffSKeith Busch { 2469db3cbfffSKeith Busch struct request_queue *q = nvmeq->dev->ctrl.admin_q; 2470db3cbfffSKeith Busch struct request *req; 2471f66e2804SChaitanya Kulkarni struct nvme_command cmd = { }; 2472db3cbfffSKeith Busch 2473db3cbfffSKeith Busch cmd.delete_queue.opcode = opcode; 2474db3cbfffSKeith Busch cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); 2475db3cbfffSKeith Busch 2476e559398fSChristoph Hellwig req = blk_mq_alloc_request(q, nvme_req_op(&cmd), BLK_MQ_REQ_NOWAIT); 2477db3cbfffSKeith Busch if (IS_ERR(req)) 2478db3cbfffSKeith Busch return PTR_ERR(req); 2479e559398fSChristoph Hellwig nvme_init_request(req, &cmd); 2480db3cbfffSKeith Busch 2481e2e53086SChristoph Hellwig if (opcode == nvme_admin_delete_cq) 2482e2e53086SChristoph Hellwig req->end_io = nvme_del_cq_end; 2483e2e53086SChristoph Hellwig else 2484e2e53086SChristoph Hellwig req->end_io = nvme_del_queue_end; 2485db3cbfffSKeith Busch req->end_io_data = nvmeq; 2486db3cbfffSKeith Busch 2487d1ed6aa1SChristoph Hellwig 
init_completion(&nvmeq->delete_done);
2488128126a7SChaitanya Kulkarni 	req->rq_flags |= RQF_QUIET;
2489e2e53086SChristoph Hellwig 	blk_execute_rq_nowait(req, false);
2490db3cbfffSKeith Busch 	return 0;
2491db3cbfffSKeith Busch }
2492db3cbfffSKeith Busch 
24938fae268bSKeith Busch static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
2494db3cbfffSKeith Busch {
24955271edd4SChristoph Hellwig 	int nr_queues = dev->online_queues - 1, sent = 0;
2496db3cbfffSKeith Busch 	unsigned long timeout;
2497db3cbfffSKeith Busch 
2498db3cbfffSKeith Busch  retry:
2499dc96f938SChaitanya Kulkarni 	timeout = NVME_ADMIN_TIMEOUT;
25005271edd4SChristoph Hellwig 	while (nr_queues > 0) {
25015271edd4SChristoph Hellwig 		if (nvme_delete_queue(&dev->queues[nr_queues], opcode))
2502db3cbfffSKeith Busch 			break;
25035271edd4SChristoph Hellwig 		nr_queues--;
25045271edd4SChristoph Hellwig 		sent++;
25055271edd4SChristoph Hellwig 	}
2506d1ed6aa1SChristoph Hellwig 	while (sent) {
2507d1ed6aa1SChristoph Hellwig 		struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent];
2508d1ed6aa1SChristoph Hellwig 
2509d1ed6aa1SChristoph Hellwig 		timeout = wait_for_completion_io_timeout(&nvmeq->delete_done,
25105271edd4SChristoph Hellwig 				timeout);
2511db3cbfffSKeith Busch 		if (timeout == 0)
25125271edd4SChristoph Hellwig 			return false;
2513d1ed6aa1SChristoph Hellwig 
2514d1ed6aa1SChristoph Hellwig 		sent--;
25155271edd4SChristoph Hellwig 		if (nr_queues)
2516db3cbfffSKeith Busch 			goto retry;
2517db3cbfffSKeith Busch 	}
25185271edd4SChristoph Hellwig 	return true;
2519db3cbfffSKeith Busch }
2520db3cbfffSKeith Busch 
25212455a4b7SChristoph Hellwig static void nvme_pci_alloc_tag_set(struct nvme_dev *dev)
252257dacad5SJay Sternberg {
25232455a4b7SChristoph Hellwig 	struct blk_mq_tag_set *set = &dev->tagset;
25242b1b7e78SJianchao Wang 	int ret;
25252b1b7e78SJianchao Wang 
25262455a4b7SChristoph Hellwig 	set->ops = &nvme_mq_ops;
25272455a4b7SChristoph Hellwig 	set->nr_hw_queues = dev->online_queues - 1;
25286ee742faSKeith Busch 	set->nr_maps = 1;
25296ee742faSKeith Busch 	if (dev->io_queues[HCTX_TYPE_READ])
25306ee742faSKeith Busch 		set->nr_maps = 2;
2531ed92ad37SChristoph Hellwig 	if (dev->io_queues[HCTX_TYPE_POLL])
25326ee742faSKeith Busch 		set->nr_maps = 3;
25332455a4b7SChristoph Hellwig 	set->timeout = NVME_IO_TIMEOUT;
25342455a4b7SChristoph Hellwig 	set->numa_node = dev->ctrl.numa_node;
25352455a4b7SChristoph Hellwig 	set->queue_depth = min_t(unsigned, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
25362455a4b7SChristoph Hellwig 	set->cmd_size = sizeof(struct nvme_iod);
25372455a4b7SChristoph Hellwig 	set->flags = BLK_MQ_F_SHOULD_MERGE;
25382455a4b7SChristoph Hellwig 	set->driver_data = dev;
253957dacad5SJay Sternberg 
2540d38e9f04SBenjamin Herrenschmidt 	/*
2541d38e9f04SBenjamin Herrenschmidt 	 * Some Apple controllers require tags to be unique
2542d38e9f04SBenjamin Herrenschmidt 	 * across the admin and IO queues, so reserve the first 32
2543d38e9f04SBenjamin Herrenschmidt 	 * tags of the IO queue.
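	 *
	 * (Annotation: NVME_AQ_DEPTH is 32, so the set->reserved_tags
	 * assignment below keeps the first 32 tags of the I/O tagset out
	 * of normal I/O use for this quirk.)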
2544d38e9f04SBenjamin Herrenschmidt */ 2545d38e9f04SBenjamin Herrenschmidt if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) 25462455a4b7SChristoph Hellwig set->reserved_tags = NVME_AQ_DEPTH; 2547d38e9f04SBenjamin Herrenschmidt 25482455a4b7SChristoph Hellwig ret = blk_mq_alloc_tag_set(set); 25492b1b7e78SJianchao Wang if (ret) { 25502b1b7e78SJianchao Wang dev_warn(dev->ctrl.device, 25512b1b7e78SJianchao Wang "IO queues tagset allocation failed %d\n", ret); 25525d02a5c1SKeith Busch return; 25532b1b7e78SJianchao Wang } 25542455a4b7SChristoph Hellwig dev->ctrl.tagset = set; 255557dacad5SJay Sternberg } 2556949928c1SKeith Busch 25572455a4b7SChristoph Hellwig static void nvme_pci_update_nr_queues(struct nvme_dev *dev) 25582455a4b7SChristoph Hellwig { 25592455a4b7SChristoph Hellwig blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); 25602455a4b7SChristoph Hellwig /* free previously allocated queues that are no longer usable */ 25612455a4b7SChristoph Hellwig nvme_free_queues(dev, dev->online_queues); 256257dacad5SJay Sternberg } 256357dacad5SJay Sternberg 2564b00a726aSKeith Busch static int nvme_pci_enable(struct nvme_dev *dev) 256557dacad5SJay Sternberg { 2566b00a726aSKeith Busch int result = -ENOMEM; 256757dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 25684bdf2603SFilippo Sironi int dma_address_bits = 64; 256957dacad5SJay Sternberg 257057dacad5SJay Sternberg if (pci_enable_device_mem(pdev)) 257157dacad5SJay Sternberg return result; 257257dacad5SJay Sternberg 257357dacad5SJay Sternberg pci_set_master(pdev); 257457dacad5SJay Sternberg 25754bdf2603SFilippo Sironi if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48) 25764bdf2603SFilippo Sironi dma_address_bits = 48; 25774bdf2603SFilippo Sironi if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits))) 257857dacad5SJay Sternberg goto disable; 257957dacad5SJay Sternberg 25807a67cbeaSChristoph Hellwig if (readl(dev->bar + NVME_REG_CSTS) == -1) { 258157dacad5SJay Sternberg result = -ENODEV; 2582b00a726aSKeith Busch goto disable; 258357dacad5SJay Sternberg } 258457dacad5SJay Sternberg 258557dacad5SJay Sternberg /* 2586a5229050SKeith Busch * Some devices and/or platforms don't advertise or work with INTx 2587a5229050SKeith Busch * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll 2588a5229050SKeith Busch * adjust this later. 258957dacad5SJay Sternberg */ 2590dca51e78SChristoph Hellwig result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); 2591dca51e78SChristoph Hellwig if (result < 0) 2592dca51e78SChristoph Hellwig return result; 259357dacad5SJay Sternberg 259420d0dfe6SSagi Grimberg dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 25957a67cbeaSChristoph Hellwig 25967442ddceSJohn Garry dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, 2597b27c1e68Sweiping zhang io_queue_depth); 2598aa22c8e6SSagi Grimberg dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ 259920d0dfe6SSagi Grimberg dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); 26007a67cbeaSChristoph Hellwig dev->dbs = dev->bar + 4096; 26011f390c1fSStephan Günther 26021f390c1fSStephan Günther /* 260366341331SBenjamin Herrenschmidt * Some Apple controllers require a non-standard SQE size. 260466341331SBenjamin Herrenschmidt * Interestingly they also seem to ignore the CC:IOSQES register 260566341331SBenjamin Herrenschmidt * so we don't bother updating it here. 
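	 *
	 * (Annotation: io_sqes == 7 means 1 << 7 == 128-byte submission
	 * queue entries, versus the spec default NVME_NVM_IOSQES == 6,
	 * i.e. 64 bytes.)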
260666341331SBenjamin Herrenschmidt */ 260766341331SBenjamin Herrenschmidt if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES) 260866341331SBenjamin Herrenschmidt dev->io_sqes = 7; 260966341331SBenjamin Herrenschmidt else 2610c1e0cc7eSBenjamin Herrenschmidt dev->io_sqes = NVME_NVM_IOSQES; 26111f390c1fSStephan Günther 26121f390c1fSStephan Günther /* 26131f390c1fSStephan Günther * Temporary fix for the Apple controller found in the MacBook8,1 and 26141f390c1fSStephan Günther * some MacBook7,1 to avoid controller resets and data loss. 26151f390c1fSStephan Günther */ 26161f390c1fSStephan Günther if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { 26171f390c1fSStephan Günther dev->q_depth = 2; 26189bdcfb10SChristoph Hellwig dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " 26199bdcfb10SChristoph Hellwig "set queue depth=%u to work around controller resets\n", 26201f390c1fSStephan Günther dev->q_depth); 2621d554b5e1SMartin K. Petersen } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && 2622d554b5e1SMartin K. Petersen (pdev->device == 0xa821 || pdev->device == 0xa822) && 262320d0dfe6SSagi Grimberg NVME_CAP_MQES(dev->ctrl.cap) == 0) { 2624d554b5e1SMartin K. Petersen dev->q_depth = 64; 2625d554b5e1SMartin K. Petersen dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " 2626d554b5e1SMartin K. Petersen "set queue depth=%u\n", dev->q_depth); 26271f390c1fSStephan Günther } 26281f390c1fSStephan Günther 2629d38e9f04SBenjamin Herrenschmidt /* 2630d38e9f04SBenjamin Herrenschmidt * Controllers with the shared tags quirk need the IO queue to be 2631d38e9f04SBenjamin Herrenschmidt * big enough so that we get 32 tags for the admin queue 2632d38e9f04SBenjamin Herrenschmidt */ 2633d38e9f04SBenjamin Herrenschmidt if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) && 2634d38e9f04SBenjamin Herrenschmidt (dev->q_depth < (NVME_AQ_DEPTH + 2))) { 2635d38e9f04SBenjamin Herrenschmidt dev->q_depth = NVME_AQ_DEPTH + 2; 2636d38e9f04SBenjamin Herrenschmidt dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n", 2637d38e9f04SBenjamin Herrenschmidt dev->q_depth); 2638d38e9f04SBenjamin Herrenschmidt } 2639d38e9f04SBenjamin Herrenschmidt 2640d38e9f04SBenjamin Herrenschmidt 2641f65efd6dSChristoph Hellwig nvme_map_cmb(dev); 2642202021c1SStephen Bates 2643a0a3408eSKeith Busch pci_enable_pcie_error_reporting(pdev); 2644a0a3408eSKeith Busch pci_save_state(pdev); 264557dacad5SJay Sternberg return 0; 264657dacad5SJay Sternberg 264757dacad5SJay Sternberg disable: 264857dacad5SJay Sternberg pci_disable_device(pdev); 264957dacad5SJay Sternberg return result; 265057dacad5SJay Sternberg } 265157dacad5SJay Sternberg 265257dacad5SJay Sternberg static void nvme_dev_unmap(struct nvme_dev *dev) 265357dacad5SJay Sternberg { 2654b00a726aSKeith Busch if (dev->bar) 2655b00a726aSKeith Busch iounmap(dev->bar); 2656a1f447b3SJohannes Thumshirn pci_release_mem_regions(to_pci_dev(dev->dev)); 2657b00a726aSKeith Busch } 2658b00a726aSKeith Busch 2659b00a726aSKeith Busch static void nvme_pci_disable(struct nvme_dev *dev) 2660b00a726aSKeith Busch { 266157dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 266257dacad5SJay Sternberg 2663dca51e78SChristoph Hellwig pci_free_irq_vectors(pdev); 266457dacad5SJay Sternberg 2665a0a3408eSKeith Busch if (pci_is_enabled(pdev)) { 2666a0a3408eSKeith Busch pci_disable_pcie_error_reporting(pdev); 266757dacad5SJay Sternberg pci_disable_device(pdev); 266857dacad5SJay Sternberg } 2669a0a3408eSKeith Busch } 267057dacad5SJay Sternberg 2671a5cdb68cSKeith Busch static void 
nvme_dev_disable(struct nvme_dev *dev, bool shutdown) 267257dacad5SJay Sternberg { 2673e43269e6SKeith Busch bool dead = true, freeze = false; 2674302ad8ccSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 267557dacad5SJay Sternberg 267677bf25eaSKeith Busch mutex_lock(&dev->shutdown_lock); 2677081f5e75SKeith Busch if (pci_is_enabled(pdev)) { 2678081f5e75SKeith Busch u32 csts; 2679081f5e75SKeith Busch 2680081f5e75SKeith Busch if (pci_device_is_present(pdev)) 2681081f5e75SKeith Busch csts = readl(dev->bar + NVME_REG_CSTS); 2682081f5e75SKeith Busch else 2683081f5e75SKeith Busch csts = ~0; 2684302ad8ccSKeith Busch 2685ebef7368SKeith Busch if (dev->ctrl.state == NVME_CTRL_LIVE || 2686e43269e6SKeith Busch dev->ctrl.state == NVME_CTRL_RESETTING) { 2687e43269e6SKeith Busch freeze = true; 2688302ad8ccSKeith Busch nvme_start_freeze(&dev->ctrl); 2689e43269e6SKeith Busch } 2690302ad8ccSKeith Busch dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || 2691302ad8ccSKeith Busch pdev->error_state != pci_channel_io_normal); 269257dacad5SJay Sternberg } 2693c21377f8SGabriel Krisman Bertazi 2694302ad8ccSKeith Busch /* 2695302ad8ccSKeith Busch * Give the controller a chance to complete all entered requests if 2696302ad8ccSKeith Busch * doing a safe shutdown. 2697302ad8ccSKeith Busch */ 2698e43269e6SKeith Busch if (!dead && shutdown && freeze) 2699302ad8ccSKeith Busch nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); 270087ad72a5SChristoph Hellwig 27019a915a5bSJianchao Wang nvme_stop_queues(&dev->ctrl); 27029a915a5bSJianchao Wang 270364ee0ac0SKeith Busch if (!dead && dev->ctrl.queue_count > 0) { 27048fae268bSKeith Busch nvme_disable_io_queues(dev); 2705a5cdb68cSKeith Busch nvme_disable_admin_queue(dev, shutdown); 270657dacad5SJay Sternberg } 27078fae268bSKeith Busch nvme_suspend_io_queues(dev); 27088fae268bSKeith Busch nvme_suspend_queue(&dev->queues[0]); 2709b00a726aSKeith Busch nvme_pci_disable(dev); 2710fa46c6fbSKeith Busch nvme_reap_pending_cqes(dev); 271157dacad5SJay Sternberg 27121fcfca78SGuixin Liu nvme_cancel_tagset(&dev->ctrl); 27131fcfca78SGuixin Liu nvme_cancel_admin_tagset(&dev->ctrl); 2714302ad8ccSKeith Busch 2715302ad8ccSKeith Busch /* 2716302ad8ccSKeith Busch * The driver will not be starting up queues again if shutting down so 2717302ad8ccSKeith Busch * must flush all entered requests to their failed completion to avoid 2718302ad8ccSKeith Busch * deadlocking blk-mq hot-cpu notifier. 
2719302ad8ccSKeith Busch */ 2720c8e9e9b7SKeith Busch if (shutdown) { 2721302ad8ccSKeith Busch nvme_start_queues(&dev->ctrl); 2722c8e9e9b7SKeith Busch if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) 27236ca1d902SMing Lei nvme_start_admin_queue(&dev->ctrl); 2724c8e9e9b7SKeith Busch } 272577bf25eaSKeith Busch mutex_unlock(&dev->shutdown_lock); 272657dacad5SJay Sternberg } 272757dacad5SJay Sternberg 2728c1ac9a4bSKeith Busch static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) 2729c1ac9a4bSKeith Busch { 2730c1ac9a4bSKeith Busch if (!nvme_wait_reset(&dev->ctrl)) 2731c1ac9a4bSKeith Busch return -EBUSY; 2732c1ac9a4bSKeith Busch nvme_dev_disable(dev, shutdown); 2733c1ac9a4bSKeith Busch return 0; 2734c1ac9a4bSKeith Busch } 2735c1ac9a4bSKeith Busch 273657dacad5SJay Sternberg static int nvme_setup_prp_pools(struct nvme_dev *dev) 273757dacad5SJay Sternberg { 273857dacad5SJay Sternberg dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, 2739c61b82c7SChristoph Hellwig NVME_CTRL_PAGE_SIZE, 2740c61b82c7SChristoph Hellwig NVME_CTRL_PAGE_SIZE, 0); 274157dacad5SJay Sternberg if (!dev->prp_page_pool) 274257dacad5SJay Sternberg return -ENOMEM; 274357dacad5SJay Sternberg 274457dacad5SJay Sternberg /* Optimisation for I/Os between 4k and 128k */ 274557dacad5SJay Sternberg dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, 274657dacad5SJay Sternberg 256, 256, 0); 274757dacad5SJay Sternberg if (!dev->prp_small_pool) { 274857dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 274957dacad5SJay Sternberg return -ENOMEM; 275057dacad5SJay Sternberg } 275157dacad5SJay Sternberg return 0; 275257dacad5SJay Sternberg } 275357dacad5SJay Sternberg 275457dacad5SJay Sternberg static void nvme_release_prp_pools(struct nvme_dev *dev) 275557dacad5SJay Sternberg { 275657dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 275757dacad5SJay Sternberg dma_pool_destroy(dev->prp_small_pool); 275857dacad5SJay Sternberg } 275957dacad5SJay Sternberg 2760770597ecSKeith Busch static void nvme_free_tagset(struct nvme_dev *dev) 2761770597ecSKeith Busch { 2762770597ecSKeith Busch if (dev->tagset.tags) 2763770597ecSKeith Busch blk_mq_free_tag_set(&dev->tagset); 2764770597ecSKeith Busch dev->ctrl.tagset = NULL; 2765770597ecSKeith Busch } 2766770597ecSKeith Busch 27671673f1f0SChristoph Hellwig static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) 276857dacad5SJay Sternberg { 27691673f1f0SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 277057dacad5SJay Sternberg 2771f9f38e33SHelen Koike nvme_dbbuf_dma_free(dev); 2772770597ecSKeith Busch nvme_free_tagset(dev); 27731c63dc66SChristoph Hellwig if (dev->ctrl.admin_q) 27741c63dc66SChristoph Hellwig blk_put_queue(dev->ctrl.admin_q); 2775e286bcfcSScott Bauer free_opal_dev(dev->ctrl.opal_dev); 2776943e942eSJens Axboe mempool_destroy(dev->iod_mempool); 2777253fd4acSIsrael Rukshin put_device(dev->dev); 2778253fd4acSIsrael Rukshin kfree(dev->queues); 277957dacad5SJay Sternberg kfree(dev); 278057dacad5SJay Sternberg } 278157dacad5SJay Sternberg 27827c1ce408SChaitanya Kulkarni static void nvme_remove_dead_ctrl(struct nvme_dev *dev) 2783f58944e2SKeith Busch { 2784c1ac9a4bSKeith Busch /* 2785c1ac9a4bSKeith Busch * Set state to deleting now to avoid blocking nvme_wait_reset(), which 2786c1ac9a4bSKeith Busch * may be holding this pci_dev's device lock. 
2787c1ac9a4bSKeith Busch */ 2788c1ac9a4bSKeith Busch nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 2789d22524a4SChristoph Hellwig nvme_get_ctrl(&dev->ctrl); 279069d9a99cSKeith Busch nvme_dev_disable(dev, false); 2791*cd50f9b2SChristoph Hellwig nvme_mark_namespaces_dead(&dev->ctrl); 2792*cd50f9b2SChristoph Hellwig nvme_start_queues(&dev->ctrl); 279303e0f3a6SMing Lei if (!queue_work(nvme_wq, &dev->remove_work)) 2794f58944e2SKeith Busch nvme_put_ctrl(&dev->ctrl); 2795f58944e2SKeith Busch } 2796f58944e2SKeith Busch 2797fd634f41SChristoph Hellwig static void nvme_reset_work(struct work_struct *work) 279857dacad5SJay Sternberg { 2799d86c4d8eSChristoph Hellwig struct nvme_dev *dev = 2800d86c4d8eSChristoph Hellwig container_of(work, struct nvme_dev, ctrl.reset_work); 2801a98e58e5SScott Bauer bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); 2802e71afda4SChaitanya Kulkarni int result; 280357dacad5SJay Sternberg 28047764656bSZhihao Cheng if (dev->ctrl.state != NVME_CTRL_RESETTING) { 28057764656bSZhihao Cheng dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n", 28067764656bSZhihao Cheng dev->ctrl.state); 2807e71afda4SChaitanya Kulkarni result = -ENODEV; 2808fd634f41SChristoph Hellwig goto out; 2809e71afda4SChaitanya Kulkarni } 2810fd634f41SChristoph Hellwig 2811fd634f41SChristoph Hellwig /* 2812fd634f41SChristoph Hellwig * If we're called to reset a live controller first shut it down before 2813fd634f41SChristoph Hellwig * moving on. 2814fd634f41SChristoph Hellwig */ 2815b00a726aSKeith Busch if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 2816a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 2817d6135c3aSKeith Busch nvme_sync_queues(&dev->ctrl); 2818fd634f41SChristoph Hellwig 28195c959d73SKeith Busch mutex_lock(&dev->shutdown_lock); 2820b00a726aSKeith Busch result = nvme_pci_enable(dev); 282157dacad5SJay Sternberg if (result) 28224726bcf3SKeith Busch goto out_unlock; 282357dacad5SJay Sternberg 282401ad0990SSagi Grimberg result = nvme_pci_configure_admin_queue(dev); 282557dacad5SJay Sternberg if (result) 28264726bcf3SKeith Busch goto out_unlock; 282757dacad5SJay Sternberg 2828f91b727cSChristoph Hellwig if (!dev->ctrl.admin_q) { 2829f91b727cSChristoph Hellwig result = nvme_pci_alloc_admin_tag_set(dev); 283057dacad5SJay Sternberg if (result) 28314726bcf3SKeith Busch goto out_unlock; 2832f91b727cSChristoph Hellwig } else { 2833f91b727cSChristoph Hellwig nvme_start_admin_queue(&dev->ctrl); 2834f91b727cSChristoph Hellwig } 283557dacad5SJay Sternberg 283661ce339fSRishabh Bhatnagar dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1); 283761ce339fSRishabh Bhatnagar 2838943e942eSJens Axboe /* 2839943e942eSJens Axboe * Limit the max command size to prevent iod->sg allocations going 2840943e942eSJens Axboe * over a single page. 2841943e942eSJens Axboe */ 28427637de31SChristoph Hellwig dev->ctrl.max_hw_sectors = min_t(u32, 28437637de31SChristoph Hellwig NVME_MAX_KB_SZ << 1, dma_max_mapping_size(dev->dev) >> 9); 2844943e942eSJens Axboe dev->ctrl.max_segments = NVME_MAX_SEGS; 2845a48bc520SChristoph Hellwig 2846a48bc520SChristoph Hellwig /* 2847a48bc520SChristoph Hellwig * Don't limit the IOMMU merged segment size. 
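	 *
	 * (Annotation: 0xffffffff is UINT_MAX, the widest segment size the
	 * DMA API can express, so merged segments are effectively uncapped.)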
2848a48bc520SChristoph Hellwig 	 */
2849a48bc520SChristoph Hellwig 	dma_set_max_seg_size(dev->dev, 0xffffffff);
2850a48bc520SChristoph Hellwig 
28515c959d73SKeith Busch 	mutex_unlock(&dev->shutdown_lock);
28525c959d73SKeith Busch 
28535c959d73SKeith Busch 	/*
28545c959d73SKeith Busch 	 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
28555c959d73SKeith Busch 	 * initializing procedure here.
28565c959d73SKeith Busch 	 */
28575c959d73SKeith Busch 	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
28585c959d73SKeith Busch 		dev_warn(dev->ctrl.device,
28595c959d73SKeith Busch 			"failed to mark controller CONNECTING\n");
2860cee6c269SMinwoo Im 		result = -EBUSY;
28615c959d73SKeith Busch 		goto out;
28625c959d73SKeith Busch 	}
2863943e942eSJens Axboe 
286495093350SMax Gurtovoy 	/*
286595093350SMax Gurtovoy 	 * We do not support an SGL for metadata (yet), so we are limited to a
286695093350SMax Gurtovoy 	 * single integrity segment for the separate metadata pointer.
286795093350SMax Gurtovoy 	 */
286895093350SMax Gurtovoy 	dev->ctrl.max_integrity_segments = 1;
286995093350SMax Gurtovoy 
2870f21c4769SChaitanya Kulkarni 	result = nvme_init_ctrl_finish(&dev->ctrl);
2871ce4541f4SChristoph Hellwig 	if (result)
2872f58944e2SKeith Busch 		goto out;
2873ce4541f4SChristoph Hellwig 
2874e286bcfcSScott Bauer 	if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) {
2875e286bcfcSScott Bauer 		if (!dev->ctrl.opal_dev)
28764f1244c8SChristoph Hellwig 			dev->ctrl.opal_dev =
28774f1244c8SChristoph Hellwig 				init_opal_dev(&dev->ctrl, &nvme_sec_submit);
2878e286bcfcSScott Bauer 		else if (was_suspend)
28794f1244c8SChristoph Hellwig 			opal_unlock_from_suspend(dev->ctrl.opal_dev);
2880e286bcfcSScott Bauer 	} else {
2881e286bcfcSScott Bauer 		free_opal_dev(dev->ctrl.opal_dev);
2882e286bcfcSScott Bauer 		dev->ctrl.opal_dev = NULL;
2883e286bcfcSScott Bauer 	}
2884a98e58e5SScott Bauer 
2885f9f38e33SHelen Koike 	if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) {
2886f9f38e33SHelen Koike 		result = nvme_dbbuf_dma_alloc(dev);
2887f9f38e33SHelen Koike 		if (result)
2888f9f38e33SHelen Koike 			dev_warn(dev->dev,
2889f9f38e33SHelen Koike 				 "unable to allocate dma for dbbuf\n");
2890f9f38e33SHelen Koike 	}
2891f9f38e33SHelen Koike 
28929620cfbaSChristoph Hellwig 	if (dev->ctrl.hmpre) {
28939620cfbaSChristoph Hellwig 		result = nvme_setup_host_mem(dev);
28949620cfbaSChristoph Hellwig 		if (result < 0)
28959620cfbaSChristoph Hellwig 			goto out;
28969620cfbaSChristoph Hellwig 	}
289787ad72a5SChristoph Hellwig 
289857dacad5SJay Sternberg 	result = nvme_setup_io_queues(dev);
289957dacad5SJay Sternberg 	if (result)
2900f58944e2SKeith Busch 		goto out;
290157dacad5SJay Sternberg 
29020ffc7e98SChristoph Hellwig 	if (dev->ctrl.tagset) {
290321f033f7SKeith Busch 		/*
29040ffc7e98SChristoph Hellwig 		 * This is a controller reset and we already have a tagset.
29050ffc7e98SChristoph Hellwig 		 * Freeze and update the number of I/O queues as those might have
29060ffc7e98SChristoph Hellwig 		 * changed. If there are no I/O queues left after this reset,
29070ffc7e98SChristoph Hellwig 		 * keep the controller around but remove all namespaces.
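		 *
		 * Illustrative outcomes (annotation): a reset that returns
		 * with fewer I/O queues than before ends in
		 * nvme_pci_update_nr_queues() below; one that returns with
		 * none keeps only the admin queue and removes all namespaces.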
290857dacad5SJay Sternberg 		 */
29090ffc7e98SChristoph Hellwig 		if (dev->online_queues > 1) {
291025646264SKeith Busch 			nvme_start_queues(&dev->ctrl);
2911302ad8ccSKeith Busch 			nvme_wait_freeze(&dev->ctrl);
29122455a4b7SChristoph Hellwig 			nvme_pci_update_nr_queues(dev);
29132455a4b7SChristoph Hellwig 			nvme_dbbuf_set(dev);
2914302ad8ccSKeith Busch 			nvme_unfreeze(&dev->ctrl);
29150ffc7e98SChristoph Hellwig 		} else {
29160ffc7e98SChristoph Hellwig 			dev_warn(dev->ctrl.device, "IO queues lost\n");
2917*cd50f9b2SChristoph Hellwig 			nvme_mark_namespaces_dead(&dev->ctrl);
2918*cd50f9b2SChristoph Hellwig 			nvme_start_queues(&dev->ctrl);
29190ffc7e98SChristoph Hellwig 			nvme_remove_namespaces(&dev->ctrl);
29200ffc7e98SChristoph Hellwig 			nvme_free_tagset(dev);
29210ffc7e98SChristoph Hellwig 		}
29220ffc7e98SChristoph Hellwig 	} else {
29230ffc7e98SChristoph Hellwig 		/*
29240ffc7e98SChristoph Hellwig 		 * First probe. Still allow the controller to show up even if
29250ffc7e98SChristoph Hellwig 		 * there are no namespaces.
29260ffc7e98SChristoph Hellwig 		 */
29270ffc7e98SChristoph Hellwig 		if (dev->online_queues > 1) {
29280ffc7e98SChristoph Hellwig 			nvme_pci_alloc_tag_set(dev);
29290ffc7e98SChristoph Hellwig 			nvme_dbbuf_set(dev);
29300ffc7e98SChristoph Hellwig 		} else {
29310ffc7e98SChristoph Hellwig 			dev_warn(dev->ctrl.device, "IO queues not created\n");
29320ffc7e98SChristoph Hellwig 		}
293357dacad5SJay Sternberg 	}
293457dacad5SJay Sternberg 
29352b1b7e78SJianchao Wang 	/*
29362b1b7e78SJianchao Wang 	 * If only the admin queue is alive, keep it for further
29372b1b7e78SJianchao Wang 	 * investigation or recovery.
29382b1b7e78SJianchao Wang 	 */
29395d02a5c1SKeith Busch 	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
29402b1b7e78SJianchao Wang 		dev_warn(dev->ctrl.device,
29415d02a5c1SKeith Busch 			"failed to mark controller live state\n");
2942e71afda4SChaitanya Kulkarni 		result = -ENODEV;
2943bb8d261eSChristoph Hellwig 		goto out;
2944bb8d261eSChristoph Hellwig 	}
294592911a55SChristoph Hellwig 
29460521905eSKeith Busch 	if (!dev->attrs_added && !sysfs_create_group(&dev->ctrl.device->kobj,
29470521905eSKeith Busch 			&nvme_pci_attr_group))
29480521905eSKeith Busch 		dev->attrs_added = true;
29490521905eSKeith Busch 
2950d09f2b45SSagi Grimberg 	nvme_start_ctrl(&dev->ctrl);
295157dacad5SJay Sternberg 	return;
295257dacad5SJay Sternberg 
29534726bcf3SKeith Busch  out_unlock:
29544726bcf3SKeith Busch 	mutex_unlock(&dev->shutdown_lock);
295557dacad5SJay Sternberg  out:
29567c1ce408SChaitanya Kulkarni 	if (result)
29577c1ce408SChaitanya Kulkarni 		dev_warn(dev->ctrl.device,
29587c1ce408SChaitanya Kulkarni 			 "Removing after probe failure status: %d\n", result);
29597c1ce408SChaitanya Kulkarni 	nvme_remove_dead_ctrl(dev);
296057dacad5SJay Sternberg }
296157dacad5SJay Sternberg 
29625c8809e6SChristoph Hellwig static void nvme_remove_dead_ctrl_work(struct work_struct *work)
296357dacad5SJay Sternberg {
29645c8809e6SChristoph Hellwig 	struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
296557dacad5SJay Sternberg 	struct pci_dev *pdev = to_pci_dev(dev->dev);
296657dacad5SJay Sternberg 
296757dacad5SJay Sternberg 	if (pci_get_drvdata(pdev))
2968921920abSKeith Busch 		device_release_driver(&pdev->dev);
29691673f1f0SChristoph Hellwig 	nvme_put_ctrl(&dev->ctrl);
297057dacad5SJay Sternberg }
297157dacad5SJay Sternberg 
29721c63dc66SChristoph Hellwig static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val)
297357dacad5SJay Sternberg {
29741c63dc66SChristoph Hellwig 	*val = readl(to_nvme_dev(ctrl)->bar + off);
29751c63dc66SChristoph Hellwig 	return 0;
297657dacad5SJay Sternberg } 29771c63dc66SChristoph Hellwig 29785fd4ce1bSChristoph Hellwig static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) 29795fd4ce1bSChristoph Hellwig { 29805fd4ce1bSChristoph Hellwig writel(val, to_nvme_dev(ctrl)->bar + off); 29815fd4ce1bSChristoph Hellwig return 0; 29825fd4ce1bSChristoph Hellwig } 29835fd4ce1bSChristoph Hellwig 29847fd8930fSChristoph Hellwig static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) 29857fd8930fSChristoph Hellwig { 29863a8ecc93SArd Biesheuvel *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off); 29877fd8930fSChristoph Hellwig return 0; 29887fd8930fSChristoph Hellwig } 29897fd8930fSChristoph Hellwig 299097c12223SKeith Busch static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) 299197c12223SKeith Busch { 299297c12223SKeith Busch struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); 299397c12223SKeith Busch 29942db24e4aSMax Gurtovoy return snprintf(buf, size, "%s\n", dev_name(&pdev->dev)); 299597c12223SKeith Busch } 299697c12223SKeith Busch 29972f0dad17SKeith Busch static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl) 29982f0dad17SKeith Busch { 29992f0dad17SKeith Busch struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); 30002f0dad17SKeith Busch struct nvme_subsystem *subsys = ctrl->subsys; 30012f0dad17SKeith Busch 30022f0dad17SKeith Busch dev_err(ctrl->device, 30032f0dad17SKeith Busch "VID:DID %04x:%04x model:%.*s firmware:%.*s\n", 30042f0dad17SKeith Busch pdev->vendor, pdev->device, 30052f0dad17SKeith Busch nvme_strlen(subsys->model, sizeof(subsys->model)), 30062f0dad17SKeith Busch subsys->model, nvme_strlen(subsys->firmware_rev, 30072f0dad17SKeith Busch sizeof(subsys->firmware_rev)), 30082f0dad17SKeith Busch subsys->firmware_rev); 30092f0dad17SKeith Busch } 30102f0dad17SKeith Busch 30112f859441SLogan Gunthorpe static bool nvme_pci_supports_pci_p2pdma(struct nvme_ctrl *ctrl) 30122f859441SLogan Gunthorpe { 30132f859441SLogan Gunthorpe struct nvme_dev *dev = to_nvme_dev(ctrl); 30142f859441SLogan Gunthorpe 30152f859441SLogan Gunthorpe return dma_pci_p2pdma_supported(dev->dev); 30162f859441SLogan Gunthorpe } 30172f859441SLogan Gunthorpe 30181c63dc66SChristoph Hellwig static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { 30191a353d85SMing Lin .name = "pcie", 3020e439bb12SSagi Grimberg .module = THIS_MODULE, 30212f859441SLogan Gunthorpe .flags = NVME_F_METADATA_SUPPORTED, 30221c63dc66SChristoph Hellwig .reg_read32 = nvme_pci_reg_read32, 30235fd4ce1bSChristoph Hellwig .reg_write32 = nvme_pci_reg_write32, 30247fd8930fSChristoph Hellwig .reg_read64 = nvme_pci_reg_read64, 30251673f1f0SChristoph Hellwig .free_ctrl = nvme_pci_free_ctrl, 3026f866fc42SChristoph Hellwig .submit_async_event = nvme_pci_submit_async_event, 302797c12223SKeith Busch .get_address = nvme_pci_get_address, 30282f0dad17SKeith Busch .print_device_info = nvme_pci_print_device_info, 30292f859441SLogan Gunthorpe .supports_pci_p2pdma = nvme_pci_supports_pci_p2pdma, 30301c63dc66SChristoph Hellwig }; 303157dacad5SJay Sternberg 3032b00a726aSKeith Busch static int nvme_dev_map(struct nvme_dev *dev) 3033b00a726aSKeith Busch { 3034b00a726aSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 3035b00a726aSKeith Busch 3036a1f447b3SJohannes Thumshirn if (pci_request_mem_regions(pdev, "nvme")) 3037b00a726aSKeith Busch return -ENODEV; 3038b00a726aSKeith Busch 303997f6ef64SXu Yu if (nvme_remap_bar(dev, NVME_REG_DBS + 4096)) 3040b00a726aSKeith Busch goto release; 3041b00a726aSKeith Busch 3042b00a726aSKeith 
Busch 	return 0;
3043b00a726aSKeith Busch  release:
3044a1f447b3SJohannes Thumshirn 	pci_release_mem_regions(pdev);
3045b00a726aSKeith Busch 	return -ENODEV;
3046b00a726aSKeith Busch }
3047b00a726aSKeith Busch 
30488427bbc2SKai-Heng Feng static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
3049ff5350a8SAndy Lutomirski {
3050ff5350a8SAndy Lutomirski 	if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
3051ff5350a8SAndy Lutomirski 		/*
3052ff5350a8SAndy Lutomirski 		 * Several Samsung devices seem to drop off the PCIe bus
3053ff5350a8SAndy Lutomirski 		 * randomly when APST is on and the deepest sleep state is used.
3054ff5350a8SAndy Lutomirski 		 * This has been observed on a Samsung "SM951 NVMe SAMSUNG
3055ff5350a8SAndy Lutomirski 		 * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD
3056ff5350a8SAndy Lutomirski 		 * 950 PRO 256GB", but it seems to be restricted to two Dell
3057ff5350a8SAndy Lutomirski 		 * laptops.
3058ff5350a8SAndy Lutomirski 		 */
3059ff5350a8SAndy Lutomirski 		if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") &&
3060ff5350a8SAndy Lutomirski 		    (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
3061ff5350a8SAndy Lutomirski 		     dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
3062ff5350a8SAndy Lutomirski 			return NVME_QUIRK_NO_DEEPEST_PS;
30638427bbc2SKai-Heng Feng 	} else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
30648427bbc2SKai-Heng Feng 		/*
30658427bbc2SKai-Heng Feng 		 * Samsung SSD 960 EVO drops off the PCIe bus after system
3066467c77d4SJarosław Janik 		 * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as
3067467c77d4SJarosław Janik 		 * within a few minutes after bootup on a Coffee Lake board -
3068467c77d4SJarosław Janik 		 * ASUS PRIME Z370-A
30698427bbc2SKai-Heng Feng 		 */
30708427bbc2SKai-Heng Feng 		if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
3071467c77d4SJarosław Janik 		    (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
3072467c77d4SJarosław Janik 		     dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
30738427bbc2SKai-Heng Feng 			return NVME_QUIRK_NO_APST;
30741fae37acSShyjumon N 	} else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 ||
30751fae37acSShyjumon N 		    pdev->device == 0xa808 || pdev->device == 0xa809)) ||
30761fae37acSShyjumon N 		   (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) {
30771fae37acSShyjumon N 		/*
30781fae37acSShyjumon N 		 * Force host managed nvme power settings for lowest idle
30791fae37acSShyjumon N 		 * power with quick resume latency on Samsung and Toshiba
30801fae37acSShyjumon N 		 * SSDs, based on suspend behavior observed on a Coffee
30811fae37acSShyjumon N 		 * Lake board in the LENOVO C640.
30821fae37acSShyjumon N 		 */
30831fae37acSShyjumon N 		if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
30841fae37acSShyjumon N 		    dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
30851fae37acSShyjumon N 			return NVME_QUIRK_SIMPLE_SUSPEND;
3086ff5350a8SAndy Lutomirski 	}
3087ff5350a8SAndy Lutomirski 
3088ff5350a8SAndy Lutomirski 	return 0;
3089ff5350a8SAndy Lutomirski }
3090ff5350a8SAndy Lutomirski 
309118119775SKeith Busch static void nvme_async_probe(void *data, async_cookie_t cookie)
309218119775SKeith Busch {
309318119775SKeith Busch 	struct nvme_dev *dev = data;
309480f513b5SKeith Busch 
3095bd46a906SKeith Busch 	flush_work(&dev->ctrl.reset_work);
309618119775SKeith Busch 	flush_work(&dev->ctrl.scan_work);
309780f513b5SKeith Busch 	nvme_put_ctrl(&dev->ctrl);
309818119775SKeith Busch }
309918119775SKeith Busch 
310057dacad5SJay Sternberg static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
310157dacad5SJay Sternberg {
310257dacad5SJay Sternberg 	int node, result = -ENOMEM;
310357dacad5SJay Sternberg 	struct nvme_dev *dev;
3104ff5350a8SAndy Lutomirski 	unsigned long quirks = id->driver_data;
3105943e942eSJens Axboe 	size_t alloc_size;
310657dacad5SJay Sternberg 
310757dacad5SJay Sternberg 	node = dev_to_node(&pdev->dev);
310857dacad5SJay Sternberg 	if (node == NUMA_NO_NODE)
31092fa84351SMasayoshi Mizuma 		set_dev_node(&pdev->dev, first_memory_node);
311057dacad5SJay Sternberg 
311157dacad5SJay Sternberg 	dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node);
311257dacad5SJay Sternberg 	if (!dev)
311357dacad5SJay Sternberg 		return -ENOMEM;
3114147b27e4SSagi Grimberg 
31152a5bcfddSWeiping Zhang 	dev->nr_write_queues = write_queues;
31162a5bcfddSWeiping Zhang 	dev->nr_poll_queues = poll_queues;
31172a5bcfddSWeiping Zhang 	dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1;
31182a5bcfddSWeiping Zhang 	dev->queues = kcalloc_node(dev->nr_allocated_queues,
31192a5bcfddSWeiping Zhang 			sizeof(struct nvme_queue), GFP_KERNEL, node);
312057dacad5SJay Sternberg 	if (!dev->queues)
312157dacad5SJay Sternberg 		goto free;
312257dacad5SJay Sternberg 
312357dacad5SJay Sternberg 	dev->dev = get_device(&pdev->dev);
312457dacad5SJay Sternberg 	pci_set_drvdata(pdev, dev);
312557dacad5SJay Sternberg 
3126b00a726aSKeith Busch 	result = nvme_dev_map(dev);
3127b00a726aSKeith Busch 	if (result)
3128b00c9b7aSChristophe JAILLET 		goto put_pci;
3129b00a726aSKeith Busch 
3130d86c4d8eSChristoph Hellwig 	INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work);
31315c8809e6SChristoph Hellwig 	INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work);
313277bf25eaSKeith Busch 	mutex_init(&dev->shutdown_lock);
3133f3ca80fcSChristoph Hellwig 
3134f3ca80fcSChristoph Hellwig 	result = nvme_setup_prp_pools(dev);
3135f3ca80fcSChristoph Hellwig 	if (result)
3136b00c9b7aSChristophe JAILLET 		goto unmap;
3137f3ca80fcSChristoph Hellwig 
31388427bbc2SKai-Heng Feng 	quirks |= check_vendor_combination_bug(pdev);
3139ff5350a8SAndy Lutomirski 
31402744d7a0SMario Limonciello 	if (!noacpi && acpi_storage_d3(&pdev->dev)) {
3141df4f9bc4SDavid E. Box 		/*
3142df4f9bc4SDavid E. Box 		 * Some systems use a BIOS workaround to ask for D3 on
3143df4f9bc4SDavid E. Box 		 * platforms that support kernel managed suspend.
3144df4f9bc4SDavid E. Box 		 */
3145df4f9bc4SDavid E. Box 		dev_info(&pdev->dev,
3146df4f9bc4SDavid E. Box 			 "platform quirk: setting simple suspend\n");
3147df4f9bc4SDavid E. Box 		quirks |= NVME_QUIRK_SIMPLE_SUSPEND;
3148df4f9bc4SDavid E. Box 	}
3149df4f9bc4SDavid E. Box 
3150943e942eSJens Axboe 	/*
3151943e942eSJens Axboe 	 * Double check that our mempool alloc size will cover the biggest
3152943e942eSJens Axboe 	 * command we support.
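	 *
	 * (Annotation: the WARN_ON_ONCE below asserts that
	 * nvme_pci_iod_alloc_size() fits within PAGE_SIZE, matching the
	 * NVME_MAX_KB_SZ and NVME_MAX_SEGS limits applied at reset time.)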
3153943e942eSJens Axboe */ 3154b13c6393SChaitanya Kulkarni alloc_size = nvme_pci_iod_alloc_size(); 3155943e942eSJens Axboe WARN_ON_ONCE(alloc_size > PAGE_SIZE); 3156943e942eSJens Axboe 3157943e942eSJens Axboe dev->iod_mempool = mempool_create_node(1, mempool_kmalloc, 3158943e942eSJens Axboe mempool_kfree, 3159943e942eSJens Axboe (void *) alloc_size, 3160943e942eSJens Axboe GFP_KERNEL, node); 3161943e942eSJens Axboe if (!dev->iod_mempool) { 3162943e942eSJens Axboe result = -ENOMEM; 3163943e942eSJens Axboe goto release_pools; 3164943e942eSJens Axboe } 3165943e942eSJens Axboe 3166b6e44b4cSKeith Busch result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, 3167b6e44b4cSKeith Busch quirks); 3168b6e44b4cSKeith Busch if (result) 3169b6e44b4cSKeith Busch goto release_mempool; 3170b6e44b4cSKeith Busch 31711b3c47c1SSagi Grimberg dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 31721b3c47c1SSagi Grimberg 3173bd46a906SKeith Busch nvme_reset_ctrl(&dev->ctrl); 317418119775SKeith Busch async_schedule(nvme_async_probe, dev); 31754caff8fcSSagi Grimberg 317657dacad5SJay Sternberg return 0; 317757dacad5SJay Sternberg 3178b6e44b4cSKeith Busch release_mempool: 3179b6e44b4cSKeith Busch mempool_destroy(dev->iod_mempool); 318057dacad5SJay Sternberg release_pools: 318157dacad5SJay Sternberg nvme_release_prp_pools(dev); 3182b00c9b7aSChristophe JAILLET unmap: 3183b00c9b7aSChristophe JAILLET nvme_dev_unmap(dev); 318457dacad5SJay Sternberg put_pci: 318557dacad5SJay Sternberg put_device(dev->dev); 318657dacad5SJay Sternberg free: 318757dacad5SJay Sternberg kfree(dev->queues); 318857dacad5SJay Sternberg kfree(dev); 318957dacad5SJay Sternberg return result; 319057dacad5SJay Sternberg } 319157dacad5SJay Sternberg 3192775755edSChristoph Hellwig static void nvme_reset_prepare(struct pci_dev *pdev) 319357dacad5SJay Sternberg { 319457dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 3195c1ac9a4bSKeith Busch 3196c1ac9a4bSKeith Busch /* 3197c1ac9a4bSKeith Busch * We don't need to check the return value from waiting for the reset 3198c1ac9a4bSKeith Busch * state as pci_dev device lock is held, making it impossible to race 3199c1ac9a4bSKeith Busch * with ->remove(). 
3200c1ac9a4bSKeith Busch */ 3201c1ac9a4bSKeith Busch nvme_disable_prepare_reset(dev, false); 3202c1ac9a4bSKeith Busch nvme_sync_queues(&dev->ctrl); 3203775755edSChristoph Hellwig } 320457dacad5SJay Sternberg 3205775755edSChristoph Hellwig static void nvme_reset_done(struct pci_dev *pdev) 3206775755edSChristoph Hellwig { 3207f263fbb8SLinus Torvalds struct nvme_dev *dev = pci_get_drvdata(pdev); 3208c1ac9a4bSKeith Busch 3209c1ac9a4bSKeith Busch if (!nvme_try_sched_reset(&dev->ctrl)) 3210c1ac9a4bSKeith Busch flush_work(&dev->ctrl.reset_work); 321157dacad5SJay Sternberg } 321257dacad5SJay Sternberg 321357dacad5SJay Sternberg static void nvme_shutdown(struct pci_dev *pdev) 321457dacad5SJay Sternberg { 321557dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 32164e523547SBaolin Wang 3217c1ac9a4bSKeith Busch nvme_disable_prepare_reset(dev, true); 321857dacad5SJay Sternberg } 321957dacad5SJay Sternberg 32200521905eSKeith Busch static void nvme_remove_attrs(struct nvme_dev *dev) 32210521905eSKeith Busch { 32220521905eSKeith Busch if (dev->attrs_added) 32230521905eSKeith Busch sysfs_remove_group(&dev->ctrl.device->kobj, 32240521905eSKeith Busch &nvme_pci_attr_group); 32250521905eSKeith Busch } 32260521905eSKeith Busch 3227f58944e2SKeith Busch /* 3228f58944e2SKeith Busch * The driver's remove may be called on a device in a partially initialized 3229f58944e2SKeith Busch * state. This function must not have any dependencies on the device state in 3230f58944e2SKeith Busch * order to proceed. 3231f58944e2SKeith Busch */ 323257dacad5SJay Sternberg static void nvme_remove(struct pci_dev *pdev) 323357dacad5SJay Sternberg { 323457dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 323557dacad5SJay Sternberg 3236bb8d261eSChristoph Hellwig nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 323757dacad5SJay Sternberg pci_set_drvdata(pdev, NULL); 32380ff9d4e1SKeith Busch 32396db28edaSKeith Busch if (!pci_device_is_present(pdev)) { 32400ff9d4e1SKeith Busch nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); 32411d39e692SKeith Busch nvme_dev_disable(dev, true); 32426db28edaSKeith Busch } 32430ff9d4e1SKeith Busch 3244d86c4d8eSChristoph Hellwig flush_work(&dev->ctrl.reset_work); 3245d09f2b45SSagi Grimberg nvme_stop_ctrl(&dev->ctrl); 3246d09f2b45SSagi Grimberg nvme_remove_namespaces(&dev->ctrl); 3247a5cdb68cSKeith Busch nvme_dev_disable(dev, true); 32480521905eSKeith Busch nvme_remove_attrs(dev); 324987ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 325057dacad5SJay Sternberg nvme_dev_remove_admin(dev); 325157dacad5SJay Sternberg nvme_free_queues(dev, 0); 325257dacad5SJay Sternberg nvme_release_prp_pools(dev); 3253b00a726aSKeith Busch nvme_dev_unmap(dev); 3254726612b6SIsrael Rukshin nvme_uninit_ctrl(&dev->ctrl); 325557dacad5SJay Sternberg } 325657dacad5SJay Sternberg 325757dacad5SJay Sternberg #ifdef CONFIG_PM_SLEEP 3258d916b1beSKeith Busch static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps) 3259d916b1beSKeith Busch { 3260d916b1beSKeith Busch return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps); 3261d916b1beSKeith Busch } 3262d916b1beSKeith Busch 3263d916b1beSKeith Busch static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps) 3264d916b1beSKeith Busch { 3265d916b1beSKeith Busch return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL); 3266d916b1beSKeith Busch } 3267d916b1beSKeith Busch 3268d916b1beSKeith Busch static int nvme_resume(struct device *dev) 3269d916b1beSKeith Busch { 3270d916b1beSKeith Busch struct nvme_dev *ndev = 
pci_get_drvdata(to_pci_dev(dev)); 3271d916b1beSKeith Busch struct nvme_ctrl *ctrl = &ndev->ctrl; 3272d916b1beSKeith Busch 32734eaefe8cSRafael J. Wysocki if (ndev->last_ps == U32_MAX || 3274d916b1beSKeith Busch nvme_set_power_state(ctrl, ndev->last_ps) != 0) 3275e5ad96f3SKeith Busch goto reset; 3276e5ad96f3SKeith Busch if (ctrl->hmpre && nvme_setup_host_mem(ndev)) 3277e5ad96f3SKeith Busch goto reset; 3278e5ad96f3SKeith Busch 3279d916b1beSKeith Busch return 0; 3280e5ad96f3SKeith Busch reset: 3281e5ad96f3SKeith Busch return nvme_try_sched_reset(ctrl); 3282d916b1beSKeith Busch } 3283d916b1beSKeith Busch 328457dacad5SJay Sternberg static int nvme_suspend(struct device *dev) 328557dacad5SJay Sternberg { 328657dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev); 328757dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev); 3288d916b1beSKeith Busch struct nvme_ctrl *ctrl = &ndev->ctrl; 3289d916b1beSKeith Busch int ret = -EBUSY; 3290d916b1beSKeith Busch 32914eaefe8cSRafael J. Wysocki ndev->last_ps = U32_MAX; 32924eaefe8cSRafael J. Wysocki 3293d916b1beSKeith Busch /* 3294d916b1beSKeith Busch * The platform does not remove power for a kernel managed suspend so 3295d916b1beSKeith Busch * use host managed nvme power settings for lowest idle power if 3296d916b1beSKeith Busch * possible. This should have quicker resume latency than a full device 3297d916b1beSKeith Busch * shutdown. But if the firmware is involved after the suspend or the 3298d916b1beSKeith Busch * device does not support any non-default power states, shut down the 3299d916b1beSKeith Busch * device fully. 33004eaefe8cSRafael J. Wysocki * 33014eaefe8cSRafael J. Wysocki * If ASPM is not enabled for the device, shut down the device and allow 33024eaefe8cSRafael J. Wysocki * the PCI bus layer to put it into D3 in order to take the PCIe link 33034eaefe8cSRafael J. Wysocki * down, so as to allow the platform to achieve its minimum low-power 33044eaefe8cSRafael J. Wysocki * state (which may not be possible if the link is up). 3305d916b1beSKeith Busch */ 33064eaefe8cSRafael J. Wysocki if (pm_suspend_via_firmware() || !ctrl->npss || 3307cb32de1bSMario Limonciello !pcie_aspm_enabled(pdev) || 3308c1ac9a4bSKeith Busch (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) 3309c1ac9a4bSKeith Busch return nvme_disable_prepare_reset(ndev, true); 3310d916b1beSKeith Busch 3311d916b1beSKeith Busch nvme_start_freeze(ctrl); 3312d916b1beSKeith Busch nvme_wait_freeze(ctrl); 3313d916b1beSKeith Busch nvme_sync_queues(ctrl); 3314d916b1beSKeith Busch 33155d02a5c1SKeith Busch if (ctrl->state != NVME_CTRL_LIVE) 3316d916b1beSKeith Busch goto unfreeze; 3317d916b1beSKeith Busch 3318e5ad96f3SKeith Busch /* 3319e5ad96f3SKeith Busch * Host memory access may not be successful in a system suspend state, 3320e5ad96f3SKeith Busch * but the specification allows the controller to access memory in a 3321e5ad96f3SKeith Busch * non-operational power state. 3322e5ad96f3SKeith Busch */ 3323e5ad96f3SKeith Busch if (ndev->hmb) { 3324e5ad96f3SKeith Busch ret = nvme_set_host_mem(ndev, 0); 3325e5ad96f3SKeith Busch if (ret < 0) 3326e5ad96f3SKeith Busch goto unfreeze; 3327e5ad96f3SKeith Busch } 3328e5ad96f3SKeith Busch 3329d916b1beSKeith Busch ret = nvme_get_power_state(ctrl, &ndev->last_ps); 3330d916b1beSKeith Busch if (ret < 0) 3331d916b1beSKeith Busch goto unfreeze; 3332d916b1beSKeith Busch 33337cbb5c6fSMario Limonciello /* 33347cbb5c6fSMario Limonciello * A saved state prevents pci pm from generically controlling the 33357cbb5c6fSMario Limonciello * device's power. 
If we're using protocol specific settings, we don't 33367cbb5c6fSMario Limonciello * want pci interfering. 33377cbb5c6fSMario Limonciello */ 33387cbb5c6fSMario Limonciello pci_save_state(pdev); 33397cbb5c6fSMario Limonciello 3340d916b1beSKeith Busch ret = nvme_set_power_state(ctrl, ctrl->npss); 3341d916b1beSKeith Busch if (ret < 0) 3342d916b1beSKeith Busch goto unfreeze; 3343d916b1beSKeith Busch 3344d916b1beSKeith Busch if (ret) { 33457cbb5c6fSMario Limonciello /* discard the saved state */ 33467cbb5c6fSMario Limonciello pci_load_saved_state(pdev, NULL); 33477cbb5c6fSMario Limonciello 3348d916b1beSKeith Busch /* 3349d916b1beSKeith Busch * Clearing npss forces a controller reset on resume. The 335005d3046fSGeert Uytterhoeven * correct value will be rediscovered then. 3351d916b1beSKeith Busch */ 3352c1ac9a4bSKeith Busch ret = nvme_disable_prepare_reset(ndev, true); 3353d916b1beSKeith Busch ctrl->npss = 0; 3354d916b1beSKeith Busch } 3355d916b1beSKeith Busch unfreeze: 3356d916b1beSKeith Busch nvme_unfreeze(ctrl); 3357d916b1beSKeith Busch return ret; 3358d916b1beSKeith Busch } 3359d916b1beSKeith Busch 3360d916b1beSKeith Busch static int nvme_simple_suspend(struct device *dev) 3361d916b1beSKeith Busch { 3362d916b1beSKeith Busch struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 33634e523547SBaolin Wang 3364c1ac9a4bSKeith Busch return nvme_disable_prepare_reset(ndev, true); 336557dacad5SJay Sternberg } 336657dacad5SJay Sternberg 3367d916b1beSKeith Busch static int nvme_simple_resume(struct device *dev) 336857dacad5SJay Sternberg { 336957dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev); 337057dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev); 337157dacad5SJay Sternberg 3372c1ac9a4bSKeith Busch return nvme_try_sched_reset(&ndev->ctrl); 337357dacad5SJay Sternberg } 337457dacad5SJay Sternberg 337521774222SYueHaibing static const struct dev_pm_ops nvme_dev_pm_ops = { 3376d916b1beSKeith Busch .suspend = nvme_suspend, 3377d916b1beSKeith Busch .resume = nvme_resume, 3378d916b1beSKeith Busch .freeze = nvme_simple_suspend, 3379d916b1beSKeith Busch .thaw = nvme_simple_resume, 3380d916b1beSKeith Busch .poweroff = nvme_simple_suspend, 3381d916b1beSKeith Busch .restore = nvme_simple_resume, 3382d916b1beSKeith Busch }; 3383d916b1beSKeith Busch #endif /* CONFIG_PM_SLEEP */ 338457dacad5SJay Sternberg 3385a0a3408eSKeith Busch static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, 3386a0a3408eSKeith Busch pci_channel_state_t state) 3387a0a3408eSKeith Busch { 3388a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 3389a0a3408eSKeith Busch 3390a0a3408eSKeith Busch /* 3391a0a3408eSKeith Busch * A frozen channel requires a reset. When detected, this method will 3392a0a3408eSKeith Busch * shutdown the controller to quiesce. The controller will be restarted 3393a0a3408eSKeith Busch * after the slot reset through driver's slot_reset callback. 
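	 *
	 * Summary of the switch below (annotation): io_normal needs no
	 * reset (CAN_RECOVER); io_frozen disables the device and requests
	 * a slot reset (NEED_RESET); io_perm_failure asks for disconnect
	 * (DISCONNECT).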
3385a0a3408eSKeith Busch static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
3386a0a3408eSKeith Busch 						pci_channel_state_t state)
3387a0a3408eSKeith Busch {
3388a0a3408eSKeith Busch 	struct nvme_dev *dev = pci_get_drvdata(pdev);
3389a0a3408eSKeith Busch 
3390a0a3408eSKeith Busch 	/*
3391a0a3408eSKeith Busch 	 * A frozen channel requires a reset. When detected, this method will
3392a0a3408eSKeith Busch 	 * shut down the controller to quiesce. The controller will be restarted
3393a0a3408eSKeith Busch 	 * after the slot reset through the driver's slot_reset callback.
3394a0a3408eSKeith Busch 	 */
3395a0a3408eSKeith Busch 	switch (state) {
3396a0a3408eSKeith Busch 	case pci_channel_io_normal:
3397a0a3408eSKeith Busch 		return PCI_ERS_RESULT_CAN_RECOVER;
3398a0a3408eSKeith Busch 	case pci_channel_io_frozen:
3399d011fb31SKeith Busch 		dev_warn(dev->ctrl.device,
3400d011fb31SKeith Busch 			 "frozen state error detected, reset controller\n");
3401a5cdb68cSKeith Busch 		nvme_dev_disable(dev, false);
3402a0a3408eSKeith Busch 		return PCI_ERS_RESULT_NEED_RESET;
3403a0a3408eSKeith Busch 	case pci_channel_io_perm_failure:
3404d011fb31SKeith Busch 		dev_warn(dev->ctrl.device,
3405d011fb31SKeith Busch 			 "failure state error detected, request disconnect\n");
3406a0a3408eSKeith Busch 		return PCI_ERS_RESULT_DISCONNECT;
3407a0a3408eSKeith Busch 	}
3408a0a3408eSKeith Busch 	return PCI_ERS_RESULT_NEED_RESET;
3409a0a3408eSKeith Busch }
3410a0a3408eSKeith Busch 
3411a0a3408eSKeith Busch static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
3412a0a3408eSKeith Busch {
3413a0a3408eSKeith Busch 	struct nvme_dev *dev = pci_get_drvdata(pdev);
3414a0a3408eSKeith Busch 
34151b3c47c1SSagi Grimberg 	dev_info(dev->ctrl.device, "restart after slot reset\n");
3416a0a3408eSKeith Busch 	pci_restore_state(pdev);
3417d86c4d8eSChristoph Hellwig 	nvme_reset_ctrl(&dev->ctrl);
3418a0a3408eSKeith Busch 	return PCI_ERS_RESULT_RECOVERED;
3419a0a3408eSKeith Busch }
3420a0a3408eSKeith Busch 
3421a0a3408eSKeith Busch static void nvme_error_resume(struct pci_dev *pdev)
3422a0a3408eSKeith Busch {
342372cd4cc2SKeith Busch 	struct nvme_dev *dev = pci_get_drvdata(pdev);
342472cd4cc2SKeith Busch 
342572cd4cc2SKeith Busch 	flush_work(&dev->ctrl.reset_work);
3426a0a3408eSKeith Busch }
3427a0a3408eSKeith Busch 
342857dacad5SJay Sternberg static const struct pci_error_handlers nvme_err_handler = {
342957dacad5SJay Sternberg 	.error_detected	= nvme_error_detected,
343057dacad5SJay Sternberg 	.slot_reset	= nvme_slot_reset,
343157dacad5SJay Sternberg 	.resume		= nvme_error_resume,
3432775755edSChristoph Hellwig 	.reset_prepare	= nvme_reset_prepare,
3433775755edSChristoph Hellwig 	.reset_done	= nvme_reset_done,
343457dacad5SJay Sternberg };
343557dacad5SJay Sternberg 
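/*
 * Editor's note: the nvme_id_table below is matched in order by the PCI core,
 * and a matching entry's .driver_data reaches the driver as a bitmask of
 * NVME_QUIRK_* flags. A minimal, self-contained sketch of that first-match
 * lookup follows; the QUIRK_* values, struct model_id, model_table and
 * model_quirks() are hypothetical reductions, not the PCI core's API.
 */
#include <stdint.h>
#include <stdio.h>

#define QUIRK_NO_DEEPEST_PS	(1u << 0)
#define QUIRK_BOGUS_NID		(1u << 1)

struct model_id {
	uint16_t vendor;
	uint16_t device;
	uint32_t driver_data;
};

static const struct model_id model_table[] = {
	{ 0x2646, 0x2263, QUIRK_NO_DEEPEST_PS },	/* KINGSTON A2000 */
	{ 0x1987, 0x5012, QUIRK_BOGUS_NID },		/* Phison E12 */
	{ 0, 0, 0 },		/* zero terminator, like { 0, } below */
};

/* First match wins; unknown devices get an empty quirk mask. */
static uint32_t model_quirks(uint16_t vendor, uint16_t device)
{
	const struct model_id *id;

	for (id = model_table; id->vendor; id++)
		if (id->vendor == vendor && id->device == device)
			return id->driver_data;
	return 0;
}

int main(void)
{
	printf("quirks for 1987:5012 -> %#x\n", model_quirks(0x1987, 0x5012));
	return 0;
}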
343657dacad5SJay Sternberg static const struct pci_device_id nvme_id_table[] = {
3437972b13e2SDavid Fugate 	{ PCI_VDEVICE(INTEL, 0x0953),	/* Intel 750/P3500/P3600/P3700 */
343808095e70SKeith Busch 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
3439e850fd16SChristoph Hellwig 				NVME_QUIRK_DEALLOCATE_ZEROES, },
3440972b13e2SDavid Fugate 	{ PCI_VDEVICE(INTEL, 0x0a53),	/* Intel P3520 */
344199466e70SKeith Busch 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
3442e850fd16SChristoph Hellwig 				NVME_QUIRK_DEALLOCATE_ZEROES, },
3443972b13e2SDavid Fugate 	{ PCI_VDEVICE(INTEL, 0x0a54),	/* Intel P4500/P4600 */
344499466e70SKeith Busch 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
344525e58af4SWu Zheng 				NVME_QUIRK_DEALLOCATE_ZEROES |
344625e58af4SWu Zheng 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3447972b13e2SDavid Fugate 	{ PCI_VDEVICE(INTEL, 0x0a55),	/* Dell Express Flash P4600 */
3448f99cb7afSDavid Wayne Fugate 		.driver_data = NVME_QUIRK_STRIPE_SIZE |
3449f99cb7afSDavid Wayne Fugate 				NVME_QUIRK_DEALLOCATE_ZEROES, },
345050af47d0SAndy Lutomirski 	{ PCI_VDEVICE(INTEL, 0xf1a5),	/* Intel 600P/P3100 */
34519abd68efSJens Axboe 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
34526c6aa2f2SAkinobu Mita 				NVME_QUIRK_MEDIUM_PRIO_SQ |
3453ce4cc313SDavid Milburn 				NVME_QUIRK_NO_TEMP_THRESH_CHANGE |
3454ce4cc313SDavid Milburn 				NVME_QUIRK_DISABLE_WRITE_ZEROES, },
34556299358dSJames Dingwall 	{ PCI_VDEVICE(INTEL, 0xf1a6),	/* Intel 760p/Pro 7600p */
34566299358dSJames Dingwall 		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3457540c801cSKeith Busch 	{ PCI_VDEVICE(INTEL, 0x5845),	/* Qemu emulated controller */
34587b210e4eSChristoph Hellwig 		.driver_data = NVME_QUIRK_IDENTIFY_CNS |
345966dd346bSChristoph Hellwig 				NVME_QUIRK_DISABLE_WRITE_ZEROES |
346066dd346bSChristoph Hellwig 				NVME_QUIRK_BOGUS_NID, },
346166dd346bSChristoph Hellwig 	{ PCI_VDEVICE(REDHAT, 0x0010),	/* Qemu emulated controller */
346266dd346bSChristoph Hellwig 		.driver_data = NVME_QUIRK_BOGUS_NID, },
34635bedd3afSChristoph Hellwig 	{ PCI_DEVICE(0x126f, 0x2263),	/* Silicon Motion unidentified */
3464c98a8793SKeith Busch 		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
3465c98a8793SKeith Busch 				NVME_QUIRK_BOGUS_NID, },
34660302ae60SMicah Parrish 	{ PCI_DEVICE(0x1bb1, 0x0100),	/* Seagate Nytro Flash Storage */
34675e112d3fSJulian Einwag 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
34685e112d3fSJulian Einwag 				NVME_QUIRK_NO_NS_DESC_LIST, },
346954adc010SGuilherme G. Piccoli 	{ PCI_DEVICE(0x1c58, 0x0003),	/* HGST adapter */
347054adc010SGuilherme G. Piccoli 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
34718c97eeccSJeff Lien 	{ PCI_DEVICE(0x1c58, 0x0023),	/* WDC SN200 adapter */
34728c97eeccSJeff Lien 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3473015282c9SWenbo Wang 	{ PCI_DEVICE(0x1c5f, 0x0540),	/* Memblaze Pblaze4 adapter */
3474015282c9SWenbo Wang 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3475d554b5e1SMartin K. Petersen 	{ PCI_DEVICE(0x144d, 0xa821),	/* Samsung PM1725 */
3476d554b5e1SMartin K. Petersen 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
3477d554b5e1SMartin K. Petersen 	{ PCI_DEVICE(0x144d, 0xa822),	/* Samsung PM1725a */
34787ee5c78cSGopal Tiwari 		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
3479abbb5f59SDmitry Monakhov 				NVME_QUIRK_DISABLE_WRITE_ZEROES |
34807ee5c78cSGopal Tiwari 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
34812cf7a77eSKeith Busch 	{ PCI_DEVICE(0x1987, 0x5012),	/* Phison E12 */
34822cf7a77eSKeith Busch 		.driver_data = NVME_QUIRK_BOGUS_NID, },
3483c9e95c39SClaus Stovgaard 	{ PCI_DEVICE(0x1987, 0x5016),	/* Phison E16 */
348473029c9bSKeith Busch 		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
348573029c9bSKeith Busch 				NVME_QUIRK_BOGUS_NID, },
3486d14c2731STina Hsu 	{ PCI_DEVICE(0x1987, 0x5019),	/* Phison E19 */
3487d14c2731STina Hsu 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3488d14c2731STina Hsu 	{ PCI_DEVICE(0x1987, 0x5021),	/* Phison E21 */
3489d14c2731STina Hsu 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
34906e6a6828SPascal Terjan 	{ PCI_DEVICE(0x1b4b, 0x1092),	/* Lexar 256 GB SSD */
34916e6a6828SPascal Terjan 		.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
34926e6a6828SPascal Terjan 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
3493e1c70d79SLamarque Vieira Souza 	{ PCI_DEVICE(0x1cc1, 0x33f8),	/* ADATA IM2P33F8ABR1 1 TB */
3494e1c70d79SLamarque Vieira Souza 		.driver_data = NVME_QUIRK_BOGUS_NID, },
349508b903b5SMisha Nasledov 	{ PCI_DEVICE(0x10ec, 0x5762),	/* ADATA SX6000LNP */
34961629de0eSPablo Greco 		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN |
34971629de0eSPablo Greco 				NVME_QUIRK_BOGUS_NID, },
3498f03e42c6SGabriel Craciunescu 	{ PCI_DEVICE(0x1cc1, 0x8201),	/* ADATA SX8200PNP 512GB */
3499f03e42c6SGabriel Craciunescu 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
3500f03e42c6SGabriel Craciunescu 				NVME_QUIRK_IGNORE_DEV_SUBNQN, },
350141f38043SLeo Savernik 	{ PCI_DEVICE(0x1344, 0x5407),	/* Micron Technology Inc NVMe SSD */
350241f38043SLeo Savernik 		.driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN },
35035611ec2bSKai-Heng Feng 	{ PCI_DEVICE(0x1c5c, 0x1504),	/* SK Hynix PC400 */
35045611ec2bSKai-Heng Feng 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3505c4f01a77SKeith Busch 	{ PCI_DEVICE(0x1c5c, 0x174a),	/* SK Hynix P31 SSD */
3506c4f01a77SKeith Busch 		.driver_data = NVME_QUIRK_BOGUS_NID, },
350702ca079cSKai-Heng Feng 	{ PCI_DEVICE(0x15b7, 0x2001),	/* Sandisk Skyhawk */
350802ca079cSKai-Heng Feng 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
350989919929SChaitanya Kulkarni 	{ PCI_DEVICE(0x1d97, 0x2263),	/* SPCC */
351089919929SChaitanya Kulkarni 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
351143047e08Srasheed.hsueh 	{ PCI_DEVICE(0x144d, 0xa80b),	/* Samsung PM9B1 256G and 512G */
351243047e08Srasheed.hsueh 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
351343047e08Srasheed.hsueh 	{ PCI_DEVICE(0x144d, 0xa809),	/* Samsung MZALQ256HBJD 256G */
351443047e08Srasheed.hsueh 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
351543047e08Srasheed.hsueh 	{ PCI_DEVICE(0x1cc4, 0x6303),	/* UMIS RPJTJ512MGE1QDY 512G */
351643047e08Srasheed.hsueh 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
351743047e08Srasheed.hsueh 	{ PCI_DEVICE(0x1cc4, 0x6302),	/* UMIS RPJTJ256MGE1QDY 256G */
351843047e08Srasheed.hsueh 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3519dc22c1c0SZoltán Böszörményi 	{ PCI_DEVICE(0x2646, 0x2262),	/* KINGSTON SKC2000 NVMe SSD */
3520dc22c1c0SZoltán Böszörményi 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
3521538e4a8cSThorsten Leemhuis 	{ PCI_DEVICE(0x2646, 0x2263),	/* KINGSTON A2000 NVMe SSD */
3522538e4a8cSThorsten Leemhuis 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
3523ac9b57d4SXander Li 	{ PCI_DEVICE(0x2646, 0x5018),	/* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */
3524ac9b57d4SXander Li 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3525ac9b57d4SXander Li 	{ PCI_DEVICE(0x2646, 0x5016),	/* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */
3526ac9b57d4SXander Li 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3527ac9b57d4SXander Li 	{ PCI_DEVICE(0x2646, 0x501A),	/* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */
3528ac9b57d4SXander Li 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3529ac9b57d4SXander Li 	{ PCI_DEVICE(0x2646, 0x501B),	/* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */
3530ac9b57d4SXander Li 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
3531ac9b57d4SXander Li 	{ PCI_DEVICE(0x2646, 0x501E),	/* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */
3532ac9b57d4SXander Li 		.driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, },
353370ce3455SChristoph Hellwig 	{ PCI_DEVICE(0x1e4B, 0x1001),	/* MAXIO MAP1001 */
353370ce3455SChristoph Hellwig 		.driver_data = NVME_QUIRK_BOGUS_NID, },
3535a98a945bSChristoph Hellwig 	{ PCI_DEVICE(0x1e4B, 0x1002),	/* MAXIO MAP1002 */
3536a98a945bSChristoph Hellwig 		.driver_data = NVME_QUIRK_BOGUS_NID, },
3537a98a945bSChristoph Hellwig 	{ PCI_DEVICE(0x1e4B, 0x1202),	/* MAXIO MAP1202 */
3538a98a945bSChristoph Hellwig 		.driver_data = NVME_QUIRK_BOGUS_NID, },
35393765fad5SStefan Reiter 	{ PCI_DEVICE(0x1cc1, 0x5350),	/* ADATA XPG GAMMIX S50 */
35403765fad5SStefan Reiter 		.driver_data = NVME_QUIRK_BOGUS_NID, },
3541f37527a0SDennis P. Kliem 	{ PCI_DEVICE(0x1dbe, 0x5236),	/* ADATA XPG GAMMIX S70 */
3542f37527a0SDennis P. Kliem 		.driver_data = NVME_QUIRK_BOGUS_NID, },
3543d5d3c100SXi Ruoyao 	{ PCI_DEVICE(0x1e49, 0x0021),	/* ZHITAI TiPro5000 NVMe SSD */
3544d5d3c100SXi Ruoyao 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
35456b961bceSNing Wang 	{ PCI_DEVICE(0x1e49, 0x0041),	/* ZHITAI TiPro7000 NVMe SSD */
35466b961bceSNing Wang 		.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
3547d6c52fa3STobias Gruetzmacher 	{ PCI_DEVICE(0xc0a9, 0x540a),	/* Crucial P2 */
3548d6c52fa3STobias Gruetzmacher 		.driver_data = NVME_QUIRK_BOGUS_NID, },
3549200dccd0SShyamin Ayesh 	{ PCI_DEVICE(0x1d97, 0x2263),	/* Lexar NM610 */
3550200dccd0SShyamin Ayesh 		.driver_data = NVME_QUIRK_BOGUS_NID, },
355180b26240SAbhijit 	{ PCI_DEVICE(0x1d97, 0x2269),	/* Lexar NM760 */
355280b26240SAbhijit 		.driver_data = NVME_QUIRK_BOGUS_NID, },
35534bdf2603SFilippo Sironi 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
35544bdf2603SFilippo Sironi 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
35554bdf2603SFilippo Sironi 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
35564bdf2603SFilippo Sironi 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
35574bdf2603SFilippo Sironi 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061),
35584bdf2603SFilippo Sironi 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
35594bdf2603SFilippo Sironi 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00),
35604bdf2603SFilippo Sironi 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
35614bdf2603SFilippo Sironi 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01),
35624bdf2603SFilippo Sironi 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
35634bdf2603SFilippo Sironi 	{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02),
35644bdf2603SFilippo Sironi 		.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
356598f7b86aSAndy Shevchenko 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
356698f7b86aSAndy Shevchenko 		.driver_data = NVME_QUIRK_SINGLE_VECTOR },
3567124298bdSDaniel Roschka 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
356866341331SBenjamin Herrenschmidt 	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
356966341331SBenjamin Herrenschmidt 		.driver_data = NVME_QUIRK_SINGLE_VECTOR |
3570d38e9f04SBenjamin Herrenschmidt 				NVME_QUIRK_128_BYTES_SQES |
3571a2941f6aSKeith Busch 				NVME_QUIRK_SHARED_TAGS |
3572a2941f6aSKeith Busch 				NVME_QUIRK_SKIP_CID_GEN },
35730b85f59dSAndy Shevchenko 	{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
357457dacad5SJay Sternberg 	{ 0, }
357557dacad5SJay Sternberg };
357657dacad5SJay Sternberg MODULE_DEVICE_TABLE(pci, nvme_id_table);
357757dacad5SJay Sternberg 
357857dacad5SJay Sternberg static struct pci_driver nvme_driver = {
357957dacad5SJay Sternberg 	.name		= "nvme",
358057dacad5SJay Sternberg 	.id_table	= nvme_id_table,
358157dacad5SJay Sternberg 	.probe		= nvme_probe,
358257dacad5SJay Sternberg 	.remove		= nvme_remove,
358357dacad5SJay Sternberg 	.shutdown	= nvme_shutdown,
3584d916b1beSKeith Busch #ifdef CONFIG_PM_SLEEP
358557dacad5SJay Sternberg 	.driver		= {
358657dacad5SJay Sternberg 		.pm	= &nvme_dev_pm_ops,
358757dacad5SJay Sternberg 	},
3588d916b1beSKeith Busch #endif
358974d986abSAlexander Duyck 	.sriov_configure = pci_sriov_configure_simple,
359057dacad5SJay Sternberg 	.err_handler	= &nvme_err_handler,
359157dacad5SJay Sternberg };
359257dacad5SJay Sternberg 
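/*
 * Editor's note: nvme_init() below uses BUILD_BUG_ON() so that a build whose
 * command structures drift from the 64-byte NVMe submission queue entry
 * format fails at compile time rather than at run time. A minimal,
 * self-contained analogue using C11 _Static_assert follows; struct
 * model_create_cq is a hypothetical stand-in with the same field widths as
 * the real command, not the kernel's struct nvme_create_cq.
 */
#include <stdint.h>

struct model_create_cq {
	uint8_t  opcode;
	uint8_t  flags;
	uint16_t command_id;
	uint32_t rsvd1[5];
	uint64_t prp1;
	uint64_t rsvd8;
	uint16_t cqid;
	uint16_t qsize;
	uint16_t cq_flags;
	uint16_t irq_vector;
	uint32_t rsvd12[4];
};

/* The build breaks here, not in the field, if the layout is ever wrong. */
_Static_assert(sizeof(struct model_create_cq) == 64,
	       "NVMe submission queue entries must be 64 bytes");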
359357dacad5SJay Sternberg static int __init nvme_init(void)
359457dacad5SJay Sternberg {
359581101540SChristoph Hellwig 	BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
359681101540SChristoph Hellwig 	BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
359781101540SChristoph Hellwig 	BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
3598612b7286SMing Lei 	BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
3599c372cdd1SKeith Busch 	BUILD_BUG_ON(DIV_ROUND_UP(nvme_pci_npages_prp(), NVME_CTRL_PAGE_SIZE) >
3600c372cdd1SKeith Busch 		     S8_MAX);
360117c33167SKeith Busch 
36029a6327d2SSagi Grimberg 	return pci_register_driver(&nvme_driver);
360357dacad5SJay Sternberg }
360457dacad5SJay Sternberg 
360557dacad5SJay Sternberg static void __exit nvme_exit(void)
360657dacad5SJay Sternberg {
360757dacad5SJay Sternberg 	pci_unregister_driver(&nvme_driver);
360803e0f3a6SMing Lei 	flush_workqueue(nvme_wq);
360957dacad5SJay Sternberg }
361057dacad5SJay Sternberg 
361157dacad5SJay Sternberg MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
361257dacad5SJay Sternberg MODULE_LICENSE("GPL");
361357dacad5SJay Sternberg MODULE_VERSION("1.0");
361457dacad5SJay Sternberg module_init(nvme_init);
361557dacad5SJay Sternberg module_exit(nvme_exit);
3616