// SPDX-License-Identifier: GPL-2.0
/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 */

#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/once.h>
#include <linux/pci.h>
#include <linux/suspend.h>
#include <linux/t10-pi.h>
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/sed-opal.h>
#include <linux/pci-p2pdma.h>

#include "trace.h"
#include "nvme.h"

#define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)
#define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))

#define SGES_PER_PAGE	(PAGE_SIZE / sizeof(struct nvme_sgl_desc))

/*
 * These can be higher, but we need to ensure that any command doesn't
 * require an sg allocation that needs more than a page of data.
 */
#define NVME_MAX_KB_SZ	4096
#define NVME_MAX_SEGS	127

static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);

static bool use_cmb_sqes = true;
module_param(use_cmb_sqes, bool, 0444);
MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");

static unsigned int max_host_mem_size_mb = 128;
module_param(max_host_mem_size_mb, uint, 0444);
MODULE_PARM_DESC(max_host_mem_size_mb,
	"Maximum Host Memory Buffer (HMB) size per controller (in MiB)");

static unsigned int sgl_threshold = SZ_32K;
module_param(sgl_threshold, uint, 0644);
MODULE_PARM_DESC(sgl_threshold,
	"Use SGLs when average request segment size is larger or equal to "
	"this size. Use 0 to disable SGLs.");
static int io_queue_depth_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops io_queue_depth_ops = {
	.set = io_queue_depth_set,
	.get = param_get_uint,
};

static unsigned int io_queue_depth = 1024;
module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644);
MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should be >= 2");

static int io_queue_count_set(const char *val, const struct kernel_param *kp)
{
	unsigned int n;
	int ret;

	ret = kstrtouint(val, 10, &n);
	if (ret != 0 || n > num_possible_cpus())
		return -EINVAL;
	return param_set_uint(val, kp);
}

static const struct kernel_param_ops io_queue_count_ops = {
	.set = io_queue_count_set,
	.get = param_get_uint,
};

static unsigned int write_queues;
module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644);
MODULE_PARM_DESC(write_queues,
	"Number of queues to use for writes. If not set, reads and writes "
	"will share a queue set.");

static unsigned int poll_queues;
module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO.");

static bool noacpi;
module_param(noacpi, bool, 0444);
MODULE_PARM_DESC(noacpi, "disable acpi bios quirks");

struct nvme_dev;
struct nvme_queue;

static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode);

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct nvme_queue *queues;
	struct blk_mq_tag_set tagset;
	struct blk_mq_tag_set admin_tagset;
	u32 __iomem *dbs;
	struct device *dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	unsigned online_queues;
	unsigned max_qid;
	unsigned io_queues[HCTX_MAX_TYPES];
	unsigned int num_vecs;
	u32 q_depth;
	int io_sqes;
	u32 db_stride;
	void __iomem *bar;
	unsigned long bar_mapped_size;
	struct work_struct remove_work;
	struct mutex shutdown_lock;
	bool subsystem;
	u64 cmb_size;
	bool cmb_use_sqes;
	u32 cmbsz;
	u32 cmbloc;
	struct nvme_ctrl ctrl;
	u32 last_ps;

	mempool_t *iod_mempool;

	/* shadow doorbell buffer support: */
	u32 *dbbuf_dbs;
	dma_addr_t dbbuf_dbs_dma_addr;
	u32 *dbbuf_eis;
	dma_addr_t dbbuf_eis_dma_addr;

	/* host memory buffer support: */
	u64 host_mem_size;
	u32 nr_host_mem_descs;
	dma_addr_t host_mem_descs_dma;
	struct nvme_host_mem_buf_desc *host_mem_descs;
	void **host_mem_desc_bufs;
	unsigned int nr_allocated_queues;
	unsigned int nr_write_queues;
	unsigned int nr_poll_queues;
};

static int io_queue_depth_set(const char *val, const struct kernel_param *kp)
{
	int ret;
	u32 n;

	ret = kstrtou32(val, 10, &n);
	if (ret != 0 || n < 2)
		return -EINVAL;

	return param_set_uint(val, kp);
}

static inline unsigned int sq_idx(unsigned int qid, u32 stride)
{
	return qid * 2 * stride;
}

static inline unsigned int cq_idx(unsigned int qid, u32 stride)
{
	return (qid * 2 + 1) * stride;
}

static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_dev, ctrl);
}
/*
 * An NVM Express queue.  Each device has at least two (one for admin
 * commands and one for I/O commands).
 */
struct nvme_queue {
	struct nvme_dev *dev;
	spinlock_t sq_lock;
	void *sq_cmds;
	 /* only used for poll queues: */
	spinlock_t cq_poll_lock ____cacheline_aligned_in_smp;
	struct nvme_completion *cqes;
	dma_addr_t sq_dma_addr;
	dma_addr_t cq_dma_addr;
	u32 __iomem *q_db;
	u32 q_depth;
	u16 cq_vector;
	u16 sq_tail;
	u16 last_sq_tail;
	u16 cq_head;
	u16 qid;
	u8 cq_phase;
	u8 sqes;
	unsigned long flags;
#define NVMEQ_ENABLED		0
#define NVMEQ_SQ_CMB		1
#define NVMEQ_DELETE_ERROR	2
#define NVMEQ_POLLED		3
	u32 *dbbuf_sq_db;
	u32 *dbbuf_cq_db;
	u32 *dbbuf_sq_ei;
	u32 *dbbuf_cq_ei;
	struct completion delete_done;
};

/*
 * The nvme_iod describes the data in an I/O.
 *
 * The sg pointer contains the list of PRP/SGL chunk allocations in addition
 * to the actual struct scatterlist.
 */
struct nvme_iod {
	struct nvme_request req;
	struct nvme_command cmd;
	struct nvme_queue *nvmeq;
	bool use_sgl;
	int aborted;
	int npages;		/* In the PRP list. 0 means small pool in use */
	int nents;		/* Used in scatterlist */
	dma_addr_t first_dma;
	unsigned int dma_len;	/* length of single DMA segment mapping */
	dma_addr_t meta_dma;
	struct scatterlist *sg;
};
static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev)
{
	return dev->nr_allocated_queues * 8 * dev->db_stride;
}

static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev);

	if (dev->dbbuf_dbs)
		return 0;

	dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_dbs_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_dbs)
		return -ENOMEM;
	dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size,
					    &dev->dbbuf_eis_dma_addr,
					    GFP_KERNEL);
	if (!dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void nvme_dbbuf_dma_free(struct nvme_dev *dev)
{
	unsigned int mem_size = nvme_dbbuf_size(dev);

	if (dev->dbbuf_dbs) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr);
		dev->dbbuf_dbs = NULL;
	}
	if (dev->dbbuf_eis) {
		dma_free_coherent(dev->dev, mem_size,
				  dev->dbbuf_eis, dev->dbbuf_eis_dma_addr);
		dev->dbbuf_eis = NULL;
	}
}

static void nvme_dbbuf_init(struct nvme_dev *dev,
			    struct nvme_queue *nvmeq, int qid)
{
	if (!dev->dbbuf_dbs || !qid)
		return;

	nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)];
	nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)];
}
static void nvme_dbbuf_free(struct nvme_queue *nvmeq)
{
	if (!nvmeq->qid)
		return;

	nvmeq->dbbuf_sq_db = NULL;
	nvmeq->dbbuf_cq_db = NULL;
	nvmeq->dbbuf_sq_ei = NULL;
	nvmeq->dbbuf_cq_ei = NULL;
}

static void nvme_dbbuf_set(struct nvme_dev *dev)
{
	struct nvme_command c = { };
	unsigned int i;

	if (!dev->dbbuf_dbs)
		return;

	c.dbbuf.opcode = nvme_admin_dbbuf;
	c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr);
	c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr);

	if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) {
		dev_warn(dev->ctrl.device, "unable to set dbbuf\n");
		/* Free memory and continue on */
		nvme_dbbuf_dma_free(dev);

		for (i = 1; i <= dev->online_queues; i++)
			nvme_dbbuf_free(&dev->queues[i]);
	}
}

static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
{
	return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old);
}

/* Update dbbuf and return true if an MMIO is required */
static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
					      volatile u32 *dbbuf_ei)
{
	if (dbbuf_db) {
		u16 old_value;

		/*
		 * Ensure that the queue is written before updating
		 * the doorbell in memory
		 */
		wmb();

		old_value = *dbbuf_db;
		*dbbuf_db = value;

		/*
		 * Ensure that the doorbell is updated before reading the event
		 * index from memory.  The controller needs to provide similar
		 * ordering to ensure the event index is updated before reading
		 * the doorbell.
		 */
		mb();

		if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
			return false;
	}

	return true;
}
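/*
 * Worked example of the nvme_dbbuf_need_event() check above: it is true
 * iff event_idx lies in the window (old, new_idx] modulo 2^16, i.e. this
 * doorbell update stepped over the controller's event index.  With
 * old == 4 and new_idx == 7, event_idx == 5 gives (u16)(7 - 5 - 1) == 1,
 * which is < (u16)(7 - 4) == 3, so the MMIO doorbell write is required;
 * event_idx == 9 gives (u16)(7 - 9 - 1) == 65533, so it can be skipped.
 */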
/*
 * Will slightly overestimate the number of pages needed.  This is OK
 * as it only leads to a small amount of wasted memory for the lifetime of
 * the I/O.
 */
static int nvme_pci_npages_prp(void)
{
	unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE,
				      NVME_CTRL_PAGE_SIZE);
	return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
}

/*
 * Calculates the number of pages needed for the SGL segments. For example a 4k
 * page can accommodate 256 SGL descriptors.
 */
static int nvme_pci_npages_sgl(void)
{
	return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
			    PAGE_SIZE);
}

static size_t nvme_pci_iod_alloc_size(void)
{
	size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl());

	return sizeof(__le64 *) * npages +
		sizeof(struct scatterlist) * NVME_MAX_SEGS;
}
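/*
 * For example: struct nvme_sgl_desc is 16 bytes, so with NVME_MAX_SEGS == 127
 * nvme_pci_npages_sgl() is DIV_ROUND_UP(127 * 16, 4096) == 1 on 4K-page
 * systems.  Each element of the iod mempool sized above thus has room for
 * the per-page PRP/SGL chunk pointers plus the 127 scatterlist entries.
 */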
static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
				unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[0];

	WARN_ON(hctx_idx != 0);
	WARN_ON(dev->admin_tagset.tags[0] != hctx->tags);

	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			  unsigned int hctx_idx)
{
	struct nvme_dev *dev = data;
	struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1];

	WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags);
	hctx->driver_data = nvmeq;
	return 0;
}

static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req,
			     unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_dev *dev = set->driver_data;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0;
	struct nvme_queue *nvmeq = &dev->queues[queue_idx];

	BUG_ON(!nvmeq);
	iod->nvmeq = nvmeq;

	nvme_req(req)->ctrl = &dev->ctrl;
	nvme_req(req)->cmd = &iod->cmd;
	return 0;
}

static int queue_irq_offset(struct nvme_dev *dev)
{
	/* if we have more than 1 vec, admin queue offsets us by 1 */
	if (dev->num_vecs > 1)
		return 1;

	return 0;
}

static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct nvme_dev *dev = set->driver_data;
	int i, qoff, offset;

	offset = queue_irq_offset(dev);
	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = dev->io_queues[i];
		if (!map->nr_queues) {
			BUG_ON(i == HCTX_TYPE_DEFAULT);
			continue;
		}

		/*
		 * The poll queue(s) don't have an IRQ (and hence IRQ
		 * affinity), so use the regular blk-mq cpu mapping
		 */
		map->queue_offset = qoff;
		if (i != HCTX_TYPE_POLL && offset)
			blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset);
		else
			blk_mq_map_queues(map);
		qoff += map->nr_queues;
		offset += map->nr_queues;
	}

	return 0;
}
/*
 * Write sq tail if we are asked to, or if the next command would wrap.
 */
static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
{
	if (!write_sq) {
		u16 next_tail = nvmeq->sq_tail + 1;

		if (next_tail == nvmeq->q_depth)
			next_tail = 0;
		if (next_tail != nvmeq->last_sq_tail)
			return;
	}

	if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
			nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei))
		writel(nvmeq->sq_tail, nvmeq->q_db);
	nvmeq->last_sq_tail = nvmeq->sq_tail;
}

/**
 * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
 * @nvmeq: The queue to use
 * @cmd: The command to send
 * @write_sq: whether to write to the SQ doorbell
 */
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
			    bool write_sq)
{
	spin_lock(&nvmeq->sq_lock);
	memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
	       cmd, sizeof(*cmd));
	if (++nvmeq->sq_tail == nvmeq->q_depth)
		nvmeq->sq_tail = 0;
	nvme_write_sq_db(nvmeq, write_sq);
	spin_unlock(&nvmeq->sq_lock);
}

static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;

	spin_lock(&nvmeq->sq_lock);
	if (nvmeq->sq_tail != nvmeq->last_sq_tail)
		nvme_write_sq_db(nvmeq, true);
	spin_unlock(&nvmeq->sq_lock);
}

static void **nvme_pci_iod_list(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	return (void **)(iod->sg + blk_rq_nr_phys_segments(req));
}

static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	int nseg = blk_rq_nr_phys_segments(req);
	unsigned int avg_seg_size;

	avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg);

	if (!nvme_ctrl_sgl_supported(&dev->ctrl))
		return false;
	if (!iod->nvmeq->qid)
		return false;
	if (!sgl_threshold || avg_seg_size < sgl_threshold)
		return false;
	return true;
}
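/*
 * Example of the heuristic above: a 256 KiB request split into 64 segments
 * averages 4 KiB per segment, below the default sgl_threshold of SZ_32K,
 * so it is mapped with PRPs; the same 256 KiB in 4 segments averages
 * 64 KiB and is mapped with SGLs, provided the controller supports them
 * and the request is not on the admin queue (qid 0).
 */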
static void nvme_free_prps(struct nvme_dev *dev, struct request *req)
{
	const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	dma_addr_t dma_addr = iod->first_dma;
	int i;

	for (i = 0; i < iod->npages; i++) {
		__le64 *prp_list = nvme_pci_iod_list(req)[i];
		dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]);

		dma_pool_free(dev->prp_page_pool, prp_list, dma_addr);
		dma_addr = next_dma_addr;
	}
}

static void nvme_free_sgls(struct nvme_dev *dev, struct request *req)
{
	const int last_sg = SGES_PER_PAGE - 1;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	dma_addr_t dma_addr = iod->first_dma;
	int i;

	for (i = 0; i < iod->npages; i++) {
		struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i];
		dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr);

		dma_pool_free(dev->prp_page_pool, sg_list, dma_addr);
		dma_addr = next_dma_addr;
	}
}

static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	if (is_pci_p2pdma_page(sg_page(iod->sg)))
		pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents,
				    rq_dma_dir(req));
	else
		dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req));
}

static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	if (iod->dma_len) {
		dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len,
			       rq_dma_dir(req));
		return;
	}

	WARN_ON_ONCE(!iod->nents);

	nvme_unmap_sg(dev, req);
	if (iod->npages == 0)
		dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0],
			      iod->first_dma);
	else if (iod->use_sgl)
		nvme_free_sgls(dev, req);
	else
		nvme_free_prps(dev, req);
	mempool_free(iod->sg, dev->iod_mempool);
}
static void nvme_print_sgl(struct scatterlist *sgl, int nents)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		dma_addr_t phys = sg_phys(sg);
		pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
			"dma_address:%pad dma_length:%d\n",
			i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
			sg_dma_len(sg));
	}
}

static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	int length = blk_rq_payload_bytes(req);
	struct scatterlist *sg = iod->sg;
	int dma_len = sg_dma_len(sg);
	u64 dma_addr = sg_dma_address(sg);
	int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1);
	__le64 *prp_list;
	void **list = nvme_pci_iod_list(req);
	dma_addr_t prp_dma;
	int nprps, i;

	length -= (NVME_CTRL_PAGE_SIZE - offset);
	if (length <= 0) {
		iod->first_dma = 0;
		goto done;
	}

	dma_len -= (NVME_CTRL_PAGE_SIZE - offset);
	if (dma_len) {
		dma_addr += (NVME_CTRL_PAGE_SIZE - offset);
	} else {
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}

	if (length <= NVME_CTRL_PAGE_SIZE) {
		iod->first_dma = dma_addr;
		goto done;
	}

	nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE);
	if (nprps <= (256 / 8)) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
	if (!prp_list) {
		iod->first_dma = dma_addr;
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}
	list[0] = prp_list;
	iod->first_dma = prp_dma;
	i = 0;
	for (;;) {
		if (i == NVME_CTRL_PAGE_SIZE >> 3) {
			__le64 *old_prp_list = prp_list;
			prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
			if (!prp_list)
				goto free_prps;
			list[iod->npages++] = prp_list;
			prp_list[0] = old_prp_list[i - 1];
			old_prp_list[i - 1] = cpu_to_le64(prp_dma);
			i = 1;
		}
		prp_list[i++] = cpu_to_le64(dma_addr);
		dma_len -= NVME_CTRL_PAGE_SIZE;
		dma_addr += NVME_CTRL_PAGE_SIZE;
		length -= NVME_CTRL_PAGE_SIZE;
		if (length <= 0)
			break;
		if (dma_len > 0)
			continue;
		if (unlikely(dma_len < 0))
			goto bad_sgl;
		sg = sg_next(sg);
		dma_addr = sg_dma_address(sg);
		dma_len = sg_dma_len(sg);
	}
done:
	cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
	cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
	return BLK_STS_OK;
free_prps:
	nvme_free_prps(dev, req);
	return BLK_STS_RESOURCE;
bad_sgl:
	WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
	     "Invalid SGL for payload:%d nents:%d\n",
	     blk_rq_payload_bytes(req), iod->nents);
	return BLK_STS_IOERR;
}

static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge,
		struct scatterlist *sg)
{
	sge->addr = cpu_to_le64(sg_dma_address(sg));
	sge->length = cpu_to_le32(sg_dma_len(sg));
	sge->type = NVME_SGL_FMT_DATA_DESC << 4;
}

static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
		dma_addr_t dma_addr, int entries)
{
	sge->addr = cpu_to_le64(dma_addr);
	if (entries < SGES_PER_PAGE) {
		sge->length = cpu_to_le32(entries * sizeof(*sge));
		sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
	} else {
		sge->length = cpu_to_le32(PAGE_SIZE);
		sge->type = NVME_SGL_FMT_SEG_DESC << 4;
	}
}
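/*
 * For example: with 4 KiB pages a single SGL segment page holds
 * SGES_PER_PAGE == 256 descriptors.  When nvme_pci_setup_sgls() below
 * fills a page, the last slot is converted into a segment descriptor
 * pointing at the next page, so a 300-segment request is laid out as
 * 255 data descriptors plus one link in the first page and the
 * remaining 45 data descriptors in the second.
 */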
static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmd, int entries)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct dma_pool *pool;
	struct nvme_sgl_desc *sg_list;
	struct scatterlist *sg = iod->sg;
	dma_addr_t sgl_dma;
	int i = 0;

	/* setting the transfer type as SGL */
	cmd->flags = NVME_CMD_SGL_METABUF;

	if (entries == 1) {
		nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg);
		return BLK_STS_OK;
	}

	if (entries <= (256 / sizeof(struct nvme_sgl_desc))) {
		pool = dev->prp_small_pool;
		iod->npages = 0;
	} else {
		pool = dev->prp_page_pool;
		iod->npages = 1;
	}

	sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
	if (!sg_list) {
		iod->npages = -1;
		return BLK_STS_RESOURCE;
	}

	nvme_pci_iod_list(req)[0] = sg_list;
	iod->first_dma = sgl_dma;

	nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries);

	do {
		if (i == SGES_PER_PAGE) {
			struct nvme_sgl_desc *old_sg_desc = sg_list;
			struct nvme_sgl_desc *link = &old_sg_desc[i - 1];

			sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
			if (!sg_list)
				goto free_sgls;

			i = 0;
			nvme_pci_iod_list(req)[iod->npages++] = sg_list;
			sg_list[i++] = *link;
			nvme_pci_sgl_set_seg(link, sgl_dma, entries);
		}

		nvme_pci_sgl_set_data(&sg_list[i++], sg);
		sg = sg_next(sg);
	} while (--entries > 0);

	return BLK_STS_OK;
free_sgls:
	nvme_free_sgls(dev, req);
	return BLK_STS_RESOURCE;
}
static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1);
	unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset;

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma);
	if (bv->bv_len > first_prp_len)
		cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len);
	return BLK_STS_OK;
}

static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev,
		struct request *req, struct nvme_rw_command *cmnd,
		struct bio_vec *bv)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->first_dma))
		return BLK_STS_RESOURCE;
	iod->dma_len = bv->bv_len;

	cmnd->flags = NVME_CMD_SGL_METABUF;
	cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma);
	cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len);
	cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4;
	return BLK_STS_OK;
}
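/*
 * nvme_map_data() below picks the cheapest mapping first: a request with a
 * single bio_vec that fits within two controller pages avoids the
 * scatterlist machinery entirely via nvme_setup_prp_simple(); a larger
 * single bio_vec goes through nvme_setup_sgl_simple() when SGLs are
 * available; everything else falls back to a full scatterlist mapping
 * with PRP or SGL lists.
 */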
static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	blk_status_t ret = BLK_STS_RESOURCE;
	int nr_mapped;

	if (blk_rq_nr_phys_segments(req) == 1) {
		struct bio_vec bv = req_bvec(req);

		if (!is_pci_p2pdma_page(bv.bv_page)) {
			if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
				return nvme_setup_prp_simple(dev, req,
							     &cmnd->rw, &bv);

			if (iod->nvmeq->qid && sgl_threshold &&
			    nvme_ctrl_sgl_supported(&dev->ctrl))
				return nvme_setup_sgl_simple(dev, req,
							     &cmnd->rw, &bv);
		}
	}

	iod->dma_len = 0;
	iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
	if (!iod->sg)
		return BLK_STS_RESOURCE;
	sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
	iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
	if (!iod->nents)
		goto out_free_sg;

	if (is_pci_p2pdma_page(sg_page(iod->sg)))
		nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
				iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN);
	else
		nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
					     rq_dma_dir(req), DMA_ATTR_NO_WARN);
	if (!nr_mapped)
		goto out_free_sg;

	iod->use_sgl = nvme_pci_use_sgls(dev, req);
	if (iod->use_sgl)
		ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
	else
		ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
	if (ret != BLK_STS_OK)
		goto out_unmap_sg;
	return BLK_STS_OK;

out_unmap_sg:
	nvme_unmap_sg(dev, req);
out_free_sg:
	mempool_free(iod->sg, dev->iod_mempool);
	return ret;
}

static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
		struct nvme_command *cmnd)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);

	iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req),
			rq_dma_dir(req), 0);
	if (dma_mapping_error(dev->dev, iod->meta_dma))
		return BLK_STS_IOERR;
	cmnd->rw.metadata = cpu_to_le64(iod->meta_dma);
	return BLK_STS_OK;
}
/*
 * NOTE: ns is NULL when called on the admin queue.
 */
static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_queue *nvmeq = hctx->driver_data;
	struct nvme_dev *dev = nvmeq->dev;
	struct request *req = bd->rq;
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_command *cmnd = &iod->cmd;
	blk_status_t ret;

	iod->aborted = 0;
	iod->npages = -1;
	iod->nents = 0;

	/*
	 * We should not need to do this, but we're still using this to
	 * ensure we can drain requests on a dying queue.
	 */
	if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
		return BLK_STS_IOERR;

	if (!nvme_check_ready(&dev->ctrl, req, true))
		return nvme_fail_nonready_command(&dev->ctrl, req);

	ret = nvme_setup_cmd(ns, req);
	if (ret)
		return ret;

	if (blk_rq_nr_phys_segments(req)) {
		ret = nvme_map_data(dev, req, cmnd);
		if (ret)
			goto out_free_cmd;
	}

	if (blk_integrity_rq(req)) {
		ret = nvme_map_metadata(dev, req, cmnd);
		if (ret)
			goto out_unmap_data;
	}

	blk_mq_start_request(req);
	nvme_submit_cmd(nvmeq, cmnd, bd->last);
	return BLK_STS_OK;
out_unmap_data:
	nvme_unmap_data(dev, req);
out_free_cmd:
	nvme_cleanup_cmd(req);
	return ret;
}

static void nvme_pci_complete_rq(struct request *req)
{
	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
	struct nvme_dev *dev = iod->nvmeq->dev;

	if (blk_integrity_rq(req))
		dma_unmap_page(dev->dev, iod->meta_dma,
			       rq_integrity_vec(req)->bv_len, rq_data_dir(req));
	if (blk_rq_nr_phys_segments(req))
		nvme_unmap_data(dev, req);
	nvme_complete_rq(req);
}

/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
	struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head];

	return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase;
}

static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq)
{
	u16 head = nvmeq->cq_head;

	if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db,
					      nvmeq->dbbuf_cq_ei))
		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
}

static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
{
	if (!nvmeq->qid)
		return nvmeq->dev->admin_tagset.tags[0];
	return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
}
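/*
 * Completion handling below relies on the CQ phase tag: the controller
 * inverts the phase bit of the entries it writes on every pass through the
 * ring.  A freshly created queue starts with cq_phase == 1, so new entries
 * match the phase until cq_head wraps in nvme_update_cq_head(), which flips
 * cq_phase and makes the next pass look for entries with phase 0.
 */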
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
{
	struct nvme_completion *cqe = &nvmeq->cqes[idx];
	__u16 command_id = READ_ONCE(cqe->command_id);
	struct request *req;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
		nvme_complete_async_event(&nvmeq->dev->ctrl,
				cqe->status, &cqe->result);
		return;
	}

	req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
	if (unlikely(!req)) {
		dev_warn(nvmeq->dev->ctrl.device,
			"invalid id %d completed on queue %d\n",
			command_id, le16_to_cpu(cqe->sq_id));
		return;
	}

	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
	if (!nvme_try_complete_req(req, cqe->status, cqe->result))
		nvme_pci_complete_rq(req);
}

static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
	u32 tmp = nvmeq->cq_head + 1;

	if (tmp == nvmeq->q_depth) {
		nvmeq->cq_head = 0;
		nvmeq->cq_phase ^= 1;
	} else {
		nvmeq->cq_head = tmp;
	}
}

static inline int nvme_process_cq(struct nvme_queue *nvmeq)
{
	int found = 0;

	while (nvme_cqe_pending(nvmeq)) {
		found++;
		/*
		 * load-load control dependency between phase and the rest of
		 * the cqe requires a full read memory barrier
		 */
		dma_rmb();
		nvme_handle_cqe(nvmeq, nvmeq->cq_head);
		nvme_update_cq_head(nvmeq);
	}

	if (found)
		nvme_ring_cq_doorbell(nvmeq);
	return found;
}
static irqreturn_t nvme_irq(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;

	if (nvme_process_cq(nvmeq))
		return IRQ_HANDLED;
	return IRQ_NONE;
}

static irqreturn_t nvme_irq_check(int irq, void *data)
{
	struct nvme_queue *nvmeq = data;

	if (nvme_cqe_pending(nvmeq))
		return IRQ_WAKE_THREAD;
	return IRQ_NONE;
}

/*
 * Poll for completions for any interrupt driven queue
 * Can be called from any context.
 */
static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
{
	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);

	WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));

	disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
	nvme_process_cq(nvmeq);
	enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
}

static int nvme_poll(struct blk_mq_hw_ctx *hctx)
{
	struct nvme_queue *nvmeq = hctx->driver_data;
	bool found;

	if (!nvme_cqe_pending(nvmeq))
		return 0;

	spin_lock(&nvmeq->cq_poll_lock);
	found = nvme_process_cq(nvmeq);
	spin_unlock(&nvmeq->cq_poll_lock);

	return found;
}

static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
{
	struct nvme_dev *dev = to_nvme_dev(ctrl);
	struct nvme_queue *nvmeq = &dev->queues[0];
	struct nvme_command c = { };

	c.common.opcode = nvme_admin_async_event;
	c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	nvme_submit_cmd(nvmeq, &c, true);
}

static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
	struct nvme_command c = { };

	c.delete_queue.opcode = opcode;
	c.delete_queue.qid = cpu_to_le16(id);

	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
}
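/*
 * Queue pairs are created CQ-first: per the NVMe specification a
 * completion queue must exist before a submission queue can reference it,
 * so callers issue adapter_alloc_cq() before adapter_alloc_sq() for the
 * same qid, and delete the SQ before its CQ on teardown.
 */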
113957dacad5SJay Sternberg 
114057dacad5SJay Sternberg 	/*
114116772ae6SMinwoo Im 	 * Note: we (ab)use the fact that the prp fields survive if no data
114257dacad5SJay Sternberg 	 * is attached to the request.
114357dacad5SJay Sternberg 	 */
114457dacad5SJay Sternberg 	c.create_cq.opcode = nvme_admin_create_cq;
114557dacad5SJay Sternberg 	c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
114657dacad5SJay Sternberg 	c.create_cq.cqid = cpu_to_le16(qid);
114757dacad5SJay Sternberg 	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
114857dacad5SJay Sternberg 	c.create_cq.cq_flags = cpu_to_le16(flags);
1149a8e3e0bbSJianchao Wang 	c.create_cq.irq_vector = cpu_to_le16(vector);
115057dacad5SJay Sternberg 
11511c63dc66SChristoph Hellwig 	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
115257dacad5SJay Sternberg }
115357dacad5SJay Sternberg 
115457dacad5SJay Sternberg static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
115557dacad5SJay Sternberg 		struct nvme_queue *nvmeq)
115657dacad5SJay Sternberg {
11579abd68efSJens Axboe 	struct nvme_ctrl *ctrl = &dev->ctrl;
1158f66e2804SChaitanya Kulkarni 	struct nvme_command c = { };
115981c1cd98SKeith Busch 	int flags = NVME_QUEUE_PHYS_CONTIG;
116057dacad5SJay Sternberg 
116157dacad5SJay Sternberg 	/*
11629abd68efSJens Axboe 	 * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
11639abd68efSJens Axboe 	 * set. Since the URGENT priority is zero, it makes all queues
11649abd68efSJens Axboe 	 * URGENT.
11659abd68efSJens Axboe 	 */
11669abd68efSJens Axboe 	if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
11679abd68efSJens Axboe 		flags |= NVME_SQ_PRIO_MEDIUM;
11689abd68efSJens Axboe 
11699abd68efSJens Axboe 	/*
117016772ae6SMinwoo Im 	 * Note: we (ab)use the fact that the prp fields survive if no data
117157dacad5SJay Sternberg 	 * is attached to the request.
117257dacad5SJay Sternberg 	 */
117357dacad5SJay Sternberg 	c.create_sq.opcode = nvme_admin_create_sq;
117457dacad5SJay Sternberg 	c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
117557dacad5SJay Sternberg 	c.create_sq.sqid = cpu_to_le16(qid);
117657dacad5SJay Sternberg 	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
117757dacad5SJay Sternberg 	c.create_sq.sq_flags = cpu_to_le16(flags);
117857dacad5SJay Sternberg 	c.create_sq.cqid = cpu_to_le16(qid);
117957dacad5SJay Sternberg 
11801c63dc66SChristoph Hellwig 	return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0);
118157dacad5SJay Sternberg }
118257dacad5SJay Sternberg 
118357dacad5SJay Sternberg static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
118457dacad5SJay Sternberg {
118557dacad5SJay Sternberg 	return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
118657dacad5SJay Sternberg }
118757dacad5SJay Sternberg 
118857dacad5SJay Sternberg static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
118957dacad5SJay Sternberg {
119057dacad5SJay Sternberg 	return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
119157dacad5SJay Sternberg }
119257dacad5SJay Sternberg 
11932a842acaSChristoph Hellwig static void abort_endio(struct request *req, blk_status_t error)
119457dacad5SJay Sternberg {
1195f4800d6dSChristoph Hellwig 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1196f4800d6dSChristoph Hellwig 	struct nvme_queue *nvmeq = iod->nvmeq;
119757dacad5SJay Sternberg 
119827fa9bc5SChristoph Hellwig 	dev_warn(nvmeq->dev->ctrl.device,
119927fa9bc5SChristoph Hellwig 		 "Abort status: 0x%x", nvme_req(req)->status);
1200e7a2a87dSChristoph Hellwig 	atomic_inc(&nvmeq->dev->ctrl.abort_limit);
1201e7a2a87dSChristoph Hellwig 	blk_mq_free_request(req);
120257dacad5SJay Sternberg }
120357dacad5SJay Sternberg 
1204b2a0eb1aSKeith Busch static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
1205b2a0eb1aSKeith Busch {
1206b2a0eb1aSKeith Busch 	/* If true, indicates loss of adapter communication, possibly by an
1207b2a0eb1aSKeith Busch 	 * NVMe Subsystem reset.
1208b2a0eb1aSKeith Busch 	 */
1209b2a0eb1aSKeith Busch 	bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO);
1210b2a0eb1aSKeith Busch 
1211ad70062cSJianchao Wang 	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
1212ad70062cSJianchao Wang 	switch (dev->ctrl.state) {
1213ad70062cSJianchao Wang 	case NVME_CTRL_RESETTING:
1214ad6a0a52SMax Gurtovoy 	case NVME_CTRL_CONNECTING:
1215b2a0eb1aSKeith Busch 		return false;
1216ad70062cSJianchao Wang 	default:
1217ad70062cSJianchao Wang 		break;
1218ad70062cSJianchao Wang 	}
1219b2a0eb1aSKeith Busch 
1220b2a0eb1aSKeith Busch 	/* We shouldn't reset unless the controller is in a fatal error state
1221b2a0eb1aSKeith Busch 	 * _or_ if we lost communication with it.
1222b2a0eb1aSKeith Busch 	 */
1223b2a0eb1aSKeith Busch 	if (!(csts & NVME_CSTS_CFS) && !nssro)
1224b2a0eb1aSKeith Busch 		return false;
1225b2a0eb1aSKeith Busch 
1226b2a0eb1aSKeith Busch 	return true;
1227b2a0eb1aSKeith Busch }
1228b2a0eb1aSKeith Busch 
1229b2a0eb1aSKeith Busch static void nvme_warn_reset(struct nvme_dev *dev, u32 csts)
1230b2a0eb1aSKeith Busch {
1231b2a0eb1aSKeith Busch 	/* Read a config register to help see what died.
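	 * An all-ones PCI_STATUS (0xffff) here usually means the device no
	 * longer responds to config reads at all, e.g. after surprise
	 * removal or a link failure.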
	 */
1232b2a0eb1aSKeith Busch 	u16 pci_status;
1233b2a0eb1aSKeith Busch 	int result;
1234b2a0eb1aSKeith Busch 
1235b2a0eb1aSKeith Busch 	result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS,
1236b2a0eb1aSKeith Busch 				      &pci_status);
1237b2a0eb1aSKeith Busch 	if (result == PCIBIOS_SUCCESSFUL)
1238b2a0eb1aSKeith Busch 		dev_warn(dev->ctrl.device,
1239b2a0eb1aSKeith Busch 			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n",
1240b2a0eb1aSKeith Busch 			 csts, pci_status);
1241b2a0eb1aSKeith Busch 	else
1242b2a0eb1aSKeith Busch 		dev_warn(dev->ctrl.device,
1243b2a0eb1aSKeith Busch 			 "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n",
1244b2a0eb1aSKeith Busch 			 csts, result);
1245b2a0eb1aSKeith Busch }
1246b2a0eb1aSKeith Busch 
124731c7c7d2SChristoph Hellwig static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
124857dacad5SJay Sternberg {
1249f4800d6dSChristoph Hellwig 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
1250f4800d6dSChristoph Hellwig 	struct nvme_queue *nvmeq = iod->nvmeq;
125157dacad5SJay Sternberg 	struct nvme_dev *dev = nvmeq->dev;
125257dacad5SJay Sternberg 	struct request *abort_req;
1253f66e2804SChaitanya Kulkarni 	struct nvme_command cmd = { };
1254b2a0eb1aSKeith Busch 	u32 csts = readl(dev->bar + NVME_REG_CSTS);
1255b2a0eb1aSKeith Busch 
1256651438bbSWen Xiong 	/* If the PCI error recovery process is happening, we cannot reset or
1257651438bbSWen Xiong 	 * the recovery mechanism will surely fail.
1258651438bbSWen Xiong 	 */
1259651438bbSWen Xiong 	mb();
1260651438bbSWen Xiong 	if (pci_channel_offline(to_pci_dev(dev->dev)))
1261651438bbSWen Xiong 		return BLK_EH_RESET_TIMER;
1262651438bbSWen Xiong 
1263b2a0eb1aSKeith Busch 	/*
1264b2a0eb1aSKeith Busch 	 * Reset immediately if the controller has failed.
1265b2a0eb1aSKeith Busch 	 */
1266b2a0eb1aSKeith Busch 	if (nvme_should_reset(dev, csts)) {
1267b2a0eb1aSKeith Busch 		nvme_warn_reset(dev, csts);
1268b2a0eb1aSKeith Busch 		nvme_dev_disable(dev, false);
1269d86c4d8eSChristoph Hellwig 		nvme_reset_ctrl(&dev->ctrl);
1270db8c48e4SChristoph Hellwig 		return BLK_EH_DONE;
1271b2a0eb1aSKeith Busch 	}
127257dacad5SJay Sternberg 
127331c7c7d2SChristoph Hellwig 	/*
12747776db1cSKeith Busch 	 * Did we miss an interrupt?
12757776db1cSKeith Busch 	 */
1276fa059b85SKeith Busch 	if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
1277fa059b85SKeith Busch 		nvme_poll(req->mq_hctx);
1278fa059b85SKeith Busch 	else
1279bf392a5dSKeith Busch 		nvme_poll_irqdisable(nvmeq);
1280fa059b85SKeith Busch 
1281bf392a5dSKeith Busch 	if (blk_mq_request_completed(req)) {
12827776db1cSKeith Busch 		dev_warn(dev->ctrl.device,
12837776db1cSKeith Busch 			 "I/O %d QID %d timeout, completion polled\n",
12847776db1cSKeith Busch 			 req->tag, nvmeq->qid);
1285db8c48e4SChristoph Hellwig 		return BLK_EH_DONE;
12867776db1cSKeith Busch 	}
12877776db1cSKeith Busch 
12887776db1cSKeith Busch 	/*
1289fd634f41SChristoph Hellwig 	 * Shutdown immediately if controller times out while starting. The
1290fd634f41SChristoph Hellwig 	 * reset work will see the pci device disabled when it gets the forced
1291fd634f41SChristoph Hellwig 	 * cancellation error. All outstanding requests are completed on
1292db8c48e4SChristoph Hellwig 	 * shutdown, so we return BLK_EH_DONE.
1293fd634f41SChristoph Hellwig 	 */
12944244140dSKeith Busch 	switch (dev->ctrl.state) {
12954244140dSKeith Busch 	case NVME_CTRL_CONNECTING:
12962036f726SKeith Busch 		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
1297df561f66SGustavo A. R.
Silva fallthrough; 12982036f726SKeith Busch case NVME_CTRL_DELETING: 1299b9cac43cSKeith Busch dev_warn_ratelimited(dev->ctrl.device, 1300fd634f41SChristoph Hellwig "I/O %d QID %d timeout, disable controller\n", 1301fd634f41SChristoph Hellwig req->tag, nvmeq->qid); 130227fa9bc5SChristoph Hellwig nvme_req(req)->flags |= NVME_REQ_CANCELLED; 13037ad92f65STong Zhang nvme_dev_disable(dev, true); 1304db8c48e4SChristoph Hellwig return BLK_EH_DONE; 130539a9dd81SKeith Busch case NVME_CTRL_RESETTING: 130639a9dd81SKeith Busch return BLK_EH_RESET_TIMER; 13074244140dSKeith Busch default: 13084244140dSKeith Busch break; 1309fd634f41SChristoph Hellwig } 1310fd634f41SChristoph Hellwig 1311fd634f41SChristoph Hellwig /* 1312e1569a16SKeith Busch * Shutdown the controller immediately and schedule a reset if the 1313e1569a16SKeith Busch * command was already aborted once before and still hasn't been 1314e1569a16SKeith Busch * returned to the driver, or if this is the admin queue. 131531c7c7d2SChristoph Hellwig */ 1316f4800d6dSChristoph Hellwig if (!nvmeq->qid || iod->aborted) { 13171b3c47c1SSagi Grimberg dev_warn(dev->ctrl.device, 131857dacad5SJay Sternberg "I/O %d QID %d timeout, reset controller\n", 131957dacad5SJay Sternberg req->tag, nvmeq->qid); 13207ad92f65STong Zhang nvme_req(req)->flags |= NVME_REQ_CANCELLED; 1321a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 1322d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 1323e1569a16SKeith Busch 1324db8c48e4SChristoph Hellwig return BLK_EH_DONE; 132557dacad5SJay Sternberg } 132657dacad5SJay Sternberg 1327e7a2a87dSChristoph Hellwig if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { 1328e7a2a87dSChristoph Hellwig atomic_inc(&dev->ctrl.abort_limit); 1329e7a2a87dSChristoph Hellwig return BLK_EH_RESET_TIMER; 1330e7a2a87dSChristoph Hellwig } 13317bf7d778SKeith Busch iod->aborted = 1; 133257dacad5SJay Sternberg 133357dacad5SJay Sternberg cmd.abort.opcode = nvme_admin_abort_cmd; 133457dacad5SJay Sternberg cmd.abort.cid = req->tag; 133557dacad5SJay Sternberg cmd.abort.sqid = cpu_to_le16(nvmeq->qid); 133657dacad5SJay Sternberg 13371b3c47c1SSagi Grimberg dev_warn(nvmeq->dev->ctrl.device, 13381b3c47c1SSagi Grimberg "I/O %d QID %d timeout, aborting\n", 133957dacad5SJay Sternberg req->tag, nvmeq->qid); 1340e7a2a87dSChristoph Hellwig 1341e7a2a87dSChristoph Hellwig abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd, 134239dfe844SChaitanya Kulkarni BLK_MQ_REQ_NOWAIT); 13436bf25d16SChristoph Hellwig if (IS_ERR(abort_req)) { 13446bf25d16SChristoph Hellwig atomic_inc(&dev->ctrl.abort_limit); 134531c7c7d2SChristoph Hellwig return BLK_EH_RESET_TIMER; 134657dacad5SJay Sternberg } 134757dacad5SJay Sternberg 1348e7a2a87dSChristoph Hellwig abort_req->end_io_data = NULL; 13498eeed0b5SGuoqing Jiang blk_execute_rq_nowait(NULL, abort_req, 0, abort_endio); 135057dacad5SJay Sternberg 135157dacad5SJay Sternberg /* 135257dacad5SJay Sternberg * The aborted req will be completed on receiving the abort req. 135357dacad5SJay Sternberg * We enable the timer again. If hit twice, it'll cause a device reset, 135457dacad5SJay Sternberg * as the device then is in a faulty state. 
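	 * (iod->aborted was set above, so a second timeout of this request
	 * takes the controller-reset path instead of sending another abort.)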
135557dacad5SJay Sternberg */ 135657dacad5SJay Sternberg return BLK_EH_RESET_TIMER; 135757dacad5SJay Sternberg } 135857dacad5SJay Sternberg 135957dacad5SJay Sternberg static void nvme_free_queue(struct nvme_queue *nvmeq) 136057dacad5SJay Sternberg { 13618a1d09a6SBenjamin Herrenschmidt dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq), 136257dacad5SJay Sternberg (void *)nvmeq->cqes, nvmeq->cq_dma_addr); 136363223078SChristoph Hellwig if (!nvmeq->sq_cmds) 136463223078SChristoph Hellwig return; 13650f238ff5SLogan Gunthorpe 136663223078SChristoph Hellwig if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { 136788a041f4SKeith Busch pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev), 13688a1d09a6SBenjamin Herrenschmidt nvmeq->sq_cmds, SQ_SIZE(nvmeq)); 136963223078SChristoph Hellwig } else { 13708a1d09a6SBenjamin Herrenschmidt dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq), 137163223078SChristoph Hellwig nvmeq->sq_cmds, nvmeq->sq_dma_addr); 13720f238ff5SLogan Gunthorpe } 137357dacad5SJay Sternberg } 137457dacad5SJay Sternberg 137557dacad5SJay Sternberg static void nvme_free_queues(struct nvme_dev *dev, int lowest) 137657dacad5SJay Sternberg { 137757dacad5SJay Sternberg int i; 137857dacad5SJay Sternberg 1379d858e5f0SSagi Grimberg for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { 1380d858e5f0SSagi Grimberg dev->ctrl.queue_count--; 1381147b27e4SSagi Grimberg nvme_free_queue(&dev->queues[i]); 138257dacad5SJay Sternberg } 138357dacad5SJay Sternberg } 138457dacad5SJay Sternberg 138557dacad5SJay Sternberg /** 138657dacad5SJay Sternberg * nvme_suspend_queue - put queue into suspended state 138740581d1aSBart Van Assche * @nvmeq: queue to suspend 138857dacad5SJay Sternberg */ 138957dacad5SJay Sternberg static int nvme_suspend_queue(struct nvme_queue *nvmeq) 139057dacad5SJay Sternberg { 13914e224106SChristoph Hellwig if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) 139257dacad5SJay Sternberg return 1; 139357dacad5SJay Sternberg 13944e224106SChristoph Hellwig /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */ 1395d1f06f4aSJens Axboe mb(); 139657dacad5SJay Sternberg 13974e224106SChristoph Hellwig nvmeq->dev->online_queues--; 13981c63dc66SChristoph Hellwig if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) 1399c81545f9SSagi Grimberg blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q); 14007c349ddeSKeith Busch if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags)) 14014e224106SChristoph Hellwig pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq); 140257dacad5SJay Sternberg return 0; 140357dacad5SJay Sternberg } 140457dacad5SJay Sternberg 14058fae268bSKeith Busch static void nvme_suspend_io_queues(struct nvme_dev *dev) 14068fae268bSKeith Busch { 14078fae268bSKeith Busch int i; 14088fae268bSKeith Busch 14098fae268bSKeith Busch for (i = dev->ctrl.queue_count - 1; i > 0; i--) 14108fae268bSKeith Busch nvme_suspend_queue(&dev->queues[i]); 14118fae268bSKeith Busch } 14128fae268bSKeith Busch 1413a5cdb68cSKeith Busch static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) 141457dacad5SJay Sternberg { 1415147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[0]; 141657dacad5SJay Sternberg 1417a5cdb68cSKeith Busch if (shutdown) 1418a5cdb68cSKeith Busch nvme_shutdown_ctrl(&dev->ctrl); 1419a5cdb68cSKeith Busch else 1420b5b05048SSagi Grimberg nvme_disable_ctrl(&dev->ctrl); 142157dacad5SJay Sternberg 1422bf392a5dSKeith Busch nvme_poll_irqdisable(nvmeq); 142357dacad5SJay Sternberg } 142457dacad5SJay Sternberg 1425fa46c6fbSKeith Busch /* 1426fa46c6fbSKeith Busch * Called only on 
a device that has been disabled and after all other threads 14279210c075SDongli Zhang * that can check this device's completion queues have synced, except 14289210c075SDongli Zhang * nvme_poll(). This is the last chance for the driver to see a natural 14299210c075SDongli Zhang * completion before nvme_cancel_request() terminates all incomplete requests. 1430fa46c6fbSKeith Busch */ 1431fa46c6fbSKeith Busch static void nvme_reap_pending_cqes(struct nvme_dev *dev) 1432fa46c6fbSKeith Busch { 1433fa46c6fbSKeith Busch int i; 1434fa46c6fbSKeith Busch 14359210c075SDongli Zhang for (i = dev->ctrl.queue_count - 1; i > 0; i--) { 14369210c075SDongli Zhang spin_lock(&dev->queues[i].cq_poll_lock); 1437324b494cSKeith Busch nvme_process_cq(&dev->queues[i]); 14389210c075SDongli Zhang spin_unlock(&dev->queues[i].cq_poll_lock); 14399210c075SDongli Zhang } 1440fa46c6fbSKeith Busch } 1441fa46c6fbSKeith Busch 144257dacad5SJay Sternberg static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, 144357dacad5SJay Sternberg int entry_size) 144457dacad5SJay Sternberg { 144557dacad5SJay Sternberg int q_depth = dev->q_depth; 14465fd4ce1bSChristoph Hellwig unsigned q_size_aligned = roundup(q_depth * entry_size, 14476c3c05b0SChaitanya Kulkarni NVME_CTRL_PAGE_SIZE); 144857dacad5SJay Sternberg 144957dacad5SJay Sternberg if (q_size_aligned * nr_io_queues > dev->cmb_size) { 145057dacad5SJay Sternberg u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); 14514e523547SBaolin Wang 14526c3c05b0SChaitanya Kulkarni mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE); 145357dacad5SJay Sternberg q_depth = div_u64(mem_per_q, entry_size); 145457dacad5SJay Sternberg 145557dacad5SJay Sternberg /* 145657dacad5SJay Sternberg * Ensure the reduced q_depth is above some threshold where it 145757dacad5SJay Sternberg * would be better to map queues in system memory with the 145857dacad5SJay Sternberg * original depth 145957dacad5SJay Sternberg */ 146057dacad5SJay Sternberg if (q_depth < 64) 146157dacad5SJay Sternberg return -ENOMEM; 146257dacad5SJay Sternberg } 146357dacad5SJay Sternberg 146457dacad5SJay Sternberg return q_depth; 146557dacad5SJay Sternberg } 146657dacad5SJay Sternberg 146757dacad5SJay Sternberg static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, 14688a1d09a6SBenjamin Herrenschmidt int qid) 146957dacad5SJay Sternberg { 14700f238ff5SLogan Gunthorpe struct pci_dev *pdev = to_pci_dev(dev->dev); 1471815c6704SKeith Busch 14720f238ff5SLogan Gunthorpe if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { 14738a1d09a6SBenjamin Herrenschmidt nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq)); 1474bfac8e9fSAlan Mikhak if (nvmeq->sq_cmds) { 14750f238ff5SLogan Gunthorpe nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, 14760f238ff5SLogan Gunthorpe nvmeq->sq_cmds); 147763223078SChristoph Hellwig if (nvmeq->sq_dma_addr) { 147863223078SChristoph Hellwig set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); 147963223078SChristoph Hellwig return 0; 148063223078SChristoph Hellwig } 1481bfac8e9fSAlan Mikhak 14828a1d09a6SBenjamin Herrenschmidt pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq)); 1483bfac8e9fSAlan Mikhak } 14840f238ff5SLogan Gunthorpe } 14850f238ff5SLogan Gunthorpe 14868a1d09a6SBenjamin Herrenschmidt nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq), 148757dacad5SJay Sternberg &nvmeq->sq_dma_addr, GFP_KERNEL); 148857dacad5SJay Sternberg if (!nvmeq->sq_cmds) 148957dacad5SJay Sternberg return -ENOMEM; 149057dacad5SJay Sternberg return 0; 149157dacad5SJay Sternberg } 149257dacad5SJay 
Sternberg 1493a6ff7262SKeith Busch static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) 149457dacad5SJay Sternberg { 1495147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[qid]; 149657dacad5SJay Sternberg 149762314e40SKeith Busch if (dev->ctrl.queue_count > qid) 149862314e40SKeith Busch return 0; 149957dacad5SJay Sternberg 1500c1e0cc7eSBenjamin Herrenschmidt nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES; 15018a1d09a6SBenjamin Herrenschmidt nvmeq->q_depth = depth; 15028a1d09a6SBenjamin Herrenschmidt nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), 150357dacad5SJay Sternberg &nvmeq->cq_dma_addr, GFP_KERNEL); 150457dacad5SJay Sternberg if (!nvmeq->cqes) 150557dacad5SJay Sternberg goto free_nvmeq; 150657dacad5SJay Sternberg 15078a1d09a6SBenjamin Herrenschmidt if (nvme_alloc_sq_cmds(dev, nvmeq, qid)) 150857dacad5SJay Sternberg goto free_cqdma; 150957dacad5SJay Sternberg 151057dacad5SJay Sternberg nvmeq->dev = dev; 15111ab0cd69SJens Axboe spin_lock_init(&nvmeq->sq_lock); 15123a7afd8eSChristoph Hellwig spin_lock_init(&nvmeq->cq_poll_lock); 151357dacad5SJay Sternberg nvmeq->cq_head = 0; 151457dacad5SJay Sternberg nvmeq->cq_phase = 1; 151557dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 151657dacad5SJay Sternberg nvmeq->qid = qid; 1517d858e5f0SSagi Grimberg dev->ctrl.queue_count++; 151857dacad5SJay Sternberg 1519147b27e4SSagi Grimberg return 0; 152057dacad5SJay Sternberg 152157dacad5SJay Sternberg free_cqdma: 15228a1d09a6SBenjamin Herrenschmidt dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, 152357dacad5SJay Sternberg nvmeq->cq_dma_addr); 152457dacad5SJay Sternberg free_nvmeq: 1525147b27e4SSagi Grimberg return -ENOMEM; 152657dacad5SJay Sternberg } 152757dacad5SJay Sternberg 1528dca51e78SChristoph Hellwig static int queue_request_irq(struct nvme_queue *nvmeq) 152957dacad5SJay Sternberg { 15300ff199cbSChristoph Hellwig struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 15310ff199cbSChristoph Hellwig int nr = nvmeq->dev->ctrl.instance; 15320ff199cbSChristoph Hellwig 15330ff199cbSChristoph Hellwig if (use_threaded_interrupts) { 15340ff199cbSChristoph Hellwig return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, 15350ff199cbSChristoph Hellwig nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 15360ff199cbSChristoph Hellwig } else { 15370ff199cbSChristoph Hellwig return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, 15380ff199cbSChristoph Hellwig NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 15390ff199cbSChristoph Hellwig } 154057dacad5SJay Sternberg } 154157dacad5SJay Sternberg 154257dacad5SJay Sternberg static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) 154357dacad5SJay Sternberg { 154457dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 154557dacad5SJay Sternberg 154657dacad5SJay Sternberg nvmeq->sq_tail = 0; 154738210800SKeith Busch nvmeq->last_sq_tail = 0; 154857dacad5SJay Sternberg nvmeq->cq_head = 0; 154957dacad5SJay Sternberg nvmeq->cq_phase = 1; 155057dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 15518a1d09a6SBenjamin Herrenschmidt memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); 1552f9f38e33SHelen Koike nvme_dbbuf_init(dev, nvmeq, qid); 155357dacad5SJay Sternberg dev->online_queues++; 15543a7afd8eSChristoph Hellwig wmb(); /* ensure the first interrupt sees the initialization */ 155557dacad5SJay Sternberg } 155657dacad5SJay Sternberg 1557*e4b9852aSCasey Chen /* 1558*e4b9852aSCasey Chen * Try getting shutdown_lock while setting up IO queues. 
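 * Using mutex_trylock() instead of mutex_lock() means that if
 * nvme_dev_disable() already holds the lock the controller is being
 * torn down, and queue setup fails fast with -ENODEV rather than
 * blocking behind the teardown.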
1559*e4b9852aSCasey Chen */ 1560*e4b9852aSCasey Chen static int nvme_setup_io_queues_trylock(struct nvme_dev *dev) 1561*e4b9852aSCasey Chen { 1562*e4b9852aSCasey Chen /* 1563*e4b9852aSCasey Chen * Give up if the lock is being held by nvme_dev_disable. 1564*e4b9852aSCasey Chen */ 1565*e4b9852aSCasey Chen if (!mutex_trylock(&dev->shutdown_lock)) 1566*e4b9852aSCasey Chen return -ENODEV; 1567*e4b9852aSCasey Chen 1568*e4b9852aSCasey Chen /* 1569*e4b9852aSCasey Chen * Controller is in wrong state, fail early. 1570*e4b9852aSCasey Chen */ 1571*e4b9852aSCasey Chen if (dev->ctrl.state != NVME_CTRL_CONNECTING) { 1572*e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 1573*e4b9852aSCasey Chen return -ENODEV; 1574*e4b9852aSCasey Chen } 1575*e4b9852aSCasey Chen 1576*e4b9852aSCasey Chen return 0; 1577*e4b9852aSCasey Chen } 1578*e4b9852aSCasey Chen 15794b04cc6aSJens Axboe static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) 158057dacad5SJay Sternberg { 158157dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 158257dacad5SJay Sternberg int result; 15837c349ddeSKeith Busch u16 vector = 0; 158457dacad5SJay Sternberg 1585d1ed6aa1SChristoph Hellwig clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 1586d1ed6aa1SChristoph Hellwig 158722b55601SKeith Busch /* 158822b55601SKeith Busch * A queue's vector matches the queue identifier unless the controller 158922b55601SKeith Busch * has only one vector available. 159022b55601SKeith Busch */ 15914b04cc6aSJens Axboe if (!polled) 1592a8e3e0bbSJianchao Wang vector = dev->num_vecs == 1 ? 0 : qid; 15934b04cc6aSJens Axboe else 15947c349ddeSKeith Busch set_bit(NVMEQ_POLLED, &nvmeq->flags); 15954b04cc6aSJens Axboe 1596a8e3e0bbSJianchao Wang result = adapter_alloc_cq(dev, qid, nvmeq, vector); 1597ded45505SKeith Busch if (result) 1598ded45505SKeith Busch return result; 159957dacad5SJay Sternberg 160057dacad5SJay Sternberg result = adapter_alloc_sq(dev, qid, nvmeq); 160157dacad5SJay Sternberg if (result < 0) 1602ded45505SKeith Busch return result; 1603c80b36cdSEdmund Nadolski if (result) 160457dacad5SJay Sternberg goto release_cq; 160557dacad5SJay Sternberg 1606a8e3e0bbSJianchao Wang nvmeq->cq_vector = vector; 16074b04cc6aSJens Axboe 1608*e4b9852aSCasey Chen result = nvme_setup_io_queues_trylock(dev); 1609*e4b9852aSCasey Chen if (result) 1610*e4b9852aSCasey Chen return result; 1611*e4b9852aSCasey Chen nvme_init_queue(nvmeq, qid); 16127c349ddeSKeith Busch if (!polled) { 1613dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 161457dacad5SJay Sternberg if (result < 0) 161557dacad5SJay Sternberg goto release_sq; 16164b04cc6aSJens Axboe } 161757dacad5SJay Sternberg 16184e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &nvmeq->flags); 1619*e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 162057dacad5SJay Sternberg return result; 162157dacad5SJay Sternberg 162257dacad5SJay Sternberg release_sq: 1623f25a2dfcSJianchao Wang dev->online_queues--; 1624*e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 162557dacad5SJay Sternberg adapter_delete_sq(dev, qid); 162657dacad5SJay Sternberg release_cq: 162757dacad5SJay Sternberg adapter_delete_cq(dev, qid); 162857dacad5SJay Sternberg return result; 162957dacad5SJay Sternberg } 163057dacad5SJay Sternberg 1631f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_admin_ops = { 163257dacad5SJay Sternberg .queue_rq = nvme_queue_rq, 163377f02a7aSChristoph Hellwig .complete = nvme_pci_complete_rq, 163457dacad5SJay Sternberg .init_hctx = nvme_admin_init_hctx, 16350350815aSChristoph Hellwig 
.init_request = nvme_init_request, 163657dacad5SJay Sternberg .timeout = nvme_timeout, 163757dacad5SJay Sternberg }; 163857dacad5SJay Sternberg 1639f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_ops = { 1640376f7ef8SChristoph Hellwig .queue_rq = nvme_queue_rq, 1641376f7ef8SChristoph Hellwig .complete = nvme_pci_complete_rq, 1642376f7ef8SChristoph Hellwig .commit_rqs = nvme_commit_rqs, 1643376f7ef8SChristoph Hellwig .init_hctx = nvme_init_hctx, 1644376f7ef8SChristoph Hellwig .init_request = nvme_init_request, 1645376f7ef8SChristoph Hellwig .map_queues = nvme_pci_map_queues, 1646376f7ef8SChristoph Hellwig .timeout = nvme_timeout, 1647c6d962aeSChristoph Hellwig .poll = nvme_poll, 1648dabcefabSJens Axboe }; 1649dabcefabSJens Axboe 165057dacad5SJay Sternberg static void nvme_dev_remove_admin(struct nvme_dev *dev) 165157dacad5SJay Sternberg { 16521c63dc66SChristoph Hellwig if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { 165369d9a99cSKeith Busch /* 165469d9a99cSKeith Busch * If the controller was reset during removal, it's possible 165569d9a99cSKeith Busch * user requests may be waiting on a stopped queue. Start the 165669d9a99cSKeith Busch * queue to flush these to completion. 165769d9a99cSKeith Busch */ 1658c81545f9SSagi Grimberg blk_mq_unquiesce_queue(dev->ctrl.admin_q); 16591c63dc66SChristoph Hellwig blk_cleanup_queue(dev->ctrl.admin_q); 166057dacad5SJay Sternberg blk_mq_free_tag_set(&dev->admin_tagset); 166157dacad5SJay Sternberg } 166257dacad5SJay Sternberg } 166357dacad5SJay Sternberg 166457dacad5SJay Sternberg static int nvme_alloc_admin_tags(struct nvme_dev *dev) 166557dacad5SJay Sternberg { 16661c63dc66SChristoph Hellwig if (!dev->ctrl.admin_q) { 166757dacad5SJay Sternberg dev->admin_tagset.ops = &nvme_mq_admin_ops; 166857dacad5SJay Sternberg dev->admin_tagset.nr_hw_queues = 1; 1669e3e9d50cSKeith Busch 167038dabe21SKeith Busch dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH; 1671dc96f938SChaitanya Kulkarni dev->admin_tagset.timeout = NVME_ADMIN_TIMEOUT; 1672d4ec47f1SMax Gurtovoy dev->admin_tagset.numa_node = dev->ctrl.numa_node; 1673d43f1ccfSChristoph Hellwig dev->admin_tagset.cmd_size = sizeof(struct nvme_iod); 1674d3484991SJens Axboe dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED; 167557dacad5SJay Sternberg dev->admin_tagset.driver_data = dev; 167657dacad5SJay Sternberg 167757dacad5SJay Sternberg if (blk_mq_alloc_tag_set(&dev->admin_tagset)) 167857dacad5SJay Sternberg return -ENOMEM; 167934b6c231SSagi Grimberg dev->ctrl.admin_tagset = &dev->admin_tagset; 168057dacad5SJay Sternberg 16811c63dc66SChristoph Hellwig dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset); 16821c63dc66SChristoph Hellwig if (IS_ERR(dev->ctrl.admin_q)) { 168357dacad5SJay Sternberg blk_mq_free_tag_set(&dev->admin_tagset); 168457dacad5SJay Sternberg return -ENOMEM; 168557dacad5SJay Sternberg } 16861c63dc66SChristoph Hellwig if (!blk_get_queue(dev->ctrl.admin_q)) { 168757dacad5SJay Sternberg nvme_dev_remove_admin(dev); 16881c63dc66SChristoph Hellwig dev->ctrl.admin_q = NULL; 168957dacad5SJay Sternberg return -ENODEV; 169057dacad5SJay Sternberg } 169157dacad5SJay Sternberg } else 1692c81545f9SSagi Grimberg blk_mq_unquiesce_queue(dev->ctrl.admin_q); 169357dacad5SJay Sternberg 169457dacad5SJay Sternberg return 0; 169557dacad5SJay Sternberg } 169657dacad5SJay Sternberg 169797f6ef64SXu Yu static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) 169897f6ef64SXu Yu { 169997f6ef64SXu Yu return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); 170097f6ef64SXu 
Yu } 170197f6ef64SXu Yu 170297f6ef64SXu Yu static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size) 170397f6ef64SXu Yu { 170497f6ef64SXu Yu struct pci_dev *pdev = to_pci_dev(dev->dev); 170597f6ef64SXu Yu 170697f6ef64SXu Yu if (size <= dev->bar_mapped_size) 170797f6ef64SXu Yu return 0; 170897f6ef64SXu Yu if (size > pci_resource_len(pdev, 0)) 170997f6ef64SXu Yu return -ENOMEM; 171097f6ef64SXu Yu if (dev->bar) 171197f6ef64SXu Yu iounmap(dev->bar); 171297f6ef64SXu Yu dev->bar = ioremap(pci_resource_start(pdev, 0), size); 171397f6ef64SXu Yu if (!dev->bar) { 171497f6ef64SXu Yu dev->bar_mapped_size = 0; 171597f6ef64SXu Yu return -ENOMEM; 171697f6ef64SXu Yu } 171797f6ef64SXu Yu dev->bar_mapped_size = size; 171897f6ef64SXu Yu dev->dbs = dev->bar + NVME_REG_DBS; 171997f6ef64SXu Yu 172097f6ef64SXu Yu return 0; 172197f6ef64SXu Yu } 172297f6ef64SXu Yu 172301ad0990SSagi Grimberg static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) 172457dacad5SJay Sternberg { 172557dacad5SJay Sternberg int result; 172657dacad5SJay Sternberg u32 aqa; 172757dacad5SJay Sternberg struct nvme_queue *nvmeq; 172857dacad5SJay Sternberg 172997f6ef64SXu Yu result = nvme_remap_bar(dev, db_bar_size(dev, 0)); 173097f6ef64SXu Yu if (result < 0) 173197f6ef64SXu Yu return result; 173297f6ef64SXu Yu 17338ef2074dSGabriel Krisman Bertazi dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? 173420d0dfe6SSagi Grimberg NVME_CAP_NSSRC(dev->ctrl.cap) : 0; 173557dacad5SJay Sternberg 17367a67cbeaSChristoph Hellwig if (dev->subsystem && 17377a67cbeaSChristoph Hellwig (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) 17387a67cbeaSChristoph Hellwig writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); 173957dacad5SJay Sternberg 1740b5b05048SSagi Grimberg result = nvme_disable_ctrl(&dev->ctrl); 174157dacad5SJay Sternberg if (result < 0) 174257dacad5SJay Sternberg return result; 174357dacad5SJay Sternberg 1744a6ff7262SKeith Busch result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); 1745147b27e4SSagi Grimberg if (result) 1746147b27e4SSagi Grimberg return result; 174757dacad5SJay Sternberg 1748635333e4SMax Gurtovoy dev->ctrl.numa_node = dev_to_node(dev->dev); 1749635333e4SMax Gurtovoy 1750147b27e4SSagi Grimberg nvmeq = &dev->queues[0]; 175157dacad5SJay Sternberg aqa = nvmeq->q_depth - 1; 175257dacad5SJay Sternberg aqa |= aqa << 16; 175357dacad5SJay Sternberg 17547a67cbeaSChristoph Hellwig writel(aqa, dev->bar + NVME_REG_AQA); 17557a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); 17567a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); 175757dacad5SJay Sternberg 1758c0f2f45bSSagi Grimberg result = nvme_enable_ctrl(&dev->ctrl); 175957dacad5SJay Sternberg if (result) 1760d4875622SKeith Busch return result; 176157dacad5SJay Sternberg 176257dacad5SJay Sternberg nvmeq->cq_vector = 0; 1763161b8be2SKeith Busch nvme_init_queue(nvmeq, 0); 1764dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 176557dacad5SJay Sternberg if (result) { 17667c349ddeSKeith Busch dev->online_queues--; 1767d4875622SKeith Busch return result; 176857dacad5SJay Sternberg } 176957dacad5SJay Sternberg 17704e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &nvmeq->flags); 177157dacad5SJay Sternberg return result; 177257dacad5SJay Sternberg } 177357dacad5SJay Sternberg 1774749941f2SChristoph Hellwig static int nvme_create_io_queues(struct nvme_dev *dev) 177557dacad5SJay Sternberg { 17764b04cc6aSJens Axboe unsigned i, max, rw_queues; 1777749941f2SChristoph Hellwig int ret = 0; 
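	/*
	 * Worked example (hypothetical numbers): with max = 10 and
	 * io_queues = { 6 default, 2 read, 2 poll }, rw_queues below is
	 * 6 + 2 = 8, so queues 1-8 get interrupt vectors and queues 9-10
	 * are created as polled queues (i > rw_queues).
	 */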
177857dacad5SJay Sternberg 1779d858e5f0SSagi Grimberg for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { 1780a6ff7262SKeith Busch if (nvme_alloc_queue(dev, i, dev->q_depth)) { 1781749941f2SChristoph Hellwig ret = -ENOMEM; 178257dacad5SJay Sternberg break; 1783749941f2SChristoph Hellwig } 1784749941f2SChristoph Hellwig } 178557dacad5SJay Sternberg 1786d858e5f0SSagi Grimberg max = min(dev->max_qid, dev->ctrl.queue_count - 1); 1787e20ba6e1SChristoph Hellwig if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) { 1788e20ba6e1SChristoph Hellwig rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] + 1789e20ba6e1SChristoph Hellwig dev->io_queues[HCTX_TYPE_READ]; 17904b04cc6aSJens Axboe } else { 17914b04cc6aSJens Axboe rw_queues = max; 17924b04cc6aSJens Axboe } 17934b04cc6aSJens Axboe 1794949928c1SKeith Busch for (i = dev->online_queues; i <= max; i++) { 17954b04cc6aSJens Axboe bool polled = i > rw_queues; 17964b04cc6aSJens Axboe 17974b04cc6aSJens Axboe ret = nvme_create_queue(&dev->queues[i], i, polled); 1798d4875622SKeith Busch if (ret) 179957dacad5SJay Sternberg break; 180057dacad5SJay Sternberg } 180157dacad5SJay Sternberg 1802749941f2SChristoph Hellwig /* 1803749941f2SChristoph Hellwig * Ignore failing Create SQ/CQ commands, we can continue with less 18048adb8c14SMinwoo Im * than the desired amount of queues, and even a controller without 18058adb8c14SMinwoo Im * I/O queues can still be used to issue admin commands. This might 1806749941f2SChristoph Hellwig * be useful to upgrade a buggy firmware for example. 1807749941f2SChristoph Hellwig */ 1808749941f2SChristoph Hellwig return ret >= 0 ? 0 : ret; 180957dacad5SJay Sternberg } 181057dacad5SJay Sternberg 1811202021c1SStephen Bates static ssize_t nvme_cmb_show(struct device *dev, 1812202021c1SStephen Bates struct device_attribute *attr, 1813202021c1SStephen Bates char *buf) 1814202021c1SStephen Bates { 1815202021c1SStephen Bates struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 1816202021c1SStephen Bates 1817c965809cSStephen Bates return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n", 1818202021c1SStephen Bates ndev->cmbloc, ndev->cmbsz); 1819202021c1SStephen Bates } 1820202021c1SStephen Bates static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL); 1821202021c1SStephen Bates 182288de4598SChristoph Hellwig static u64 nvme_cmb_size_unit(struct nvme_dev *dev) 182357dacad5SJay Sternberg { 182488de4598SChristoph Hellwig u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK; 182588de4598SChristoph Hellwig 182688de4598SChristoph Hellwig return 1ULL << (12 + 4 * szu); 182788de4598SChristoph Hellwig } 182888de4598SChristoph Hellwig 182988de4598SChristoph Hellwig static u32 nvme_cmb_size(struct nvme_dev *dev) 183088de4598SChristoph Hellwig { 183188de4598SChristoph Hellwig return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK; 183288de4598SChristoph Hellwig } 183388de4598SChristoph Hellwig 1834f65efd6dSChristoph Hellwig static void nvme_map_cmb(struct nvme_dev *dev) 183557dacad5SJay Sternberg { 183688de4598SChristoph Hellwig u64 size, offset; 183757dacad5SJay Sternberg resource_size_t bar_size; 183857dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 18398969f1f8SChristoph Hellwig int bar; 184057dacad5SJay Sternberg 18419fe5c59fSKeith Busch if (dev->cmb_size) 18429fe5c59fSKeith Busch return; 18439fe5c59fSKeith Busch 184420d3bb92SKlaus Jensen if (NVME_CAP_CMBS(dev->ctrl.cap)) 184520d3bb92SKlaus Jensen writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC); 184620d3bb92SKlaus Jensen 18477a67cbeaSChristoph 
Hellwig dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); 1848f65efd6dSChristoph Hellwig if (!dev->cmbsz) 1849f65efd6dSChristoph Hellwig return; 1850202021c1SStephen Bates dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); 185157dacad5SJay Sternberg 185288de4598SChristoph Hellwig size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev); 185388de4598SChristoph Hellwig offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc); 18548969f1f8SChristoph Hellwig bar = NVME_CMB_BIR(dev->cmbloc); 18558969f1f8SChristoph Hellwig bar_size = pci_resource_len(pdev, bar); 185657dacad5SJay Sternberg 185757dacad5SJay Sternberg if (offset > bar_size) 1858f65efd6dSChristoph Hellwig return; 185957dacad5SJay Sternberg 186057dacad5SJay Sternberg /* 186120d3bb92SKlaus Jensen * Tell the controller about the host side address mapping the CMB, 186220d3bb92SKlaus Jensen * and enable CMB decoding for the NVMe 1.4+ scheme: 186320d3bb92SKlaus Jensen */ 186420d3bb92SKlaus Jensen if (NVME_CAP_CMBS(dev->ctrl.cap)) { 186520d3bb92SKlaus Jensen hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE | 186620d3bb92SKlaus Jensen (pci_bus_address(pdev, bar) + offset), 186720d3bb92SKlaus Jensen dev->bar + NVME_REG_CMBMSC); 186820d3bb92SKlaus Jensen } 186920d3bb92SKlaus Jensen 187020d3bb92SKlaus Jensen /* 187157dacad5SJay Sternberg * Controllers may support a CMB size larger than their BAR, 187257dacad5SJay Sternberg * for example, due to being behind a bridge. Reduce the CMB to 187357dacad5SJay Sternberg * the reported size of the BAR 187457dacad5SJay Sternberg */ 187557dacad5SJay Sternberg if (size > bar_size - offset) 187657dacad5SJay Sternberg size = bar_size - offset; 187757dacad5SJay Sternberg 18780f238ff5SLogan Gunthorpe if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { 18790f238ff5SLogan Gunthorpe dev_warn(dev->ctrl.device, 18800f238ff5SLogan Gunthorpe "failed to register the CMB\n"); 1881f65efd6dSChristoph Hellwig return; 18820f238ff5SLogan Gunthorpe } 18830f238ff5SLogan Gunthorpe 188457dacad5SJay Sternberg dev->cmb_size = size; 18850f238ff5SLogan Gunthorpe dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); 18860f238ff5SLogan Gunthorpe 18870f238ff5SLogan Gunthorpe if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == 18880f238ff5SLogan Gunthorpe (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) 18890f238ff5SLogan Gunthorpe pci_p2pmem_publish(pdev, true); 1890f65efd6dSChristoph Hellwig 1891f65efd6dSChristoph Hellwig if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, 1892f65efd6dSChristoph Hellwig &dev_attr_cmb.attr, NULL)) 1893f65efd6dSChristoph Hellwig dev_warn(dev->ctrl.device, 1894f65efd6dSChristoph Hellwig "failed to add sysfs attribute for CMB\n"); 189557dacad5SJay Sternberg } 189657dacad5SJay Sternberg 189757dacad5SJay Sternberg static inline void nvme_release_cmb(struct nvme_dev *dev) 189857dacad5SJay Sternberg { 18990f238ff5SLogan Gunthorpe if (dev->cmb_size) { 1900f63572dfSJon Derrick sysfs_remove_file_from_group(&dev->ctrl.device->kobj, 1901f63572dfSJon Derrick &dev_attr_cmb.attr, NULL); 19020f238ff5SLogan Gunthorpe dev->cmb_size = 0; 1903f63572dfSJon Derrick } 190457dacad5SJay Sternberg } 190557dacad5SJay Sternberg 190687ad72a5SChristoph Hellwig static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) 190757dacad5SJay Sternberg { 19086c3c05b0SChaitanya Kulkarni u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT; 19094033f35dSChristoph Hellwig u64 dma_addr = dev->host_mem_descs_dma; 1910f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 191187ad72a5SChristoph Hellwig int ret; 
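	/*
	 * The HMB is configured via Set Features (NVME_FEAT_HOST_MEM_BUF):
	 * dword11 carries the enable/return bits, dword12 the buffer size
	 * in NVME_CTRL_PAGE_SIZE units, dword13/14 the low/high halves of
	 * the descriptor list address, and dword15 the descriptor count.
	 */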
191287ad72a5SChristoph Hellwig 191387ad72a5SChristoph Hellwig c.features.opcode = nvme_admin_set_features; 191487ad72a5SChristoph Hellwig c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); 191587ad72a5SChristoph Hellwig c.features.dword11 = cpu_to_le32(bits); 19166c3c05b0SChaitanya Kulkarni c.features.dword12 = cpu_to_le32(host_mem_size); 191787ad72a5SChristoph Hellwig c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr)); 191887ad72a5SChristoph Hellwig c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr)); 191987ad72a5SChristoph Hellwig c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); 192087ad72a5SChristoph Hellwig 192187ad72a5SChristoph Hellwig ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 192287ad72a5SChristoph Hellwig if (ret) { 192387ad72a5SChristoph Hellwig dev_warn(dev->ctrl.device, 192487ad72a5SChristoph Hellwig "failed to set host mem (err %d, flags %#x).\n", 192587ad72a5SChristoph Hellwig ret, bits); 192687ad72a5SChristoph Hellwig } 192787ad72a5SChristoph Hellwig return ret; 192887ad72a5SChristoph Hellwig } 192987ad72a5SChristoph Hellwig 193087ad72a5SChristoph Hellwig static void nvme_free_host_mem(struct nvme_dev *dev) 193187ad72a5SChristoph Hellwig { 193287ad72a5SChristoph Hellwig int i; 193387ad72a5SChristoph Hellwig 193487ad72a5SChristoph Hellwig for (i = 0; i < dev->nr_host_mem_descs; i++) { 193587ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; 19366c3c05b0SChaitanya Kulkarni size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE; 193787ad72a5SChristoph Hellwig 1938cc667f6dSLiviu Dudau dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], 1939cc667f6dSLiviu Dudau le64_to_cpu(desc->addr), 1940cc667f6dSLiviu Dudau DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 194187ad72a5SChristoph Hellwig } 194287ad72a5SChristoph Hellwig 194387ad72a5SChristoph Hellwig kfree(dev->host_mem_desc_bufs); 194487ad72a5SChristoph Hellwig dev->host_mem_desc_bufs = NULL; 19454033f35dSChristoph Hellwig dma_free_coherent(dev->dev, 19464033f35dSChristoph Hellwig dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), 19474033f35dSChristoph Hellwig dev->host_mem_descs, dev->host_mem_descs_dma); 194887ad72a5SChristoph Hellwig dev->host_mem_descs = NULL; 19497e5dd57eSMinwoo Im dev->nr_host_mem_descs = 0; 195087ad72a5SChristoph Hellwig } 195187ad72a5SChristoph Hellwig 195292dc6895SChristoph Hellwig static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, 195392dc6895SChristoph Hellwig u32 chunk_size) 195487ad72a5SChristoph Hellwig { 195587ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *descs; 195692dc6895SChristoph Hellwig u32 max_entries, len; 19574033f35dSChristoph Hellwig dma_addr_t descs_dma; 19582ee0e4edSDan Carpenter int i = 0; 195987ad72a5SChristoph Hellwig void **bufs; 19606fbcde66SMinwoo Im u64 size, tmp; 196187ad72a5SChristoph Hellwig 196287ad72a5SChristoph Hellwig tmp = (preferred + chunk_size - 1); 196387ad72a5SChristoph Hellwig do_div(tmp, chunk_size); 196487ad72a5SChristoph Hellwig max_entries = tmp; 1965044a9df1SChristoph Hellwig 1966044a9df1SChristoph Hellwig if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) 1967044a9df1SChristoph Hellwig max_entries = dev->ctrl.hmmaxd; 1968044a9df1SChristoph Hellwig 1969750afb08SLuis Chamberlain descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), 19704033f35dSChristoph Hellwig &descs_dma, GFP_KERNEL); 197187ad72a5SChristoph Hellwig if (!descs) 197287ad72a5SChristoph Hellwig goto out; 197387ad72a5SChristoph Hellwig 
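	/*
	 * Example (hypothetical sizes): preferred = 256 MiB with
	 * chunk_size = 4 MiB yields max_entries = 64 descriptors (possibly
	 * capped by ctrl.hmmaxd above), one per DMA-contiguous chunk
	 * allocated in the loop below.
	 */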
197487ad72a5SChristoph Hellwig bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL); 197587ad72a5SChristoph Hellwig if (!bufs) 197687ad72a5SChristoph Hellwig goto out_free_descs; 197787ad72a5SChristoph Hellwig 1978244a8fe4SMinwoo Im for (size = 0; size < preferred && i < max_entries; size += len) { 197987ad72a5SChristoph Hellwig dma_addr_t dma_addr; 198087ad72a5SChristoph Hellwig 198150cdb7c6SChristoph Hellwig len = min_t(u64, chunk_size, preferred - size); 198287ad72a5SChristoph Hellwig bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, 198387ad72a5SChristoph Hellwig DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 198487ad72a5SChristoph Hellwig if (!bufs[i]) 198587ad72a5SChristoph Hellwig break; 198687ad72a5SChristoph Hellwig 198787ad72a5SChristoph Hellwig descs[i].addr = cpu_to_le64(dma_addr); 19886c3c05b0SChaitanya Kulkarni descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE); 198987ad72a5SChristoph Hellwig i++; 199087ad72a5SChristoph Hellwig } 199187ad72a5SChristoph Hellwig 199292dc6895SChristoph Hellwig if (!size) 199387ad72a5SChristoph Hellwig goto out_free_bufs; 199487ad72a5SChristoph Hellwig 199587ad72a5SChristoph Hellwig dev->nr_host_mem_descs = i; 199687ad72a5SChristoph Hellwig dev->host_mem_size = size; 199787ad72a5SChristoph Hellwig dev->host_mem_descs = descs; 19984033f35dSChristoph Hellwig dev->host_mem_descs_dma = descs_dma; 199987ad72a5SChristoph Hellwig dev->host_mem_desc_bufs = bufs; 200087ad72a5SChristoph Hellwig return 0; 200187ad72a5SChristoph Hellwig 200287ad72a5SChristoph Hellwig out_free_bufs: 200387ad72a5SChristoph Hellwig while (--i >= 0) { 20046c3c05b0SChaitanya Kulkarni size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE; 200587ad72a5SChristoph Hellwig 2006cc667f6dSLiviu Dudau dma_free_attrs(dev->dev, size, bufs[i], 2007cc667f6dSLiviu Dudau le64_to_cpu(descs[i].addr), 2008cc667f6dSLiviu Dudau DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 200987ad72a5SChristoph Hellwig } 201087ad72a5SChristoph Hellwig 201187ad72a5SChristoph Hellwig kfree(bufs); 201287ad72a5SChristoph Hellwig out_free_descs: 20134033f35dSChristoph Hellwig dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, 20144033f35dSChristoph Hellwig descs_dma); 201587ad72a5SChristoph Hellwig out: 201687ad72a5SChristoph Hellwig dev->host_mem_descs = NULL; 201787ad72a5SChristoph Hellwig return -ENOMEM; 201887ad72a5SChristoph Hellwig } 201987ad72a5SChristoph Hellwig 202092dc6895SChristoph Hellwig static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) 202192dc6895SChristoph Hellwig { 20229dc54a0dSChaitanya Kulkarni u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES); 20239dc54a0dSChaitanya Kulkarni u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); 20249dc54a0dSChaitanya Kulkarni u64 chunk_size; 202592dc6895SChristoph Hellwig 202692dc6895SChristoph Hellwig /* start big and work our way down */ 20279dc54a0dSChaitanya Kulkarni for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) { 202892dc6895SChristoph Hellwig if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) { 202992dc6895SChristoph Hellwig if (!min || dev->host_mem_size >= min) 203092dc6895SChristoph Hellwig return 0; 203192dc6895SChristoph Hellwig nvme_free_host_mem(dev); 203292dc6895SChristoph Hellwig } 203392dc6895SChristoph Hellwig } 203492dc6895SChristoph Hellwig 203592dc6895SChristoph Hellwig return -ENOMEM; 203692dc6895SChristoph Hellwig } 203792dc6895SChristoph Hellwig 20389620cfbaSChristoph Hellwig static int nvme_setup_host_mem(struct 
nvme_dev *dev)
203987ad72a5SChristoph Hellwig {
204087ad72a5SChristoph Hellwig 	u64 max = (u64)max_host_mem_size_mb * SZ_1M;
204187ad72a5SChristoph Hellwig 	u64 preferred = (u64)dev->ctrl.hmpre * 4096;
204287ad72a5SChristoph Hellwig 	u64 min = (u64)dev->ctrl.hmmin * 4096;
204387ad72a5SChristoph Hellwig 	u32 enable_bits = NVME_HOST_MEM_ENABLE;
20446fbcde66SMinwoo Im 	int ret;
204587ad72a5SChristoph Hellwig 
204687ad72a5SChristoph Hellwig 	preferred = min(preferred, max);
204787ad72a5SChristoph Hellwig 	if (min > max) {
204887ad72a5SChristoph Hellwig 		dev_warn(dev->ctrl.device,
204987ad72a5SChristoph Hellwig 			 "min host memory (%lld MiB) above limit (%d MiB).\n",
205087ad72a5SChristoph Hellwig 			 min >> ilog2(SZ_1M), max_host_mem_size_mb);
205187ad72a5SChristoph Hellwig 		nvme_free_host_mem(dev);
20529620cfbaSChristoph Hellwig 		return 0;
205387ad72a5SChristoph Hellwig 	}
205487ad72a5SChristoph Hellwig 
205587ad72a5SChristoph Hellwig 	/*
205687ad72a5SChristoph Hellwig 	 * If we already have a buffer allocated, check if we can reuse it.
205787ad72a5SChristoph Hellwig 	 */
205887ad72a5SChristoph Hellwig 	if (dev->host_mem_descs) {
205987ad72a5SChristoph Hellwig 		if (dev->host_mem_size >= min)
206087ad72a5SChristoph Hellwig 			enable_bits |= NVME_HOST_MEM_RETURN;
206187ad72a5SChristoph Hellwig 		else
206287ad72a5SChristoph Hellwig 			nvme_free_host_mem(dev);
206387ad72a5SChristoph Hellwig 	}
206487ad72a5SChristoph Hellwig 
206587ad72a5SChristoph Hellwig 	if (!dev->host_mem_descs) {
206692dc6895SChristoph Hellwig 		if (nvme_alloc_host_mem(dev, min, preferred)) {
206792dc6895SChristoph Hellwig 			dev_warn(dev->ctrl.device,
206892dc6895SChristoph Hellwig 				 "failed to allocate host memory buffer.\n");
20699620cfbaSChristoph Hellwig 			return 0; /* controller must work without HMB */
207087ad72a5SChristoph Hellwig 		}
207187ad72a5SChristoph Hellwig 
207292dc6895SChristoph Hellwig 		dev_info(dev->ctrl.device,
207392dc6895SChristoph Hellwig 			 "allocated %lld MiB host memory buffer.\n",
207492dc6895SChristoph Hellwig 			 dev->host_mem_size >> ilog2(SZ_1M));
207592dc6895SChristoph Hellwig 	}
207692dc6895SChristoph Hellwig 
20779620cfbaSChristoph Hellwig 	ret = nvme_set_host_mem(dev, enable_bits);
20789620cfbaSChristoph Hellwig 	if (ret)
207987ad72a5SChristoph Hellwig 		nvme_free_host_mem(dev);
20809620cfbaSChristoph Hellwig 	return ret;
208157dacad5SJay Sternberg }
208257dacad5SJay Sternberg 
2083612b7286SMing Lei /*
2084612b7286SMing Lei  * nirqs is the number of interrupts available for write and read
2085612b7286SMing Lei  * queues. The core already reserved an interrupt for the admin queue.
2086612b7286SMing Lei  */
2087612b7286SMing Lei static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
20883b6592f7SJens Axboe {
2089612b7286SMing Lei 	struct nvme_dev *dev = affd->priv;
20902a5bcfddSWeiping Zhang 	unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues;
2091c45b1fa2SMing Lei 
20923b6592f7SJens Axboe 	/*
2093ee0d96d3SBaolin Wang 	 * If there is no interrupt available for queues, ensure that
2094612b7286SMing Lei 	 * the default queue is set to 1. The affinity set size is
2095612b7286SMing Lei 	 * also set to one, but the irq core ignores it for this case.
2096612b7286SMing Lei 	 *
2097612b7286SMing Lei 	 * If only one interrupt is available or 'write_queues' == 0, combine
2098612b7286SMing Lei 	 * write and read queues.
2099612b7286SMing Lei 	 *
2100612b7286SMing Lei 	 * If 'write_queues' > 0, ensure it leaves room for at least one read
2101612b7286SMing Lei 	 * queue.
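	 *
	 * Example: nrirqs = 8 with nr_write_queues = 2 yields 2 default
	 * (write) queues and 6 read queues; with nr_write_queues = 0 all
	 * eight interrupts stay in a single shared default set.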
21023b6592f7SJens Axboe */ 2103612b7286SMing Lei if (!nrirqs) { 2104612b7286SMing Lei nrirqs = 1; 2105612b7286SMing Lei nr_read_queues = 0; 21062a5bcfddSWeiping Zhang } else if (nrirqs == 1 || !nr_write_queues) { 2107612b7286SMing Lei nr_read_queues = 0; 21082a5bcfddSWeiping Zhang } else if (nr_write_queues >= nrirqs) { 2109612b7286SMing Lei nr_read_queues = 1; 21103b6592f7SJens Axboe } else { 21112a5bcfddSWeiping Zhang nr_read_queues = nrirqs - nr_write_queues; 21123b6592f7SJens Axboe } 2113612b7286SMing Lei 2114612b7286SMing Lei dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; 2115612b7286SMing Lei affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; 2116612b7286SMing Lei dev->io_queues[HCTX_TYPE_READ] = nr_read_queues; 2117612b7286SMing Lei affd->set_size[HCTX_TYPE_READ] = nr_read_queues; 2118612b7286SMing Lei affd->nr_sets = nr_read_queues ? 2 : 1; 21193b6592f7SJens Axboe } 21203b6592f7SJens Axboe 21216451fe73SJens Axboe static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) 21223b6592f7SJens Axboe { 21233b6592f7SJens Axboe struct pci_dev *pdev = to_pci_dev(dev->dev); 21243b6592f7SJens Axboe struct irq_affinity affd = { 21253b6592f7SJens Axboe .pre_vectors = 1, 2126612b7286SMing Lei .calc_sets = nvme_calc_irq_sets, 2127612b7286SMing Lei .priv = dev, 21283b6592f7SJens Axboe }; 212921cc2f3fSJeffle Xu unsigned int irq_queues, poll_queues; 21306451fe73SJens Axboe 21316451fe73SJens Axboe /* 213221cc2f3fSJeffle Xu * Poll queues don't need interrupts, but we need at least one I/O queue 213321cc2f3fSJeffle Xu * left over for non-polled I/O. 21346451fe73SJens Axboe */ 213521cc2f3fSJeffle Xu poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1); 213621cc2f3fSJeffle Xu dev->io_queues[HCTX_TYPE_POLL] = poll_queues; 21373b6592f7SJens Axboe 213821cc2f3fSJeffle Xu /* 213921cc2f3fSJeffle Xu * Initialize for the single interrupt case, will be updated in 214021cc2f3fSJeffle Xu * nvme_calc_irq_sets(). 214121cc2f3fSJeffle Xu */ 2142612b7286SMing Lei dev->io_queues[HCTX_TYPE_DEFAULT] = 1; 2143612b7286SMing Lei dev->io_queues[HCTX_TYPE_READ] = 0; 21443b6592f7SJens Axboe 214566341331SBenjamin Herrenschmidt /* 214621cc2f3fSJeffle Xu * We need interrupts for the admin queue and each non-polled I/O queue, 214721cc2f3fSJeffle Xu * but some Apple controllers require all queues to use the first 214821cc2f3fSJeffle Xu * vector. 214966341331SBenjamin Herrenschmidt */ 215066341331SBenjamin Herrenschmidt irq_queues = 1; 215121cc2f3fSJeffle Xu if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)) 215221cc2f3fSJeffle Xu irq_queues += (nr_io_queues - poll_queues); 2153612b7286SMing Lei return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, 21543b6592f7SJens Axboe PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd); 21553b6592f7SJens Axboe } 21563b6592f7SJens Axboe 21578fae268bSKeith Busch static void nvme_disable_io_queues(struct nvme_dev *dev) 21588fae268bSKeith Busch { 21598fae268bSKeith Busch if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq)) 21608fae268bSKeith Busch __nvme_disable_io_queues(dev, nvme_admin_delete_cq); 21618fae268bSKeith Busch } 21628fae268bSKeith Busch 21632a5bcfddSWeiping Zhang static unsigned int nvme_max_io_queues(struct nvme_dev *dev) 21642a5bcfddSWeiping Zhang { 2165e3aef095SNiklas Schnelle /* 2166e3aef095SNiklas Schnelle * If tags are shared with admin queue (Apple bug), then 2167e3aef095SNiklas Schnelle * make sure we only use one IO queue. 
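	 * (NVME_QUIRK_SHARED_TAGS marks the Apple controllers that share a
	 * single tag space between the admin and I/O queues, as noted in
	 * nvme_dev_add() below.)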
2168e3aef095SNiklas Schnelle 	 */
2169e3aef095SNiklas Schnelle 	if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS)
2170e3aef095SNiklas Schnelle 		return 1;
21712a5bcfddSWeiping Zhang 	return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues;
21722a5bcfddSWeiping Zhang }
21732a5bcfddSWeiping Zhang 
217457dacad5SJay Sternberg static int nvme_setup_io_queues(struct nvme_dev *dev)
217557dacad5SJay Sternberg {
2176147b27e4SSagi Grimberg 	struct nvme_queue *adminq = &dev->queues[0];
217757dacad5SJay Sternberg 	struct pci_dev *pdev = to_pci_dev(dev->dev);
21782a5bcfddSWeiping Zhang 	unsigned int nr_io_queues;
217997f6ef64SXu Yu 	unsigned long size;
21802a5bcfddSWeiping Zhang 	int result;
218157dacad5SJay Sternberg 
21822a5bcfddSWeiping Zhang 	/*
21832a5bcfddSWeiping Zhang 	 * Sample the module parameters once at reset time so that we have
21842a5bcfddSWeiping Zhang 	 * stable values to work with.
21852a5bcfddSWeiping Zhang 	 */
21862a5bcfddSWeiping Zhang 	dev->nr_write_queues = write_queues;
21872a5bcfddSWeiping Zhang 	dev->nr_poll_queues = poll_queues;
2188d38e9f04SBenjamin Herrenschmidt 
2189ff4e5fbaSNiklas Schnelle 	nr_io_queues = dev->nr_allocated_queues - 1;
21909a0be7abSChristoph Hellwig 	result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
21919a0be7abSChristoph Hellwig 	if (result < 0)
219257dacad5SJay Sternberg 		return result;
21939a0be7abSChristoph Hellwig 
2194f5fa90dcSChristoph Hellwig 	if (nr_io_queues == 0)
2195a5229050SKeith Busch 		return 0;
219657dacad5SJay Sternberg 
2197*e4b9852aSCasey Chen 	/*
2198*e4b9852aSCasey Chen 	 * Free IRQ resources as soon as the NVMEQ_ENABLED bit transitions
2199*e4b9852aSCasey Chen 	 * from set to unset. If there is a window where it is truly freed,
2200*e4b9852aSCasey Chen 	 * pci_free_irq_vectors() jumping into this window will crash.
2201*e4b9852aSCasey Chen 	 * Also take the lock to avoid racing with pci_free_irq_vectors() in
2202*e4b9852aSCasey Chen 	 * the nvme_dev_disable() path.
2203*e4b9852aSCasey Chen 	 */
2204*e4b9852aSCasey Chen 	result = nvme_setup_io_queues_trylock(dev);
2205*e4b9852aSCasey Chen 	if (result)
2206*e4b9852aSCasey Chen 		return result;
2207*e4b9852aSCasey Chen 	if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
2208*e4b9852aSCasey Chen 		pci_free_irq(pdev, 0, adminq);
22094e224106SChristoph Hellwig 
22100f238ff5SLogan Gunthorpe 	if (dev->cmb_use_sqes) {
221157dacad5SJay Sternberg 		result = nvme_cmb_qdepth(dev, nr_io_queues,
221257dacad5SJay Sternberg 				sizeof(struct nvme_command));
221357dacad5SJay Sternberg 		if (result > 0)
221457dacad5SJay Sternberg 			dev->q_depth = result;
221557dacad5SJay Sternberg 		else
22160f238ff5SLogan Gunthorpe 			dev->cmb_use_sqes = false;
221757dacad5SJay Sternberg 	}
221857dacad5SJay Sternberg 
221957dacad5SJay Sternberg 	do {
222097f6ef64SXu Yu 		size = db_bar_size(dev, nr_io_queues);
222197f6ef64SXu Yu 		result = nvme_remap_bar(dev, size);
222297f6ef64SXu Yu 		if (!result)
222357dacad5SJay Sternberg 			break;
2224*e4b9852aSCasey Chen 		if (!--nr_io_queues) {
2225*e4b9852aSCasey Chen 			result = -ENOMEM;
2226*e4b9852aSCasey Chen 			goto out_unlock;
2227*e4b9852aSCasey Chen 		}
222857dacad5SJay Sternberg 	} while (1);
222957dacad5SJay Sternberg 	adminq->q_db = dev->dbs;
223057dacad5SJay Sternberg 
22318fae268bSKeith Busch  retry:
223257dacad5SJay Sternberg 	/* Deregister the admin queue's interrupt */
2233*e4b9852aSCasey Chen 	if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags))
22340ff199cbSChristoph Hellwig 		pci_free_irq(pdev, 0, adminq);
223557dacad5SJay Sternberg 
223657dacad5SJay Sternberg 	/*
223757dacad5SJay Sternberg 	 * If we enabled MSI-X early (because INTx is not used), disable it
223857dacad5SJay Sternberg 	 * again before setting up the full range we need.
223957dacad5SJay Sternberg 	 */
2240dca51e78SChristoph Hellwig 	pci_free_irq_vectors(pdev);
22413b6592f7SJens Axboe 
22423b6592f7SJens Axboe 	result = nvme_setup_irqs(dev, nr_io_queues);
2243*e4b9852aSCasey Chen 	if (result <= 0) {
2244*e4b9852aSCasey Chen 		result = -EIO;
2245*e4b9852aSCasey Chen 		goto out_unlock;
2246*e4b9852aSCasey Chen 	}
22473b6592f7SJens Axboe 
224822b55601SKeith Busch 	dev->num_vecs = result;
22494b04cc6aSJens Axboe 	result = max(result - 1, 1);
2250e20ba6e1SChristoph Hellwig 	dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL];
225157dacad5SJay Sternberg 
225257dacad5SJay Sternberg 	/*
225357dacad5SJay Sternberg 	 * Should investigate if there's a performance win from allocating
225457dacad5SJay Sternberg 	 * more queues than interrupt vectors; it might allow the submission
225557dacad5SJay Sternberg 	 * path to scale better, even if the receive path is limited by the
225657dacad5SJay Sternberg 	 * number of interrupts.
225757dacad5SJay Sternberg */ 2258dca51e78SChristoph Hellwig result = queue_request_irq(adminq); 22597c349ddeSKeith Busch if (result) 2260*e4b9852aSCasey Chen goto out_unlock; 22614e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &adminq->flags); 2262*e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 22638fae268bSKeith Busch 22648fae268bSKeith Busch result = nvme_create_io_queues(dev); 22658fae268bSKeith Busch if (result || dev->online_queues < 2) 22668fae268bSKeith Busch return result; 22678fae268bSKeith Busch 22688fae268bSKeith Busch if (dev->online_queues - 1 < dev->max_qid) { 22698fae268bSKeith Busch nr_io_queues = dev->online_queues - 1; 22708fae268bSKeith Busch nvme_disable_io_queues(dev); 2271*e4b9852aSCasey Chen result = nvme_setup_io_queues_trylock(dev); 2272*e4b9852aSCasey Chen if (result) 2273*e4b9852aSCasey Chen return result; 22748fae268bSKeith Busch nvme_suspend_io_queues(dev); 22758fae268bSKeith Busch goto retry; 22768fae268bSKeith Busch } 22778fae268bSKeith Busch dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", 22788fae268bSKeith Busch dev->io_queues[HCTX_TYPE_DEFAULT], 22798fae268bSKeith Busch dev->io_queues[HCTX_TYPE_READ], 22808fae268bSKeith Busch dev->io_queues[HCTX_TYPE_POLL]); 22818fae268bSKeith Busch return 0; 2282*e4b9852aSCasey Chen out_unlock: 2283*e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 2284*e4b9852aSCasey Chen return result; 228557dacad5SJay Sternberg } 228657dacad5SJay Sternberg 22872a842acaSChristoph Hellwig static void nvme_del_queue_end(struct request *req, blk_status_t error) 2288db3cbfffSKeith Busch { 2289db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 2290db3cbfffSKeith Busch 2291db3cbfffSKeith Busch blk_mq_free_request(req); 2292d1ed6aa1SChristoph Hellwig complete(&nvmeq->delete_done); 2293db3cbfffSKeith Busch } 2294db3cbfffSKeith Busch 22952a842acaSChristoph Hellwig static void nvme_del_cq_end(struct request *req, blk_status_t error) 2296db3cbfffSKeith Busch { 2297db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 2298db3cbfffSKeith Busch 2299d1ed6aa1SChristoph Hellwig if (error) 2300d1ed6aa1SChristoph Hellwig set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 2301db3cbfffSKeith Busch 2302db3cbfffSKeith Busch nvme_del_queue_end(req, error); 2303db3cbfffSKeith Busch } 2304db3cbfffSKeith Busch 2305db3cbfffSKeith Busch static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) 2306db3cbfffSKeith Busch { 2307db3cbfffSKeith Busch struct request_queue *q = nvmeq->dev->ctrl.admin_q; 2308db3cbfffSKeith Busch struct request *req; 2309f66e2804SChaitanya Kulkarni struct nvme_command cmd = { }; 2310db3cbfffSKeith Busch 2311db3cbfffSKeith Busch cmd.delete_queue.opcode = opcode; 2312db3cbfffSKeith Busch cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); 2313db3cbfffSKeith Busch 231439dfe844SChaitanya Kulkarni req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT); 2315db3cbfffSKeith Busch if (IS_ERR(req)) 2316db3cbfffSKeith Busch return PTR_ERR(req); 2317db3cbfffSKeith Busch 2318db3cbfffSKeith Busch req->end_io_data = nvmeq; 2319db3cbfffSKeith Busch 2320d1ed6aa1SChristoph Hellwig init_completion(&nvmeq->delete_done); 23218eeed0b5SGuoqing Jiang blk_execute_rq_nowait(NULL, req, false, 2322db3cbfffSKeith Busch opcode == nvme_admin_delete_cq ? 
2323db3cbfffSKeith Busch nvme_del_cq_end : nvme_del_queue_end); 2324db3cbfffSKeith Busch return 0; 2325db3cbfffSKeith Busch } 2326db3cbfffSKeith Busch 23278fae268bSKeith Busch static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode) 2328db3cbfffSKeith Busch { 23295271edd4SChristoph Hellwig int nr_queues = dev->online_queues - 1, sent = 0; 2330db3cbfffSKeith Busch unsigned long timeout; 2331db3cbfffSKeith Busch 2332db3cbfffSKeith Busch retry: 2333dc96f938SChaitanya Kulkarni timeout = NVME_ADMIN_TIMEOUT; 23345271edd4SChristoph Hellwig while (nr_queues > 0) { 23355271edd4SChristoph Hellwig if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) 2336db3cbfffSKeith Busch break; 23375271edd4SChristoph Hellwig nr_queues--; 23385271edd4SChristoph Hellwig sent++; 23395271edd4SChristoph Hellwig } 2340d1ed6aa1SChristoph Hellwig while (sent) { 2341d1ed6aa1SChristoph Hellwig struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; 2342d1ed6aa1SChristoph Hellwig 2343d1ed6aa1SChristoph Hellwig timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, 23445271edd4SChristoph Hellwig timeout); 2345db3cbfffSKeith Busch if (timeout == 0) 23465271edd4SChristoph Hellwig return false; 2347d1ed6aa1SChristoph Hellwig 2348d1ed6aa1SChristoph Hellwig sent--; 23495271edd4SChristoph Hellwig if (nr_queues) 2350db3cbfffSKeith Busch goto retry; 2351db3cbfffSKeith Busch } 23525271edd4SChristoph Hellwig return true; 2353db3cbfffSKeith Busch } 2354db3cbfffSKeith Busch 23555d02a5c1SKeith Busch static void nvme_dev_add(struct nvme_dev *dev) 235657dacad5SJay Sternberg { 23572b1b7e78SJianchao Wang int ret; 23582b1b7e78SJianchao Wang 23595bae7f73SChristoph Hellwig if (!dev->ctrl.tagset) { 2360c6d962aeSChristoph Hellwig dev->tagset.ops = &nvme_mq_ops; 236157dacad5SJay Sternberg dev->tagset.nr_hw_queues = dev->online_queues - 1; 23628fe34be1Syangerkun dev->tagset.nr_maps = 2; /* default + read */ 2363ed92ad37SChristoph Hellwig if (dev->io_queues[HCTX_TYPE_POLL]) 2364ed92ad37SChristoph Hellwig dev->tagset.nr_maps++; 236557dacad5SJay Sternberg dev->tagset.timeout = NVME_IO_TIMEOUT; 2366d4ec47f1SMax Gurtovoy dev->tagset.numa_node = dev->ctrl.numa_node; 236761f3b896SChaitanya Kulkarni dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth, 236861f3b896SChaitanya Kulkarni BLK_MQ_MAX_DEPTH) - 1; 2369d43f1ccfSChristoph Hellwig dev->tagset.cmd_size = sizeof(struct nvme_iod); 237057dacad5SJay Sternberg dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE; 237157dacad5SJay Sternberg dev->tagset.driver_data = dev; 237257dacad5SJay Sternberg 2373d38e9f04SBenjamin Herrenschmidt /* 2374d38e9f04SBenjamin Herrenschmidt * Some Apple controllers require tags to be unique 2375d38e9f04SBenjamin Herrenschmidt * across the admin and IO queues, so reserve the first 2376d38e9f04SBenjamin Herrenschmidt * 32 tags of the IO queue.
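 * (NVME_AQ_DEPTH, assigned to reserved_tags below, is that 32.)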
2377d38e9f04SBenjamin Herrenschmidt */ 2378d38e9f04SBenjamin Herrenschmidt if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) 2379d38e9f04SBenjamin Herrenschmidt dev->tagset.reserved_tags = NVME_AQ_DEPTH; 2380d38e9f04SBenjamin Herrenschmidt 23812b1b7e78SJianchao Wang ret = blk_mq_alloc_tag_set(&dev->tagset); 23822b1b7e78SJianchao Wang if (ret) { 23832b1b7e78SJianchao Wang dev_warn(dev->ctrl.device, 23842b1b7e78SJianchao Wang "IO queues tagset allocation failed %d\n", ret); 23855d02a5c1SKeith Busch return; 23862b1b7e78SJianchao Wang } 23875bae7f73SChristoph Hellwig dev->ctrl.tagset = &dev->tagset; 2388949928c1SKeith Busch } else { 2389949928c1SKeith Busch blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); 2390949928c1SKeith Busch 2391949928c1SKeith Busch /* Free previously allocated queues that are no longer usable */ 2392949928c1SKeith Busch nvme_free_queues(dev, dev->online_queues); 239357dacad5SJay Sternberg } 2394949928c1SKeith Busch 2395e8fd41bbSMaxim Levitsky nvme_dbbuf_set(dev); 239657dacad5SJay Sternberg } 239757dacad5SJay Sternberg 2398b00a726aSKeith Busch static int nvme_pci_enable(struct nvme_dev *dev) 239957dacad5SJay Sternberg { 2400b00a726aSKeith Busch int result = -ENOMEM; 240157dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 24024bdf2603SFilippo Sironi int dma_address_bits = 64; 240357dacad5SJay Sternberg 240457dacad5SJay Sternberg if (pci_enable_device_mem(pdev)) 240557dacad5SJay Sternberg return result; 240657dacad5SJay Sternberg 240757dacad5SJay Sternberg pci_set_master(pdev); 240857dacad5SJay Sternberg 24094bdf2603SFilippo Sironi if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48) 24104bdf2603SFilippo Sironi dma_address_bits = 48; 24114bdf2603SFilippo Sironi if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits))) 241257dacad5SJay Sternberg goto disable; 241357dacad5SJay Sternberg 24147a67cbeaSChristoph Hellwig if (readl(dev->bar + NVME_REG_CSTS) == -1) { 241557dacad5SJay Sternberg result = -ENODEV; 2416b00a726aSKeith Busch goto disable; 241757dacad5SJay Sternberg } 241857dacad5SJay Sternberg 241957dacad5SJay Sternberg /* 2420a5229050SKeith Busch * Some devices and/or platforms don't advertise or work with INTx 2421a5229050SKeith Busch * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll 2422a5229050SKeith Busch * adjust this later. 242357dacad5SJay Sternberg */ 2424dca51e78SChristoph Hellwig result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); 2425dca51e78SChristoph Hellwig if (result < 0) 2426dca51e78SChristoph Hellwig return result; 242757dacad5SJay Sternberg 242820d0dfe6SSagi Grimberg dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 24297a67cbeaSChristoph Hellwig 24307442ddceSJohn Garry dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, 2431b27c1e68Sweiping zhang io_queue_depth); 2432aa22c8e6SSagi Grimberg dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ 243320d0dfe6SSagi Grimberg dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); 24347a67cbeaSChristoph Hellwig dev->dbs = dev->bar + 4096; 24351f390c1fSStephan Günther 24361f390c1fSStephan Günther /* 243766341331SBenjamin Herrenschmidt * Some Apple controllers require a non-standard SQE size. 243866341331SBenjamin Herrenschmidt * Interestingly they also seem to ignore the CC:IOSQES register 243966341331SBenjamin Herrenschmidt * so we don't bother updating it here. 
244066341331SBenjamin Herrenschmidt */ 244166341331SBenjamin Herrenschmidt if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES) 244266341331SBenjamin Herrenschmidt dev->io_sqes = 7; 244366341331SBenjamin Herrenschmidt else 2444c1e0cc7eSBenjamin Herrenschmidt dev->io_sqes = NVME_NVM_IOSQES; 24451f390c1fSStephan Günther 24461f390c1fSStephan Günther /* 24471f390c1fSStephan Günther * Temporary fix for the Apple controller found in the MacBook8,1 and 24481f390c1fSStephan Günther * some MacBook7,1 to avoid controller resets and data loss. 24491f390c1fSStephan Günther */ 24501f390c1fSStephan Günther if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { 24511f390c1fSStephan Günther dev->q_depth = 2; 24529bdcfb10SChristoph Hellwig dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " 24539bdcfb10SChristoph Hellwig "set queue depth=%u to work around controller resets\n", 24541f390c1fSStephan Günther dev->q_depth); 2455d554b5e1SMartin K. Petersen } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && 2456d554b5e1SMartin K. Petersen (pdev->device == 0xa821 || pdev->device == 0xa822) && 245720d0dfe6SSagi Grimberg NVME_CAP_MQES(dev->ctrl.cap) == 0) { 2458d554b5e1SMartin K. Petersen dev->q_depth = 64; 2459d554b5e1SMartin K. Petersen dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " 2460d554b5e1SMartin K. Petersen "set queue depth=%u\n", dev->q_depth); 24611f390c1fSStephan Günther } 24621f390c1fSStephan Günther 2463d38e9f04SBenjamin Herrenschmidt /* 2464d38e9f04SBenjamin Herrenschmidt * Controllers with the shared tags quirk need the IO queue to be 2465d38e9f04SBenjamin Herrenschmidt * big enough so that we get 32 tags for the admin queue 2466d38e9f04SBenjamin Herrenschmidt */ 2467d38e9f04SBenjamin Herrenschmidt if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) && 2468d38e9f04SBenjamin Herrenschmidt (dev->q_depth < (NVME_AQ_DEPTH + 2))) { 2469d38e9f04SBenjamin Herrenschmidt dev->q_depth = NVME_AQ_DEPTH + 2; 2470d38e9f04SBenjamin Herrenschmidt dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n", 2471d38e9f04SBenjamin Herrenschmidt dev->q_depth); 2472d38e9f04SBenjamin Herrenschmidt } 2473d38e9f04SBenjamin Herrenschmidt 2474d38e9f04SBenjamin Herrenschmidt 2475f65efd6dSChristoph Hellwig nvme_map_cmb(dev); 2476202021c1SStephen Bates 2477a0a3408eSKeith Busch pci_enable_pcie_error_reporting(pdev); 2478a0a3408eSKeith Busch pci_save_state(pdev); 247957dacad5SJay Sternberg return 0; 248057dacad5SJay Sternberg 248157dacad5SJay Sternberg disable: 248257dacad5SJay Sternberg pci_disable_device(pdev); 248357dacad5SJay Sternberg return result; 248457dacad5SJay Sternberg } 248557dacad5SJay Sternberg 248657dacad5SJay Sternberg static void nvme_dev_unmap(struct nvme_dev *dev) 248757dacad5SJay Sternberg { 2488b00a726aSKeith Busch if (dev->bar) 2489b00a726aSKeith Busch iounmap(dev->bar); 2490a1f447b3SJohannes Thumshirn pci_release_mem_regions(to_pci_dev(dev->dev)); 2491b00a726aSKeith Busch } 2492b00a726aSKeith Busch 2493b00a726aSKeith Busch static void nvme_pci_disable(struct nvme_dev *dev) 2494b00a726aSKeith Busch { 249557dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 249657dacad5SJay Sternberg 2497dca51e78SChristoph Hellwig pci_free_irq_vectors(pdev); 249857dacad5SJay Sternberg 2499a0a3408eSKeith Busch if (pci_is_enabled(pdev)) { 2500a0a3408eSKeith Busch pci_disable_pcie_error_reporting(pdev); 250157dacad5SJay Sternberg pci_disable_device(pdev); 250257dacad5SJay Sternberg } 2503a0a3408eSKeith Busch } 250457dacad5SJay Sternberg 2505a5cdb68cSKeith Busch static void 
nvme_dev_disable(struct nvme_dev *dev, bool shutdown) 250657dacad5SJay Sternberg { 2507e43269e6SKeith Busch bool dead = true, freeze = false; 2508302ad8ccSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 250957dacad5SJay Sternberg 251077bf25eaSKeith Busch mutex_lock(&dev->shutdown_lock); 2511302ad8ccSKeith Busch if (pci_is_enabled(pdev)) { 2512302ad8ccSKeith Busch u32 csts = readl(dev->bar + NVME_REG_CSTS); 2513302ad8ccSKeith Busch 2514ebef7368SKeith Busch if (dev->ctrl.state == NVME_CTRL_LIVE || 2515e43269e6SKeith Busch dev->ctrl.state == NVME_CTRL_RESETTING) { 2516e43269e6SKeith Busch freeze = true; 2517302ad8ccSKeith Busch nvme_start_freeze(&dev->ctrl); 2518e43269e6SKeith Busch } 2519302ad8ccSKeith Busch dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || 2520302ad8ccSKeith Busch pdev->error_state != pci_channel_io_normal); 252157dacad5SJay Sternberg } 2522c21377f8SGabriel Krisman Bertazi 2523302ad8ccSKeith Busch /* 2524302ad8ccSKeith Busch * Give the controller a chance to complete all entered requests if 2525302ad8ccSKeith Busch * doing a safe shutdown. 2526302ad8ccSKeith Busch */ 2527e43269e6SKeith Busch if (!dead && shutdown && freeze) 2528302ad8ccSKeith Busch nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); 252987ad72a5SChristoph Hellwig 25309a915a5bSJianchao Wang nvme_stop_queues(&dev->ctrl); 25319a915a5bSJianchao Wang 253264ee0ac0SKeith Busch if (!dead && dev->ctrl.queue_count > 0) { 25338fae268bSKeith Busch nvme_disable_io_queues(dev); 2534a5cdb68cSKeith Busch nvme_disable_admin_queue(dev, shutdown); 253557dacad5SJay Sternberg } 25368fae268bSKeith Busch nvme_suspend_io_queues(dev); 25378fae268bSKeith Busch nvme_suspend_queue(&dev->queues[0]); 2538b00a726aSKeith Busch nvme_pci_disable(dev); 2539fa46c6fbSKeith Busch nvme_reap_pending_cqes(dev); 254057dacad5SJay Sternberg 2541e1958e65SMing Lin blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); 2542e1958e65SMing Lin blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl); 2543622b8b68SMing Lei blk_mq_tagset_wait_completed_request(&dev->tagset); 2544622b8b68SMing Lei blk_mq_tagset_wait_completed_request(&dev->admin_tagset); 2545302ad8ccSKeith Busch 2546302ad8ccSKeith Busch /* 2547302ad8ccSKeith Busch * The driver will not be starting up queues again if shutting down, so 2548302ad8ccSKeith Busch * it must flush all entered requests to their failed completion to 2549302ad8ccSKeith Busch * avoid deadlocking the blk-mq hot-cpu notifier.
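 * (The unquiesce below is what flushes them: once the queues are
 * unquiesced, the entered requests can run to their failed completion.)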
2550302ad8ccSKeith Busch */ 2551c8e9e9b7SKeith Busch if (shutdown) { 2552302ad8ccSKeith Busch nvme_start_queues(&dev->ctrl); 2553c8e9e9b7SKeith Busch if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) 2554c8e9e9b7SKeith Busch blk_mq_unquiesce_queue(dev->ctrl.admin_q); 2555c8e9e9b7SKeith Busch } 255677bf25eaSKeith Busch mutex_unlock(&dev->shutdown_lock); 255757dacad5SJay Sternberg } 255857dacad5SJay Sternberg 2559c1ac9a4bSKeith Busch static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) 2560c1ac9a4bSKeith Busch { 2561c1ac9a4bSKeith Busch if (!nvme_wait_reset(&dev->ctrl)) 2562c1ac9a4bSKeith Busch return -EBUSY; 2563c1ac9a4bSKeith Busch nvme_dev_disable(dev, shutdown); 2564c1ac9a4bSKeith Busch return 0; 2565c1ac9a4bSKeith Busch } 2566c1ac9a4bSKeith Busch 256757dacad5SJay Sternberg static int nvme_setup_prp_pools(struct nvme_dev *dev) 256857dacad5SJay Sternberg { 256957dacad5SJay Sternberg dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, 2570c61b82c7SChristoph Hellwig NVME_CTRL_PAGE_SIZE, 2571c61b82c7SChristoph Hellwig NVME_CTRL_PAGE_SIZE, 0); 257257dacad5SJay Sternberg if (!dev->prp_page_pool) 257357dacad5SJay Sternberg return -ENOMEM; 257457dacad5SJay Sternberg 257557dacad5SJay Sternberg /* Optimisation for I/Os between 4k and 128k */ 257657dacad5SJay Sternberg dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, 257757dacad5SJay Sternberg 256, 256, 0); 257857dacad5SJay Sternberg if (!dev->prp_small_pool) { 257957dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 258057dacad5SJay Sternberg return -ENOMEM; 258157dacad5SJay Sternberg } 258257dacad5SJay Sternberg return 0; 258357dacad5SJay Sternberg } 258457dacad5SJay Sternberg 258557dacad5SJay Sternberg static void nvme_release_prp_pools(struct nvme_dev *dev) 258657dacad5SJay Sternberg { 258757dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 258857dacad5SJay Sternberg dma_pool_destroy(dev->prp_small_pool); 258957dacad5SJay Sternberg } 259057dacad5SJay Sternberg 2591770597ecSKeith Busch static void nvme_free_tagset(struct nvme_dev *dev) 2592770597ecSKeith Busch { 2593770597ecSKeith Busch if (dev->tagset.tags) 2594770597ecSKeith Busch blk_mq_free_tag_set(&dev->tagset); 2595770597ecSKeith Busch dev->ctrl.tagset = NULL; 2596770597ecSKeith Busch } 2597770597ecSKeith Busch 25981673f1f0SChristoph Hellwig static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) 259957dacad5SJay Sternberg { 26001673f1f0SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 260157dacad5SJay Sternberg 2602f9f38e33SHelen Koike nvme_dbbuf_dma_free(dev); 2603770597ecSKeith Busch nvme_free_tagset(dev); 26041c63dc66SChristoph Hellwig if (dev->ctrl.admin_q) 26051c63dc66SChristoph Hellwig blk_put_queue(dev->ctrl.admin_q); 2606e286bcfcSScott Bauer free_opal_dev(dev->ctrl.opal_dev); 2607943e942eSJens Axboe mempool_destroy(dev->iod_mempool); 2608253fd4acSIsrael Rukshin put_device(dev->dev); 2609253fd4acSIsrael Rukshin kfree(dev->queues); 261057dacad5SJay Sternberg kfree(dev); 261157dacad5SJay Sternberg } 261257dacad5SJay Sternberg 26137c1ce408SChaitanya Kulkarni static void nvme_remove_dead_ctrl(struct nvme_dev *dev) 2614f58944e2SKeith Busch { 2615c1ac9a4bSKeith Busch /* 2616c1ac9a4bSKeith Busch * Set state to deleting now to avoid blocking nvme_wait_reset(), which 2617c1ac9a4bSKeith Busch * may be holding this pci_dev's device lock. 
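 * (nvme_reset_prepare() further down is one such path: it calls
 * nvme_disable_prepare_reset(), and thus nvme_wait_reset(), with the
 * device lock held.)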
2618c1ac9a4bSKeith Busch */ 2619c1ac9a4bSKeith Busch nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 2620d22524a4SChristoph Hellwig nvme_get_ctrl(&dev->ctrl); 262169d9a99cSKeith Busch nvme_dev_disable(dev, false); 26229f9cafc1SJianchao Wang nvme_kill_queues(&dev->ctrl); 262303e0f3a6SMing Lei if (!queue_work(nvme_wq, &dev->remove_work)) 2624f58944e2SKeith Busch nvme_put_ctrl(&dev->ctrl); 2625f58944e2SKeith Busch } 2626f58944e2SKeith Busch 2627fd634f41SChristoph Hellwig static void nvme_reset_work(struct work_struct *work) 262857dacad5SJay Sternberg { 2629d86c4d8eSChristoph Hellwig struct nvme_dev *dev = 2630d86c4d8eSChristoph Hellwig container_of(work, struct nvme_dev, ctrl.reset_work); 2631a98e58e5SScott Bauer bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); 2632e71afda4SChaitanya Kulkarni int result; 263357dacad5SJay Sternberg 2634e71afda4SChaitanya Kulkarni if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) { 2635e71afda4SChaitanya Kulkarni result = -ENODEV; 2636fd634f41SChristoph Hellwig goto out; 2637e71afda4SChaitanya Kulkarni } 2638fd634f41SChristoph Hellwig 2639fd634f41SChristoph Hellwig /* 2640fd634f41SChristoph Hellwig * If we're called to reset a live controller first shut it down before 2641fd634f41SChristoph Hellwig * moving on. 2642fd634f41SChristoph Hellwig */ 2643b00a726aSKeith Busch if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 2644a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 2645d6135c3aSKeith Busch nvme_sync_queues(&dev->ctrl); 2646fd634f41SChristoph Hellwig 26475c959d73SKeith Busch mutex_lock(&dev->shutdown_lock); 2648b00a726aSKeith Busch result = nvme_pci_enable(dev); 264957dacad5SJay Sternberg if (result) 26504726bcf3SKeith Busch goto out_unlock; 265157dacad5SJay Sternberg 265201ad0990SSagi Grimberg result = nvme_pci_configure_admin_queue(dev); 265357dacad5SJay Sternberg if (result) 26544726bcf3SKeith Busch goto out_unlock; 265557dacad5SJay Sternberg 265657dacad5SJay Sternberg result = nvme_alloc_admin_tags(dev); 265757dacad5SJay Sternberg if (result) 26584726bcf3SKeith Busch goto out_unlock; 265957dacad5SJay Sternberg 2660943e942eSJens Axboe /* 2661943e942eSJens Axboe * Limit the max command size to prevent iod->sg allocations going 2662943e942eSJens Axboe * over a single page. 2663943e942eSJens Axboe */ 26647637de31SChristoph Hellwig dev->ctrl.max_hw_sectors = min_t(u32, 26657637de31SChristoph Hellwig NVME_MAX_KB_SZ << 1, dma_max_mapping_size(dev->dev) >> 9); 2666943e942eSJens Axboe dev->ctrl.max_segments = NVME_MAX_SEGS; 2667a48bc520SChristoph Hellwig 2668a48bc520SChristoph Hellwig /* 2669a48bc520SChristoph Hellwig * Don't limit the IOMMU merged segment size. 2670a48bc520SChristoph Hellwig */ 2671a48bc520SChristoph Hellwig dma_set_max_seg_size(dev->dev, 0xffffffff); 26723d2d861eSJianxiong Gao dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1); 2673a48bc520SChristoph Hellwig 26745c959d73SKeith Busch mutex_unlock(&dev->shutdown_lock); 26755c959d73SKeith Busch 26765c959d73SKeith Busch /* 26775c959d73SKeith Busch * Introduce CONNECTING state from nvme-fc/rdma transports to mark the 26785c959d73SKeith Busch * initializing procedure here. 
26795c959d73SKeith Busch */ 26805c959d73SKeith Busch if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { 26815c959d73SKeith Busch dev_warn(dev->ctrl.device, 26825c959d73SKeith Busch "failed to mark controller CONNECTING\n"); 2683cee6c269SMinwoo Im result = -EBUSY; 26845c959d73SKeith Busch goto out; 26855c959d73SKeith Busch } 2686943e942eSJens Axboe 268795093350SMax Gurtovoy /* 268895093350SMax Gurtovoy * We do not support an SGL for metadata (yet), so we are limited to a 268995093350SMax Gurtovoy * single integrity segment for the separate metadata pointer. 269095093350SMax Gurtovoy */ 269195093350SMax Gurtovoy dev->ctrl.max_integrity_segments = 1; 269295093350SMax Gurtovoy 2693f21c4769SChaitanya Kulkarni result = nvme_init_ctrl_finish(&dev->ctrl); 2694ce4541f4SChristoph Hellwig if (result) 2695f58944e2SKeith Busch goto out; 2696ce4541f4SChristoph Hellwig 2697e286bcfcSScott Bauer if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) { 2698e286bcfcSScott Bauer if (!dev->ctrl.opal_dev) 26994f1244c8SChristoph Hellwig dev->ctrl.opal_dev = 27004f1244c8SChristoph Hellwig init_opal_dev(&dev->ctrl, &nvme_sec_submit); 2701e286bcfcSScott Bauer else if (was_suspend) 27024f1244c8SChristoph Hellwig opal_unlock_from_suspend(dev->ctrl.opal_dev); 2703e286bcfcSScott Bauer } else { 2704e286bcfcSScott Bauer free_opal_dev(dev->ctrl.opal_dev); 2705e286bcfcSScott Bauer dev->ctrl.opal_dev = NULL; 2706e286bcfcSScott Bauer } 2707a98e58e5SScott Bauer 2708f9f38e33SHelen Koike if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) { 2709f9f38e33SHelen Koike result = nvme_dbbuf_dma_alloc(dev); 2710f9f38e33SHelen Koike if (result) 2711f9f38e33SHelen Koike dev_warn(dev->dev, 2712f9f38e33SHelen Koike "unable to allocate dma for dbbuf\n"); 2713f9f38e33SHelen Koike } 2714f9f38e33SHelen Koike 27159620cfbaSChristoph Hellwig if (dev->ctrl.hmpre) { 27169620cfbaSChristoph Hellwig result = nvme_setup_host_mem(dev); 27179620cfbaSChristoph Hellwig if (result < 0) 27189620cfbaSChristoph Hellwig goto out; 27199620cfbaSChristoph Hellwig } 272087ad72a5SChristoph Hellwig 272157dacad5SJay Sternberg result = nvme_setup_io_queues(dev); 272257dacad5SJay Sternberg if (result) 2723f58944e2SKeith Busch goto out; 272457dacad5SJay Sternberg 272521f033f7SKeith Busch /* 272657dacad5SJay Sternberg * Keep the controller around but remove all namespaces if we don't have 272757dacad5SJay Sternberg * any working I/O queue. 272857dacad5SJay Sternberg */ 272957dacad5SJay Sternberg if (dev->online_queues < 2) { 27301b3c47c1SSagi Grimberg dev_warn(dev->ctrl.device, "IO queues not created\n"); 27313b24774eSKeith Busch nvme_kill_queues(&dev->ctrl); 27325bae7f73SChristoph Hellwig nvme_remove_namespaces(&dev->ctrl); 2733770597ecSKeith Busch nvme_free_tagset(dev); 273457dacad5SJay Sternberg } else { 273525646264SKeith Busch nvme_start_queues(&dev->ctrl); 2736302ad8ccSKeith Busch nvme_wait_freeze(&dev->ctrl); 27375d02a5c1SKeith Busch nvme_dev_add(dev); 2738302ad8ccSKeith Busch nvme_unfreeze(&dev->ctrl); 273957dacad5SJay Sternberg } 274057dacad5SJay Sternberg 27412b1b7e78SJianchao Wang /* 27422b1b7e78SJianchao Wang * If only the admin queue is live, keep it for further investigation 27432b1b7e78SJianchao Wang * or recovery.
27442b1b7e78SJianchao Wang */ 27455d02a5c1SKeith Busch if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { 27462b1b7e78SJianchao Wang dev_warn(dev->ctrl.device, 27475d02a5c1SKeith Busch "failed to mark controller live state\n"); 2748e71afda4SChaitanya Kulkarni result = -ENODEV; 2749bb8d261eSChristoph Hellwig goto out; 2750bb8d261eSChristoph Hellwig } 275192911a55SChristoph Hellwig 2752d09f2b45SSagi Grimberg nvme_start_ctrl(&dev->ctrl); 275357dacad5SJay Sternberg return; 275457dacad5SJay Sternberg 27554726bcf3SKeith Busch out_unlock: 27564726bcf3SKeith Busch mutex_unlock(&dev->shutdown_lock); 275757dacad5SJay Sternberg out: 27587c1ce408SChaitanya Kulkarni if (result) 27597c1ce408SChaitanya Kulkarni dev_warn(dev->ctrl.device, 27607c1ce408SChaitanya Kulkarni "Removing after probe failure status: %d\n", result); 27617c1ce408SChaitanya Kulkarni nvme_remove_dead_ctrl(dev); 276257dacad5SJay Sternberg } 276357dacad5SJay Sternberg 27645c8809e6SChristoph Hellwig static void nvme_remove_dead_ctrl_work(struct work_struct *work) 276557dacad5SJay Sternberg { 27665c8809e6SChristoph Hellwig struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); 276757dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 276857dacad5SJay Sternberg 276957dacad5SJay Sternberg if (pci_get_drvdata(pdev)) 2770921920abSKeith Busch device_release_driver(&pdev->dev); 27711673f1f0SChristoph Hellwig nvme_put_ctrl(&dev->ctrl); 277257dacad5SJay Sternberg } 277357dacad5SJay Sternberg 27741c63dc66SChristoph Hellwig static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) 277557dacad5SJay Sternberg { 27761c63dc66SChristoph Hellwig *val = readl(to_nvme_dev(ctrl)->bar + off); 27771c63dc66SChristoph Hellwig return 0; 277857dacad5SJay Sternberg } 27791c63dc66SChristoph Hellwig 27805fd4ce1bSChristoph Hellwig static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) 27815fd4ce1bSChristoph Hellwig { 27825fd4ce1bSChristoph Hellwig writel(val, to_nvme_dev(ctrl)->bar + off); 27835fd4ce1bSChristoph Hellwig return 0; 27845fd4ce1bSChristoph Hellwig } 27855fd4ce1bSChristoph Hellwig 27867fd8930fSChristoph Hellwig static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) 27877fd8930fSChristoph Hellwig { 27883a8ecc93SArd Biesheuvel *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off); 27897fd8930fSChristoph Hellwig return 0; 27907fd8930fSChristoph Hellwig } 27917fd8930fSChristoph Hellwig 279297c12223SKeith Busch static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) 279397c12223SKeith Busch { 279497c12223SKeith Busch struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); 279597c12223SKeith Busch 27962db24e4aSMax Gurtovoy return snprintf(buf, size, "%s\n", dev_name(&pdev->dev)); 279797c12223SKeith Busch } 279897c12223SKeith Busch 27991c63dc66SChristoph Hellwig static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { 28001a353d85SMing Lin .name = "pcie", 2801e439bb12SSagi Grimberg .module = THIS_MODULE, 2802e0596ab2SLogan Gunthorpe .flags = NVME_F_METADATA_SUPPORTED | 2803e0596ab2SLogan Gunthorpe NVME_F_PCI_P2PDMA, 28041c63dc66SChristoph Hellwig .reg_read32 = nvme_pci_reg_read32, 28055fd4ce1bSChristoph Hellwig .reg_write32 = nvme_pci_reg_write32, 28067fd8930fSChristoph Hellwig .reg_read64 = nvme_pci_reg_read64, 28071673f1f0SChristoph Hellwig .free_ctrl = nvme_pci_free_ctrl, 2808f866fc42SChristoph Hellwig .submit_async_event = nvme_pci_submit_async_event, 280997c12223SKeith Busch .get_address = nvme_pci_get_address, 28101c63dc66SChristoph Hellwig }; 
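/*
 * Illustrative sketch, not part of the driver: how the transport-agnostic
 * core can poke controller registers through the ops table above, which is
 * registered via nvme_init_ctrl() in nvme_probe() below. The helper name is
 * hypothetical, and the ctrl->ops pointer is assumed from that registration;
 * for this transport, reg_read32() resolves to nvme_pci_reg_read32().
 */
static inline u32 nvme_example_read_csts(struct nvme_ctrl *ctrl)
{
	u32 csts = 0;

	/* Indirect MMIO read; works the same for any transport. */
	ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts);
	return csts;
}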
281157dacad5SJay Sternberg 2812b00a726aSKeith Busch static int nvme_dev_map(struct nvme_dev *dev) 2813b00a726aSKeith Busch { 2814b00a726aSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 2815b00a726aSKeith Busch 2816a1f447b3SJohannes Thumshirn if (pci_request_mem_regions(pdev, "nvme")) 2817b00a726aSKeith Busch return -ENODEV; 2818b00a726aSKeith Busch 281997f6ef64SXu Yu if (nvme_remap_bar(dev, NVME_REG_DBS + 4096)) 2820b00a726aSKeith Busch goto release; 2821b00a726aSKeith Busch 2822b00a726aSKeith Busch return 0; 2823b00a726aSKeith Busch release: 2824a1f447b3SJohannes Thumshirn pci_release_mem_regions(pdev); 2825b00a726aSKeith Busch return -ENODEV; 2826b00a726aSKeith Busch } 2827b00a726aSKeith Busch 28288427bbc2SKai-Heng Feng static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) 2829ff5350a8SAndy Lutomirski { 2830ff5350a8SAndy Lutomirski if (pdev->vendor == 0x144d && pdev->device == 0xa802) { 2831ff5350a8SAndy Lutomirski /* 2832ff5350a8SAndy Lutomirski * Several Samsung devices seem to drop off the PCIe bus 2833ff5350a8SAndy Lutomirski * randomly when APST is on and the deepest sleep state is used. 2834ff5350a8SAndy Lutomirski * This has been observed on a Samsung "SM951 NVMe SAMSUNG 2835ff5350a8SAndy Lutomirski * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD 2836ff5350a8SAndy Lutomirski * 950 PRO 256GB", but it seems to be restricted to two Dell 2837ff5350a8SAndy Lutomirski * laptops. 2838ff5350a8SAndy Lutomirski */ 2839ff5350a8SAndy Lutomirski if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") && 2840ff5350a8SAndy Lutomirski (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || 2841ff5350a8SAndy Lutomirski dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) 2842ff5350a8SAndy Lutomirski return NVME_QUIRK_NO_DEEPEST_PS; 28438427bbc2SKai-Heng Feng } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { 28448427bbc2SKai-Heng Feng /* 28458427bbc2SKai-Heng Feng * Samsung SSD 960 EVO drops off the PCIe bus after system 2846467c77d4SJarosław Janik * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as 2847467c77d4SJarosław Janik * within a few minutes after bootup on a Coffee Lake board - 2848467c77d4SJarosław Janik * ASUS PRIME Z370-A. 28498427bbc2SKai-Heng Feng */ 28508427bbc2SKai-Heng Feng if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && 2851467c77d4SJarosław Janik (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || 2852467c77d4SJarosław Janik dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) 28538427bbc2SKai-Heng Feng return NVME_QUIRK_NO_APST; 28541fae37acSShyjumon N } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 || 28551fae37acSShyjumon N pdev->device == 0xa808 || pdev->device == 0xa809)) || 28561fae37acSShyjumon N (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) { 28571fae37acSShyjumon N /* 28581fae37acSShyjumon N * Force host managed nvme power settings for 28591fae37acSShyjumon N * lowest idle power with quick resume latency on 28601fae37acSShyjumon N * Samsung and Toshiba SSDs, based on suspend behavior 28611fae37acSShyjumon N * on a Coffee Lake board in the LENOVO C640. 28621fae37acSShyjumon N */ 28631fae37acSShyjumon N if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) && 28641fae37acSShyjumon N dmi_match(DMI_BOARD_NAME, "LNVNB161216")) 28651fae37acSShyjumon N return NVME_QUIRK_SIMPLE_SUSPEND; 2866ff5350a8SAndy Lutomirski } 2867ff5350a8SAndy Lutomirski 2868ff5350a8SAndy Lutomirski return 0; 2869ff5350a8SAndy Lutomirski } 2870ff5350a8SAndy Lutomirski 287118119775SKeith Busch static void nvme_async_probe(void *data, async_cookie_t cookie)
287218119775SKeith Busch { 287318119775SKeith Busch struct nvme_dev *dev = data; 287480f513b5SKeith Busch 2875bd46a906SKeith Busch flush_work(&dev->ctrl.reset_work); 287618119775SKeith Busch flush_work(&dev->ctrl.scan_work); 287780f513b5SKeith Busch nvme_put_ctrl(&dev->ctrl); 287818119775SKeith Busch } 287918119775SKeith Busch 288057dacad5SJay Sternberg static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 288157dacad5SJay Sternberg { 288257dacad5SJay Sternberg int node, result = -ENOMEM; 288357dacad5SJay Sternberg struct nvme_dev *dev; 2884ff5350a8SAndy Lutomirski unsigned long quirks = id->driver_data; 2885943e942eSJens Axboe size_t alloc_size; 288657dacad5SJay Sternberg 288757dacad5SJay Sternberg node = dev_to_node(&pdev->dev); 288857dacad5SJay Sternberg if (node == NUMA_NO_NODE) 28892fa84351SMasayoshi Mizuma set_dev_node(&pdev->dev, first_memory_node); 289057dacad5SJay Sternberg 289157dacad5SJay Sternberg dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); 289257dacad5SJay Sternberg if (!dev) 289357dacad5SJay Sternberg return -ENOMEM; 2894147b27e4SSagi Grimberg 28952a5bcfddSWeiping Zhang dev->nr_write_queues = write_queues; 28962a5bcfddSWeiping Zhang dev->nr_poll_queues = poll_queues; 28972a5bcfddSWeiping Zhang dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1; 28982a5bcfddSWeiping Zhang dev->queues = kcalloc_node(dev->nr_allocated_queues, 28992a5bcfddSWeiping Zhang sizeof(struct nvme_queue), GFP_KERNEL, node); 290057dacad5SJay Sternberg if (!dev->queues) 290157dacad5SJay Sternberg goto free; 290257dacad5SJay Sternberg 290357dacad5SJay Sternberg dev->dev = get_device(&pdev->dev); 290457dacad5SJay Sternberg pci_set_drvdata(pdev, dev); 290557dacad5SJay Sternberg 2906b00a726aSKeith Busch result = nvme_dev_map(dev); 2907b00a726aSKeith Busch if (result) 2908b00c9b7aSChristophe JAILLET goto put_pci; 2909b00a726aSKeith Busch 2910d86c4d8eSChristoph Hellwig INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); 29115c8809e6SChristoph Hellwig INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work); 291277bf25eaSKeith Busch mutex_init(&dev->shutdown_lock); 2913f3ca80fcSChristoph Hellwig 2914f3ca80fcSChristoph Hellwig result = nvme_setup_prp_pools(dev); 2915f3ca80fcSChristoph Hellwig if (result) 2916b00c9b7aSChristophe JAILLET goto unmap; 2917f3ca80fcSChristoph Hellwig 29188427bbc2SKai-Heng Feng quirks |= check_vendor_combination_bug(pdev); 2919ff5350a8SAndy Lutomirski 29202744d7a0SMario Limonciello if (!noacpi && acpi_storage_d3(&pdev->dev)) { 2921df4f9bc4SDavid E. Box /* 2922df4f9bc4SDavid E. Box * Some systems use a BIOS workaround to ask for D3 on 2923df4f9bc4SDavid E. Box * platforms that support kernel managed suspend. 2924df4f9bc4SDavid E. Box */ 2925df4f9bc4SDavid E. Box dev_info(&pdev->dev, 2926df4f9bc4SDavid E. Box "platform quirk: setting simple suspend\n"); 2927df4f9bc4SDavid E. Box quirks |= NVME_QUIRK_SIMPLE_SUSPEND; 2928df4f9bc4SDavid E. Box } 2929df4f9bc4SDavid E. Box 2930943e942eSJens Axboe /* 2931943e942eSJens Axboe * Double check that our mempool alloc size will cover the biggest 2932943e942eSJens Axboe * command we support.
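 * (nvme_pci_iod_alloc_size() below computes that size, and the
 * WARN_ON_ONCE() after it guards the single-page assumption.)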
2933943e942eSJens Axboe */ 2934b13c6393SChaitanya Kulkarni alloc_size = nvme_pci_iod_alloc_size(); 2935943e942eSJens Axboe WARN_ON_ONCE(alloc_size > PAGE_SIZE); 2936943e942eSJens Axboe 2937943e942eSJens Axboe dev->iod_mempool = mempool_create_node(1, mempool_kmalloc, 2938943e942eSJens Axboe mempool_kfree, 2939943e942eSJens Axboe (void *) alloc_size, 2940943e942eSJens Axboe GFP_KERNEL, node); 2941943e942eSJens Axboe if (!dev->iod_mempool) { 2942943e942eSJens Axboe result = -ENOMEM; 2943943e942eSJens Axboe goto release_pools; 2944943e942eSJens Axboe } 2945943e942eSJens Axboe 2946b6e44b4cSKeith Busch result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, 2947b6e44b4cSKeith Busch quirks); 2948b6e44b4cSKeith Busch if (result) 2949b6e44b4cSKeith Busch goto release_mempool; 2950b6e44b4cSKeith Busch 29511b3c47c1SSagi Grimberg dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 29521b3c47c1SSagi Grimberg 2953bd46a906SKeith Busch nvme_reset_ctrl(&dev->ctrl); 295418119775SKeith Busch async_schedule(nvme_async_probe, dev); 29554caff8fcSSagi Grimberg 295657dacad5SJay Sternberg return 0; 295757dacad5SJay Sternberg 2958b6e44b4cSKeith Busch release_mempool: 2959b6e44b4cSKeith Busch mempool_destroy(dev->iod_mempool); 296057dacad5SJay Sternberg release_pools: 296157dacad5SJay Sternberg nvme_release_prp_pools(dev); 2962b00c9b7aSChristophe JAILLET unmap: 2963b00c9b7aSChristophe JAILLET nvme_dev_unmap(dev); 296457dacad5SJay Sternberg put_pci: 296557dacad5SJay Sternberg put_device(dev->dev); 296657dacad5SJay Sternberg free: 296757dacad5SJay Sternberg kfree(dev->queues); 296857dacad5SJay Sternberg kfree(dev); 296957dacad5SJay Sternberg return result; 297057dacad5SJay Sternberg } 297157dacad5SJay Sternberg 2972775755edSChristoph Hellwig static void nvme_reset_prepare(struct pci_dev *pdev) 297357dacad5SJay Sternberg { 297457dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 2975c1ac9a4bSKeith Busch 2976c1ac9a4bSKeith Busch /* 2977c1ac9a4bSKeith Busch * We don't need to check the return value from waiting for the reset 2978c1ac9a4bSKeith Busch * state as pci_dev device lock is held, making it impossible to race 2979c1ac9a4bSKeith Busch * with ->remove(). 2980c1ac9a4bSKeith Busch */ 2981c1ac9a4bSKeith Busch nvme_disable_prepare_reset(dev, false); 2982c1ac9a4bSKeith Busch nvme_sync_queues(&dev->ctrl); 2983775755edSChristoph Hellwig } 298457dacad5SJay Sternberg 2985775755edSChristoph Hellwig static void nvme_reset_done(struct pci_dev *pdev) 2986775755edSChristoph Hellwig { 2987f263fbb8SLinus Torvalds struct nvme_dev *dev = pci_get_drvdata(pdev); 2988c1ac9a4bSKeith Busch 2989c1ac9a4bSKeith Busch if (!nvme_try_sched_reset(&dev->ctrl)) 2990c1ac9a4bSKeith Busch flush_work(&dev->ctrl.reset_work); 299157dacad5SJay Sternberg } 299257dacad5SJay Sternberg 299357dacad5SJay Sternberg static void nvme_shutdown(struct pci_dev *pdev) 299457dacad5SJay Sternberg { 299557dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 29964e523547SBaolin Wang 2997c1ac9a4bSKeith Busch nvme_disable_prepare_reset(dev, true); 299857dacad5SJay Sternberg } 299957dacad5SJay Sternberg 3000f58944e2SKeith Busch /* 3001f58944e2SKeith Busch * The driver's remove may be called on a device in a partially initialized 3002f58944e2SKeith Busch * state. This function must not have any dependencies on the device state in 3003f58944e2SKeith Busch * order to proceed. 
3004f58944e2SKeith Busch */ 300557dacad5SJay Sternberg static void nvme_remove(struct pci_dev *pdev) 300657dacad5SJay Sternberg { 300757dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 300857dacad5SJay Sternberg 3009bb8d261eSChristoph Hellwig nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 301057dacad5SJay Sternberg pci_set_drvdata(pdev, NULL); 30110ff9d4e1SKeith Busch 30126db28edaSKeith Busch if (!pci_device_is_present(pdev)) { 30130ff9d4e1SKeith Busch nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); 30141d39e692SKeith Busch nvme_dev_disable(dev, true); 3015cb4bfda6SKeith Busch nvme_dev_remove_admin(dev); 30166db28edaSKeith Busch } 30170ff9d4e1SKeith Busch 3018d86c4d8eSChristoph Hellwig flush_work(&dev->ctrl.reset_work); 3019d09f2b45SSagi Grimberg nvme_stop_ctrl(&dev->ctrl); 3020d09f2b45SSagi Grimberg nvme_remove_namespaces(&dev->ctrl); 3021a5cdb68cSKeith Busch nvme_dev_disable(dev, true); 30229fe5c59fSKeith Busch nvme_release_cmb(dev); 302387ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 302457dacad5SJay Sternberg nvme_dev_remove_admin(dev); 302557dacad5SJay Sternberg nvme_free_queues(dev, 0); 302657dacad5SJay Sternberg nvme_release_prp_pools(dev); 3027b00a726aSKeith Busch nvme_dev_unmap(dev); 3028726612b6SIsrael Rukshin nvme_uninit_ctrl(&dev->ctrl); 302957dacad5SJay Sternberg } 303057dacad5SJay Sternberg 303157dacad5SJay Sternberg #ifdef CONFIG_PM_SLEEP 3032d916b1beSKeith Busch static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps) 3033d916b1beSKeith Busch { 3034d916b1beSKeith Busch return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps); 3035d916b1beSKeith Busch } 3036d916b1beSKeith Busch 3037d916b1beSKeith Busch static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps) 3038d916b1beSKeith Busch { 3039d916b1beSKeith Busch return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL); 3040d916b1beSKeith Busch } 3041d916b1beSKeith Busch 3042d916b1beSKeith Busch static int nvme_resume(struct device *dev) 3043d916b1beSKeith Busch { 3044d916b1beSKeith Busch struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 3045d916b1beSKeith Busch struct nvme_ctrl *ctrl = &ndev->ctrl; 3046d916b1beSKeith Busch 30474eaefe8cSRafael J. Wysocki if (ndev->last_ps == U32_MAX || 3048d916b1beSKeith Busch nvme_set_power_state(ctrl, ndev->last_ps) != 0) 3049c1ac9a4bSKeith Busch return nvme_try_sched_reset(&ndev->ctrl); 3050d916b1beSKeith Busch return 0; 3051d916b1beSKeith Busch } 3052d916b1beSKeith Busch 305357dacad5SJay Sternberg static int nvme_suspend(struct device *dev) 305457dacad5SJay Sternberg { 305557dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev); 305657dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev); 3057d916b1beSKeith Busch struct nvme_ctrl *ctrl = &ndev->ctrl; 3058d916b1beSKeith Busch int ret = -EBUSY; 3059d916b1beSKeith Busch 30604eaefe8cSRafael J. Wysocki ndev->last_ps = U32_MAX; 30614eaefe8cSRafael J. Wysocki 3062d916b1beSKeith Busch /* 3063d916b1beSKeith Busch * The platform does not remove power for a kernel managed suspend so 3064d916b1beSKeith Busch * use host managed nvme power settings for lowest idle power if 3065d916b1beSKeith Busch * possible. This should have quicker resume latency than a full device 3066d916b1beSKeith Busch * shutdown. But if the firmware is involved after the suspend or the 3067d916b1beSKeith Busch * device does not support any non-default power states, shut down the 3068d916b1beSKeith Busch * device fully. 30694eaefe8cSRafael J. Wysocki * 30704eaefe8cSRafael J. 
Wysocki * If ASPM is not enabled for the device, shut down the device and allow 30714eaefe8cSRafael J. Wysocki * the PCI bus layer to put it into D3 in order to take the PCIe link 30724eaefe8cSRafael J. Wysocki * down, so as to allow the platform to achieve its minimum low-power 30734eaefe8cSRafael J. Wysocki * state (which may not be possible if the link is up). 3074b97120b1SChristoph Hellwig * 3075b97120b1SChristoph Hellwig * If a host memory buffer is enabled, shut down the device as the NVMe 3076b97120b1SChristoph Hellwig * specification allows the device to access the host memory buffer in 3077b97120b1SChristoph Hellwig * host DRAM from all power states, but hosts will fail access to DRAM 3078b97120b1SChristoph Hellwig * during S3. 3079d916b1beSKeith Busch */ 30804eaefe8cSRafael J. Wysocki if (pm_suspend_via_firmware() || !ctrl->npss || 3081cb32de1bSMario Limonciello !pcie_aspm_enabled(pdev) || 3082b97120b1SChristoph Hellwig ndev->nr_host_mem_descs || 3083c1ac9a4bSKeith Busch (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) 3084c1ac9a4bSKeith Busch return nvme_disable_prepare_reset(ndev, true); 3085d916b1beSKeith Busch 3086d916b1beSKeith Busch nvme_start_freeze(ctrl); 3087d916b1beSKeith Busch nvme_wait_freeze(ctrl); 3088d916b1beSKeith Busch nvme_sync_queues(ctrl); 3089d916b1beSKeith Busch 30905d02a5c1SKeith Busch if (ctrl->state != NVME_CTRL_LIVE) 3091d916b1beSKeith Busch goto unfreeze; 3092d916b1beSKeith Busch 3093d916b1beSKeith Busch ret = nvme_get_power_state(ctrl, &ndev->last_ps); 3094d916b1beSKeith Busch if (ret < 0) 3095d916b1beSKeith Busch goto unfreeze; 3096d916b1beSKeith Busch 30977cbb5c6fSMario Limonciello /* 30987cbb5c6fSMario Limonciello * A saved state prevents pci pm from generically controlling the 30997cbb5c6fSMario Limonciello * device's power. If we're using protocol specific settings, we don't 31007cbb5c6fSMario Limonciello * want pci interfering. 31017cbb5c6fSMario Limonciello */ 31027cbb5c6fSMario Limonciello pci_save_state(pdev); 31037cbb5c6fSMario Limonciello 3104d916b1beSKeith Busch ret = nvme_set_power_state(ctrl, ctrl->npss); 3105d916b1beSKeith Busch if (ret < 0) 3106d916b1beSKeith Busch goto unfreeze; 3107d916b1beSKeith Busch 3108d916b1beSKeith Busch if (ret) { 31097cbb5c6fSMario Limonciello /* discard the saved state */ 31107cbb5c6fSMario Limonciello pci_load_saved_state(pdev, NULL); 31117cbb5c6fSMario Limonciello 3112d916b1beSKeith Busch /* 3113d916b1beSKeith Busch * Clearing npss forces a controller reset on resume. The 311405d3046fSGeert Uytterhoeven * correct value will be rediscovered then. 
3115d916b1beSKeith Busch */ 3116c1ac9a4bSKeith Busch ret = nvme_disable_prepare_reset(ndev, true); 3117d916b1beSKeith Busch ctrl->npss = 0; 3118d916b1beSKeith Busch } 3119d916b1beSKeith Busch unfreeze: 3120d916b1beSKeith Busch nvme_unfreeze(ctrl); 3121d916b1beSKeith Busch return ret; 3122d916b1beSKeith Busch } 3123d916b1beSKeith Busch 3124d916b1beSKeith Busch static int nvme_simple_suspend(struct device *dev) 3125d916b1beSKeith Busch { 3126d916b1beSKeith Busch struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 31274e523547SBaolin Wang 3128c1ac9a4bSKeith Busch return nvme_disable_prepare_reset(ndev, true); 312957dacad5SJay Sternberg } 313057dacad5SJay Sternberg 3131d916b1beSKeith Busch static int nvme_simple_resume(struct device *dev) 313257dacad5SJay Sternberg { 313357dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev); 313457dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev); 313557dacad5SJay Sternberg 3136c1ac9a4bSKeith Busch return nvme_try_sched_reset(&ndev->ctrl); 313757dacad5SJay Sternberg } 313857dacad5SJay Sternberg 313921774222SYueHaibing static const struct dev_pm_ops nvme_dev_pm_ops = { 3140d916b1beSKeith Busch .suspend = nvme_suspend, 3141d916b1beSKeith Busch .resume = nvme_resume, 3142d916b1beSKeith Busch .freeze = nvme_simple_suspend, 3143d916b1beSKeith Busch .thaw = nvme_simple_resume, 3144d916b1beSKeith Busch .poweroff = nvme_simple_suspend, 3145d916b1beSKeith Busch .restore = nvme_simple_resume, 3146d916b1beSKeith Busch }; 3147d916b1beSKeith Busch #endif /* CONFIG_PM_SLEEP */ 314857dacad5SJay Sternberg 3149a0a3408eSKeith Busch static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, 3150a0a3408eSKeith Busch pci_channel_state_t state) 3151a0a3408eSKeith Busch { 3152a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 3153a0a3408eSKeith Busch 3154a0a3408eSKeith Busch /* 3155a0a3408eSKeith Busch * A frozen channel requires a reset. When detected, this method will 3156a0a3408eSKeith Busch * shutdown the controller to quiesce. The controller will be restarted 3157a0a3408eSKeith Busch * after the slot reset through driver's slot_reset callback. 
3158a0a3408eSKeith Busch */ 3159a0a3408eSKeith Busch switch (state) { 3160a0a3408eSKeith Busch case pci_channel_io_normal: 3161a0a3408eSKeith Busch return PCI_ERS_RESULT_CAN_RECOVER; 3162a0a3408eSKeith Busch case pci_channel_io_frozen: 3163d011fb31SKeith Busch dev_warn(dev->ctrl.device, 3164d011fb31SKeith Busch "frozen state error detected, reset controller\n"); 3165a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 3166a0a3408eSKeith Busch return PCI_ERS_RESULT_NEED_RESET; 3167a0a3408eSKeith Busch case pci_channel_io_perm_failure: 3168d011fb31SKeith Busch dev_warn(dev->ctrl.device, 3169d011fb31SKeith Busch "failure state error detected, request disconnect\n"); 3170a0a3408eSKeith Busch return PCI_ERS_RESULT_DISCONNECT; 3171a0a3408eSKeith Busch } 3172a0a3408eSKeith Busch return PCI_ERS_RESULT_NEED_RESET; 3173a0a3408eSKeith Busch } 3174a0a3408eSKeith Busch 3175a0a3408eSKeith Busch static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) 3176a0a3408eSKeith Busch { 3177a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 3178a0a3408eSKeith Busch 31791b3c47c1SSagi Grimberg dev_info(dev->ctrl.device, "restart after slot reset\n"); 3180a0a3408eSKeith Busch pci_restore_state(pdev); 3181d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 3182a0a3408eSKeith Busch return PCI_ERS_RESULT_RECOVERED; 3183a0a3408eSKeith Busch } 3184a0a3408eSKeith Busch 3185a0a3408eSKeith Busch static void nvme_error_resume(struct pci_dev *pdev) 3186a0a3408eSKeith Busch { 318772cd4cc2SKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 318872cd4cc2SKeith Busch 318972cd4cc2SKeith Busch flush_work(&dev->ctrl.reset_work); 3190a0a3408eSKeith Busch } 3191a0a3408eSKeith Busch 319257dacad5SJay Sternberg static const struct pci_error_handlers nvme_err_handler = { 319357dacad5SJay Sternberg .error_detected = nvme_error_detected, 319457dacad5SJay Sternberg .slot_reset = nvme_slot_reset, 319557dacad5SJay Sternberg .resume = nvme_error_resume, 3196775755edSChristoph Hellwig .reset_prepare = nvme_reset_prepare, 3197775755edSChristoph Hellwig .reset_done = nvme_reset_done, 319857dacad5SJay Sternberg }; 319957dacad5SJay Sternberg 320057dacad5SJay Sternberg static const struct pci_device_id nvme_id_table[] = { 3201972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */ 320208095e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 3203e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 3204972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */ 320599466e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 3206e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 3207972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */ 320899466e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 3209e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 3210972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */ 3211f99cb7afSDavid Wayne Fugate .driver_data = NVME_QUIRK_STRIPE_SIZE | 3212f99cb7afSDavid Wayne Fugate NVME_QUIRK_DEALLOCATE_ZEROES, }, 321350af47d0SAndy Lutomirski { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ 32149abd68efSJens Axboe .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 32156c6aa2f2SAkinobu Mita NVME_QUIRK_MEDIUM_PRIO_SQ | 3216ce4cc313SDavid Milburn NVME_QUIRK_NO_TEMP_THRESH_CHANGE | 3217ce4cc313SDavid Milburn NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 32186299358dSJames Dingwall { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ 32196299358dSJames Dingwall .driver_data = 
NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3220540c801cSKeith Busch { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ 32217b210e4eSChristoph Hellwig .driver_data = NVME_QUIRK_IDENTIFY_CNS | 32227b210e4eSChristoph Hellwig NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 32235bedd3afSChristoph Hellwig { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ 32245bedd3afSChristoph Hellwig .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, }, 32250302ae60SMicah Parrish { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ 32265e112d3fSJulian Einwag .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | 32275e112d3fSJulian Einwag NVME_QUIRK_NO_NS_DESC_LIST, }, 322854adc010SGuilherme G. Piccoli { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ 322954adc010SGuilherme G. Piccoli .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 32308c97eeccSJeff Lien { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */ 32318c97eeccSJeff Lien .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3232015282c9SWenbo Wang { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ 3233015282c9SWenbo Wang .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3234d554b5e1SMartin K. Petersen { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ 3235d554b5e1SMartin K. Petersen .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3236d554b5e1SMartin K. Petersen { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ 32377ee5c78cSGopal Tiwari .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | 3238abbb5f59SDmitry Monakhov NVME_QUIRK_DISABLE_WRITE_ZEROES | 32397ee5c78cSGopal Tiwari NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3240c9e95c39SClaus Stovgaard { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ 3241c9e95c39SClaus Stovgaard .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 32426e6a6828SPascal Terjan { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */ 32436e6a6828SPascal Terjan .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | 32446e6a6828SPascal Terjan NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3245608cc4b1SChristoph Hellwig { PCI_DEVICE(0x1d1d, 0x1f1f), /* LightNVM qemu device */ 3246608cc4b1SChristoph Hellwig .driver_data = NVME_QUIRK_LIGHTNVM, }, 3247608cc4b1SChristoph Hellwig { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */ 3248608cc4b1SChristoph Hellwig .driver_data = NVME_QUIRK_LIGHTNVM, }, 3249ea48e877SWei Xu { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */ 3250ea48e877SWei Xu .driver_data = NVME_QUIRK_LIGHTNVM, }, 325108b903b5SMisha Nasledov { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */ 325208b903b5SMisha Nasledov .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3253f03e42c6SGabriel Craciunescu { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ 3254f03e42c6SGabriel Craciunescu .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 3255f03e42c6SGabriel Craciunescu NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 32565611ec2bSKai-Heng Feng { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ 32575611ec2bSKai-Heng Feng .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 325802ca079cSKai-Heng Feng { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */ 325902ca079cSKai-Heng Feng .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 326089919929SChaitanya Kulkarni { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */ 326189919929SChaitanya Kulkarni .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3262dc22c1c0SZoltán Böszörményi { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */ 3263dc22c1c0SZoltán Böszörményi .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, 3264538e4a8cSThorsten Leemhuis { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ 3265538e4a8cSThorsten Leemhuis .driver_data =
NVME_QUIRK_NO_DEEPEST_PS, }, 32664bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), 32674bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 32684bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), 32694bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 32704bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061), 32714bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 32724bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00), 32734bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 32744bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01), 32754bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 32764bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02), 32774bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 327898f7b86aSAndy Shevchenko { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), 327998f7b86aSAndy Shevchenko .driver_data = NVME_QUIRK_SINGLE_VECTOR }, 3280124298bdSDaniel Roschka { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, 328166341331SBenjamin Herrenschmidt { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005), 328266341331SBenjamin Herrenschmidt .driver_data = NVME_QUIRK_SINGLE_VECTOR | 3283d38e9f04SBenjamin Herrenschmidt NVME_QUIRK_128_BYTES_SQES | 3284d38e9f04SBenjamin Herrenschmidt NVME_QUIRK_SHARED_TAGS }, 32850b85f59dSAndy Shevchenko 32860b85f59dSAndy Shevchenko { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 328757dacad5SJay Sternberg { 0, } 328857dacad5SJay Sternberg }; 328957dacad5SJay Sternberg MODULE_DEVICE_TABLE(pci, nvme_id_table); 329057dacad5SJay Sternberg 329157dacad5SJay Sternberg static struct pci_driver nvme_driver = { 329257dacad5SJay Sternberg .name = "nvme", 329357dacad5SJay Sternberg .id_table = nvme_id_table, 329457dacad5SJay Sternberg .probe = nvme_probe, 329557dacad5SJay Sternberg .remove = nvme_remove, 329657dacad5SJay Sternberg .shutdown = nvme_shutdown, 3297d916b1beSKeith Busch #ifdef CONFIG_PM_SLEEP 329857dacad5SJay Sternberg .driver = { 329957dacad5SJay Sternberg .pm = &nvme_dev_pm_ops, 330057dacad5SJay Sternberg }, 3301d916b1beSKeith Busch #endif 330274d986abSAlexander Duyck .sriov_configure = pci_sriov_configure_simple, 330357dacad5SJay Sternberg .err_handler = &nvme_err_handler, 330457dacad5SJay Sternberg }; 330557dacad5SJay Sternberg 330657dacad5SJay Sternberg static int __init nvme_init(void) 330757dacad5SJay Sternberg { 330881101540SChristoph Hellwig BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64); 330981101540SChristoph Hellwig BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); 331081101540SChristoph Hellwig BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); 3311612b7286SMing Lei BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2); 331217c33167SKeith Busch 33139a6327d2SSagi Grimberg return pci_register_driver(&nvme_driver); 331457dacad5SJay Sternberg } 331557dacad5SJay Sternberg 331657dacad5SJay Sternberg static void __exit nvme_exit(void) 331757dacad5SJay Sternberg { 331857dacad5SJay Sternberg pci_unregister_driver(&nvme_driver); 331903e0f3a6SMing Lei flush_workqueue(nvme_wq); 332057dacad5SJay Sternberg } 332157dacad5SJay Sternberg 332257dacad5SJay Sternberg MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); 332357dacad5SJay Sternberg MODULE_LICENSE("GPL"); 332457dacad5SJay Sternberg MODULE_VERSION("1.0"); 332557dacad5SJay Sternberg module_init(nvme_init); 332657dacad5SJay Sternberg module_exit(nvme_exit); 3327
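/*
 * Illustrative sketch, not part of the driver: the shape of a new entry for
 * nvme_id_table above. The vendor/device IDs are hypothetical; the
 * .driver_data value reaches nvme_probe() as id->driver_data ("quirks"),
 * and a real entry would be placed ahead of the catch-all
 * PCI_DEVICE_CLASS() match.
 */
static const struct pci_device_id nvme_example_id_table[] __maybe_unused = {
	{ PCI_DEVICE(0x1234, 0x5678),	/* hypothetical device */
		.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
			       NVME_QUIRK_IGNORE_DEV_SUBNQN, },
	{ 0, }
};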