15f37396dSChristoph Hellwig // SPDX-License-Identifier: GPL-2.0 257dacad5SJay Sternberg /* 357dacad5SJay Sternberg * NVM Express device driver 457dacad5SJay Sternberg * Copyright (c) 2011-2014, Intel Corporation. 557dacad5SJay Sternberg */ 657dacad5SJay Sternberg 7df4f9bc4SDavid E. Box #include <linux/acpi.h> 8a0a3408eSKeith Busch #include <linux/aer.h> 918119775SKeith Busch #include <linux/async.h> 1057dacad5SJay Sternberg #include <linux/blkdev.h> 1157dacad5SJay Sternberg #include <linux/blk-mq.h> 12dca51e78SChristoph Hellwig #include <linux/blk-mq-pci.h> 13ff5350a8SAndy Lutomirski #include <linux/dmi.h> 1457dacad5SJay Sternberg #include <linux/init.h> 1557dacad5SJay Sternberg #include <linux/interrupt.h> 1657dacad5SJay Sternberg #include <linux/io.h> 1757dacad5SJay Sternberg #include <linux/mm.h> 1857dacad5SJay Sternberg #include <linux/module.h> 1977bf25eaSKeith Busch #include <linux/mutex.h> 20d0877473SKeith Busch #include <linux/once.h> 2157dacad5SJay Sternberg #include <linux/pci.h> 22d916b1beSKeith Busch #include <linux/suspend.h> 2357dacad5SJay Sternberg #include <linux/t10-pi.h> 2457dacad5SJay Sternberg #include <linux/types.h> 259cf5c095SLinus Torvalds #include <linux/io-64-nonatomic-lo-hi.h> 2620d3bb92SKlaus Jensen #include <linux/io-64-nonatomic-hi-lo.h> 27a98e58e5SScott Bauer #include <linux/sed-opal.h> 280f238ff5SLogan Gunthorpe #include <linux/pci-p2pdma.h> 2957dacad5SJay Sternberg 30604c01d5Syupeng #include "trace.h" 3157dacad5SJay Sternberg #include "nvme.h" 3257dacad5SJay Sternberg 33c1e0cc7eSBenjamin Herrenschmidt #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes) 348a1d09a6SBenjamin Herrenschmidt #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion)) 3557dacad5SJay Sternberg 36a7a7cbe3SChaitanya Kulkarni #define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc)) 37adf68f21SChristoph Hellwig 38943e942eSJens Axboe /* 39943e942eSJens Axboe * These can be higher, but we need to ensure that any command doesn't 40943e942eSJens Axboe * require an sg allocation that needs more than a page of data. 41943e942eSJens Axboe */ 42943e942eSJens Axboe #define NVME_MAX_KB_SZ 4096 43943e942eSJens Axboe #define NVME_MAX_SEGS 127 44943e942eSJens Axboe 4557dacad5SJay Sternberg static int use_threaded_interrupts; 4657dacad5SJay Sternberg module_param(use_threaded_interrupts, int, 0); 4757dacad5SJay Sternberg 4857dacad5SJay Sternberg static bool use_cmb_sqes = true; 4969f4eb9fSKeith Busch module_param(use_cmb_sqes, bool, 0444); 5057dacad5SJay Sternberg MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes"); 5157dacad5SJay Sternberg 5287ad72a5SChristoph Hellwig static unsigned int max_host_mem_size_mb = 128; 5387ad72a5SChristoph Hellwig module_param(max_host_mem_size_mb, uint, 0444); 5487ad72a5SChristoph Hellwig MODULE_PARM_DESC(max_host_mem_size_mb, 5587ad72a5SChristoph Hellwig "Maximum Host Memory Buffer (HMB) size per controller (in MiB)"); 5657dacad5SJay Sternberg 57a7a7cbe3SChaitanya Kulkarni static unsigned int sgl_threshold = SZ_32K; 58a7a7cbe3SChaitanya Kulkarni module_param(sgl_threshold, uint, 0644); 59a7a7cbe3SChaitanya Kulkarni MODULE_PARM_DESC(sgl_threshold, 60a7a7cbe3SChaitanya Kulkarni "Use SGLs when average request segment size is larger or equal to " 61a7a7cbe3SChaitanya Kulkarni "this size. Use 0 to disable SGLs."); 62a7a7cbe3SChaitanya Kulkarni 6327453b45SSagi Grimberg #define NVME_PCI_MIN_QUEUE_SIZE 2 6427453b45SSagi Grimberg #define NVME_PCI_MAX_QUEUE_SIZE 4095 65b27c1e68Sweiping zhang static int io_queue_depth_set(const char *val, const struct kernel_param *kp); 66b27c1e68Sweiping zhang static const struct kernel_param_ops io_queue_depth_ops = { 67b27c1e68Sweiping zhang .set = io_queue_depth_set, 6861f3b896SChaitanya Kulkarni .get = param_get_uint, 69b27c1e68Sweiping zhang }; 70b27c1e68Sweiping zhang 7161f3b896SChaitanya Kulkarni static unsigned int io_queue_depth = 1024; 72b27c1e68Sweiping zhang module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644); 7327453b45SSagi Grimberg MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2 and < 4096"); 74b27c1e68Sweiping zhang 759c9e76d5SWeiping Zhang static int io_queue_count_set(const char *val, const struct kernel_param *kp) 769c9e76d5SWeiping Zhang { 779c9e76d5SWeiping Zhang unsigned int n; 789c9e76d5SWeiping Zhang int ret; 799c9e76d5SWeiping Zhang 809c9e76d5SWeiping Zhang ret = kstrtouint(val, 10, &n); 819c9e76d5SWeiping Zhang if (ret != 0 || n > num_possible_cpus()) 829c9e76d5SWeiping Zhang return -EINVAL; 839c9e76d5SWeiping Zhang return param_set_uint(val, kp); 849c9e76d5SWeiping Zhang } 859c9e76d5SWeiping Zhang 869c9e76d5SWeiping Zhang static const struct kernel_param_ops io_queue_count_ops = { 879c9e76d5SWeiping Zhang .set = io_queue_count_set, 889c9e76d5SWeiping Zhang .get = param_get_uint, 899c9e76d5SWeiping Zhang }; 909c9e76d5SWeiping Zhang 913f68baf7SKeith Busch static unsigned int write_queues; 929c9e76d5SWeiping Zhang module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644); 933b6592f7SJens Axboe MODULE_PARM_DESC(write_queues, 943b6592f7SJens Axboe "Number of queues to use for writes. If not set, reads and writes " 953b6592f7SJens Axboe "will share a queue set."); 963b6592f7SJens Axboe 973f68baf7SKeith Busch static unsigned int poll_queues; 989c9e76d5SWeiping Zhang module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644); 994b04cc6aSJens Axboe MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO."); 1004b04cc6aSJens Axboe 101df4f9bc4SDavid E. Box static bool noacpi; 102df4f9bc4SDavid E. Box module_param(noacpi, bool, 0444); 103df4f9bc4SDavid E. Box MODULE_PARM_DESC(noacpi, "disable acpi bios quirks"); 104df4f9bc4SDavid E. Box 1051c63dc66SChristoph Hellwig struct nvme_dev; 1061c63dc66SChristoph Hellwig struct nvme_queue; 10757dacad5SJay Sternberg 108a5cdb68cSKeith Busch static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); 1098fae268bSKeith Busch static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode); 11057dacad5SJay Sternberg 11157dacad5SJay Sternberg /* 1121c63dc66SChristoph Hellwig * Represents an NVM Express device. Each nvme_dev is a PCI function. 1131c63dc66SChristoph Hellwig */ 1141c63dc66SChristoph Hellwig struct nvme_dev { 115147b27e4SSagi Grimberg struct nvme_queue *queues; 1161c63dc66SChristoph Hellwig struct blk_mq_tag_set tagset; 1171c63dc66SChristoph Hellwig struct blk_mq_tag_set admin_tagset; 1181c63dc66SChristoph Hellwig u32 __iomem *dbs; 1191c63dc66SChristoph Hellwig struct device *dev; 1201c63dc66SChristoph Hellwig struct dma_pool *prp_page_pool; 1211c63dc66SChristoph Hellwig struct dma_pool *prp_small_pool; 1221c63dc66SChristoph Hellwig unsigned online_queues; 1231c63dc66SChristoph Hellwig unsigned max_qid; 124e20ba6e1SChristoph Hellwig unsigned io_queues[HCTX_MAX_TYPES]; 12522b55601SKeith Busch unsigned int num_vecs; 1267442ddceSJohn Garry u32 q_depth; 127c1e0cc7eSBenjamin Herrenschmidt int io_sqes; 1281c63dc66SChristoph Hellwig u32 db_stride; 1291c63dc66SChristoph Hellwig void __iomem *bar; 13097f6ef64SXu Yu unsigned long bar_mapped_size; 1315c8809e6SChristoph Hellwig struct work_struct remove_work; 13277bf25eaSKeith Busch struct mutex shutdown_lock; 1331c63dc66SChristoph Hellwig bool subsystem; 1341c63dc66SChristoph Hellwig u64 cmb_size; 1350f238ff5SLogan Gunthorpe bool cmb_use_sqes; 1361c63dc66SChristoph Hellwig u32 cmbsz; 137202021c1SStephen Bates u32 cmbloc; 1381c63dc66SChristoph Hellwig struct nvme_ctrl ctrl; 139d916b1beSKeith Busch u32 last_ps; 14087ad72a5SChristoph Hellwig 141943e942eSJens Axboe mempool_t *iod_mempool; 142943e942eSJens Axboe 14387ad72a5SChristoph Hellwig /* shadow doorbell buffer support: */ 144f9f38e33SHelen Koike u32 *dbbuf_dbs; 145f9f38e33SHelen Koike dma_addr_t dbbuf_dbs_dma_addr; 146f9f38e33SHelen Koike u32 *dbbuf_eis; 147f9f38e33SHelen Koike dma_addr_t dbbuf_eis_dma_addr; 14887ad72a5SChristoph Hellwig 14987ad72a5SChristoph Hellwig /* host memory buffer support: */ 15087ad72a5SChristoph Hellwig u64 host_mem_size; 15187ad72a5SChristoph Hellwig u32 nr_host_mem_descs; 1524033f35dSChristoph Hellwig dma_addr_t host_mem_descs_dma; 15387ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *host_mem_descs; 15487ad72a5SChristoph Hellwig void **host_mem_desc_bufs; 1552a5bcfddSWeiping Zhang unsigned int nr_allocated_queues; 1562a5bcfddSWeiping Zhang unsigned int nr_write_queues; 1572a5bcfddSWeiping Zhang unsigned int nr_poll_queues; 1580521905eSKeith Busch 1590521905eSKeith Busch bool attrs_added; 16057dacad5SJay Sternberg }; 16157dacad5SJay Sternberg 162b27c1e68Sweiping zhang static int io_queue_depth_set(const char *val, const struct kernel_param *kp) 163b27c1e68Sweiping zhang { 16427453b45SSagi Grimberg return param_set_uint_minmax(val, kp, NVME_PCI_MIN_QUEUE_SIZE, 16527453b45SSagi Grimberg NVME_PCI_MAX_QUEUE_SIZE); 166b27c1e68Sweiping zhang } 167b27c1e68Sweiping zhang 168f9f38e33SHelen Koike static inline unsigned int sq_idx(unsigned int qid, u32 stride) 169f9f38e33SHelen Koike { 170f9f38e33SHelen Koike return qid * 2 * stride; 171f9f38e33SHelen Koike } 172f9f38e33SHelen Koike 173f9f38e33SHelen Koike static inline unsigned int cq_idx(unsigned int qid, u32 stride) 174f9f38e33SHelen Koike { 175f9f38e33SHelen Koike return (qid * 2 + 1) * stride; 176f9f38e33SHelen Koike } 177f9f38e33SHelen Koike 1781c63dc66SChristoph Hellwig static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl) 1791c63dc66SChristoph Hellwig { 1801c63dc66SChristoph Hellwig return container_of(ctrl, struct nvme_dev, ctrl); 1811c63dc66SChristoph Hellwig } 1821c63dc66SChristoph Hellwig 18357dacad5SJay Sternberg /* 18457dacad5SJay Sternberg * An NVM Express queue. Each device has at least two (one for admin 18557dacad5SJay Sternberg * commands and one for I/O commands). 18657dacad5SJay Sternberg */ 18757dacad5SJay Sternberg struct nvme_queue { 18857dacad5SJay Sternberg struct nvme_dev *dev; 1891ab0cd69SJens Axboe spinlock_t sq_lock; 190c1e0cc7eSBenjamin Herrenschmidt void *sq_cmds; 1913a7afd8eSChristoph Hellwig /* only used for poll queues: */ 1923a7afd8eSChristoph Hellwig spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; 19374943d45SKeith Busch struct nvme_completion *cqes; 19457dacad5SJay Sternberg dma_addr_t sq_dma_addr; 19557dacad5SJay Sternberg dma_addr_t cq_dma_addr; 19657dacad5SJay Sternberg u32 __iomem *q_db; 1977442ddceSJohn Garry u32 q_depth; 1987c349ddeSKeith Busch u16 cq_vector; 19957dacad5SJay Sternberg u16 sq_tail; 20038210800SKeith Busch u16 last_sq_tail; 20157dacad5SJay Sternberg u16 cq_head; 20257dacad5SJay Sternberg u16 qid; 20357dacad5SJay Sternberg u8 cq_phase; 204c1e0cc7eSBenjamin Herrenschmidt u8 sqes; 2054e224106SChristoph Hellwig unsigned long flags; 2064e224106SChristoph Hellwig #define NVMEQ_ENABLED 0 20763223078SChristoph Hellwig #define NVMEQ_SQ_CMB 1 208d1ed6aa1SChristoph Hellwig #define NVMEQ_DELETE_ERROR 2 2097c349ddeSKeith Busch #define NVMEQ_POLLED 3 210f9f38e33SHelen Koike u32 *dbbuf_sq_db; 211f9f38e33SHelen Koike u32 *dbbuf_cq_db; 212f9f38e33SHelen Koike u32 *dbbuf_sq_ei; 213f9f38e33SHelen Koike u32 *dbbuf_cq_ei; 214d1ed6aa1SChristoph Hellwig struct completion delete_done; 21557dacad5SJay Sternberg }; 21657dacad5SJay Sternberg 21757dacad5SJay Sternberg /* 2189b048119SChristoph Hellwig * The nvme_iod describes the data in an I/O. 2199b048119SChristoph Hellwig * 2209b048119SChristoph Hellwig * The sg pointer contains the list of PRP/SGL chunk allocations in addition 2219b048119SChristoph Hellwig * to the actual struct scatterlist. 22271bd150cSChristoph Hellwig */ 22371bd150cSChristoph Hellwig struct nvme_iod { 224d49187e9SChristoph Hellwig struct nvme_request req; 225af7fae85SKeith Busch struct nvme_command cmd; 226f4800d6dSChristoph Hellwig struct nvme_queue *nvmeq; 227a7a7cbe3SChaitanya Kulkarni bool use_sgl; 228f4800d6dSChristoph Hellwig int aborted; 22971bd150cSChristoph Hellwig int npages; /* In the PRP list. 0 means small pool in use */ 23071bd150cSChristoph Hellwig int nents; /* Used in scatterlist */ 23171bd150cSChristoph Hellwig dma_addr_t first_dma; 232dff824b2SChristoph Hellwig unsigned int dma_len; /* length of single DMA segment mapping */ 233783b94bdSChristoph Hellwig dma_addr_t meta_dma; 234f4800d6dSChristoph Hellwig struct scatterlist *sg; 23557dacad5SJay Sternberg }; 23657dacad5SJay Sternberg 2372a5bcfddSWeiping Zhang static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev) 2383b6592f7SJens Axboe { 2392a5bcfddSWeiping Zhang return dev->nr_allocated_queues * 8 * dev->db_stride; 240f9f38e33SHelen Koike } 241f9f38e33SHelen Koike 242f9f38e33SHelen Koike static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev) 243f9f38e33SHelen Koike { 2442a5bcfddSWeiping Zhang unsigned int mem_size = nvme_dbbuf_size(dev); 245f9f38e33SHelen Koike 246f9f38e33SHelen Koike if (dev->dbbuf_dbs) 247f9f38e33SHelen Koike return 0; 248f9f38e33SHelen Koike 249f9f38e33SHelen Koike dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size, 250f9f38e33SHelen Koike &dev->dbbuf_dbs_dma_addr, 251f9f38e33SHelen Koike GFP_KERNEL); 252f9f38e33SHelen Koike if (!dev->dbbuf_dbs) 253f9f38e33SHelen Koike return -ENOMEM; 254f9f38e33SHelen Koike dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size, 255f9f38e33SHelen Koike &dev->dbbuf_eis_dma_addr, 256f9f38e33SHelen Koike GFP_KERNEL); 257f9f38e33SHelen Koike if (!dev->dbbuf_eis) { 258f9f38e33SHelen Koike dma_free_coherent(dev->dev, mem_size, 259f9f38e33SHelen Koike dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr); 260f9f38e33SHelen Koike dev->dbbuf_dbs = NULL; 261f9f38e33SHelen Koike return -ENOMEM; 262f9f38e33SHelen Koike } 263f9f38e33SHelen Koike 264f9f38e33SHelen Koike return 0; 265f9f38e33SHelen Koike } 266f9f38e33SHelen Koike 267f9f38e33SHelen Koike static void nvme_dbbuf_dma_free(struct nvme_dev *dev) 268f9f38e33SHelen Koike { 2692a5bcfddSWeiping Zhang unsigned int mem_size = nvme_dbbuf_size(dev); 270f9f38e33SHelen Koike 271f9f38e33SHelen Koike if (dev->dbbuf_dbs) { 272f9f38e33SHelen Koike dma_free_coherent(dev->dev, mem_size, 273f9f38e33SHelen Koike dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr); 274f9f38e33SHelen Koike dev->dbbuf_dbs = NULL; 275f9f38e33SHelen Koike } 276f9f38e33SHelen Koike if (dev->dbbuf_eis) { 277f9f38e33SHelen Koike dma_free_coherent(dev->dev, mem_size, 278f9f38e33SHelen Koike dev->dbbuf_eis, dev->dbbuf_eis_dma_addr); 279f9f38e33SHelen Koike dev->dbbuf_eis = NULL; 280f9f38e33SHelen Koike } 281f9f38e33SHelen Koike } 282f9f38e33SHelen Koike 283f9f38e33SHelen Koike static void nvme_dbbuf_init(struct nvme_dev *dev, 284f9f38e33SHelen Koike struct nvme_queue *nvmeq, int qid) 285f9f38e33SHelen Koike { 286f9f38e33SHelen Koike if (!dev->dbbuf_dbs || !qid) 287f9f38e33SHelen Koike return; 288f9f38e33SHelen Koike 289f9f38e33SHelen Koike nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)]; 290f9f38e33SHelen Koike nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)]; 291f9f38e33SHelen Koike nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)]; 292f9f38e33SHelen Koike nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)]; 293f9f38e33SHelen Koike } 294f9f38e33SHelen Koike 2950f0d2c87SMinwoo Im static void nvme_dbbuf_free(struct nvme_queue *nvmeq) 2960f0d2c87SMinwoo Im { 2970f0d2c87SMinwoo Im if (!nvmeq->qid) 2980f0d2c87SMinwoo Im return; 2990f0d2c87SMinwoo Im 3000f0d2c87SMinwoo Im nvmeq->dbbuf_sq_db = NULL; 3010f0d2c87SMinwoo Im nvmeq->dbbuf_cq_db = NULL; 3020f0d2c87SMinwoo Im nvmeq->dbbuf_sq_ei = NULL; 3030f0d2c87SMinwoo Im nvmeq->dbbuf_cq_ei = NULL; 3040f0d2c87SMinwoo Im } 3050f0d2c87SMinwoo Im 306f9f38e33SHelen Koike static void nvme_dbbuf_set(struct nvme_dev *dev) 307f9f38e33SHelen Koike { 308f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 3090f0d2c87SMinwoo Im unsigned int i; 310f9f38e33SHelen Koike 311f9f38e33SHelen Koike if (!dev->dbbuf_dbs) 312f9f38e33SHelen Koike return; 313f9f38e33SHelen Koike 314f9f38e33SHelen Koike c.dbbuf.opcode = nvme_admin_dbbuf; 315f9f38e33SHelen Koike c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr); 316f9f38e33SHelen Koike c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); 317f9f38e33SHelen Koike 318f9f38e33SHelen Koike if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { 3199bdcfb10SChristoph Hellwig dev_warn(dev->ctrl.device, "unable to set dbbuf\n"); 320f9f38e33SHelen Koike /* Free memory and continue on */ 321f9f38e33SHelen Koike nvme_dbbuf_dma_free(dev); 3220f0d2c87SMinwoo Im 3230f0d2c87SMinwoo Im for (i = 1; i <= dev->online_queues; i++) 3240f0d2c87SMinwoo Im nvme_dbbuf_free(&dev->queues[i]); 325f9f38e33SHelen Koike } 326f9f38e33SHelen Koike } 327f9f38e33SHelen Koike 328f9f38e33SHelen Koike static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old) 329f9f38e33SHelen Koike { 330f9f38e33SHelen Koike return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old); 331f9f38e33SHelen Koike } 332f9f38e33SHelen Koike 333f9f38e33SHelen Koike /* Update dbbuf and return true if an MMIO is required */ 334f9f38e33SHelen Koike static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, 335f9f38e33SHelen Koike volatile u32 *dbbuf_ei) 336f9f38e33SHelen Koike { 337f9f38e33SHelen Koike if (dbbuf_db) { 338f9f38e33SHelen Koike u16 old_value; 339f9f38e33SHelen Koike 340f9f38e33SHelen Koike /* 341f9f38e33SHelen Koike * Ensure that the queue is written before updating 342f9f38e33SHelen Koike * the doorbell in memory 343f9f38e33SHelen Koike */ 344f9f38e33SHelen Koike wmb(); 345f9f38e33SHelen Koike 346f9f38e33SHelen Koike old_value = *dbbuf_db; 347f9f38e33SHelen Koike *dbbuf_db = value; 348f9f38e33SHelen Koike 349f1ed3df2SMichal Wnukowski /* 350f1ed3df2SMichal Wnukowski * Ensure that the doorbell is updated before reading the event 351f1ed3df2SMichal Wnukowski * index from memory. The controller needs to provide similar 352f1ed3df2SMichal Wnukowski * ordering to ensure the envent index is updated before reading 353f1ed3df2SMichal Wnukowski * the doorbell. 354f1ed3df2SMichal Wnukowski */ 355f1ed3df2SMichal Wnukowski mb(); 356f1ed3df2SMichal Wnukowski 357f9f38e33SHelen Koike if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) 358f9f38e33SHelen Koike return false; 359f9f38e33SHelen Koike } 360f9f38e33SHelen Koike 361f9f38e33SHelen Koike return true; 36257dacad5SJay Sternberg } 36357dacad5SJay Sternberg 36457dacad5SJay Sternberg /* 36557dacad5SJay Sternberg * Will slightly overestimate the number of pages needed. This is OK 36657dacad5SJay Sternberg * as it only leads to a small amount of wasted memory for the lifetime of 36757dacad5SJay Sternberg * the I/O. 36857dacad5SJay Sternberg */ 369b13c6393SChaitanya Kulkarni static int nvme_pci_npages_prp(void) 37057dacad5SJay Sternberg { 371b13c6393SChaitanya Kulkarni unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, 3726c3c05b0SChaitanya Kulkarni NVME_CTRL_PAGE_SIZE); 37357dacad5SJay Sternberg return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8); 37457dacad5SJay Sternberg } 37557dacad5SJay Sternberg 376a7a7cbe3SChaitanya Kulkarni /* 377a7a7cbe3SChaitanya Kulkarni * Calculates the number of pages needed for the SGL segments. For example a 4k 378a7a7cbe3SChaitanya Kulkarni * page can accommodate 256 SGL descriptors. 379a7a7cbe3SChaitanya Kulkarni */ 380b13c6393SChaitanya Kulkarni static int nvme_pci_npages_sgl(void) 381f4800d6dSChristoph Hellwig { 382b13c6393SChaitanya Kulkarni return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc), 383b13c6393SChaitanya Kulkarni PAGE_SIZE); 384f4800d6dSChristoph Hellwig } 385f4800d6dSChristoph Hellwig 386b13c6393SChaitanya Kulkarni static size_t nvme_pci_iod_alloc_size(void) 38757dacad5SJay Sternberg { 388b13c6393SChaitanya Kulkarni size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl()); 389a7a7cbe3SChaitanya Kulkarni 390b13c6393SChaitanya Kulkarni return sizeof(__le64 *) * npages + 391b13c6393SChaitanya Kulkarni sizeof(struct scatterlist) * NVME_MAX_SEGS; 392a7a7cbe3SChaitanya Kulkarni } 393a7a7cbe3SChaitanya Kulkarni 39457dacad5SJay Sternberg static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 39557dacad5SJay Sternberg unsigned int hctx_idx) 39657dacad5SJay Sternberg { 39757dacad5SJay Sternberg struct nvme_dev *dev = data; 398147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[0]; 39957dacad5SJay Sternberg 40057dacad5SJay Sternberg WARN_ON(hctx_idx != 0); 40157dacad5SJay Sternberg WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); 40257dacad5SJay Sternberg 40357dacad5SJay Sternberg hctx->driver_data = nvmeq; 40457dacad5SJay Sternberg return 0; 40557dacad5SJay Sternberg } 40657dacad5SJay Sternberg 40757dacad5SJay Sternberg static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 40857dacad5SJay Sternberg unsigned int hctx_idx) 40957dacad5SJay Sternberg { 41057dacad5SJay Sternberg struct nvme_dev *dev = data; 411147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; 41257dacad5SJay Sternberg 41357dacad5SJay Sternberg WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); 41457dacad5SJay Sternberg hctx->driver_data = nvmeq; 41557dacad5SJay Sternberg return 0; 41657dacad5SJay Sternberg } 41757dacad5SJay Sternberg 418d6296d39SChristoph Hellwig static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req, 419d6296d39SChristoph Hellwig unsigned int hctx_idx, unsigned int numa_node) 42057dacad5SJay Sternberg { 421d6296d39SChristoph Hellwig struct nvme_dev *dev = set->driver_data; 422f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 4230350815aSChristoph Hellwig int queue_idx = (set == &dev->tagset) ? hctx_idx + 1 : 0; 424147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[queue_idx]; 42557dacad5SJay Sternberg 42657dacad5SJay Sternberg BUG_ON(!nvmeq); 427f4800d6dSChristoph Hellwig iod->nvmeq = nvmeq; 42859e29ce6SSagi Grimberg 42959e29ce6SSagi Grimberg nvme_req(req)->ctrl = &dev->ctrl; 430f4b9e6c9SKeith Busch nvme_req(req)->cmd = &iod->cmd; 43157dacad5SJay Sternberg return 0; 43257dacad5SJay Sternberg } 43357dacad5SJay Sternberg 4343b6592f7SJens Axboe static int queue_irq_offset(struct nvme_dev *dev) 4353b6592f7SJens Axboe { 4363b6592f7SJens Axboe /* if we have more than 1 vec, admin queue offsets us by 1 */ 4373b6592f7SJens Axboe if (dev->num_vecs > 1) 4383b6592f7SJens Axboe return 1; 4393b6592f7SJens Axboe 4403b6592f7SJens Axboe return 0; 4413b6592f7SJens Axboe } 4423b6592f7SJens Axboe 443dca51e78SChristoph Hellwig static int nvme_pci_map_queues(struct blk_mq_tag_set *set) 444dca51e78SChristoph Hellwig { 445dca51e78SChristoph Hellwig struct nvme_dev *dev = set->driver_data; 4463b6592f7SJens Axboe int i, qoff, offset; 447dca51e78SChristoph Hellwig 4483b6592f7SJens Axboe offset = queue_irq_offset(dev); 4493b6592f7SJens Axboe for (i = 0, qoff = 0; i < set->nr_maps; i++) { 4503b6592f7SJens Axboe struct blk_mq_queue_map *map = &set->map[i]; 4513b6592f7SJens Axboe 4523b6592f7SJens Axboe map->nr_queues = dev->io_queues[i]; 4533b6592f7SJens Axboe if (!map->nr_queues) { 454e20ba6e1SChristoph Hellwig BUG_ON(i == HCTX_TYPE_DEFAULT); 4557e849dd9SChristoph Hellwig continue; 4563b6592f7SJens Axboe } 4573b6592f7SJens Axboe 4584b04cc6aSJens Axboe /* 4594b04cc6aSJens Axboe * The poll queue(s) doesn't have an IRQ (and hence IRQ 4604b04cc6aSJens Axboe * affinity), so use the regular blk-mq cpu mapping 4614b04cc6aSJens Axboe */ 4623b6592f7SJens Axboe map->queue_offset = qoff; 463cb9e0e50SKeith Busch if (i != HCTX_TYPE_POLL && offset) 4643b6592f7SJens Axboe blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset); 4654b04cc6aSJens Axboe else 4664b04cc6aSJens Axboe blk_mq_map_queues(map); 4673b6592f7SJens Axboe qoff += map->nr_queues; 4683b6592f7SJens Axboe offset += map->nr_queues; 4693b6592f7SJens Axboe } 4703b6592f7SJens Axboe 4713b6592f7SJens Axboe return 0; 472dca51e78SChristoph Hellwig } 473dca51e78SChristoph Hellwig 47438210800SKeith Busch /* 47538210800SKeith Busch * Write sq tail if we are asked to, or if the next command would wrap. 47638210800SKeith Busch */ 47738210800SKeith Busch static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq) 47804f3eafdSJens Axboe { 47938210800SKeith Busch if (!write_sq) { 48038210800SKeith Busch u16 next_tail = nvmeq->sq_tail + 1; 48138210800SKeith Busch 48238210800SKeith Busch if (next_tail == nvmeq->q_depth) 48338210800SKeith Busch next_tail = 0; 48438210800SKeith Busch if (next_tail != nvmeq->last_sq_tail) 48538210800SKeith Busch return; 48638210800SKeith Busch } 48738210800SKeith Busch 48804f3eafdSJens Axboe if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail, 48904f3eafdSJens Axboe nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei)) 49004f3eafdSJens Axboe writel(nvmeq->sq_tail, nvmeq->q_db); 49138210800SKeith Busch nvmeq->last_sq_tail = nvmeq->sq_tail; 49204f3eafdSJens Axboe } 49304f3eafdSJens Axboe 49457dacad5SJay Sternberg /** 49590ea5ca4SChristoph Hellwig * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell 49657dacad5SJay Sternberg * @nvmeq: The queue to use 49757dacad5SJay Sternberg * @cmd: The command to send 49804f3eafdSJens Axboe * @write_sq: whether to write to the SQ doorbell 49957dacad5SJay Sternberg */ 50004f3eafdSJens Axboe static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd, 50104f3eafdSJens Axboe bool write_sq) 50257dacad5SJay Sternberg { 50390ea5ca4SChristoph Hellwig spin_lock(&nvmeq->sq_lock); 504c1e0cc7eSBenjamin Herrenschmidt memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes), 505c1e0cc7eSBenjamin Herrenschmidt cmd, sizeof(*cmd)); 50690ea5ca4SChristoph Hellwig if (++nvmeq->sq_tail == nvmeq->q_depth) 50790ea5ca4SChristoph Hellwig nvmeq->sq_tail = 0; 50838210800SKeith Busch nvme_write_sq_db(nvmeq, write_sq); 50904f3eafdSJens Axboe spin_unlock(&nvmeq->sq_lock); 51004f3eafdSJens Axboe } 51104f3eafdSJens Axboe 51204f3eafdSJens Axboe static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx) 51304f3eafdSJens Axboe { 51404f3eafdSJens Axboe struct nvme_queue *nvmeq = hctx->driver_data; 51504f3eafdSJens Axboe 51604f3eafdSJens Axboe spin_lock(&nvmeq->sq_lock); 51738210800SKeith Busch if (nvmeq->sq_tail != nvmeq->last_sq_tail) 51838210800SKeith Busch nvme_write_sq_db(nvmeq, true); 51990ea5ca4SChristoph Hellwig spin_unlock(&nvmeq->sq_lock); 52057dacad5SJay Sternberg } 52157dacad5SJay Sternberg 522a7a7cbe3SChaitanya Kulkarni static void **nvme_pci_iod_list(struct request *req) 52357dacad5SJay Sternberg { 524f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 525a7a7cbe3SChaitanya Kulkarni return (void **)(iod->sg + blk_rq_nr_phys_segments(req)); 52657dacad5SJay Sternberg } 52757dacad5SJay Sternberg 528955b1b5aSMinwoo Im static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req) 529955b1b5aSMinwoo Im { 530955b1b5aSMinwoo Im struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 53120469a37SKeith Busch int nseg = blk_rq_nr_phys_segments(req); 532955b1b5aSMinwoo Im unsigned int avg_seg_size; 533955b1b5aSMinwoo Im 53420469a37SKeith Busch avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg); 535955b1b5aSMinwoo Im 536253a0b76SChaitanya Kulkarni if (!nvme_ctrl_sgl_supported(&dev->ctrl)) 537955b1b5aSMinwoo Im return false; 538955b1b5aSMinwoo Im if (!iod->nvmeq->qid) 539955b1b5aSMinwoo Im return false; 540955b1b5aSMinwoo Im if (!sgl_threshold || avg_seg_size < sgl_threshold) 541955b1b5aSMinwoo Im return false; 542955b1b5aSMinwoo Im return true; 543955b1b5aSMinwoo Im } 544955b1b5aSMinwoo Im 5459275c206SChristoph Hellwig static void nvme_free_prps(struct nvme_dev *dev, struct request *req) 54657dacad5SJay Sternberg { 5476c3c05b0SChaitanya Kulkarni const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; 5489275c206SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 5499275c206SChristoph Hellwig dma_addr_t dma_addr = iod->first_dma; 55057dacad5SJay Sternberg int i; 55157dacad5SJay Sternberg 5529275c206SChristoph Hellwig for (i = 0; i < iod->npages; i++) { 5539275c206SChristoph Hellwig __le64 *prp_list = nvme_pci_iod_list(req)[i]; 5549275c206SChristoph Hellwig dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]); 5559275c206SChristoph Hellwig 5569275c206SChristoph Hellwig dma_pool_free(dev->prp_page_pool, prp_list, dma_addr); 5579275c206SChristoph Hellwig dma_addr = next_dma_addr; 558dff824b2SChristoph Hellwig } 5599275c206SChristoph Hellwig } 5609275c206SChristoph Hellwig 5619275c206SChristoph Hellwig static void nvme_free_sgls(struct nvme_dev *dev, struct request *req) 5629275c206SChristoph Hellwig { 5639275c206SChristoph Hellwig const int last_sg = SGES_PER_PAGE - 1; 5649275c206SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 5659275c206SChristoph Hellwig dma_addr_t dma_addr = iod->first_dma; 5669275c206SChristoph Hellwig int i; 5679275c206SChristoph Hellwig 5689275c206SChristoph Hellwig for (i = 0; i < iod->npages; i++) { 5699275c206SChristoph Hellwig struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i]; 5709275c206SChristoph Hellwig dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr); 5719275c206SChristoph Hellwig 5729275c206SChristoph Hellwig dma_pool_free(dev->prp_page_pool, sg_list, dma_addr); 5739275c206SChristoph Hellwig dma_addr = next_dma_addr; 5749275c206SChristoph Hellwig } 5759275c206SChristoph Hellwig } 5769275c206SChristoph Hellwig 5779275c206SChristoph Hellwig static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req) 5789275c206SChristoph Hellwig { 5799275c206SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 580dff824b2SChristoph Hellwig 5817f73eac3SLogan Gunthorpe if (is_pci_p2pdma_page(sg_page(iod->sg))) 5827f73eac3SLogan Gunthorpe pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents, 5837f73eac3SLogan Gunthorpe rq_dma_dir(req)); 5847f73eac3SLogan Gunthorpe else 585dff824b2SChristoph Hellwig dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req)); 5869275c206SChristoph Hellwig } 5877fe07d14SChristoph Hellwig 5889275c206SChristoph Hellwig static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) 5899275c206SChristoph Hellwig { 5909275c206SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 5917fe07d14SChristoph Hellwig 5929275c206SChristoph Hellwig if (iod->dma_len) { 5939275c206SChristoph Hellwig dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, 5949275c206SChristoph Hellwig rq_dma_dir(req)); 5959275c206SChristoph Hellwig return; 5969275c206SChristoph Hellwig } 5979275c206SChristoph Hellwig 5989275c206SChristoph Hellwig WARN_ON_ONCE(!iod->nents); 5999275c206SChristoph Hellwig 6009275c206SChristoph Hellwig nvme_unmap_sg(dev, req); 60157dacad5SJay Sternberg if (iod->npages == 0) 602a7a7cbe3SChaitanya Kulkarni dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0], 6039275c206SChristoph Hellwig iod->first_dma); 6049275c206SChristoph Hellwig else if (iod->use_sgl) 6059275c206SChristoph Hellwig nvme_free_sgls(dev, req); 6069275c206SChristoph Hellwig else 6079275c206SChristoph Hellwig nvme_free_prps(dev, req); 608943e942eSJens Axboe mempool_free(iod->sg, dev->iod_mempool); 60957dacad5SJay Sternberg } 61057dacad5SJay Sternberg 611d0877473SKeith Busch static void nvme_print_sgl(struct scatterlist *sgl, int nents) 612d0877473SKeith Busch { 613d0877473SKeith Busch int i; 614d0877473SKeith Busch struct scatterlist *sg; 615d0877473SKeith Busch 616d0877473SKeith Busch for_each_sg(sgl, sg, nents, i) { 617d0877473SKeith Busch dma_addr_t phys = sg_phys(sg); 618d0877473SKeith Busch pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d " 619d0877473SKeith Busch "dma_address:%pad dma_length:%d\n", 620d0877473SKeith Busch i, &phys, sg->offset, sg->length, &sg_dma_address(sg), 621d0877473SKeith Busch sg_dma_len(sg)); 622d0877473SKeith Busch } 623d0877473SKeith Busch } 624d0877473SKeith Busch 625a7a7cbe3SChaitanya Kulkarni static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, 626a7a7cbe3SChaitanya Kulkarni struct request *req, struct nvme_rw_command *cmnd) 62757dacad5SJay Sternberg { 628f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 62957dacad5SJay Sternberg struct dma_pool *pool; 630b131c61dSChristoph Hellwig int length = blk_rq_payload_bytes(req); 63157dacad5SJay Sternberg struct scatterlist *sg = iod->sg; 63257dacad5SJay Sternberg int dma_len = sg_dma_len(sg); 63357dacad5SJay Sternberg u64 dma_addr = sg_dma_address(sg); 6346c3c05b0SChaitanya Kulkarni int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1); 63557dacad5SJay Sternberg __le64 *prp_list; 636a7a7cbe3SChaitanya Kulkarni void **list = nvme_pci_iod_list(req); 63757dacad5SJay Sternberg dma_addr_t prp_dma; 63857dacad5SJay Sternberg int nprps, i; 63957dacad5SJay Sternberg 6406c3c05b0SChaitanya Kulkarni length -= (NVME_CTRL_PAGE_SIZE - offset); 6415228b328SJan H. Schönherr if (length <= 0) { 6425228b328SJan H. Schönherr iod->first_dma = 0; 643a7a7cbe3SChaitanya Kulkarni goto done; 6445228b328SJan H. Schönherr } 64557dacad5SJay Sternberg 6466c3c05b0SChaitanya Kulkarni dma_len -= (NVME_CTRL_PAGE_SIZE - offset); 64757dacad5SJay Sternberg if (dma_len) { 6486c3c05b0SChaitanya Kulkarni dma_addr += (NVME_CTRL_PAGE_SIZE - offset); 64957dacad5SJay Sternberg } else { 65057dacad5SJay Sternberg sg = sg_next(sg); 65157dacad5SJay Sternberg dma_addr = sg_dma_address(sg); 65257dacad5SJay Sternberg dma_len = sg_dma_len(sg); 65357dacad5SJay Sternberg } 65457dacad5SJay Sternberg 6556c3c05b0SChaitanya Kulkarni if (length <= NVME_CTRL_PAGE_SIZE) { 65657dacad5SJay Sternberg iod->first_dma = dma_addr; 657a7a7cbe3SChaitanya Kulkarni goto done; 65857dacad5SJay Sternberg } 65957dacad5SJay Sternberg 6606c3c05b0SChaitanya Kulkarni nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE); 66157dacad5SJay Sternberg if (nprps <= (256 / 8)) { 66257dacad5SJay Sternberg pool = dev->prp_small_pool; 66357dacad5SJay Sternberg iod->npages = 0; 66457dacad5SJay Sternberg } else { 66557dacad5SJay Sternberg pool = dev->prp_page_pool; 66657dacad5SJay Sternberg iod->npages = 1; 66757dacad5SJay Sternberg } 66857dacad5SJay Sternberg 66969d2b571SChristoph Hellwig prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); 67057dacad5SJay Sternberg if (!prp_list) { 67157dacad5SJay Sternberg iod->first_dma = dma_addr; 67257dacad5SJay Sternberg iod->npages = -1; 67386eea289SKeith Busch return BLK_STS_RESOURCE; 67457dacad5SJay Sternberg } 67557dacad5SJay Sternberg list[0] = prp_list; 67657dacad5SJay Sternberg iod->first_dma = prp_dma; 67757dacad5SJay Sternberg i = 0; 67857dacad5SJay Sternberg for (;;) { 6796c3c05b0SChaitanya Kulkarni if (i == NVME_CTRL_PAGE_SIZE >> 3) { 68057dacad5SJay Sternberg __le64 *old_prp_list = prp_list; 68169d2b571SChristoph Hellwig prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); 68257dacad5SJay Sternberg if (!prp_list) 683fa073216SChristoph Hellwig goto free_prps; 68457dacad5SJay Sternberg list[iod->npages++] = prp_list; 68557dacad5SJay Sternberg prp_list[0] = old_prp_list[i - 1]; 68657dacad5SJay Sternberg old_prp_list[i - 1] = cpu_to_le64(prp_dma); 68757dacad5SJay Sternberg i = 1; 68857dacad5SJay Sternberg } 68957dacad5SJay Sternberg prp_list[i++] = cpu_to_le64(dma_addr); 6906c3c05b0SChaitanya Kulkarni dma_len -= NVME_CTRL_PAGE_SIZE; 6916c3c05b0SChaitanya Kulkarni dma_addr += NVME_CTRL_PAGE_SIZE; 6926c3c05b0SChaitanya Kulkarni length -= NVME_CTRL_PAGE_SIZE; 69357dacad5SJay Sternberg if (length <= 0) 69457dacad5SJay Sternberg break; 69557dacad5SJay Sternberg if (dma_len > 0) 69657dacad5SJay Sternberg continue; 69786eea289SKeith Busch if (unlikely(dma_len < 0)) 69886eea289SKeith Busch goto bad_sgl; 69957dacad5SJay Sternberg sg = sg_next(sg); 70057dacad5SJay Sternberg dma_addr = sg_dma_address(sg); 70157dacad5SJay Sternberg dma_len = sg_dma_len(sg); 70257dacad5SJay Sternberg } 703a7a7cbe3SChaitanya Kulkarni done: 704a7a7cbe3SChaitanya Kulkarni cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); 705a7a7cbe3SChaitanya Kulkarni cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); 70686eea289SKeith Busch return BLK_STS_OK; 707fa073216SChristoph Hellwig free_prps: 708fa073216SChristoph Hellwig nvme_free_prps(dev, req); 709fa073216SChristoph Hellwig return BLK_STS_RESOURCE; 71086eea289SKeith Busch bad_sgl: 711d0877473SKeith Busch WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents), 712d0877473SKeith Busch "Invalid SGL for payload:%d nents:%d\n", 713d0877473SKeith Busch blk_rq_payload_bytes(req), iod->nents); 71486eea289SKeith Busch return BLK_STS_IOERR; 71557dacad5SJay Sternberg } 71657dacad5SJay Sternberg 717a7a7cbe3SChaitanya Kulkarni static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge, 718a7a7cbe3SChaitanya Kulkarni struct scatterlist *sg) 719a7a7cbe3SChaitanya Kulkarni { 720a7a7cbe3SChaitanya Kulkarni sge->addr = cpu_to_le64(sg_dma_address(sg)); 721a7a7cbe3SChaitanya Kulkarni sge->length = cpu_to_le32(sg_dma_len(sg)); 722a7a7cbe3SChaitanya Kulkarni sge->type = NVME_SGL_FMT_DATA_DESC << 4; 723a7a7cbe3SChaitanya Kulkarni } 724a7a7cbe3SChaitanya Kulkarni 725a7a7cbe3SChaitanya Kulkarni static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge, 726a7a7cbe3SChaitanya Kulkarni dma_addr_t dma_addr, int entries) 727a7a7cbe3SChaitanya Kulkarni { 728a7a7cbe3SChaitanya Kulkarni sge->addr = cpu_to_le64(dma_addr); 729a7a7cbe3SChaitanya Kulkarni if (entries < SGES_PER_PAGE) { 730a7a7cbe3SChaitanya Kulkarni sge->length = cpu_to_le32(entries * sizeof(*sge)); 731a7a7cbe3SChaitanya Kulkarni sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4; 732a7a7cbe3SChaitanya Kulkarni } else { 733a7a7cbe3SChaitanya Kulkarni sge->length = cpu_to_le32(PAGE_SIZE); 734a7a7cbe3SChaitanya Kulkarni sge->type = NVME_SGL_FMT_SEG_DESC << 4; 735a7a7cbe3SChaitanya Kulkarni } 736a7a7cbe3SChaitanya Kulkarni } 737a7a7cbe3SChaitanya Kulkarni 738a7a7cbe3SChaitanya Kulkarni static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, 739b0f2853bSChristoph Hellwig struct request *req, struct nvme_rw_command *cmd, int entries) 740a7a7cbe3SChaitanya Kulkarni { 741a7a7cbe3SChaitanya Kulkarni struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 742a7a7cbe3SChaitanya Kulkarni struct dma_pool *pool; 743a7a7cbe3SChaitanya Kulkarni struct nvme_sgl_desc *sg_list; 744a7a7cbe3SChaitanya Kulkarni struct scatterlist *sg = iod->sg; 745a7a7cbe3SChaitanya Kulkarni dma_addr_t sgl_dma; 746b0f2853bSChristoph Hellwig int i = 0; 747a7a7cbe3SChaitanya Kulkarni 748a7a7cbe3SChaitanya Kulkarni /* setting the transfer type as SGL */ 749a7a7cbe3SChaitanya Kulkarni cmd->flags = NVME_CMD_SGL_METABUF; 750a7a7cbe3SChaitanya Kulkarni 751b0f2853bSChristoph Hellwig if (entries == 1) { 752a7a7cbe3SChaitanya Kulkarni nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg); 753a7a7cbe3SChaitanya Kulkarni return BLK_STS_OK; 754a7a7cbe3SChaitanya Kulkarni } 755a7a7cbe3SChaitanya Kulkarni 756a7a7cbe3SChaitanya Kulkarni if (entries <= (256 / sizeof(struct nvme_sgl_desc))) { 757a7a7cbe3SChaitanya Kulkarni pool = dev->prp_small_pool; 758a7a7cbe3SChaitanya Kulkarni iod->npages = 0; 759a7a7cbe3SChaitanya Kulkarni } else { 760a7a7cbe3SChaitanya Kulkarni pool = dev->prp_page_pool; 761a7a7cbe3SChaitanya Kulkarni iod->npages = 1; 762a7a7cbe3SChaitanya Kulkarni } 763a7a7cbe3SChaitanya Kulkarni 764a7a7cbe3SChaitanya Kulkarni sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); 765a7a7cbe3SChaitanya Kulkarni if (!sg_list) { 766a7a7cbe3SChaitanya Kulkarni iod->npages = -1; 767a7a7cbe3SChaitanya Kulkarni return BLK_STS_RESOURCE; 768a7a7cbe3SChaitanya Kulkarni } 769a7a7cbe3SChaitanya Kulkarni 770a7a7cbe3SChaitanya Kulkarni nvme_pci_iod_list(req)[0] = sg_list; 771a7a7cbe3SChaitanya Kulkarni iod->first_dma = sgl_dma; 772a7a7cbe3SChaitanya Kulkarni 773a7a7cbe3SChaitanya Kulkarni nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries); 774a7a7cbe3SChaitanya Kulkarni 775a7a7cbe3SChaitanya Kulkarni do { 776a7a7cbe3SChaitanya Kulkarni if (i == SGES_PER_PAGE) { 777a7a7cbe3SChaitanya Kulkarni struct nvme_sgl_desc *old_sg_desc = sg_list; 778a7a7cbe3SChaitanya Kulkarni struct nvme_sgl_desc *link = &old_sg_desc[i - 1]; 779a7a7cbe3SChaitanya Kulkarni 780a7a7cbe3SChaitanya Kulkarni sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); 781a7a7cbe3SChaitanya Kulkarni if (!sg_list) 782fa073216SChristoph Hellwig goto free_sgls; 783a7a7cbe3SChaitanya Kulkarni 784a7a7cbe3SChaitanya Kulkarni i = 0; 785a7a7cbe3SChaitanya Kulkarni nvme_pci_iod_list(req)[iod->npages++] = sg_list; 786a7a7cbe3SChaitanya Kulkarni sg_list[i++] = *link; 787a7a7cbe3SChaitanya Kulkarni nvme_pci_sgl_set_seg(link, sgl_dma, entries); 788a7a7cbe3SChaitanya Kulkarni } 789a7a7cbe3SChaitanya Kulkarni 790a7a7cbe3SChaitanya Kulkarni nvme_pci_sgl_set_data(&sg_list[i++], sg); 791a7a7cbe3SChaitanya Kulkarni sg = sg_next(sg); 792b0f2853bSChristoph Hellwig } while (--entries > 0); 793a7a7cbe3SChaitanya Kulkarni 794a7a7cbe3SChaitanya Kulkarni return BLK_STS_OK; 795fa073216SChristoph Hellwig free_sgls: 796fa073216SChristoph Hellwig nvme_free_sgls(dev, req); 797fa073216SChristoph Hellwig return BLK_STS_RESOURCE; 798a7a7cbe3SChaitanya Kulkarni } 799a7a7cbe3SChaitanya Kulkarni 800dff824b2SChristoph Hellwig static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev, 801dff824b2SChristoph Hellwig struct request *req, struct nvme_rw_command *cmnd, 802dff824b2SChristoph Hellwig struct bio_vec *bv) 803dff824b2SChristoph Hellwig { 804dff824b2SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 8056c3c05b0SChaitanya Kulkarni unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1); 8066c3c05b0SChaitanya Kulkarni unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset; 807dff824b2SChristoph Hellwig 808dff824b2SChristoph Hellwig iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); 809dff824b2SChristoph Hellwig if (dma_mapping_error(dev->dev, iod->first_dma)) 810dff824b2SChristoph Hellwig return BLK_STS_RESOURCE; 811dff824b2SChristoph Hellwig iod->dma_len = bv->bv_len; 812dff824b2SChristoph Hellwig 813dff824b2SChristoph Hellwig cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); 814dff824b2SChristoph Hellwig if (bv->bv_len > first_prp_len) 815dff824b2SChristoph Hellwig cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); 816359c1f88SBaolin Wang return BLK_STS_OK; 817dff824b2SChristoph Hellwig } 818dff824b2SChristoph Hellwig 81929791057SChristoph Hellwig static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev, 82029791057SChristoph Hellwig struct request *req, struct nvme_rw_command *cmnd, 82129791057SChristoph Hellwig struct bio_vec *bv) 82229791057SChristoph Hellwig { 82329791057SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 82429791057SChristoph Hellwig 82529791057SChristoph Hellwig iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); 82629791057SChristoph Hellwig if (dma_mapping_error(dev->dev, iod->first_dma)) 82729791057SChristoph Hellwig return BLK_STS_RESOURCE; 82829791057SChristoph Hellwig iod->dma_len = bv->bv_len; 82929791057SChristoph Hellwig 830049bf372SKlaus Birkelund Jensen cmnd->flags = NVME_CMD_SGL_METABUF; 83129791057SChristoph Hellwig cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma); 83229791057SChristoph Hellwig cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len); 83329791057SChristoph Hellwig cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4; 834359c1f88SBaolin Wang return BLK_STS_OK; 83529791057SChristoph Hellwig } 83629791057SChristoph Hellwig 837fc17b653SChristoph Hellwig static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, 838b131c61dSChristoph Hellwig struct nvme_command *cmnd) 83957dacad5SJay Sternberg { 840f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 84170479b71SChristoph Hellwig blk_status_t ret = BLK_STS_RESOURCE; 842b0f2853bSChristoph Hellwig int nr_mapped; 84357dacad5SJay Sternberg 844dff824b2SChristoph Hellwig if (blk_rq_nr_phys_segments(req) == 1) { 845dff824b2SChristoph Hellwig struct bio_vec bv = req_bvec(req); 846dff824b2SChristoph Hellwig 847dff824b2SChristoph Hellwig if (!is_pci_p2pdma_page(bv.bv_page)) { 8486c3c05b0SChaitanya Kulkarni if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2) 849dff824b2SChristoph Hellwig return nvme_setup_prp_simple(dev, req, 850dff824b2SChristoph Hellwig &cmnd->rw, &bv); 85129791057SChristoph Hellwig 852e51183beSNiklas Cassel if (iod->nvmeq->qid && sgl_threshold && 853253a0b76SChaitanya Kulkarni nvme_ctrl_sgl_supported(&dev->ctrl)) 85429791057SChristoph Hellwig return nvme_setup_sgl_simple(dev, req, 85529791057SChristoph Hellwig &cmnd->rw, &bv); 856dff824b2SChristoph Hellwig } 857dff824b2SChristoph Hellwig } 858dff824b2SChristoph Hellwig 859dff824b2SChristoph Hellwig iod->dma_len = 0; 8609b048119SChristoph Hellwig iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC); 8619b048119SChristoph Hellwig if (!iod->sg) 8629b048119SChristoph Hellwig return BLK_STS_RESOURCE; 863f9d03f96SChristoph Hellwig sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); 86470479b71SChristoph Hellwig iod->nents = blk_rq_map_sg(req->q, req, iod->sg); 865ba1ca37eSChristoph Hellwig if (!iod->nents) 866fa073216SChristoph Hellwig goto out_free_sg; 867ba1ca37eSChristoph Hellwig 868e0596ab2SLogan Gunthorpe if (is_pci_p2pdma_page(sg_page(iod->sg))) 8692b9f4bb2SLogan Gunthorpe nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg, 8702b9f4bb2SLogan Gunthorpe iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN); 871e0596ab2SLogan Gunthorpe else 872e0596ab2SLogan Gunthorpe nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, 87370479b71SChristoph Hellwig rq_dma_dir(req), DMA_ATTR_NO_WARN); 874b0f2853bSChristoph Hellwig if (!nr_mapped) 875fa073216SChristoph Hellwig goto out_free_sg; 876ba1ca37eSChristoph Hellwig 87770479b71SChristoph Hellwig iod->use_sgl = nvme_pci_use_sgls(dev, req); 878955b1b5aSMinwoo Im if (iod->use_sgl) 879b0f2853bSChristoph Hellwig ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped); 880a7a7cbe3SChaitanya Kulkarni else 881a7a7cbe3SChaitanya Kulkarni ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); 8824aedb705SChristoph Hellwig if (ret != BLK_STS_OK) 883fa073216SChristoph Hellwig goto out_unmap_sg; 884fa073216SChristoph Hellwig return BLK_STS_OK; 885fa073216SChristoph Hellwig 886fa073216SChristoph Hellwig out_unmap_sg: 887fa073216SChristoph Hellwig nvme_unmap_sg(dev, req); 888fa073216SChristoph Hellwig out_free_sg: 889fa073216SChristoph Hellwig mempool_free(iod->sg, dev->iod_mempool); 890ba1ca37eSChristoph Hellwig return ret; 89157dacad5SJay Sternberg } 89257dacad5SJay Sternberg 8934aedb705SChristoph Hellwig static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req, 8944aedb705SChristoph Hellwig struct nvme_command *cmnd) 8954aedb705SChristoph Hellwig { 8964aedb705SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 8974aedb705SChristoph Hellwig 8984aedb705SChristoph Hellwig iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), 8994aedb705SChristoph Hellwig rq_dma_dir(req), 0); 9004aedb705SChristoph Hellwig if (dma_mapping_error(dev->dev, iod->meta_dma)) 9014aedb705SChristoph Hellwig return BLK_STS_IOERR; 9024aedb705SChristoph Hellwig cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); 903359c1f88SBaolin Wang return BLK_STS_OK; 9044aedb705SChristoph Hellwig } 9054aedb705SChristoph Hellwig 90657dacad5SJay Sternberg /* 90757dacad5SJay Sternberg * NOTE: ns is NULL when called on the admin queue. 90857dacad5SJay Sternberg */ 909fc17b653SChristoph Hellwig static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, 91057dacad5SJay Sternberg const struct blk_mq_queue_data *bd) 91157dacad5SJay Sternberg { 91257dacad5SJay Sternberg struct nvme_ns *ns = hctx->queue->queuedata; 91357dacad5SJay Sternberg struct nvme_queue *nvmeq = hctx->driver_data; 91457dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 91557dacad5SJay Sternberg struct request *req = bd->rq; 9169b048119SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 917af7fae85SKeith Busch struct nvme_command *cmnd = &iod->cmd; 918ebe6d874SChristoph Hellwig blk_status_t ret; 91957dacad5SJay Sternberg 9209b048119SChristoph Hellwig iod->aborted = 0; 9219b048119SChristoph Hellwig iod->npages = -1; 9229b048119SChristoph Hellwig iod->nents = 0; 9239b048119SChristoph Hellwig 924d1f06f4aSJens Axboe /* 925d1f06f4aSJens Axboe * We should not need to do this, but we're still using this to 926d1f06f4aSJens Axboe * ensure we can drain requests on a dying queue. 927d1f06f4aSJens Axboe */ 9284e224106SChristoph Hellwig if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) 929d1f06f4aSJens Axboe return BLK_STS_IOERR; 930d1f06f4aSJens Axboe 931d4060d2bSTao Chiu if (!nvme_check_ready(&dev->ctrl, req, true)) 932d4060d2bSTao Chiu return nvme_fail_nonready_command(&dev->ctrl, req); 933d4060d2bSTao Chiu 934f4b9e6c9SKeith Busch ret = nvme_setup_cmd(ns, req); 935fc17b653SChristoph Hellwig if (ret) 936f4800d6dSChristoph Hellwig return ret; 93757dacad5SJay Sternberg 938fc17b653SChristoph Hellwig if (blk_rq_nr_phys_segments(req)) { 939af7fae85SKeith Busch ret = nvme_map_data(dev, req, cmnd); 940fc17b653SChristoph Hellwig if (ret) 9419b048119SChristoph Hellwig goto out_free_cmd; 942fc17b653SChristoph Hellwig } 943ba1ca37eSChristoph Hellwig 9444aedb705SChristoph Hellwig if (blk_integrity_rq(req)) { 945af7fae85SKeith Busch ret = nvme_map_metadata(dev, req, cmnd); 9464aedb705SChristoph Hellwig if (ret) 9474aedb705SChristoph Hellwig goto out_unmap_data; 9484aedb705SChristoph Hellwig } 9494aedb705SChristoph Hellwig 950aae239e1SChristoph Hellwig blk_mq_start_request(req); 951af7fae85SKeith Busch nvme_submit_cmd(nvmeq, cmnd, bd->last); 952fc17b653SChristoph Hellwig return BLK_STS_OK; 9534aedb705SChristoph Hellwig out_unmap_data: 9544aedb705SChristoph Hellwig nvme_unmap_data(dev, req); 955f9d03f96SChristoph Hellwig out_free_cmd: 956f9d03f96SChristoph Hellwig nvme_cleanup_cmd(req); 957ba1ca37eSChristoph Hellwig return ret; 95857dacad5SJay Sternberg } 95957dacad5SJay Sternberg 96077f02a7aSChristoph Hellwig static void nvme_pci_complete_rq(struct request *req) 961eee417b0SChristoph Hellwig { 962f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 9634aedb705SChristoph Hellwig struct nvme_dev *dev = iod->nvmeq->dev; 964eee417b0SChristoph Hellwig 9654aedb705SChristoph Hellwig if (blk_integrity_rq(req)) 9664aedb705SChristoph Hellwig dma_unmap_page(dev->dev, iod->meta_dma, 9674aedb705SChristoph Hellwig rq_integrity_vec(req)->bv_len, rq_data_dir(req)); 968b15c592dSChristoph Hellwig if (blk_rq_nr_phys_segments(req)) 9694aedb705SChristoph Hellwig nvme_unmap_data(dev, req); 97077f02a7aSChristoph Hellwig nvme_complete_rq(req); 97157dacad5SJay Sternberg } 97257dacad5SJay Sternberg 973d783e0bdSMarta Rybczynska /* We read the CQE phase first to check if the rest of the entry is valid */ 974750dde44SChristoph Hellwig static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq) 975d783e0bdSMarta Rybczynska { 97674943d45SKeith Busch struct nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head]; 97774943d45SKeith Busch 97874943d45SKeith Busch return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase; 979d783e0bdSMarta Rybczynska } 980d783e0bdSMarta Rybczynska 981eb281c82SSagi Grimberg static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq) 98257dacad5SJay Sternberg { 983eb281c82SSagi Grimberg u16 head = nvmeq->cq_head; 98457dacad5SJay Sternberg 985eb281c82SSagi Grimberg if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, 986eb281c82SSagi Grimberg nvmeq->dbbuf_cq_ei)) 987eb281c82SSagi Grimberg writel(head, nvmeq->q_db + nvmeq->dev->db_stride); 988eb281c82SSagi Grimberg } 989adf68f21SChristoph Hellwig 990cfa27356SChristoph Hellwig static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq) 991cfa27356SChristoph Hellwig { 992cfa27356SChristoph Hellwig if (!nvmeq->qid) 993cfa27356SChristoph Hellwig return nvmeq->dev->admin_tagset.tags[0]; 994cfa27356SChristoph Hellwig return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; 995cfa27356SChristoph Hellwig } 996cfa27356SChristoph Hellwig 9975cb525c8SJens Axboe static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) 99857dacad5SJay Sternberg { 99974943d45SKeith Busch struct nvme_completion *cqe = &nvmeq->cqes[idx]; 100062df8016SLalithambika Krishnakumar __u16 command_id = READ_ONCE(cqe->command_id); 100157dacad5SJay Sternberg struct request *req; 1002adf68f21SChristoph Hellwig 1003adf68f21SChristoph Hellwig /* 1004adf68f21SChristoph Hellwig * AEN requests are special as they don't time out and can 1005adf68f21SChristoph Hellwig * survive any kind of queue freeze and often don't respond to 1006adf68f21SChristoph Hellwig * aborts. We don't even bother to allocate a struct request 1007adf68f21SChristoph Hellwig * for them but rather special case them here. 1008adf68f21SChristoph Hellwig */ 100962df8016SLalithambika Krishnakumar if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) { 10107bf58533SChristoph Hellwig nvme_complete_async_event(&nvmeq->dev->ctrl, 101183a12fb7SSagi Grimberg cqe->status, &cqe->result); 1012a0fa9647SJens Axboe return; 101357dacad5SJay Sternberg } 101457dacad5SJay Sternberg 1015e7006de6SSagi Grimberg req = nvme_find_rq(nvme_queue_tagset(nvmeq), command_id); 101650b7c243SXianting Tian if (unlikely(!req)) { 101750b7c243SXianting Tian dev_warn(nvmeq->dev->ctrl.device, 101850b7c243SXianting Tian "invalid id %d completed on queue %d\n", 101962df8016SLalithambika Krishnakumar command_id, le16_to_cpu(cqe->sq_id)); 102050b7c243SXianting Tian return; 102150b7c243SXianting Tian } 102250b7c243SXianting Tian 1023604c01d5Syupeng trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); 10242eb81a33SChristoph Hellwig if (!nvme_try_complete_req(req, cqe->status, cqe->result)) 1025ff029451SChristoph Hellwig nvme_pci_complete_rq(req); 102683a12fb7SSagi Grimberg } 102757dacad5SJay Sternberg 10285cb525c8SJens Axboe static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) 10295cb525c8SJens Axboe { 1030a0aac973SJK Kim u32 tmp = nvmeq->cq_head + 1; 1031a8de6639SAlexey Dobriyan 1032a8de6639SAlexey Dobriyan if (tmp == nvmeq->q_depth) { 1033920d13a8SSagi Grimberg nvmeq->cq_head = 0; 1034e2a366a4SAlexey Dobriyan nvmeq->cq_phase ^= 1; 1035a8de6639SAlexey Dobriyan } else { 1036a8de6639SAlexey Dobriyan nvmeq->cq_head = tmp; 1037920d13a8SSagi Grimberg } 1038a0fa9647SJens Axboe } 1039a0fa9647SJens Axboe 1040324b494cSKeith Busch static inline int nvme_process_cq(struct nvme_queue *nvmeq) 1041a0fa9647SJens Axboe { 10421052b8acSJens Axboe int found = 0; 104383a12fb7SSagi Grimberg 10441052b8acSJens Axboe while (nvme_cqe_pending(nvmeq)) { 10451052b8acSJens Axboe found++; 1046b69e2ef2SKeith Busch /* 1047b69e2ef2SKeith Busch * load-load control dependency between phase and the rest of 1048b69e2ef2SKeith Busch * the cqe requires a full read memory barrier 1049b69e2ef2SKeith Busch */ 1050b69e2ef2SKeith Busch dma_rmb(); 1051324b494cSKeith Busch nvme_handle_cqe(nvmeq, nvmeq->cq_head); 10525cb525c8SJens Axboe nvme_update_cq_head(nvmeq); 105357dacad5SJay Sternberg } 105457dacad5SJay Sternberg 1055324b494cSKeith Busch if (found) 1056eb281c82SSagi Grimberg nvme_ring_cq_doorbell(nvmeq); 10575cb525c8SJens Axboe return found; 105857dacad5SJay Sternberg } 105957dacad5SJay Sternberg 106057dacad5SJay Sternberg static irqreturn_t nvme_irq(int irq, void *data) 106157dacad5SJay Sternberg { 106257dacad5SJay Sternberg struct nvme_queue *nvmeq = data; 10635cb525c8SJens Axboe 1064324b494cSKeith Busch if (nvme_process_cq(nvmeq)) 106505fae499SChaitanya Kulkarni return IRQ_HANDLED; 106605fae499SChaitanya Kulkarni return IRQ_NONE; 106757dacad5SJay Sternberg } 106857dacad5SJay Sternberg 106957dacad5SJay Sternberg static irqreturn_t nvme_irq_check(int irq, void *data) 107057dacad5SJay Sternberg { 107157dacad5SJay Sternberg struct nvme_queue *nvmeq = data; 10724e523547SBaolin Wang 1073750dde44SChristoph Hellwig if (nvme_cqe_pending(nvmeq)) 107457dacad5SJay Sternberg return IRQ_WAKE_THREAD; 1075d783e0bdSMarta Rybczynska return IRQ_NONE; 107657dacad5SJay Sternberg } 107757dacad5SJay Sternberg 10780b2a8a9fSChristoph Hellwig /* 1079fa059b85SKeith Busch * Poll for completions for any interrupt driven queue 10800b2a8a9fSChristoph Hellwig * Can be called from any context. 10810b2a8a9fSChristoph Hellwig */ 1082fa059b85SKeith Busch static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) 1083a0fa9647SJens Axboe { 10843a7afd8eSChristoph Hellwig struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 1085a0fa9647SJens Axboe 1086fa059b85SKeith Busch WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); 1087fa059b85SKeith Busch 10883a7afd8eSChristoph Hellwig disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); 1089fa059b85SKeith Busch nvme_process_cq(nvmeq); 10903a7afd8eSChristoph Hellwig enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); 109191a509f8SChristoph Hellwig } 1092442e19b7SSagi Grimberg 10939743139cSJens Axboe static int nvme_poll(struct blk_mq_hw_ctx *hctx) 10947776db1cSKeith Busch { 10957776db1cSKeith Busch struct nvme_queue *nvmeq = hctx->driver_data; 1096dabcefabSJens Axboe bool found; 1097dabcefabSJens Axboe 1098dabcefabSJens Axboe if (!nvme_cqe_pending(nvmeq)) 1099dabcefabSJens Axboe return 0; 1100dabcefabSJens Axboe 11013a7afd8eSChristoph Hellwig spin_lock(&nvmeq->cq_poll_lock); 1102324b494cSKeith Busch found = nvme_process_cq(nvmeq); 11033a7afd8eSChristoph Hellwig spin_unlock(&nvmeq->cq_poll_lock); 1104dabcefabSJens Axboe 1105dabcefabSJens Axboe return found; 1106dabcefabSJens Axboe } 1107dabcefabSJens Axboe 1108ad22c355SKeith Busch static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) 110957dacad5SJay Sternberg { 1110f866fc42SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 1111147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[0]; 1112f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 111357dacad5SJay Sternberg 111457dacad5SJay Sternberg c.common.opcode = nvme_admin_async_event; 1115ad22c355SKeith Busch c.common.command_id = NVME_AQ_BLK_MQ_DEPTH; 111604f3eafdSJens Axboe nvme_submit_cmd(nvmeq, &c, true); 111757dacad5SJay Sternberg } 111857dacad5SJay Sternberg 111957dacad5SJay Sternberg static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) 112057dacad5SJay Sternberg { 1121f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 112257dacad5SJay Sternberg 112357dacad5SJay Sternberg c.delete_queue.opcode = opcode; 112457dacad5SJay Sternberg c.delete_queue.qid = cpu_to_le16(id); 112557dacad5SJay Sternberg 11261c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 112757dacad5SJay Sternberg } 112857dacad5SJay Sternberg 112957dacad5SJay Sternberg static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, 1130a8e3e0bbSJianchao Wang struct nvme_queue *nvmeq, s16 vector) 113157dacad5SJay Sternberg { 1132f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 11334b04cc6aSJens Axboe int flags = NVME_QUEUE_PHYS_CONTIG; 11344b04cc6aSJens Axboe 11357c349ddeSKeith Busch if (!test_bit(NVMEQ_POLLED, &nvmeq->flags)) 11364b04cc6aSJens Axboe flags |= NVME_CQ_IRQ_ENABLED; 113757dacad5SJay Sternberg 113857dacad5SJay Sternberg /* 113916772ae6SMinwoo Im * Note: we (ab)use the fact that the prp fields survive if no data 114057dacad5SJay Sternberg * is attached to the request. 114157dacad5SJay Sternberg */ 114257dacad5SJay Sternberg c.create_cq.opcode = nvme_admin_create_cq; 114357dacad5SJay Sternberg c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); 114457dacad5SJay Sternberg c.create_cq.cqid = cpu_to_le16(qid); 114557dacad5SJay Sternberg c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 114657dacad5SJay Sternberg c.create_cq.cq_flags = cpu_to_le16(flags); 1147a8e3e0bbSJianchao Wang c.create_cq.irq_vector = cpu_to_le16(vector); 114857dacad5SJay Sternberg 11491c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 115057dacad5SJay Sternberg } 115157dacad5SJay Sternberg 115257dacad5SJay Sternberg static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, 115357dacad5SJay Sternberg struct nvme_queue *nvmeq) 115457dacad5SJay Sternberg { 11559abd68efSJens Axboe struct nvme_ctrl *ctrl = &dev->ctrl; 1156f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 115781c1cd98SKeith Busch int flags = NVME_QUEUE_PHYS_CONTIG; 115857dacad5SJay Sternberg 115957dacad5SJay Sternberg /* 11609abd68efSJens Axboe * Some drives have a bug that auto-enables WRRU if MEDIUM isn't 11619abd68efSJens Axboe * set. Since URGENT priority is zeroes, it makes all queues 11629abd68efSJens Axboe * URGENT. 11639abd68efSJens Axboe */ 11649abd68efSJens Axboe if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ) 11659abd68efSJens Axboe flags |= NVME_SQ_PRIO_MEDIUM; 11669abd68efSJens Axboe 11679abd68efSJens Axboe /* 116816772ae6SMinwoo Im * Note: we (ab)use the fact that the prp fields survive if no data 116957dacad5SJay Sternberg * is attached to the request. 117057dacad5SJay Sternberg */ 117157dacad5SJay Sternberg c.create_sq.opcode = nvme_admin_create_sq; 117257dacad5SJay Sternberg c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); 117357dacad5SJay Sternberg c.create_sq.sqid = cpu_to_le16(qid); 117457dacad5SJay Sternberg c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 117557dacad5SJay Sternberg c.create_sq.sq_flags = cpu_to_le16(flags); 117657dacad5SJay Sternberg c.create_sq.cqid = cpu_to_le16(qid); 117757dacad5SJay Sternberg 11781c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 117957dacad5SJay Sternberg } 118057dacad5SJay Sternberg 118157dacad5SJay Sternberg static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) 118257dacad5SJay Sternberg { 118357dacad5SJay Sternberg return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid); 118457dacad5SJay Sternberg } 118557dacad5SJay Sternberg 118657dacad5SJay Sternberg static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) 118757dacad5SJay Sternberg { 118857dacad5SJay Sternberg return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); 118957dacad5SJay Sternberg } 119057dacad5SJay Sternberg 11912a842acaSChristoph Hellwig static void abort_endio(struct request *req, blk_status_t error) 119257dacad5SJay Sternberg { 1193f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 1194f4800d6dSChristoph Hellwig struct nvme_queue *nvmeq = iod->nvmeq; 119557dacad5SJay Sternberg 119627fa9bc5SChristoph Hellwig dev_warn(nvmeq->dev->ctrl.device, 119727fa9bc5SChristoph Hellwig "Abort status: 0x%x", nvme_req(req)->status); 1198e7a2a87dSChristoph Hellwig atomic_inc(&nvmeq->dev->ctrl.abort_limit); 1199e7a2a87dSChristoph Hellwig blk_mq_free_request(req); 120057dacad5SJay Sternberg } 120157dacad5SJay Sternberg 1202b2a0eb1aSKeith Busch static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) 1203b2a0eb1aSKeith Busch { 1204b2a0eb1aSKeith Busch /* If true, indicates loss of adapter communication, possibly by a 1205b2a0eb1aSKeith Busch * NVMe Subsystem reset. 1206b2a0eb1aSKeith Busch */ 1207b2a0eb1aSKeith Busch bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); 1208b2a0eb1aSKeith Busch 1209ad70062cSJianchao Wang /* If there is a reset/reinit ongoing, we shouldn't reset again. */ 1210ad70062cSJianchao Wang switch (dev->ctrl.state) { 1211ad70062cSJianchao Wang case NVME_CTRL_RESETTING: 1212ad6a0a52SMax Gurtovoy case NVME_CTRL_CONNECTING: 1213b2a0eb1aSKeith Busch return false; 1214ad70062cSJianchao Wang default: 1215ad70062cSJianchao Wang break; 1216ad70062cSJianchao Wang } 1217b2a0eb1aSKeith Busch 1218b2a0eb1aSKeith Busch /* We shouldn't reset unless the controller is on fatal error state 1219b2a0eb1aSKeith Busch * _or_ if we lost the communication with it. 1220b2a0eb1aSKeith Busch */ 1221b2a0eb1aSKeith Busch if (!(csts & NVME_CSTS_CFS) && !nssro) 1222b2a0eb1aSKeith Busch return false; 1223b2a0eb1aSKeith Busch 1224b2a0eb1aSKeith Busch return true; 1225b2a0eb1aSKeith Busch } 1226b2a0eb1aSKeith Busch 1227b2a0eb1aSKeith Busch static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) 1228b2a0eb1aSKeith Busch { 1229b2a0eb1aSKeith Busch /* Read a config register to help see what died. */ 1230b2a0eb1aSKeith Busch u16 pci_status; 1231b2a0eb1aSKeith Busch int result; 1232b2a0eb1aSKeith Busch 1233b2a0eb1aSKeith Busch result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, 1234b2a0eb1aSKeith Busch &pci_status); 1235b2a0eb1aSKeith Busch if (result == PCIBIOS_SUCCESSFUL) 1236b2a0eb1aSKeith Busch dev_warn(dev->ctrl.device, 1237b2a0eb1aSKeith Busch "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", 1238b2a0eb1aSKeith Busch csts, pci_status); 1239b2a0eb1aSKeith Busch else 1240b2a0eb1aSKeith Busch dev_warn(dev->ctrl.device, 1241b2a0eb1aSKeith Busch "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", 1242b2a0eb1aSKeith Busch csts, result); 1243b2a0eb1aSKeith Busch } 1244b2a0eb1aSKeith Busch 124531c7c7d2SChristoph Hellwig static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) 124657dacad5SJay Sternberg { 1247f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 1248f4800d6dSChristoph Hellwig struct nvme_queue *nvmeq = iod->nvmeq; 124957dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 125057dacad5SJay Sternberg struct request *abort_req; 1251f66e2804SChaitanya Kulkarni struct nvme_command cmd = { }; 1252b2a0eb1aSKeith Busch u32 csts = readl(dev->bar + NVME_REG_CSTS); 1253b2a0eb1aSKeith Busch 1254651438bbSWen Xiong /* If PCI error recovery process is happening, we cannot reset or 1255651438bbSWen Xiong * the recovery mechanism will surely fail. 1256651438bbSWen Xiong */ 1257651438bbSWen Xiong mb(); 1258651438bbSWen Xiong if (pci_channel_offline(to_pci_dev(dev->dev))) 1259651438bbSWen Xiong return BLK_EH_RESET_TIMER; 1260651438bbSWen Xiong 1261b2a0eb1aSKeith Busch /* 1262b2a0eb1aSKeith Busch * Reset immediately if the controller is failed 1263b2a0eb1aSKeith Busch */ 1264b2a0eb1aSKeith Busch if (nvme_should_reset(dev, csts)) { 1265b2a0eb1aSKeith Busch nvme_warn_reset(dev, csts); 1266b2a0eb1aSKeith Busch nvme_dev_disable(dev, false); 1267d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 1268db8c48e4SChristoph Hellwig return BLK_EH_DONE; 1269b2a0eb1aSKeith Busch } 127057dacad5SJay Sternberg 127131c7c7d2SChristoph Hellwig /* 12727776db1cSKeith Busch * Did we miss an interrupt? 12737776db1cSKeith Busch */ 1274fa059b85SKeith Busch if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) 1275fa059b85SKeith Busch nvme_poll(req->mq_hctx); 1276fa059b85SKeith Busch else 1277bf392a5dSKeith Busch nvme_poll_irqdisable(nvmeq); 1278fa059b85SKeith Busch 1279bf392a5dSKeith Busch if (blk_mq_request_completed(req)) { 12807776db1cSKeith Busch dev_warn(dev->ctrl.device, 12817776db1cSKeith Busch "I/O %d QID %d timeout, completion polled\n", 12827776db1cSKeith Busch req->tag, nvmeq->qid); 1283db8c48e4SChristoph Hellwig return BLK_EH_DONE; 12847776db1cSKeith Busch } 12857776db1cSKeith Busch 12867776db1cSKeith Busch /* 1287fd634f41SChristoph Hellwig * Shutdown immediately if controller times out while starting. The 1288fd634f41SChristoph Hellwig * reset work will see the pci device disabled when it gets the forced 1289fd634f41SChristoph Hellwig * cancellation error. All outstanding requests are completed on 1290db8c48e4SChristoph Hellwig * shutdown, so we return BLK_EH_DONE. 1291fd634f41SChristoph Hellwig */ 12924244140dSKeith Busch switch (dev->ctrl.state) { 12934244140dSKeith Busch case NVME_CTRL_CONNECTING: 12942036f726SKeith Busch nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 1295df561f66SGustavo A. R. Silva fallthrough; 12962036f726SKeith Busch case NVME_CTRL_DELETING: 1297b9cac43cSKeith Busch dev_warn_ratelimited(dev->ctrl.device, 1298fd634f41SChristoph Hellwig "I/O %d QID %d timeout, disable controller\n", 1299fd634f41SChristoph Hellwig req->tag, nvmeq->qid); 130027fa9bc5SChristoph Hellwig nvme_req(req)->flags |= NVME_REQ_CANCELLED; 13017ad92f65STong Zhang nvme_dev_disable(dev, true); 1302db8c48e4SChristoph Hellwig return BLK_EH_DONE; 130339a9dd81SKeith Busch case NVME_CTRL_RESETTING: 130439a9dd81SKeith Busch return BLK_EH_RESET_TIMER; 13054244140dSKeith Busch default: 13064244140dSKeith Busch break; 1307fd634f41SChristoph Hellwig } 1308fd634f41SChristoph Hellwig 1309fd634f41SChristoph Hellwig /* 1310e1569a16SKeith Busch * Shutdown the controller immediately and schedule a reset if the 1311e1569a16SKeith Busch * command was already aborted once before and still hasn't been 1312e1569a16SKeith Busch * returned to the driver, or if this is the admin queue. 131331c7c7d2SChristoph Hellwig */ 1314f4800d6dSChristoph Hellwig if (!nvmeq->qid || iod->aborted) { 13151b3c47c1SSagi Grimberg dev_warn(dev->ctrl.device, 131657dacad5SJay Sternberg "I/O %d QID %d timeout, reset controller\n", 131757dacad5SJay Sternberg req->tag, nvmeq->qid); 13187ad92f65STong Zhang nvme_req(req)->flags |= NVME_REQ_CANCELLED; 1319a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 1320d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 1321e1569a16SKeith Busch 1322db8c48e4SChristoph Hellwig return BLK_EH_DONE; 132357dacad5SJay Sternberg } 132457dacad5SJay Sternberg 1325e7a2a87dSChristoph Hellwig if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { 1326e7a2a87dSChristoph Hellwig atomic_inc(&dev->ctrl.abort_limit); 1327e7a2a87dSChristoph Hellwig return BLK_EH_RESET_TIMER; 1328e7a2a87dSChristoph Hellwig } 13297bf7d778SKeith Busch iod->aborted = 1; 133057dacad5SJay Sternberg 133157dacad5SJay Sternberg cmd.abort.opcode = nvme_admin_abort_cmd; 133257dacad5SJay Sternberg cmd.abort.cid = req->tag; 133357dacad5SJay Sternberg cmd.abort.sqid = cpu_to_le16(nvmeq->qid); 133457dacad5SJay Sternberg 13351b3c47c1SSagi Grimberg dev_warn(nvmeq->dev->ctrl.device, 13361b3c47c1SSagi Grimberg "I/O %d QID %d timeout, aborting\n", 133757dacad5SJay Sternberg req->tag, nvmeq->qid); 1338e7a2a87dSChristoph Hellwig 1339e7a2a87dSChristoph Hellwig abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd, 134039dfe844SChaitanya Kulkarni BLK_MQ_REQ_NOWAIT); 13416bf25d16SChristoph Hellwig if (IS_ERR(abort_req)) { 13426bf25d16SChristoph Hellwig atomic_inc(&dev->ctrl.abort_limit); 134331c7c7d2SChristoph Hellwig return BLK_EH_RESET_TIMER; 134457dacad5SJay Sternberg } 134557dacad5SJay Sternberg 1346e7a2a87dSChristoph Hellwig abort_req->end_io_data = NULL; 13478eeed0b5SGuoqing Jiang blk_execute_rq_nowait(NULL, abort_req, 0, abort_endio); 134857dacad5SJay Sternberg 134957dacad5SJay Sternberg /* 135057dacad5SJay Sternberg * The aborted req will be completed on receiving the abort req. 135157dacad5SJay Sternberg * We enable the timer again. If hit twice, it'll cause a device reset, 135257dacad5SJay Sternberg * as the device then is in a faulty state. 135357dacad5SJay Sternberg */ 135457dacad5SJay Sternberg return BLK_EH_RESET_TIMER; 135557dacad5SJay Sternberg } 135657dacad5SJay Sternberg 135757dacad5SJay Sternberg static void nvme_free_queue(struct nvme_queue *nvmeq) 135857dacad5SJay Sternberg { 13598a1d09a6SBenjamin Herrenschmidt dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq), 136057dacad5SJay Sternberg (void *)nvmeq->cqes, nvmeq->cq_dma_addr); 136163223078SChristoph Hellwig if (!nvmeq->sq_cmds) 136263223078SChristoph Hellwig return; 13630f238ff5SLogan Gunthorpe 136463223078SChristoph Hellwig if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { 136588a041f4SKeith Busch pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev), 13668a1d09a6SBenjamin Herrenschmidt nvmeq->sq_cmds, SQ_SIZE(nvmeq)); 136763223078SChristoph Hellwig } else { 13688a1d09a6SBenjamin Herrenschmidt dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq), 136963223078SChristoph Hellwig nvmeq->sq_cmds, nvmeq->sq_dma_addr); 13700f238ff5SLogan Gunthorpe } 137157dacad5SJay Sternberg } 137257dacad5SJay Sternberg 137357dacad5SJay Sternberg static void nvme_free_queues(struct nvme_dev *dev, int lowest) 137457dacad5SJay Sternberg { 137557dacad5SJay Sternberg int i; 137657dacad5SJay Sternberg 1377d858e5f0SSagi Grimberg for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { 1378d858e5f0SSagi Grimberg dev->ctrl.queue_count--; 1379147b27e4SSagi Grimberg nvme_free_queue(&dev->queues[i]); 138057dacad5SJay Sternberg } 138157dacad5SJay Sternberg } 138257dacad5SJay Sternberg 138357dacad5SJay Sternberg /** 138457dacad5SJay Sternberg * nvme_suspend_queue - put queue into suspended state 138540581d1aSBart Van Assche * @nvmeq: queue to suspend 138657dacad5SJay Sternberg */ 138757dacad5SJay Sternberg static int nvme_suspend_queue(struct nvme_queue *nvmeq) 138857dacad5SJay Sternberg { 13894e224106SChristoph Hellwig if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) 139057dacad5SJay Sternberg return 1; 139157dacad5SJay Sternberg 13924e224106SChristoph Hellwig /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */ 1393d1f06f4aSJens Axboe mb(); 139457dacad5SJay Sternberg 13954e224106SChristoph Hellwig nvmeq->dev->online_queues--; 13961c63dc66SChristoph Hellwig if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) 1397c81545f9SSagi Grimberg blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q); 13987c349ddeSKeith Busch if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags)) 13994e224106SChristoph Hellwig pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq); 140057dacad5SJay Sternberg return 0; 140157dacad5SJay Sternberg } 140257dacad5SJay Sternberg 14038fae268bSKeith Busch static void nvme_suspend_io_queues(struct nvme_dev *dev) 14048fae268bSKeith Busch { 14058fae268bSKeith Busch int i; 14068fae268bSKeith Busch 14078fae268bSKeith Busch for (i = dev->ctrl.queue_count - 1; i > 0; i--) 14088fae268bSKeith Busch nvme_suspend_queue(&dev->queues[i]); 14098fae268bSKeith Busch } 14108fae268bSKeith Busch 1411a5cdb68cSKeith Busch static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) 141257dacad5SJay Sternberg { 1413147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[0]; 141457dacad5SJay Sternberg 1415a5cdb68cSKeith Busch if (shutdown) 1416a5cdb68cSKeith Busch nvme_shutdown_ctrl(&dev->ctrl); 1417a5cdb68cSKeith Busch else 1418b5b05048SSagi Grimberg nvme_disable_ctrl(&dev->ctrl); 141957dacad5SJay Sternberg 1420bf392a5dSKeith Busch nvme_poll_irqdisable(nvmeq); 142157dacad5SJay Sternberg } 142257dacad5SJay Sternberg 1423fa46c6fbSKeith Busch /* 1424fa46c6fbSKeith Busch * Called only on a device that has been disabled and after all other threads 14259210c075SDongli Zhang * that can check this device's completion queues have synced, except 14269210c075SDongli Zhang * nvme_poll(). This is the last chance for the driver to see a natural 14279210c075SDongli Zhang * completion before nvme_cancel_request() terminates all incomplete requests. 1428fa46c6fbSKeith Busch */ 1429fa46c6fbSKeith Busch static void nvme_reap_pending_cqes(struct nvme_dev *dev) 1430fa46c6fbSKeith Busch { 1431fa46c6fbSKeith Busch int i; 1432fa46c6fbSKeith Busch 14339210c075SDongli Zhang for (i = dev->ctrl.queue_count - 1; i > 0; i--) { 14349210c075SDongli Zhang spin_lock(&dev->queues[i].cq_poll_lock); 1435324b494cSKeith Busch nvme_process_cq(&dev->queues[i]); 14369210c075SDongli Zhang spin_unlock(&dev->queues[i].cq_poll_lock); 14379210c075SDongli Zhang } 1438fa46c6fbSKeith Busch } 1439fa46c6fbSKeith Busch 144057dacad5SJay Sternberg static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, 144157dacad5SJay Sternberg int entry_size) 144257dacad5SJay Sternberg { 144357dacad5SJay Sternberg int q_depth = dev->q_depth; 14445fd4ce1bSChristoph Hellwig unsigned q_size_aligned = roundup(q_depth * entry_size, 14456c3c05b0SChaitanya Kulkarni NVME_CTRL_PAGE_SIZE); 144657dacad5SJay Sternberg 144757dacad5SJay Sternberg if (q_size_aligned * nr_io_queues > dev->cmb_size) { 144857dacad5SJay Sternberg u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); 14494e523547SBaolin Wang 14506c3c05b0SChaitanya Kulkarni mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE); 145157dacad5SJay Sternberg q_depth = div_u64(mem_per_q, entry_size); 145257dacad5SJay Sternberg 145357dacad5SJay Sternberg /* 145457dacad5SJay Sternberg * Ensure the reduced q_depth is above some threshold where it 145557dacad5SJay Sternberg * would be better to map queues in system memory with the 145657dacad5SJay Sternberg * original depth 145757dacad5SJay Sternberg */ 145857dacad5SJay Sternberg if (q_depth < 64) 145957dacad5SJay Sternberg return -ENOMEM; 146057dacad5SJay Sternberg } 146157dacad5SJay Sternberg 146257dacad5SJay Sternberg return q_depth; 146357dacad5SJay Sternberg } 146457dacad5SJay Sternberg 146557dacad5SJay Sternberg static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, 14668a1d09a6SBenjamin Herrenschmidt int qid) 146757dacad5SJay Sternberg { 14680f238ff5SLogan Gunthorpe struct pci_dev *pdev = to_pci_dev(dev->dev); 1469815c6704SKeith Busch 14700f238ff5SLogan Gunthorpe if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { 14718a1d09a6SBenjamin Herrenschmidt nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq)); 1472bfac8e9fSAlan Mikhak if (nvmeq->sq_cmds) { 14730f238ff5SLogan Gunthorpe nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, 14740f238ff5SLogan Gunthorpe nvmeq->sq_cmds); 147563223078SChristoph Hellwig if (nvmeq->sq_dma_addr) { 147663223078SChristoph Hellwig set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); 147763223078SChristoph Hellwig return 0; 147863223078SChristoph Hellwig } 1479bfac8e9fSAlan Mikhak 14808a1d09a6SBenjamin Herrenschmidt pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq)); 1481bfac8e9fSAlan Mikhak } 14820f238ff5SLogan Gunthorpe } 14830f238ff5SLogan Gunthorpe 14848a1d09a6SBenjamin Herrenschmidt nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq), 148557dacad5SJay Sternberg &nvmeq->sq_dma_addr, GFP_KERNEL); 148657dacad5SJay Sternberg if (!nvmeq->sq_cmds) 148757dacad5SJay Sternberg return -ENOMEM; 148857dacad5SJay Sternberg return 0; 148957dacad5SJay Sternberg } 149057dacad5SJay Sternberg 1491a6ff7262SKeith Busch static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) 149257dacad5SJay Sternberg { 1493147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[qid]; 149457dacad5SJay Sternberg 149562314e40SKeith Busch if (dev->ctrl.queue_count > qid) 149662314e40SKeith Busch return 0; 149757dacad5SJay Sternberg 1498c1e0cc7eSBenjamin Herrenschmidt nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES; 14998a1d09a6SBenjamin Herrenschmidt nvmeq->q_depth = depth; 15008a1d09a6SBenjamin Herrenschmidt nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), 150157dacad5SJay Sternberg &nvmeq->cq_dma_addr, GFP_KERNEL); 150257dacad5SJay Sternberg if (!nvmeq->cqes) 150357dacad5SJay Sternberg goto free_nvmeq; 150457dacad5SJay Sternberg 15058a1d09a6SBenjamin Herrenschmidt if (nvme_alloc_sq_cmds(dev, nvmeq, qid)) 150657dacad5SJay Sternberg goto free_cqdma; 150757dacad5SJay Sternberg 150857dacad5SJay Sternberg nvmeq->dev = dev; 15091ab0cd69SJens Axboe spin_lock_init(&nvmeq->sq_lock); 15103a7afd8eSChristoph Hellwig spin_lock_init(&nvmeq->cq_poll_lock); 151157dacad5SJay Sternberg nvmeq->cq_head = 0; 151257dacad5SJay Sternberg nvmeq->cq_phase = 1; 151357dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 151457dacad5SJay Sternberg nvmeq->qid = qid; 1515d858e5f0SSagi Grimberg dev->ctrl.queue_count++; 151657dacad5SJay Sternberg 1517147b27e4SSagi Grimberg return 0; 151857dacad5SJay Sternberg 151957dacad5SJay Sternberg free_cqdma: 15208a1d09a6SBenjamin Herrenschmidt dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, 152157dacad5SJay Sternberg nvmeq->cq_dma_addr); 152257dacad5SJay Sternberg free_nvmeq: 1523147b27e4SSagi Grimberg return -ENOMEM; 152457dacad5SJay Sternberg } 152557dacad5SJay Sternberg 1526dca51e78SChristoph Hellwig static int queue_request_irq(struct nvme_queue *nvmeq) 152757dacad5SJay Sternberg { 15280ff199cbSChristoph Hellwig struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 15290ff199cbSChristoph Hellwig int nr = nvmeq->dev->ctrl.instance; 15300ff199cbSChristoph Hellwig 15310ff199cbSChristoph Hellwig if (use_threaded_interrupts) { 15320ff199cbSChristoph Hellwig return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, 15330ff199cbSChristoph Hellwig nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 15340ff199cbSChristoph Hellwig } else { 15350ff199cbSChristoph Hellwig return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, 15360ff199cbSChristoph Hellwig NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 15370ff199cbSChristoph Hellwig } 153857dacad5SJay Sternberg } 153957dacad5SJay Sternberg 154057dacad5SJay Sternberg static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) 154157dacad5SJay Sternberg { 154257dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 154357dacad5SJay Sternberg 154457dacad5SJay Sternberg nvmeq->sq_tail = 0; 154538210800SKeith Busch nvmeq->last_sq_tail = 0; 154657dacad5SJay Sternberg nvmeq->cq_head = 0; 154757dacad5SJay Sternberg nvmeq->cq_phase = 1; 154857dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 15498a1d09a6SBenjamin Herrenschmidt memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); 1550f9f38e33SHelen Koike nvme_dbbuf_init(dev, nvmeq, qid); 155157dacad5SJay Sternberg dev->online_queues++; 15523a7afd8eSChristoph Hellwig wmb(); /* ensure the first interrupt sees the initialization */ 155357dacad5SJay Sternberg } 155457dacad5SJay Sternberg 1555e4b9852aSCasey Chen /* 1556e4b9852aSCasey Chen * Try getting shutdown_lock while setting up IO queues. 1557e4b9852aSCasey Chen */ 1558e4b9852aSCasey Chen static int nvme_setup_io_queues_trylock(struct nvme_dev *dev) 1559e4b9852aSCasey Chen { 1560e4b9852aSCasey Chen /* 1561e4b9852aSCasey Chen * Give up if the lock is being held by nvme_dev_disable. 1562e4b9852aSCasey Chen */ 1563e4b9852aSCasey Chen if (!mutex_trylock(&dev->shutdown_lock)) 1564e4b9852aSCasey Chen return -ENODEV; 1565e4b9852aSCasey Chen 1566e4b9852aSCasey Chen /* 1567e4b9852aSCasey Chen * Controller is in wrong state, fail early. 1568e4b9852aSCasey Chen */ 1569e4b9852aSCasey Chen if (dev->ctrl.state != NVME_CTRL_CONNECTING) { 1570e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 1571e4b9852aSCasey Chen return -ENODEV; 1572e4b9852aSCasey Chen } 1573e4b9852aSCasey Chen 1574e4b9852aSCasey Chen return 0; 1575e4b9852aSCasey Chen } 1576e4b9852aSCasey Chen 15774b04cc6aSJens Axboe static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) 157857dacad5SJay Sternberg { 157957dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 158057dacad5SJay Sternberg int result; 15817c349ddeSKeith Busch u16 vector = 0; 158257dacad5SJay Sternberg 1583d1ed6aa1SChristoph Hellwig clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 1584d1ed6aa1SChristoph Hellwig 158522b55601SKeith Busch /* 158622b55601SKeith Busch * A queue's vector matches the queue identifier unless the controller 158722b55601SKeith Busch * has only one vector available. 158822b55601SKeith Busch */ 15894b04cc6aSJens Axboe if (!polled) 1590a8e3e0bbSJianchao Wang vector = dev->num_vecs == 1 ? 0 : qid; 15914b04cc6aSJens Axboe else 15927c349ddeSKeith Busch set_bit(NVMEQ_POLLED, &nvmeq->flags); 15934b04cc6aSJens Axboe 1594a8e3e0bbSJianchao Wang result = adapter_alloc_cq(dev, qid, nvmeq, vector); 1595ded45505SKeith Busch if (result) 1596ded45505SKeith Busch return result; 159757dacad5SJay Sternberg 159857dacad5SJay Sternberg result = adapter_alloc_sq(dev, qid, nvmeq); 159957dacad5SJay Sternberg if (result < 0) 1600ded45505SKeith Busch return result; 1601c80b36cdSEdmund Nadolski if (result) 160257dacad5SJay Sternberg goto release_cq; 160357dacad5SJay Sternberg 1604a8e3e0bbSJianchao Wang nvmeq->cq_vector = vector; 16054b04cc6aSJens Axboe 1606e4b9852aSCasey Chen result = nvme_setup_io_queues_trylock(dev); 1607e4b9852aSCasey Chen if (result) 1608e4b9852aSCasey Chen return result; 1609e4b9852aSCasey Chen nvme_init_queue(nvmeq, qid); 16107c349ddeSKeith Busch if (!polled) { 1611dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 161257dacad5SJay Sternberg if (result < 0) 161357dacad5SJay Sternberg goto release_sq; 16144b04cc6aSJens Axboe } 161557dacad5SJay Sternberg 16164e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &nvmeq->flags); 1617e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 161857dacad5SJay Sternberg return result; 161957dacad5SJay Sternberg 162057dacad5SJay Sternberg release_sq: 1621f25a2dfcSJianchao Wang dev->online_queues--; 1622e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 162357dacad5SJay Sternberg adapter_delete_sq(dev, qid); 162457dacad5SJay Sternberg release_cq: 162557dacad5SJay Sternberg adapter_delete_cq(dev, qid); 162657dacad5SJay Sternberg return result; 162757dacad5SJay Sternberg } 162857dacad5SJay Sternberg 1629f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_admin_ops = { 163057dacad5SJay Sternberg .queue_rq = nvme_queue_rq, 163177f02a7aSChristoph Hellwig .complete = nvme_pci_complete_rq, 163257dacad5SJay Sternberg .init_hctx = nvme_admin_init_hctx, 16330350815aSChristoph Hellwig .init_request = nvme_init_request, 163457dacad5SJay Sternberg .timeout = nvme_timeout, 163557dacad5SJay Sternberg }; 163657dacad5SJay Sternberg 1637f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_ops = { 1638376f7ef8SChristoph Hellwig .queue_rq = nvme_queue_rq, 1639376f7ef8SChristoph Hellwig .complete = nvme_pci_complete_rq, 1640376f7ef8SChristoph Hellwig .commit_rqs = nvme_commit_rqs, 1641376f7ef8SChristoph Hellwig .init_hctx = nvme_init_hctx, 1642376f7ef8SChristoph Hellwig .init_request = nvme_init_request, 1643376f7ef8SChristoph Hellwig .map_queues = nvme_pci_map_queues, 1644376f7ef8SChristoph Hellwig .timeout = nvme_timeout, 1645c6d962aeSChristoph Hellwig .poll = nvme_poll, 1646dabcefabSJens Axboe }; 1647dabcefabSJens Axboe 164857dacad5SJay Sternberg static void nvme_dev_remove_admin(struct nvme_dev *dev) 164957dacad5SJay Sternberg { 16501c63dc66SChristoph Hellwig if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { 165169d9a99cSKeith Busch /* 165269d9a99cSKeith Busch * If the controller was reset during removal, it's possible 165369d9a99cSKeith Busch * user requests may be waiting on a stopped queue. Start the 165469d9a99cSKeith Busch * queue to flush these to completion. 165569d9a99cSKeith Busch */ 1656c81545f9SSagi Grimberg blk_mq_unquiesce_queue(dev->ctrl.admin_q); 16571c63dc66SChristoph Hellwig blk_cleanup_queue(dev->ctrl.admin_q); 165857dacad5SJay Sternberg blk_mq_free_tag_set(&dev->admin_tagset); 165957dacad5SJay Sternberg } 166057dacad5SJay Sternberg } 166157dacad5SJay Sternberg 166257dacad5SJay Sternberg static int nvme_alloc_admin_tags(struct nvme_dev *dev) 166357dacad5SJay Sternberg { 16641c63dc66SChristoph Hellwig if (!dev->ctrl.admin_q) { 166557dacad5SJay Sternberg dev->admin_tagset.ops = &nvme_mq_admin_ops; 166657dacad5SJay Sternberg dev->admin_tagset.nr_hw_queues = 1; 1667e3e9d50cSKeith Busch 166838dabe21SKeith Busch dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH; 1669dc96f938SChaitanya Kulkarni dev->admin_tagset.timeout = NVME_ADMIN_TIMEOUT; 1670d4ec47f1SMax Gurtovoy dev->admin_tagset.numa_node = dev->ctrl.numa_node; 1671d43f1ccfSChristoph Hellwig dev->admin_tagset.cmd_size = sizeof(struct nvme_iod); 1672d3484991SJens Axboe dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED; 167357dacad5SJay Sternberg dev->admin_tagset.driver_data = dev; 167457dacad5SJay Sternberg 167557dacad5SJay Sternberg if (blk_mq_alloc_tag_set(&dev->admin_tagset)) 167657dacad5SJay Sternberg return -ENOMEM; 167734b6c231SSagi Grimberg dev->ctrl.admin_tagset = &dev->admin_tagset; 167857dacad5SJay Sternberg 16791c63dc66SChristoph Hellwig dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset); 16801c63dc66SChristoph Hellwig if (IS_ERR(dev->ctrl.admin_q)) { 168157dacad5SJay Sternberg blk_mq_free_tag_set(&dev->admin_tagset); 168257dacad5SJay Sternberg return -ENOMEM; 168357dacad5SJay Sternberg } 16841c63dc66SChristoph Hellwig if (!blk_get_queue(dev->ctrl.admin_q)) { 168557dacad5SJay Sternberg nvme_dev_remove_admin(dev); 16861c63dc66SChristoph Hellwig dev->ctrl.admin_q = NULL; 168757dacad5SJay Sternberg return -ENODEV; 168857dacad5SJay Sternberg } 168957dacad5SJay Sternberg } else 1690c81545f9SSagi Grimberg blk_mq_unquiesce_queue(dev->ctrl.admin_q); 169157dacad5SJay Sternberg 169257dacad5SJay Sternberg return 0; 169357dacad5SJay Sternberg } 169457dacad5SJay Sternberg 169597f6ef64SXu Yu static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) 169697f6ef64SXu Yu { 169797f6ef64SXu Yu return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); 169897f6ef64SXu Yu } 169997f6ef64SXu Yu 170097f6ef64SXu Yu static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size) 170197f6ef64SXu Yu { 170297f6ef64SXu Yu struct pci_dev *pdev = to_pci_dev(dev->dev); 170397f6ef64SXu Yu 170497f6ef64SXu Yu if (size <= dev->bar_mapped_size) 170597f6ef64SXu Yu return 0; 170697f6ef64SXu Yu if (size > pci_resource_len(pdev, 0)) 170797f6ef64SXu Yu return -ENOMEM; 170897f6ef64SXu Yu if (dev->bar) 170997f6ef64SXu Yu iounmap(dev->bar); 171097f6ef64SXu Yu dev->bar = ioremap(pci_resource_start(pdev, 0), size); 171197f6ef64SXu Yu if (!dev->bar) { 171297f6ef64SXu Yu dev->bar_mapped_size = 0; 171397f6ef64SXu Yu return -ENOMEM; 171497f6ef64SXu Yu } 171597f6ef64SXu Yu dev->bar_mapped_size = size; 171697f6ef64SXu Yu dev->dbs = dev->bar + NVME_REG_DBS; 171797f6ef64SXu Yu 171897f6ef64SXu Yu return 0; 171997f6ef64SXu Yu } 172097f6ef64SXu Yu 172101ad0990SSagi Grimberg static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) 172257dacad5SJay Sternberg { 172357dacad5SJay Sternberg int result; 172457dacad5SJay Sternberg u32 aqa; 172557dacad5SJay Sternberg struct nvme_queue *nvmeq; 172657dacad5SJay Sternberg 172797f6ef64SXu Yu result = nvme_remap_bar(dev, db_bar_size(dev, 0)); 172897f6ef64SXu Yu if (result < 0) 172997f6ef64SXu Yu return result; 173097f6ef64SXu Yu 17318ef2074dSGabriel Krisman Bertazi dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? 173220d0dfe6SSagi Grimberg NVME_CAP_NSSRC(dev->ctrl.cap) : 0; 173357dacad5SJay Sternberg 17347a67cbeaSChristoph Hellwig if (dev->subsystem && 17357a67cbeaSChristoph Hellwig (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) 17367a67cbeaSChristoph Hellwig writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); 173757dacad5SJay Sternberg 1738b5b05048SSagi Grimberg result = nvme_disable_ctrl(&dev->ctrl); 173957dacad5SJay Sternberg if (result < 0) 174057dacad5SJay Sternberg return result; 174157dacad5SJay Sternberg 1742a6ff7262SKeith Busch result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); 1743147b27e4SSagi Grimberg if (result) 1744147b27e4SSagi Grimberg return result; 174557dacad5SJay Sternberg 1746635333e4SMax Gurtovoy dev->ctrl.numa_node = dev_to_node(dev->dev); 1747635333e4SMax Gurtovoy 1748147b27e4SSagi Grimberg nvmeq = &dev->queues[0]; 174957dacad5SJay Sternberg aqa = nvmeq->q_depth - 1; 175057dacad5SJay Sternberg aqa |= aqa << 16; 175157dacad5SJay Sternberg 17527a67cbeaSChristoph Hellwig writel(aqa, dev->bar + NVME_REG_AQA); 17537a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); 17547a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); 175557dacad5SJay Sternberg 1756c0f2f45bSSagi Grimberg result = nvme_enable_ctrl(&dev->ctrl); 175757dacad5SJay Sternberg if (result) 1758d4875622SKeith Busch return result; 175957dacad5SJay Sternberg 176057dacad5SJay Sternberg nvmeq->cq_vector = 0; 1761161b8be2SKeith Busch nvme_init_queue(nvmeq, 0); 1762dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 176357dacad5SJay Sternberg if (result) { 17647c349ddeSKeith Busch dev->online_queues--; 1765d4875622SKeith Busch return result; 176657dacad5SJay Sternberg } 176757dacad5SJay Sternberg 17684e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &nvmeq->flags); 176957dacad5SJay Sternberg return result; 177057dacad5SJay Sternberg } 177157dacad5SJay Sternberg 1772749941f2SChristoph Hellwig static int nvme_create_io_queues(struct nvme_dev *dev) 177357dacad5SJay Sternberg { 17744b04cc6aSJens Axboe unsigned i, max, rw_queues; 1775749941f2SChristoph Hellwig int ret = 0; 177657dacad5SJay Sternberg 1777d858e5f0SSagi Grimberg for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { 1778a6ff7262SKeith Busch if (nvme_alloc_queue(dev, i, dev->q_depth)) { 1779749941f2SChristoph Hellwig ret = -ENOMEM; 178057dacad5SJay Sternberg break; 1781749941f2SChristoph Hellwig } 1782749941f2SChristoph Hellwig } 178357dacad5SJay Sternberg 1784d858e5f0SSagi Grimberg max = min(dev->max_qid, dev->ctrl.queue_count - 1); 1785e20ba6e1SChristoph Hellwig if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) { 1786e20ba6e1SChristoph Hellwig rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] + 1787e20ba6e1SChristoph Hellwig dev->io_queues[HCTX_TYPE_READ]; 17884b04cc6aSJens Axboe } else { 17894b04cc6aSJens Axboe rw_queues = max; 17904b04cc6aSJens Axboe } 17914b04cc6aSJens Axboe 1792949928c1SKeith Busch for (i = dev->online_queues; i <= max; i++) { 17934b04cc6aSJens Axboe bool polled = i > rw_queues; 17944b04cc6aSJens Axboe 17954b04cc6aSJens Axboe ret = nvme_create_queue(&dev->queues[i], i, polled); 1796d4875622SKeith Busch if (ret) 179757dacad5SJay Sternberg break; 179857dacad5SJay Sternberg } 179957dacad5SJay Sternberg 1800749941f2SChristoph Hellwig /* 1801749941f2SChristoph Hellwig * Ignore failing Create SQ/CQ commands, we can continue with less 18028adb8c14SMinwoo Im * than the desired amount of queues, and even a controller without 18038adb8c14SMinwoo Im * I/O queues can still be used to issue admin commands. This might 1804749941f2SChristoph Hellwig * be useful to upgrade a buggy firmware for example. 1805749941f2SChristoph Hellwig */ 1806749941f2SChristoph Hellwig return ret >= 0 ? 0 : ret; 180757dacad5SJay Sternberg } 180857dacad5SJay Sternberg 180988de4598SChristoph Hellwig static u64 nvme_cmb_size_unit(struct nvme_dev *dev) 181057dacad5SJay Sternberg { 181188de4598SChristoph Hellwig u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK; 181288de4598SChristoph Hellwig 181388de4598SChristoph Hellwig return 1ULL << (12 + 4 * szu); 181488de4598SChristoph Hellwig } 181588de4598SChristoph Hellwig 181688de4598SChristoph Hellwig static u32 nvme_cmb_size(struct nvme_dev *dev) 181788de4598SChristoph Hellwig { 181888de4598SChristoph Hellwig return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK; 181988de4598SChristoph Hellwig } 182088de4598SChristoph Hellwig 1821f65efd6dSChristoph Hellwig static void nvme_map_cmb(struct nvme_dev *dev) 182257dacad5SJay Sternberg { 182388de4598SChristoph Hellwig u64 size, offset; 182457dacad5SJay Sternberg resource_size_t bar_size; 182557dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 18268969f1f8SChristoph Hellwig int bar; 182757dacad5SJay Sternberg 18289fe5c59fSKeith Busch if (dev->cmb_size) 18299fe5c59fSKeith Busch return; 18309fe5c59fSKeith Busch 183120d3bb92SKlaus Jensen if (NVME_CAP_CMBS(dev->ctrl.cap)) 183220d3bb92SKlaus Jensen writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC); 183320d3bb92SKlaus Jensen 18347a67cbeaSChristoph Hellwig dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); 1835f65efd6dSChristoph Hellwig if (!dev->cmbsz) 1836f65efd6dSChristoph Hellwig return; 1837202021c1SStephen Bates dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); 183857dacad5SJay Sternberg 183988de4598SChristoph Hellwig size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev); 184088de4598SChristoph Hellwig offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc); 18418969f1f8SChristoph Hellwig bar = NVME_CMB_BIR(dev->cmbloc); 18428969f1f8SChristoph Hellwig bar_size = pci_resource_len(pdev, bar); 184357dacad5SJay Sternberg 184457dacad5SJay Sternberg if (offset > bar_size) 1845f65efd6dSChristoph Hellwig return; 184657dacad5SJay Sternberg 184757dacad5SJay Sternberg /* 184820d3bb92SKlaus Jensen * Tell the controller about the host side address mapping the CMB, 184920d3bb92SKlaus Jensen * and enable CMB decoding for the NVMe 1.4+ scheme: 185020d3bb92SKlaus Jensen */ 185120d3bb92SKlaus Jensen if (NVME_CAP_CMBS(dev->ctrl.cap)) { 185220d3bb92SKlaus Jensen hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE | 185320d3bb92SKlaus Jensen (pci_bus_address(pdev, bar) + offset), 185420d3bb92SKlaus Jensen dev->bar + NVME_REG_CMBMSC); 185520d3bb92SKlaus Jensen } 185620d3bb92SKlaus Jensen 185720d3bb92SKlaus Jensen /* 185857dacad5SJay Sternberg * Controllers may support a CMB size larger than their BAR, 185957dacad5SJay Sternberg * for example, due to being behind a bridge. Reduce the CMB to 186057dacad5SJay Sternberg * the reported size of the BAR 186157dacad5SJay Sternberg */ 186257dacad5SJay Sternberg if (size > bar_size - offset) 186357dacad5SJay Sternberg size = bar_size - offset; 186457dacad5SJay Sternberg 18650f238ff5SLogan Gunthorpe if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { 18660f238ff5SLogan Gunthorpe dev_warn(dev->ctrl.device, 18670f238ff5SLogan Gunthorpe "failed to register the CMB\n"); 1868f65efd6dSChristoph Hellwig return; 18690f238ff5SLogan Gunthorpe } 18700f238ff5SLogan Gunthorpe 187157dacad5SJay Sternberg dev->cmb_size = size; 18720f238ff5SLogan Gunthorpe dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); 18730f238ff5SLogan Gunthorpe 18740f238ff5SLogan Gunthorpe if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == 18750f238ff5SLogan Gunthorpe (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) 18760f238ff5SLogan Gunthorpe pci_p2pmem_publish(pdev, true); 187757dacad5SJay Sternberg } 187857dacad5SJay Sternberg 187987ad72a5SChristoph Hellwig static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) 188057dacad5SJay Sternberg { 18816c3c05b0SChaitanya Kulkarni u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT; 18824033f35dSChristoph Hellwig u64 dma_addr = dev->host_mem_descs_dma; 1883f66e2804SChaitanya Kulkarni struct nvme_command c = { }; 188487ad72a5SChristoph Hellwig int ret; 188587ad72a5SChristoph Hellwig 188687ad72a5SChristoph Hellwig c.features.opcode = nvme_admin_set_features; 188787ad72a5SChristoph Hellwig c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); 188887ad72a5SChristoph Hellwig c.features.dword11 = cpu_to_le32(bits); 18896c3c05b0SChaitanya Kulkarni c.features.dword12 = cpu_to_le32(host_mem_size); 189087ad72a5SChristoph Hellwig c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr)); 189187ad72a5SChristoph Hellwig c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr)); 189287ad72a5SChristoph Hellwig c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); 189387ad72a5SChristoph Hellwig 189487ad72a5SChristoph Hellwig ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 189587ad72a5SChristoph Hellwig if (ret) { 189687ad72a5SChristoph Hellwig dev_warn(dev->ctrl.device, 189787ad72a5SChristoph Hellwig "failed to set host mem (err %d, flags %#x).\n", 189887ad72a5SChristoph Hellwig ret, bits); 189987ad72a5SChristoph Hellwig } 190087ad72a5SChristoph Hellwig return ret; 190187ad72a5SChristoph Hellwig } 190287ad72a5SChristoph Hellwig 190387ad72a5SChristoph Hellwig static void nvme_free_host_mem(struct nvme_dev *dev) 190487ad72a5SChristoph Hellwig { 190587ad72a5SChristoph Hellwig int i; 190687ad72a5SChristoph Hellwig 190787ad72a5SChristoph Hellwig for (i = 0; i < dev->nr_host_mem_descs; i++) { 190887ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; 19096c3c05b0SChaitanya Kulkarni size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE; 191087ad72a5SChristoph Hellwig 1911cc667f6dSLiviu Dudau dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], 1912cc667f6dSLiviu Dudau le64_to_cpu(desc->addr), 1913cc667f6dSLiviu Dudau DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 191487ad72a5SChristoph Hellwig } 191587ad72a5SChristoph Hellwig 191687ad72a5SChristoph Hellwig kfree(dev->host_mem_desc_bufs); 191787ad72a5SChristoph Hellwig dev->host_mem_desc_bufs = NULL; 19184033f35dSChristoph Hellwig dma_free_coherent(dev->dev, 19194033f35dSChristoph Hellwig dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), 19204033f35dSChristoph Hellwig dev->host_mem_descs, dev->host_mem_descs_dma); 192187ad72a5SChristoph Hellwig dev->host_mem_descs = NULL; 19227e5dd57eSMinwoo Im dev->nr_host_mem_descs = 0; 192387ad72a5SChristoph Hellwig } 192487ad72a5SChristoph Hellwig 192592dc6895SChristoph Hellwig static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, 192692dc6895SChristoph Hellwig u32 chunk_size) 192787ad72a5SChristoph Hellwig { 192887ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *descs; 192992dc6895SChristoph Hellwig u32 max_entries, len; 19304033f35dSChristoph Hellwig dma_addr_t descs_dma; 19312ee0e4edSDan Carpenter int i = 0; 193287ad72a5SChristoph Hellwig void **bufs; 19336fbcde66SMinwoo Im u64 size, tmp; 193487ad72a5SChristoph Hellwig 193587ad72a5SChristoph Hellwig tmp = (preferred + chunk_size - 1); 193687ad72a5SChristoph Hellwig do_div(tmp, chunk_size); 193787ad72a5SChristoph Hellwig max_entries = tmp; 1938044a9df1SChristoph Hellwig 1939044a9df1SChristoph Hellwig if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) 1940044a9df1SChristoph Hellwig max_entries = dev->ctrl.hmmaxd; 1941044a9df1SChristoph Hellwig 1942750afb08SLuis Chamberlain descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), 19434033f35dSChristoph Hellwig &descs_dma, GFP_KERNEL); 194487ad72a5SChristoph Hellwig if (!descs) 194587ad72a5SChristoph Hellwig goto out; 194687ad72a5SChristoph Hellwig 194787ad72a5SChristoph Hellwig bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL); 194887ad72a5SChristoph Hellwig if (!bufs) 194987ad72a5SChristoph Hellwig goto out_free_descs; 195087ad72a5SChristoph Hellwig 1951244a8fe4SMinwoo Im for (size = 0; size < preferred && i < max_entries; size += len) { 195287ad72a5SChristoph Hellwig dma_addr_t dma_addr; 195387ad72a5SChristoph Hellwig 195450cdb7c6SChristoph Hellwig len = min_t(u64, chunk_size, preferred - size); 195587ad72a5SChristoph Hellwig bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, 195687ad72a5SChristoph Hellwig DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 195787ad72a5SChristoph Hellwig if (!bufs[i]) 195887ad72a5SChristoph Hellwig break; 195987ad72a5SChristoph Hellwig 196087ad72a5SChristoph Hellwig descs[i].addr = cpu_to_le64(dma_addr); 19616c3c05b0SChaitanya Kulkarni descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE); 196287ad72a5SChristoph Hellwig i++; 196387ad72a5SChristoph Hellwig } 196487ad72a5SChristoph Hellwig 196592dc6895SChristoph Hellwig if (!size) 196687ad72a5SChristoph Hellwig goto out_free_bufs; 196787ad72a5SChristoph Hellwig 196887ad72a5SChristoph Hellwig dev->nr_host_mem_descs = i; 196987ad72a5SChristoph Hellwig dev->host_mem_size = size; 197087ad72a5SChristoph Hellwig dev->host_mem_descs = descs; 19714033f35dSChristoph Hellwig dev->host_mem_descs_dma = descs_dma; 197287ad72a5SChristoph Hellwig dev->host_mem_desc_bufs = bufs; 197387ad72a5SChristoph Hellwig return 0; 197487ad72a5SChristoph Hellwig 197587ad72a5SChristoph Hellwig out_free_bufs: 197687ad72a5SChristoph Hellwig while (--i >= 0) { 19776c3c05b0SChaitanya Kulkarni size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE; 197887ad72a5SChristoph Hellwig 1979cc667f6dSLiviu Dudau dma_free_attrs(dev->dev, size, bufs[i], 1980cc667f6dSLiviu Dudau le64_to_cpu(descs[i].addr), 1981cc667f6dSLiviu Dudau DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 198287ad72a5SChristoph Hellwig } 198387ad72a5SChristoph Hellwig 198487ad72a5SChristoph Hellwig kfree(bufs); 198587ad72a5SChristoph Hellwig out_free_descs: 19864033f35dSChristoph Hellwig dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, 19874033f35dSChristoph Hellwig descs_dma); 198887ad72a5SChristoph Hellwig out: 198987ad72a5SChristoph Hellwig dev->host_mem_descs = NULL; 199087ad72a5SChristoph Hellwig return -ENOMEM; 199187ad72a5SChristoph Hellwig } 199287ad72a5SChristoph Hellwig 199392dc6895SChristoph Hellwig static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) 199492dc6895SChristoph Hellwig { 19959dc54a0dSChaitanya Kulkarni u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES); 19969dc54a0dSChaitanya Kulkarni u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); 19979dc54a0dSChaitanya Kulkarni u64 chunk_size; 199892dc6895SChristoph Hellwig 199992dc6895SChristoph Hellwig /* start big and work our way down */ 20009dc54a0dSChaitanya Kulkarni for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) { 200192dc6895SChristoph Hellwig if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) { 200292dc6895SChristoph Hellwig if (!min || dev->host_mem_size >= min) 200392dc6895SChristoph Hellwig return 0; 200492dc6895SChristoph Hellwig nvme_free_host_mem(dev); 200592dc6895SChristoph Hellwig } 200692dc6895SChristoph Hellwig } 200792dc6895SChristoph Hellwig 200892dc6895SChristoph Hellwig return -ENOMEM; 200992dc6895SChristoph Hellwig } 201092dc6895SChristoph Hellwig 20119620cfbaSChristoph Hellwig static int nvme_setup_host_mem(struct nvme_dev *dev) 201287ad72a5SChristoph Hellwig { 201387ad72a5SChristoph Hellwig u64 max = (u64)max_host_mem_size_mb * SZ_1M; 201487ad72a5SChristoph Hellwig u64 preferred = (u64)dev->ctrl.hmpre * 4096; 201587ad72a5SChristoph Hellwig u64 min = (u64)dev->ctrl.hmmin * 4096; 201687ad72a5SChristoph Hellwig u32 enable_bits = NVME_HOST_MEM_ENABLE; 20176fbcde66SMinwoo Im int ret; 201887ad72a5SChristoph Hellwig 201987ad72a5SChristoph Hellwig preferred = min(preferred, max); 202087ad72a5SChristoph Hellwig if (min > max) { 202187ad72a5SChristoph Hellwig dev_warn(dev->ctrl.device, 202287ad72a5SChristoph Hellwig "min host memory (%lld MiB) above limit (%d MiB).\n", 202387ad72a5SChristoph Hellwig min >> ilog2(SZ_1M), max_host_mem_size_mb); 202487ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 20259620cfbaSChristoph Hellwig return 0; 202687ad72a5SChristoph Hellwig } 202787ad72a5SChristoph Hellwig 202887ad72a5SChristoph Hellwig /* 202987ad72a5SChristoph Hellwig * If we already have a buffer allocated check if we can reuse it. 203087ad72a5SChristoph Hellwig */ 203187ad72a5SChristoph Hellwig if (dev->host_mem_descs) { 203287ad72a5SChristoph Hellwig if (dev->host_mem_size >= min) 203387ad72a5SChristoph Hellwig enable_bits |= NVME_HOST_MEM_RETURN; 203487ad72a5SChristoph Hellwig else 203587ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 203687ad72a5SChristoph Hellwig } 203787ad72a5SChristoph Hellwig 203887ad72a5SChristoph Hellwig if (!dev->host_mem_descs) { 203992dc6895SChristoph Hellwig if (nvme_alloc_host_mem(dev, min, preferred)) { 204092dc6895SChristoph Hellwig dev_warn(dev->ctrl.device, 204192dc6895SChristoph Hellwig "failed to allocate host memory buffer.\n"); 20429620cfbaSChristoph Hellwig return 0; /* controller must work without HMB */ 204387ad72a5SChristoph Hellwig } 204487ad72a5SChristoph Hellwig 204592dc6895SChristoph Hellwig dev_info(dev->ctrl.device, 204692dc6895SChristoph Hellwig "allocated %lld MiB host memory buffer.\n", 204792dc6895SChristoph Hellwig dev->host_mem_size >> ilog2(SZ_1M)); 204892dc6895SChristoph Hellwig } 204992dc6895SChristoph Hellwig 20509620cfbaSChristoph Hellwig ret = nvme_set_host_mem(dev, enable_bits); 20519620cfbaSChristoph Hellwig if (ret) 205287ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 20539620cfbaSChristoph Hellwig return ret; 205457dacad5SJay Sternberg } 205557dacad5SJay Sternberg 20560521905eSKeith Busch static ssize_t cmb_show(struct device *dev, struct device_attribute *attr, 20570521905eSKeith Busch char *buf) 20580521905eSKeith Busch { 20590521905eSKeith Busch struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 20600521905eSKeith Busch 20610521905eSKeith Busch return sysfs_emit(buf, "cmbloc : x%08x\ncmbsz : x%08x\n", 20620521905eSKeith Busch ndev->cmbloc, ndev->cmbsz); 20630521905eSKeith Busch } 20640521905eSKeith Busch static DEVICE_ATTR_RO(cmb); 20650521905eSKeith Busch 20661751e97aSKeith Busch static ssize_t cmbloc_show(struct device *dev, struct device_attribute *attr, 20671751e97aSKeith Busch char *buf) 20681751e97aSKeith Busch { 20691751e97aSKeith Busch struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 20701751e97aSKeith Busch 20711751e97aSKeith Busch return sysfs_emit(buf, "%u\n", ndev->cmbloc); 20721751e97aSKeith Busch } 20731751e97aSKeith Busch static DEVICE_ATTR_RO(cmbloc); 20741751e97aSKeith Busch 20751751e97aSKeith Busch static ssize_t cmbsz_show(struct device *dev, struct device_attribute *attr, 20761751e97aSKeith Busch char *buf) 20771751e97aSKeith Busch { 20781751e97aSKeith Busch struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 20791751e97aSKeith Busch 20801751e97aSKeith Busch return sysfs_emit(buf, "%u\n", ndev->cmbsz); 20811751e97aSKeith Busch } 20821751e97aSKeith Busch static DEVICE_ATTR_RO(cmbsz); 20831751e97aSKeith Busch 20840521905eSKeith Busch static umode_t nvme_pci_attrs_are_visible(struct kobject *kobj, 20850521905eSKeith Busch struct attribute *a, int n) 20860521905eSKeith Busch { 20870521905eSKeith Busch struct nvme_ctrl *ctrl = 20880521905eSKeith Busch dev_get_drvdata(container_of(kobj, struct device, kobj)); 20890521905eSKeith Busch struct nvme_dev *dev = to_nvme_dev(ctrl); 20900521905eSKeith Busch 20911751e97aSKeith Busch if (a == &dev_attr_cmb.attr || 20921751e97aSKeith Busch a == &dev_attr_cmbloc.attr || 20931751e97aSKeith Busch a == &dev_attr_cmbsz.attr) { 20941751e97aSKeith Busch if (!dev->cmbsz) 20950521905eSKeith Busch return 0; 20961751e97aSKeith Busch } 20970521905eSKeith Busch return a->mode; 20980521905eSKeith Busch } 20990521905eSKeith Busch 21000521905eSKeith Busch static struct attribute *nvme_pci_attrs[] = { 21010521905eSKeith Busch &dev_attr_cmb.attr, 21021751e97aSKeith Busch &dev_attr_cmbloc.attr, 21031751e97aSKeith Busch &dev_attr_cmbsz.attr, 21040521905eSKeith Busch NULL, 21050521905eSKeith Busch }; 21060521905eSKeith Busch 21070521905eSKeith Busch static const struct attribute_group nvme_pci_attr_group = { 21080521905eSKeith Busch .attrs = nvme_pci_attrs, 21090521905eSKeith Busch .is_visible = nvme_pci_attrs_are_visible, 21100521905eSKeith Busch }; 21110521905eSKeith Busch 2112612b7286SMing Lei /* 2113612b7286SMing Lei * nirqs is the number of interrupts available for write and read 2114612b7286SMing Lei * queues. The core already reserved an interrupt for the admin queue. 2115612b7286SMing Lei */ 2116612b7286SMing Lei static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs) 21173b6592f7SJens Axboe { 2118612b7286SMing Lei struct nvme_dev *dev = affd->priv; 21192a5bcfddSWeiping Zhang unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues; 2120c45b1fa2SMing Lei 21213b6592f7SJens Axboe /* 2122ee0d96d3SBaolin Wang * If there is no interrupt available for queues, ensure that 2123612b7286SMing Lei * the default queue is set to 1. The affinity set size is 2124612b7286SMing Lei * also set to one, but the irq core ignores it for this case. 2125612b7286SMing Lei * 2126612b7286SMing Lei * If only one interrupt is available or 'write_queue' == 0, combine 2127612b7286SMing Lei * write and read queues. 2128612b7286SMing Lei * 2129612b7286SMing Lei * If 'write_queues' > 0, ensure it leaves room for at least one read 2130612b7286SMing Lei * queue. 21313b6592f7SJens Axboe */ 2132612b7286SMing Lei if (!nrirqs) { 2133612b7286SMing Lei nrirqs = 1; 2134612b7286SMing Lei nr_read_queues = 0; 21352a5bcfddSWeiping Zhang } else if (nrirqs == 1 || !nr_write_queues) { 2136612b7286SMing Lei nr_read_queues = 0; 21372a5bcfddSWeiping Zhang } else if (nr_write_queues >= nrirqs) { 2138612b7286SMing Lei nr_read_queues = 1; 21393b6592f7SJens Axboe } else { 21402a5bcfddSWeiping Zhang nr_read_queues = nrirqs - nr_write_queues; 21413b6592f7SJens Axboe } 2142612b7286SMing Lei 2143612b7286SMing Lei dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; 2144612b7286SMing Lei affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; 2145612b7286SMing Lei dev->io_queues[HCTX_TYPE_READ] = nr_read_queues; 2146612b7286SMing Lei affd->set_size[HCTX_TYPE_READ] = nr_read_queues; 2147612b7286SMing Lei affd->nr_sets = nr_read_queues ? 2 : 1; 21483b6592f7SJens Axboe } 21493b6592f7SJens Axboe 21506451fe73SJens Axboe static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) 21513b6592f7SJens Axboe { 21523b6592f7SJens Axboe struct pci_dev *pdev = to_pci_dev(dev->dev); 21533b6592f7SJens Axboe struct irq_affinity affd = { 21543b6592f7SJens Axboe .pre_vectors = 1, 2155612b7286SMing Lei .calc_sets = nvme_calc_irq_sets, 2156612b7286SMing Lei .priv = dev, 21573b6592f7SJens Axboe }; 215821cc2f3fSJeffle Xu unsigned int irq_queues, poll_queues; 21596451fe73SJens Axboe 21606451fe73SJens Axboe /* 216121cc2f3fSJeffle Xu * Poll queues don't need interrupts, but we need at least one I/O queue 216221cc2f3fSJeffle Xu * left over for non-polled I/O. 21636451fe73SJens Axboe */ 216421cc2f3fSJeffle Xu poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1); 216521cc2f3fSJeffle Xu dev->io_queues[HCTX_TYPE_POLL] = poll_queues; 21663b6592f7SJens Axboe 216721cc2f3fSJeffle Xu /* 216821cc2f3fSJeffle Xu * Initialize for the single interrupt case, will be updated in 216921cc2f3fSJeffle Xu * nvme_calc_irq_sets(). 217021cc2f3fSJeffle Xu */ 2171612b7286SMing Lei dev->io_queues[HCTX_TYPE_DEFAULT] = 1; 2172612b7286SMing Lei dev->io_queues[HCTX_TYPE_READ] = 0; 21733b6592f7SJens Axboe 217466341331SBenjamin Herrenschmidt /* 217521cc2f3fSJeffle Xu * We need interrupts for the admin queue and each non-polled I/O queue, 217621cc2f3fSJeffle Xu * but some Apple controllers require all queues to use the first 217721cc2f3fSJeffle Xu * vector. 217866341331SBenjamin Herrenschmidt */ 217966341331SBenjamin Herrenschmidt irq_queues = 1; 218021cc2f3fSJeffle Xu if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)) 218121cc2f3fSJeffle Xu irq_queues += (nr_io_queues - poll_queues); 2182612b7286SMing Lei return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, 21833b6592f7SJens Axboe PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd); 21843b6592f7SJens Axboe } 21853b6592f7SJens Axboe 21868fae268bSKeith Busch static void nvme_disable_io_queues(struct nvme_dev *dev) 21878fae268bSKeith Busch { 21888fae268bSKeith Busch if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq)) 21898fae268bSKeith Busch __nvme_disable_io_queues(dev, nvme_admin_delete_cq); 21908fae268bSKeith Busch } 21918fae268bSKeith Busch 21922a5bcfddSWeiping Zhang static unsigned int nvme_max_io_queues(struct nvme_dev *dev) 21932a5bcfddSWeiping Zhang { 2194e3aef095SNiklas Schnelle /* 2195e3aef095SNiklas Schnelle * If tags are shared with admin queue (Apple bug), then 2196e3aef095SNiklas Schnelle * make sure we only use one IO queue. 2197e3aef095SNiklas Schnelle */ 2198e3aef095SNiklas Schnelle if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) 2199e3aef095SNiklas Schnelle return 1; 22002a5bcfddSWeiping Zhang return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues; 22012a5bcfddSWeiping Zhang } 22022a5bcfddSWeiping Zhang 220357dacad5SJay Sternberg static int nvme_setup_io_queues(struct nvme_dev *dev) 220457dacad5SJay Sternberg { 2205147b27e4SSagi Grimberg struct nvme_queue *adminq = &dev->queues[0]; 220657dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 22072a5bcfddSWeiping Zhang unsigned int nr_io_queues; 220897f6ef64SXu Yu unsigned long size; 22092a5bcfddSWeiping Zhang int result; 221057dacad5SJay Sternberg 22112a5bcfddSWeiping Zhang /* 22122a5bcfddSWeiping Zhang * Sample the module parameters once at reset time so that we have 22132a5bcfddSWeiping Zhang * stable values to work with. 22142a5bcfddSWeiping Zhang */ 22152a5bcfddSWeiping Zhang dev->nr_write_queues = write_queues; 22162a5bcfddSWeiping Zhang dev->nr_poll_queues = poll_queues; 2217d38e9f04SBenjamin Herrenschmidt 2218ff4e5fbaSNiklas Schnelle nr_io_queues = dev->nr_allocated_queues - 1; 22199a0be7abSChristoph Hellwig result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); 22209a0be7abSChristoph Hellwig if (result < 0) 222157dacad5SJay Sternberg return result; 22229a0be7abSChristoph Hellwig 2223f5fa90dcSChristoph Hellwig if (nr_io_queues == 0) 2224a5229050SKeith Busch return 0; 222557dacad5SJay Sternberg 2226e4b9852aSCasey Chen /* 2227e4b9852aSCasey Chen * Free IRQ resources as soon as NVMEQ_ENABLED bit transitions 2228e4b9852aSCasey Chen * from set to unset. If there is a window to it is truely freed, 2229e4b9852aSCasey Chen * pci_free_irq_vectors() jumping into this window will crash. 2230e4b9852aSCasey Chen * And take lock to avoid racing with pci_free_irq_vectors() in 2231e4b9852aSCasey Chen * nvme_dev_disable() path. 2232e4b9852aSCasey Chen */ 2233e4b9852aSCasey Chen result = nvme_setup_io_queues_trylock(dev); 2234e4b9852aSCasey Chen if (result) 2235e4b9852aSCasey Chen return result; 2236e4b9852aSCasey Chen if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) 2237e4b9852aSCasey Chen pci_free_irq(pdev, 0, adminq); 22384e224106SChristoph Hellwig 22390f238ff5SLogan Gunthorpe if (dev->cmb_use_sqes) { 224057dacad5SJay Sternberg result = nvme_cmb_qdepth(dev, nr_io_queues, 224157dacad5SJay Sternberg sizeof(struct nvme_command)); 224257dacad5SJay Sternberg if (result > 0) 224357dacad5SJay Sternberg dev->q_depth = result; 224457dacad5SJay Sternberg else 22450f238ff5SLogan Gunthorpe dev->cmb_use_sqes = false; 224657dacad5SJay Sternberg } 224757dacad5SJay Sternberg 224857dacad5SJay Sternberg do { 224997f6ef64SXu Yu size = db_bar_size(dev, nr_io_queues); 225097f6ef64SXu Yu result = nvme_remap_bar(dev, size); 225197f6ef64SXu Yu if (!result) 225257dacad5SJay Sternberg break; 2253e4b9852aSCasey Chen if (!--nr_io_queues) { 2254e4b9852aSCasey Chen result = -ENOMEM; 2255e4b9852aSCasey Chen goto out_unlock; 2256e4b9852aSCasey Chen } 225757dacad5SJay Sternberg } while (1); 225857dacad5SJay Sternberg adminq->q_db = dev->dbs; 225957dacad5SJay Sternberg 22608fae268bSKeith Busch retry: 226157dacad5SJay Sternberg /* Deregister the admin queue's interrupt */ 2262e4b9852aSCasey Chen if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) 22630ff199cbSChristoph Hellwig pci_free_irq(pdev, 0, adminq); 226457dacad5SJay Sternberg 226557dacad5SJay Sternberg /* 226657dacad5SJay Sternberg * If we enable msix early due to not intx, disable it again before 226757dacad5SJay Sternberg * setting up the full range we need. 226857dacad5SJay Sternberg */ 2269dca51e78SChristoph Hellwig pci_free_irq_vectors(pdev); 22703b6592f7SJens Axboe 22713b6592f7SJens Axboe result = nvme_setup_irqs(dev, nr_io_queues); 2272e4b9852aSCasey Chen if (result <= 0) { 2273e4b9852aSCasey Chen result = -EIO; 2274e4b9852aSCasey Chen goto out_unlock; 2275e4b9852aSCasey Chen } 22763b6592f7SJens Axboe 227722b55601SKeith Busch dev->num_vecs = result; 22784b04cc6aSJens Axboe result = max(result - 1, 1); 2279e20ba6e1SChristoph Hellwig dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; 228057dacad5SJay Sternberg 228157dacad5SJay Sternberg /* 228257dacad5SJay Sternberg * Should investigate if there's a performance win from allocating 228357dacad5SJay Sternberg * more queues than interrupt vectors; it might allow the submission 228457dacad5SJay Sternberg * path to scale better, even if the receive path is limited by the 228557dacad5SJay Sternberg * number of interrupts. 228657dacad5SJay Sternberg */ 2287dca51e78SChristoph Hellwig result = queue_request_irq(adminq); 22887c349ddeSKeith Busch if (result) 2289e4b9852aSCasey Chen goto out_unlock; 22904e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &adminq->flags); 2291e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 22928fae268bSKeith Busch 22938fae268bSKeith Busch result = nvme_create_io_queues(dev); 22948fae268bSKeith Busch if (result || dev->online_queues < 2) 22958fae268bSKeith Busch return result; 22968fae268bSKeith Busch 22978fae268bSKeith Busch if (dev->online_queues - 1 < dev->max_qid) { 22988fae268bSKeith Busch nr_io_queues = dev->online_queues - 1; 22998fae268bSKeith Busch nvme_disable_io_queues(dev); 2300e4b9852aSCasey Chen result = nvme_setup_io_queues_trylock(dev); 2301e4b9852aSCasey Chen if (result) 2302e4b9852aSCasey Chen return result; 23038fae268bSKeith Busch nvme_suspend_io_queues(dev); 23048fae268bSKeith Busch goto retry; 23058fae268bSKeith Busch } 23068fae268bSKeith Busch dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", 23078fae268bSKeith Busch dev->io_queues[HCTX_TYPE_DEFAULT], 23088fae268bSKeith Busch dev->io_queues[HCTX_TYPE_READ], 23098fae268bSKeith Busch dev->io_queues[HCTX_TYPE_POLL]); 23108fae268bSKeith Busch return 0; 2311e4b9852aSCasey Chen out_unlock: 2312e4b9852aSCasey Chen mutex_unlock(&dev->shutdown_lock); 2313e4b9852aSCasey Chen return result; 231457dacad5SJay Sternberg } 231557dacad5SJay Sternberg 23162a842acaSChristoph Hellwig static void nvme_del_queue_end(struct request *req, blk_status_t error) 2317db3cbfffSKeith Busch { 2318db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 2319db3cbfffSKeith Busch 2320db3cbfffSKeith Busch blk_mq_free_request(req); 2321d1ed6aa1SChristoph Hellwig complete(&nvmeq->delete_done); 2322db3cbfffSKeith Busch } 2323db3cbfffSKeith Busch 23242a842acaSChristoph Hellwig static void nvme_del_cq_end(struct request *req, blk_status_t error) 2325db3cbfffSKeith Busch { 2326db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 2327db3cbfffSKeith Busch 2328d1ed6aa1SChristoph Hellwig if (error) 2329d1ed6aa1SChristoph Hellwig set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 2330db3cbfffSKeith Busch 2331db3cbfffSKeith Busch nvme_del_queue_end(req, error); 2332db3cbfffSKeith Busch } 2333db3cbfffSKeith Busch 2334db3cbfffSKeith Busch static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) 2335db3cbfffSKeith Busch { 2336db3cbfffSKeith Busch struct request_queue *q = nvmeq->dev->ctrl.admin_q; 2337db3cbfffSKeith Busch struct request *req; 2338f66e2804SChaitanya Kulkarni struct nvme_command cmd = { }; 2339db3cbfffSKeith Busch 2340db3cbfffSKeith Busch cmd.delete_queue.opcode = opcode; 2341db3cbfffSKeith Busch cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); 2342db3cbfffSKeith Busch 234339dfe844SChaitanya Kulkarni req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT); 2344db3cbfffSKeith Busch if (IS_ERR(req)) 2345db3cbfffSKeith Busch return PTR_ERR(req); 2346db3cbfffSKeith Busch 2347db3cbfffSKeith Busch req->end_io_data = nvmeq; 2348db3cbfffSKeith Busch 2349d1ed6aa1SChristoph Hellwig init_completion(&nvmeq->delete_done); 23508eeed0b5SGuoqing Jiang blk_execute_rq_nowait(NULL, req, false, 2351db3cbfffSKeith Busch opcode == nvme_admin_delete_cq ? 2352db3cbfffSKeith Busch nvme_del_cq_end : nvme_del_queue_end); 2353db3cbfffSKeith Busch return 0; 2354db3cbfffSKeith Busch } 2355db3cbfffSKeith Busch 23568fae268bSKeith Busch static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode) 2357db3cbfffSKeith Busch { 23585271edd4SChristoph Hellwig int nr_queues = dev->online_queues - 1, sent = 0; 2359db3cbfffSKeith Busch unsigned long timeout; 2360db3cbfffSKeith Busch 2361db3cbfffSKeith Busch retry: 2362dc96f938SChaitanya Kulkarni timeout = NVME_ADMIN_TIMEOUT; 23635271edd4SChristoph Hellwig while (nr_queues > 0) { 23645271edd4SChristoph Hellwig if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) 2365db3cbfffSKeith Busch break; 23665271edd4SChristoph Hellwig nr_queues--; 23675271edd4SChristoph Hellwig sent++; 23685271edd4SChristoph Hellwig } 2369d1ed6aa1SChristoph Hellwig while (sent) { 2370d1ed6aa1SChristoph Hellwig struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; 2371d1ed6aa1SChristoph Hellwig 2372d1ed6aa1SChristoph Hellwig timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, 23735271edd4SChristoph Hellwig timeout); 2374db3cbfffSKeith Busch if (timeout == 0) 23755271edd4SChristoph Hellwig return false; 2376d1ed6aa1SChristoph Hellwig 2377d1ed6aa1SChristoph Hellwig sent--; 23785271edd4SChristoph Hellwig if (nr_queues) 2379db3cbfffSKeith Busch goto retry; 2380db3cbfffSKeith Busch } 23815271edd4SChristoph Hellwig return true; 2382db3cbfffSKeith Busch } 2383db3cbfffSKeith Busch 23845d02a5c1SKeith Busch static void nvme_dev_add(struct nvme_dev *dev) 238557dacad5SJay Sternberg { 23862b1b7e78SJianchao Wang int ret; 23872b1b7e78SJianchao Wang 23885bae7f73SChristoph Hellwig if (!dev->ctrl.tagset) { 2389c6d962aeSChristoph Hellwig dev->tagset.ops = &nvme_mq_ops; 239057dacad5SJay Sternberg dev->tagset.nr_hw_queues = dev->online_queues - 1; 23918fe34be1Syangerkun dev->tagset.nr_maps = 2; /* default + read */ 2392ed92ad37SChristoph Hellwig if (dev->io_queues[HCTX_TYPE_POLL]) 2393ed92ad37SChristoph Hellwig dev->tagset.nr_maps++; 239457dacad5SJay Sternberg dev->tagset.timeout = NVME_IO_TIMEOUT; 2395d4ec47f1SMax Gurtovoy dev->tagset.numa_node = dev->ctrl.numa_node; 239661f3b896SChaitanya Kulkarni dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth, 239761f3b896SChaitanya Kulkarni BLK_MQ_MAX_DEPTH) - 1; 2398d43f1ccfSChristoph Hellwig dev->tagset.cmd_size = sizeof(struct nvme_iod); 239957dacad5SJay Sternberg dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE; 240057dacad5SJay Sternberg dev->tagset.driver_data = dev; 240157dacad5SJay Sternberg 2402d38e9f04SBenjamin Herrenschmidt /* 2403d38e9f04SBenjamin Herrenschmidt * Some Apple controllers requires tags to be unique 2404d38e9f04SBenjamin Herrenschmidt * across admin and IO queue, so reserve the first 32 2405d38e9f04SBenjamin Herrenschmidt * tags of the IO queue. 2406d38e9f04SBenjamin Herrenschmidt */ 2407d38e9f04SBenjamin Herrenschmidt if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) 2408d38e9f04SBenjamin Herrenschmidt dev->tagset.reserved_tags = NVME_AQ_DEPTH; 2409d38e9f04SBenjamin Herrenschmidt 24102b1b7e78SJianchao Wang ret = blk_mq_alloc_tag_set(&dev->tagset); 24112b1b7e78SJianchao Wang if (ret) { 24122b1b7e78SJianchao Wang dev_warn(dev->ctrl.device, 24132b1b7e78SJianchao Wang "IO queues tagset allocation failed %d\n", ret); 24145d02a5c1SKeith Busch return; 24152b1b7e78SJianchao Wang } 24165bae7f73SChristoph Hellwig dev->ctrl.tagset = &dev->tagset; 2417949928c1SKeith Busch } else { 2418949928c1SKeith Busch blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); 2419949928c1SKeith Busch 2420949928c1SKeith Busch /* Free previously allocated queues that are no longer usable */ 2421949928c1SKeith Busch nvme_free_queues(dev, dev->online_queues); 242257dacad5SJay Sternberg } 2423949928c1SKeith Busch 2424e8fd41bbSMaxim Levitsky nvme_dbbuf_set(dev); 242557dacad5SJay Sternberg } 242657dacad5SJay Sternberg 2427b00a726aSKeith Busch static int nvme_pci_enable(struct nvme_dev *dev) 242857dacad5SJay Sternberg { 2429b00a726aSKeith Busch int result = -ENOMEM; 243057dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 24314bdf2603SFilippo Sironi int dma_address_bits = 64; 243257dacad5SJay Sternberg 243357dacad5SJay Sternberg if (pci_enable_device_mem(pdev)) 243457dacad5SJay Sternberg return result; 243557dacad5SJay Sternberg 243657dacad5SJay Sternberg pci_set_master(pdev); 243757dacad5SJay Sternberg 24384bdf2603SFilippo Sironi if (dev->ctrl.quirks & NVME_QUIRK_DMA_ADDRESS_BITS_48) 24394bdf2603SFilippo Sironi dma_address_bits = 48; 24404bdf2603SFilippo Sironi if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(dma_address_bits))) 244157dacad5SJay Sternberg goto disable; 244257dacad5SJay Sternberg 24437a67cbeaSChristoph Hellwig if (readl(dev->bar + NVME_REG_CSTS) == -1) { 244457dacad5SJay Sternberg result = -ENODEV; 2445b00a726aSKeith Busch goto disable; 244657dacad5SJay Sternberg } 244757dacad5SJay Sternberg 244857dacad5SJay Sternberg /* 2449a5229050SKeith Busch * Some devices and/or platforms don't advertise or work with INTx 2450a5229050SKeith Busch * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll 2451a5229050SKeith Busch * adjust this later. 245257dacad5SJay Sternberg */ 2453dca51e78SChristoph Hellwig result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); 2454dca51e78SChristoph Hellwig if (result < 0) 2455dca51e78SChristoph Hellwig return result; 245657dacad5SJay Sternberg 245720d0dfe6SSagi Grimberg dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 24587a67cbeaSChristoph Hellwig 24597442ddceSJohn Garry dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, 2460b27c1e68Sweiping zhang io_queue_depth); 2461aa22c8e6SSagi Grimberg dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ 246220d0dfe6SSagi Grimberg dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); 24637a67cbeaSChristoph Hellwig dev->dbs = dev->bar + 4096; 24641f390c1fSStephan Günther 24651f390c1fSStephan Günther /* 246666341331SBenjamin Herrenschmidt * Some Apple controllers require a non-standard SQE size. 246766341331SBenjamin Herrenschmidt * Interestingly they also seem to ignore the CC:IOSQES register 246866341331SBenjamin Herrenschmidt * so we don't bother updating it here. 246966341331SBenjamin Herrenschmidt */ 247066341331SBenjamin Herrenschmidt if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES) 247166341331SBenjamin Herrenschmidt dev->io_sqes = 7; 247266341331SBenjamin Herrenschmidt else 2473c1e0cc7eSBenjamin Herrenschmidt dev->io_sqes = NVME_NVM_IOSQES; 24741f390c1fSStephan Günther 24751f390c1fSStephan Günther /* 24761f390c1fSStephan Günther * Temporary fix for the Apple controller found in the MacBook8,1 and 24771f390c1fSStephan Günther * some MacBook7,1 to avoid controller resets and data loss. 24781f390c1fSStephan Günther */ 24791f390c1fSStephan Günther if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { 24801f390c1fSStephan Günther dev->q_depth = 2; 24819bdcfb10SChristoph Hellwig dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " 24829bdcfb10SChristoph Hellwig "set queue depth=%u to work around controller resets\n", 24831f390c1fSStephan Günther dev->q_depth); 2484d554b5e1SMartin K. Petersen } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && 2485d554b5e1SMartin K. Petersen (pdev->device == 0xa821 || pdev->device == 0xa822) && 248620d0dfe6SSagi Grimberg NVME_CAP_MQES(dev->ctrl.cap) == 0) { 2487d554b5e1SMartin K. Petersen dev->q_depth = 64; 2488d554b5e1SMartin K. Petersen dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " 2489d554b5e1SMartin K. Petersen "set queue depth=%u\n", dev->q_depth); 24901f390c1fSStephan Günther } 24911f390c1fSStephan Günther 2492d38e9f04SBenjamin Herrenschmidt /* 2493d38e9f04SBenjamin Herrenschmidt * Controllers with the shared tags quirk need the IO queue to be 2494d38e9f04SBenjamin Herrenschmidt * big enough so that we get 32 tags for the admin queue 2495d38e9f04SBenjamin Herrenschmidt */ 2496d38e9f04SBenjamin Herrenschmidt if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) && 2497d38e9f04SBenjamin Herrenschmidt (dev->q_depth < (NVME_AQ_DEPTH + 2))) { 2498d38e9f04SBenjamin Herrenschmidt dev->q_depth = NVME_AQ_DEPTH + 2; 2499d38e9f04SBenjamin Herrenschmidt dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n", 2500d38e9f04SBenjamin Herrenschmidt dev->q_depth); 2501d38e9f04SBenjamin Herrenschmidt } 2502d38e9f04SBenjamin Herrenschmidt 2503d38e9f04SBenjamin Herrenschmidt 2504f65efd6dSChristoph Hellwig nvme_map_cmb(dev); 2505202021c1SStephen Bates 2506a0a3408eSKeith Busch pci_enable_pcie_error_reporting(pdev); 2507a0a3408eSKeith Busch pci_save_state(pdev); 250857dacad5SJay Sternberg return 0; 250957dacad5SJay Sternberg 251057dacad5SJay Sternberg disable: 251157dacad5SJay Sternberg pci_disable_device(pdev); 251257dacad5SJay Sternberg return result; 251357dacad5SJay Sternberg } 251457dacad5SJay Sternberg 251557dacad5SJay Sternberg static void nvme_dev_unmap(struct nvme_dev *dev) 251657dacad5SJay Sternberg { 2517b00a726aSKeith Busch if (dev->bar) 2518b00a726aSKeith Busch iounmap(dev->bar); 2519a1f447b3SJohannes Thumshirn pci_release_mem_regions(to_pci_dev(dev->dev)); 2520b00a726aSKeith Busch } 2521b00a726aSKeith Busch 2522b00a726aSKeith Busch static void nvme_pci_disable(struct nvme_dev *dev) 2523b00a726aSKeith Busch { 252457dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 252557dacad5SJay Sternberg 2526dca51e78SChristoph Hellwig pci_free_irq_vectors(pdev); 252757dacad5SJay Sternberg 2528a0a3408eSKeith Busch if (pci_is_enabled(pdev)) { 2529a0a3408eSKeith Busch pci_disable_pcie_error_reporting(pdev); 253057dacad5SJay Sternberg pci_disable_device(pdev); 253157dacad5SJay Sternberg } 2532a0a3408eSKeith Busch } 253357dacad5SJay Sternberg 2534a5cdb68cSKeith Busch static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) 253557dacad5SJay Sternberg { 2536e43269e6SKeith Busch bool dead = true, freeze = false; 2537302ad8ccSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 253857dacad5SJay Sternberg 253977bf25eaSKeith Busch mutex_lock(&dev->shutdown_lock); 2540302ad8ccSKeith Busch if (pci_is_enabled(pdev)) { 2541302ad8ccSKeith Busch u32 csts = readl(dev->bar + NVME_REG_CSTS); 2542302ad8ccSKeith Busch 2543ebef7368SKeith Busch if (dev->ctrl.state == NVME_CTRL_LIVE || 2544e43269e6SKeith Busch dev->ctrl.state == NVME_CTRL_RESETTING) { 2545e43269e6SKeith Busch freeze = true; 2546302ad8ccSKeith Busch nvme_start_freeze(&dev->ctrl); 2547e43269e6SKeith Busch } 2548302ad8ccSKeith Busch dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || 2549302ad8ccSKeith Busch pdev->error_state != pci_channel_io_normal); 255057dacad5SJay Sternberg } 2551c21377f8SGabriel Krisman Bertazi 2552302ad8ccSKeith Busch /* 2553302ad8ccSKeith Busch * Give the controller a chance to complete all entered requests if 2554302ad8ccSKeith Busch * doing a safe shutdown. 2555302ad8ccSKeith Busch */ 2556e43269e6SKeith Busch if (!dead && shutdown && freeze) 2557302ad8ccSKeith Busch nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); 255887ad72a5SChristoph Hellwig 25599a915a5bSJianchao Wang nvme_stop_queues(&dev->ctrl); 25609a915a5bSJianchao Wang 256164ee0ac0SKeith Busch if (!dead && dev->ctrl.queue_count > 0) { 25628fae268bSKeith Busch nvme_disable_io_queues(dev); 2563a5cdb68cSKeith Busch nvme_disable_admin_queue(dev, shutdown); 256457dacad5SJay Sternberg } 25658fae268bSKeith Busch nvme_suspend_io_queues(dev); 25668fae268bSKeith Busch nvme_suspend_queue(&dev->queues[0]); 2567b00a726aSKeith Busch nvme_pci_disable(dev); 2568fa46c6fbSKeith Busch nvme_reap_pending_cqes(dev); 256957dacad5SJay Sternberg 2570e1958e65SMing Lin blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); 2571e1958e65SMing Lin blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl); 2572622b8b68SMing Lei blk_mq_tagset_wait_completed_request(&dev->tagset); 2573622b8b68SMing Lei blk_mq_tagset_wait_completed_request(&dev->admin_tagset); 2574302ad8ccSKeith Busch 2575302ad8ccSKeith Busch /* 2576302ad8ccSKeith Busch * The driver will not be starting up queues again if shutting down so 2577302ad8ccSKeith Busch * must flush all entered requests to their failed completion to avoid 2578302ad8ccSKeith Busch * deadlocking blk-mq hot-cpu notifier. 2579302ad8ccSKeith Busch */ 2580c8e9e9b7SKeith Busch if (shutdown) { 2581302ad8ccSKeith Busch nvme_start_queues(&dev->ctrl); 2582c8e9e9b7SKeith Busch if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) 2583c8e9e9b7SKeith Busch blk_mq_unquiesce_queue(dev->ctrl.admin_q); 2584c8e9e9b7SKeith Busch } 258577bf25eaSKeith Busch mutex_unlock(&dev->shutdown_lock); 258657dacad5SJay Sternberg } 258757dacad5SJay Sternberg 2588c1ac9a4bSKeith Busch static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) 2589c1ac9a4bSKeith Busch { 2590c1ac9a4bSKeith Busch if (!nvme_wait_reset(&dev->ctrl)) 2591c1ac9a4bSKeith Busch return -EBUSY; 2592c1ac9a4bSKeith Busch nvme_dev_disable(dev, shutdown); 2593c1ac9a4bSKeith Busch return 0; 2594c1ac9a4bSKeith Busch } 2595c1ac9a4bSKeith Busch 259657dacad5SJay Sternberg static int nvme_setup_prp_pools(struct nvme_dev *dev) 259757dacad5SJay Sternberg { 259857dacad5SJay Sternberg dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, 2599c61b82c7SChristoph Hellwig NVME_CTRL_PAGE_SIZE, 2600c61b82c7SChristoph Hellwig NVME_CTRL_PAGE_SIZE, 0); 260157dacad5SJay Sternberg if (!dev->prp_page_pool) 260257dacad5SJay Sternberg return -ENOMEM; 260357dacad5SJay Sternberg 260457dacad5SJay Sternberg /* Optimisation for I/Os between 4k and 128k */ 260557dacad5SJay Sternberg dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, 260657dacad5SJay Sternberg 256, 256, 0); 260757dacad5SJay Sternberg if (!dev->prp_small_pool) { 260857dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 260957dacad5SJay Sternberg return -ENOMEM; 261057dacad5SJay Sternberg } 261157dacad5SJay Sternberg return 0; 261257dacad5SJay Sternberg } 261357dacad5SJay Sternberg 261457dacad5SJay Sternberg static void nvme_release_prp_pools(struct nvme_dev *dev) 261557dacad5SJay Sternberg { 261657dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 261757dacad5SJay Sternberg dma_pool_destroy(dev->prp_small_pool); 261857dacad5SJay Sternberg } 261957dacad5SJay Sternberg 2620770597ecSKeith Busch static void nvme_free_tagset(struct nvme_dev *dev) 2621770597ecSKeith Busch { 2622770597ecSKeith Busch if (dev->tagset.tags) 2623770597ecSKeith Busch blk_mq_free_tag_set(&dev->tagset); 2624770597ecSKeith Busch dev->ctrl.tagset = NULL; 2625770597ecSKeith Busch } 2626770597ecSKeith Busch 26271673f1f0SChristoph Hellwig static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) 262857dacad5SJay Sternberg { 26291673f1f0SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 263057dacad5SJay Sternberg 2631f9f38e33SHelen Koike nvme_dbbuf_dma_free(dev); 2632770597ecSKeith Busch nvme_free_tagset(dev); 26331c63dc66SChristoph Hellwig if (dev->ctrl.admin_q) 26341c63dc66SChristoph Hellwig blk_put_queue(dev->ctrl.admin_q); 2635e286bcfcSScott Bauer free_opal_dev(dev->ctrl.opal_dev); 2636943e942eSJens Axboe mempool_destroy(dev->iod_mempool); 2637253fd4acSIsrael Rukshin put_device(dev->dev); 2638253fd4acSIsrael Rukshin kfree(dev->queues); 263957dacad5SJay Sternberg kfree(dev); 264057dacad5SJay Sternberg } 264157dacad5SJay Sternberg 26427c1ce408SChaitanya Kulkarni static void nvme_remove_dead_ctrl(struct nvme_dev *dev) 2643f58944e2SKeith Busch { 2644c1ac9a4bSKeith Busch /* 2645c1ac9a4bSKeith Busch * Set state to deleting now to avoid blocking nvme_wait_reset(), which 2646c1ac9a4bSKeith Busch * may be holding this pci_dev's device lock. 2647c1ac9a4bSKeith Busch */ 2648c1ac9a4bSKeith Busch nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 2649d22524a4SChristoph Hellwig nvme_get_ctrl(&dev->ctrl); 265069d9a99cSKeith Busch nvme_dev_disable(dev, false); 26519f9cafc1SJianchao Wang nvme_kill_queues(&dev->ctrl); 265203e0f3a6SMing Lei if (!queue_work(nvme_wq, &dev->remove_work)) 2653f58944e2SKeith Busch nvme_put_ctrl(&dev->ctrl); 2654f58944e2SKeith Busch } 2655f58944e2SKeith Busch 2656fd634f41SChristoph Hellwig static void nvme_reset_work(struct work_struct *work) 265757dacad5SJay Sternberg { 2658d86c4d8eSChristoph Hellwig struct nvme_dev *dev = 2659d86c4d8eSChristoph Hellwig container_of(work, struct nvme_dev, ctrl.reset_work); 2660a98e58e5SScott Bauer bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); 2661e71afda4SChaitanya Kulkarni int result; 266257dacad5SJay Sternberg 26637764656bSZhihao Cheng if (dev->ctrl.state != NVME_CTRL_RESETTING) { 26647764656bSZhihao Cheng dev_warn(dev->ctrl.device, "ctrl state %d is not RESETTING\n", 26657764656bSZhihao Cheng dev->ctrl.state); 2666e71afda4SChaitanya Kulkarni result = -ENODEV; 2667fd634f41SChristoph Hellwig goto out; 2668e71afda4SChaitanya Kulkarni } 2669fd634f41SChristoph Hellwig 2670fd634f41SChristoph Hellwig /* 2671fd634f41SChristoph Hellwig * If we're called to reset a live controller first shut it down before 2672fd634f41SChristoph Hellwig * moving on. 2673fd634f41SChristoph Hellwig */ 2674b00a726aSKeith Busch if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 2675a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 2676d6135c3aSKeith Busch nvme_sync_queues(&dev->ctrl); 2677fd634f41SChristoph Hellwig 26785c959d73SKeith Busch mutex_lock(&dev->shutdown_lock); 2679b00a726aSKeith Busch result = nvme_pci_enable(dev); 268057dacad5SJay Sternberg if (result) 26814726bcf3SKeith Busch goto out_unlock; 268257dacad5SJay Sternberg 268301ad0990SSagi Grimberg result = nvme_pci_configure_admin_queue(dev); 268457dacad5SJay Sternberg if (result) 26854726bcf3SKeith Busch goto out_unlock; 268657dacad5SJay Sternberg 268757dacad5SJay Sternberg result = nvme_alloc_admin_tags(dev); 268857dacad5SJay Sternberg if (result) 26894726bcf3SKeith Busch goto out_unlock; 269057dacad5SJay Sternberg 2691943e942eSJens Axboe /* 2692943e942eSJens Axboe * Limit the max command size to prevent iod->sg allocations going 2693943e942eSJens Axboe * over a single page. 2694943e942eSJens Axboe */ 26957637de31SChristoph Hellwig dev->ctrl.max_hw_sectors = min_t(u32, 26967637de31SChristoph Hellwig NVME_MAX_KB_SZ << 1, dma_max_mapping_size(dev->dev) >> 9); 2697943e942eSJens Axboe dev->ctrl.max_segments = NVME_MAX_SEGS; 2698a48bc520SChristoph Hellwig 2699a48bc520SChristoph Hellwig /* 2700a48bc520SChristoph Hellwig * Don't limit the IOMMU merged segment size. 2701a48bc520SChristoph Hellwig */ 2702a48bc520SChristoph Hellwig dma_set_max_seg_size(dev->dev, 0xffffffff); 27033d2d861eSJianxiong Gao dma_set_min_align_mask(dev->dev, NVME_CTRL_PAGE_SIZE - 1); 2704a48bc520SChristoph Hellwig 27055c959d73SKeith Busch mutex_unlock(&dev->shutdown_lock); 27065c959d73SKeith Busch 27075c959d73SKeith Busch /* 27085c959d73SKeith Busch * Introduce CONNECTING state from nvme-fc/rdma transports to mark the 27095c959d73SKeith Busch * initializing procedure here. 27105c959d73SKeith Busch */ 27115c959d73SKeith Busch if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { 27125c959d73SKeith Busch dev_warn(dev->ctrl.device, 27135c959d73SKeith Busch "failed to mark controller CONNECTING\n"); 2714cee6c269SMinwoo Im result = -EBUSY; 27155c959d73SKeith Busch goto out; 27165c959d73SKeith Busch } 2717943e942eSJens Axboe 271895093350SMax Gurtovoy /* 271995093350SMax Gurtovoy * We do not support an SGL for metadata (yet), so we are limited to a 272095093350SMax Gurtovoy * single integrity segment for the separate metadata pointer. 272195093350SMax Gurtovoy */ 272295093350SMax Gurtovoy dev->ctrl.max_integrity_segments = 1; 272395093350SMax Gurtovoy 2724f21c4769SChaitanya Kulkarni result = nvme_init_ctrl_finish(&dev->ctrl); 2725ce4541f4SChristoph Hellwig if (result) 2726f58944e2SKeith Busch goto out; 2727ce4541f4SChristoph Hellwig 2728e286bcfcSScott Bauer if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) { 2729e286bcfcSScott Bauer if (!dev->ctrl.opal_dev) 27304f1244c8SChristoph Hellwig dev->ctrl.opal_dev = 27314f1244c8SChristoph Hellwig init_opal_dev(&dev->ctrl, &nvme_sec_submit); 2732e286bcfcSScott Bauer else if (was_suspend) 27334f1244c8SChristoph Hellwig opal_unlock_from_suspend(dev->ctrl.opal_dev); 2734e286bcfcSScott Bauer } else { 2735e286bcfcSScott Bauer free_opal_dev(dev->ctrl.opal_dev); 2736e286bcfcSScott Bauer dev->ctrl.opal_dev = NULL; 2737e286bcfcSScott Bauer } 2738a98e58e5SScott Bauer 2739f9f38e33SHelen Koike if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) { 2740f9f38e33SHelen Koike result = nvme_dbbuf_dma_alloc(dev); 2741f9f38e33SHelen Koike if (result) 2742f9f38e33SHelen Koike dev_warn(dev->dev, 2743f9f38e33SHelen Koike "unable to allocate dma for dbbuf\n"); 2744f9f38e33SHelen Koike } 2745f9f38e33SHelen Koike 27469620cfbaSChristoph Hellwig if (dev->ctrl.hmpre) { 27479620cfbaSChristoph Hellwig result = nvme_setup_host_mem(dev); 27489620cfbaSChristoph Hellwig if (result < 0) 27499620cfbaSChristoph Hellwig goto out; 27509620cfbaSChristoph Hellwig } 275187ad72a5SChristoph Hellwig 275257dacad5SJay Sternberg result = nvme_setup_io_queues(dev); 275357dacad5SJay Sternberg if (result) 2754f58944e2SKeith Busch goto out; 275557dacad5SJay Sternberg 275621f033f7SKeith Busch /* 275757dacad5SJay Sternberg * Keep the controller around but remove all namespaces if we don't have 275857dacad5SJay Sternberg * any working I/O queue. 275957dacad5SJay Sternberg */ 276057dacad5SJay Sternberg if (dev->online_queues < 2) { 27611b3c47c1SSagi Grimberg dev_warn(dev->ctrl.device, "IO queues not created\n"); 27623b24774eSKeith Busch nvme_kill_queues(&dev->ctrl); 27635bae7f73SChristoph Hellwig nvme_remove_namespaces(&dev->ctrl); 2764770597ecSKeith Busch nvme_free_tagset(dev); 276557dacad5SJay Sternberg } else { 276625646264SKeith Busch nvme_start_queues(&dev->ctrl); 2767302ad8ccSKeith Busch nvme_wait_freeze(&dev->ctrl); 27685d02a5c1SKeith Busch nvme_dev_add(dev); 2769302ad8ccSKeith Busch nvme_unfreeze(&dev->ctrl); 277057dacad5SJay Sternberg } 277157dacad5SJay Sternberg 27722b1b7e78SJianchao Wang /* 27732b1b7e78SJianchao Wang * If only admin queue live, keep it to do further investigation or 27742b1b7e78SJianchao Wang * recovery. 27752b1b7e78SJianchao Wang */ 27765d02a5c1SKeith Busch if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { 27772b1b7e78SJianchao Wang dev_warn(dev->ctrl.device, 27785d02a5c1SKeith Busch "failed to mark controller live state\n"); 2779e71afda4SChaitanya Kulkarni result = -ENODEV; 2780bb8d261eSChristoph Hellwig goto out; 2781bb8d261eSChristoph Hellwig } 278292911a55SChristoph Hellwig 27830521905eSKeith Busch if (!dev->attrs_added && !sysfs_create_group(&dev->ctrl.device->kobj, 27840521905eSKeith Busch &nvme_pci_attr_group)) 27850521905eSKeith Busch dev->attrs_added = true; 27860521905eSKeith Busch 2787d09f2b45SSagi Grimberg nvme_start_ctrl(&dev->ctrl); 278857dacad5SJay Sternberg return; 278957dacad5SJay Sternberg 27904726bcf3SKeith Busch out_unlock: 27914726bcf3SKeith Busch mutex_unlock(&dev->shutdown_lock); 279257dacad5SJay Sternberg out: 27937c1ce408SChaitanya Kulkarni if (result) 27947c1ce408SChaitanya Kulkarni dev_warn(dev->ctrl.device, 27957c1ce408SChaitanya Kulkarni "Removing after probe failure status: %d\n", result); 27967c1ce408SChaitanya Kulkarni nvme_remove_dead_ctrl(dev); 279757dacad5SJay Sternberg } 279857dacad5SJay Sternberg 27995c8809e6SChristoph Hellwig static void nvme_remove_dead_ctrl_work(struct work_struct *work) 280057dacad5SJay Sternberg { 28015c8809e6SChristoph Hellwig struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); 280257dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 280357dacad5SJay Sternberg 280457dacad5SJay Sternberg if (pci_get_drvdata(pdev)) 2805921920abSKeith Busch device_release_driver(&pdev->dev); 28061673f1f0SChristoph Hellwig nvme_put_ctrl(&dev->ctrl); 280757dacad5SJay Sternberg } 280857dacad5SJay Sternberg 28091c63dc66SChristoph Hellwig static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) 281057dacad5SJay Sternberg { 28111c63dc66SChristoph Hellwig *val = readl(to_nvme_dev(ctrl)->bar + off); 28121c63dc66SChristoph Hellwig return 0; 281357dacad5SJay Sternberg } 28141c63dc66SChristoph Hellwig 28155fd4ce1bSChristoph Hellwig static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) 28165fd4ce1bSChristoph Hellwig { 28175fd4ce1bSChristoph Hellwig writel(val, to_nvme_dev(ctrl)->bar + off); 28185fd4ce1bSChristoph Hellwig return 0; 28195fd4ce1bSChristoph Hellwig } 28205fd4ce1bSChristoph Hellwig 28217fd8930fSChristoph Hellwig static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) 28227fd8930fSChristoph Hellwig { 28233a8ecc93SArd Biesheuvel *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off); 28247fd8930fSChristoph Hellwig return 0; 28257fd8930fSChristoph Hellwig } 28267fd8930fSChristoph Hellwig 282797c12223SKeith Busch static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) 282897c12223SKeith Busch { 282997c12223SKeith Busch struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); 283097c12223SKeith Busch 28312db24e4aSMax Gurtovoy return snprintf(buf, size, "%s\n", dev_name(&pdev->dev)); 283297c12223SKeith Busch } 283397c12223SKeith Busch 28341c63dc66SChristoph Hellwig static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { 28351a353d85SMing Lin .name = "pcie", 2836e439bb12SSagi Grimberg .module = THIS_MODULE, 2837e0596ab2SLogan Gunthorpe .flags = NVME_F_METADATA_SUPPORTED | 2838e0596ab2SLogan Gunthorpe NVME_F_PCI_P2PDMA, 28391c63dc66SChristoph Hellwig .reg_read32 = nvme_pci_reg_read32, 28405fd4ce1bSChristoph Hellwig .reg_write32 = nvme_pci_reg_write32, 28417fd8930fSChristoph Hellwig .reg_read64 = nvme_pci_reg_read64, 28421673f1f0SChristoph Hellwig .free_ctrl = nvme_pci_free_ctrl, 2843f866fc42SChristoph Hellwig .submit_async_event = nvme_pci_submit_async_event, 284497c12223SKeith Busch .get_address = nvme_pci_get_address, 28451c63dc66SChristoph Hellwig }; 284657dacad5SJay Sternberg 2847b00a726aSKeith Busch static int nvme_dev_map(struct nvme_dev *dev) 2848b00a726aSKeith Busch { 2849b00a726aSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 2850b00a726aSKeith Busch 2851a1f447b3SJohannes Thumshirn if (pci_request_mem_regions(pdev, "nvme")) 2852b00a726aSKeith Busch return -ENODEV; 2853b00a726aSKeith Busch 285497f6ef64SXu Yu if (nvme_remap_bar(dev, NVME_REG_DBS + 4096)) 2855b00a726aSKeith Busch goto release; 2856b00a726aSKeith Busch 2857b00a726aSKeith Busch return 0; 2858b00a726aSKeith Busch release: 2859a1f447b3SJohannes Thumshirn pci_release_mem_regions(pdev); 2860b00a726aSKeith Busch return -ENODEV; 2861b00a726aSKeith Busch } 2862b00a726aSKeith Busch 28638427bbc2SKai-Heng Feng static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) 2864ff5350a8SAndy Lutomirski { 2865ff5350a8SAndy Lutomirski if (pdev->vendor == 0x144d && pdev->device == 0xa802) { 2866ff5350a8SAndy Lutomirski /* 2867ff5350a8SAndy Lutomirski * Several Samsung devices seem to drop off the PCIe bus 2868ff5350a8SAndy Lutomirski * randomly when APST is on and uses the deepest sleep state. 2869ff5350a8SAndy Lutomirski * This has been observed on a Samsung "SM951 NVMe SAMSUNG 2870ff5350a8SAndy Lutomirski * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD 2871ff5350a8SAndy Lutomirski * 950 PRO 256GB", but it seems to be restricted to two Dell 2872ff5350a8SAndy Lutomirski * laptops. 2873ff5350a8SAndy Lutomirski */ 2874ff5350a8SAndy Lutomirski if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") && 2875ff5350a8SAndy Lutomirski (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || 2876ff5350a8SAndy Lutomirski dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) 2877ff5350a8SAndy Lutomirski return NVME_QUIRK_NO_DEEPEST_PS; 28788427bbc2SKai-Heng Feng } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { 28798427bbc2SKai-Heng Feng /* 28808427bbc2SKai-Heng Feng * Samsung SSD 960 EVO drops off the PCIe bus after system 2881467c77d4SJarosław Janik * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as 2882467c77d4SJarosław Janik * within few minutes after bootup on a Coffee Lake board - 2883467c77d4SJarosław Janik * ASUS PRIME Z370-A 28848427bbc2SKai-Heng Feng */ 28858427bbc2SKai-Heng Feng if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && 2886467c77d4SJarosław Janik (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || 2887467c77d4SJarosław Janik dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) 28888427bbc2SKai-Heng Feng return NVME_QUIRK_NO_APST; 28891fae37acSShyjumon N } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 || 28901fae37acSShyjumon N pdev->device == 0xa808 || pdev->device == 0xa809)) || 28911fae37acSShyjumon N (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) { 28921fae37acSShyjumon N /* 28931fae37acSShyjumon N * Forcing to use host managed nvme power settings for 28941fae37acSShyjumon N * lowest idle power with quick resume latency on 28951fae37acSShyjumon N * Samsung and Toshiba SSDs based on suspend behavior 28961fae37acSShyjumon N * on Coffee Lake board for LENOVO C640 28971fae37acSShyjumon N */ 28981fae37acSShyjumon N if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) && 28991fae37acSShyjumon N dmi_match(DMI_BOARD_NAME, "LNVNB161216")) 29001fae37acSShyjumon N return NVME_QUIRK_SIMPLE_SUSPEND; 2901ff5350a8SAndy Lutomirski } 2902ff5350a8SAndy Lutomirski 2903ff5350a8SAndy Lutomirski return 0; 2904ff5350a8SAndy Lutomirski } 2905ff5350a8SAndy Lutomirski 290618119775SKeith Busch static void nvme_async_probe(void *data, async_cookie_t cookie) 290718119775SKeith Busch { 290818119775SKeith Busch struct nvme_dev *dev = data; 290980f513b5SKeith Busch 2910bd46a906SKeith Busch flush_work(&dev->ctrl.reset_work); 291118119775SKeith Busch flush_work(&dev->ctrl.scan_work); 291280f513b5SKeith Busch nvme_put_ctrl(&dev->ctrl); 291318119775SKeith Busch } 291418119775SKeith Busch 291557dacad5SJay Sternberg static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 291657dacad5SJay Sternberg { 291757dacad5SJay Sternberg int node, result = -ENOMEM; 291857dacad5SJay Sternberg struct nvme_dev *dev; 2919ff5350a8SAndy Lutomirski unsigned long quirks = id->driver_data; 2920943e942eSJens Axboe size_t alloc_size; 292157dacad5SJay Sternberg 292257dacad5SJay Sternberg node = dev_to_node(&pdev->dev); 292357dacad5SJay Sternberg if (node == NUMA_NO_NODE) 29242fa84351SMasayoshi Mizuma set_dev_node(&pdev->dev, first_memory_node); 292557dacad5SJay Sternberg 292657dacad5SJay Sternberg dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); 292757dacad5SJay Sternberg if (!dev) 292857dacad5SJay Sternberg return -ENOMEM; 2929147b27e4SSagi Grimberg 29302a5bcfddSWeiping Zhang dev->nr_write_queues = write_queues; 29312a5bcfddSWeiping Zhang dev->nr_poll_queues = poll_queues; 29322a5bcfddSWeiping Zhang dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1; 29332a5bcfddSWeiping Zhang dev->queues = kcalloc_node(dev->nr_allocated_queues, 29342a5bcfddSWeiping Zhang sizeof(struct nvme_queue), GFP_KERNEL, node); 293557dacad5SJay Sternberg if (!dev->queues) 293657dacad5SJay Sternberg goto free; 293757dacad5SJay Sternberg 293857dacad5SJay Sternberg dev->dev = get_device(&pdev->dev); 293957dacad5SJay Sternberg pci_set_drvdata(pdev, dev); 294057dacad5SJay Sternberg 2941b00a726aSKeith Busch result = nvme_dev_map(dev); 2942b00a726aSKeith Busch if (result) 2943b00c9b7aSChristophe JAILLET goto put_pci; 2944b00a726aSKeith Busch 2945d86c4d8eSChristoph Hellwig INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); 29465c8809e6SChristoph Hellwig INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work); 294777bf25eaSKeith Busch mutex_init(&dev->shutdown_lock); 2948f3ca80fcSChristoph Hellwig 2949f3ca80fcSChristoph Hellwig result = nvme_setup_prp_pools(dev); 2950f3ca80fcSChristoph Hellwig if (result) 2951b00c9b7aSChristophe JAILLET goto unmap; 2952f3ca80fcSChristoph Hellwig 29538427bbc2SKai-Heng Feng quirks |= check_vendor_combination_bug(pdev); 2954ff5350a8SAndy Lutomirski 29552744d7a0SMario Limonciello if (!noacpi && acpi_storage_d3(&pdev->dev)) { 2956df4f9bc4SDavid E. Box /* 2957df4f9bc4SDavid E. Box * Some systems use a bios work around to ask for D3 on 2958df4f9bc4SDavid E. Box * platforms that support kernel managed suspend. 2959df4f9bc4SDavid E. Box */ 2960df4f9bc4SDavid E. Box dev_info(&pdev->dev, 2961df4f9bc4SDavid E. Box "platform quirk: setting simple suspend\n"); 2962df4f9bc4SDavid E. Box quirks |= NVME_QUIRK_SIMPLE_SUSPEND; 2963df4f9bc4SDavid E. Box } 2964df4f9bc4SDavid E. Box 2965943e942eSJens Axboe /* 2966943e942eSJens Axboe * Double check that our mempool alloc size will cover the biggest 2967943e942eSJens Axboe * command we support. 2968943e942eSJens Axboe */ 2969b13c6393SChaitanya Kulkarni alloc_size = nvme_pci_iod_alloc_size(); 2970943e942eSJens Axboe WARN_ON_ONCE(alloc_size > PAGE_SIZE); 2971943e942eSJens Axboe 2972943e942eSJens Axboe dev->iod_mempool = mempool_create_node(1, mempool_kmalloc, 2973943e942eSJens Axboe mempool_kfree, 2974943e942eSJens Axboe (void *) alloc_size, 2975943e942eSJens Axboe GFP_KERNEL, node); 2976943e942eSJens Axboe if (!dev->iod_mempool) { 2977943e942eSJens Axboe result = -ENOMEM; 2978943e942eSJens Axboe goto release_pools; 2979943e942eSJens Axboe } 2980943e942eSJens Axboe 2981b6e44b4cSKeith Busch result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, 2982b6e44b4cSKeith Busch quirks); 2983b6e44b4cSKeith Busch if (result) 2984b6e44b4cSKeith Busch goto release_mempool; 2985b6e44b4cSKeith Busch 29861b3c47c1SSagi Grimberg dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 29871b3c47c1SSagi Grimberg 2988bd46a906SKeith Busch nvme_reset_ctrl(&dev->ctrl); 298918119775SKeith Busch async_schedule(nvme_async_probe, dev); 29904caff8fcSSagi Grimberg 299157dacad5SJay Sternberg return 0; 299257dacad5SJay Sternberg 2993b6e44b4cSKeith Busch release_mempool: 2994b6e44b4cSKeith Busch mempool_destroy(dev->iod_mempool); 299557dacad5SJay Sternberg release_pools: 299657dacad5SJay Sternberg nvme_release_prp_pools(dev); 2997b00c9b7aSChristophe JAILLET unmap: 2998b00c9b7aSChristophe JAILLET nvme_dev_unmap(dev); 299957dacad5SJay Sternberg put_pci: 300057dacad5SJay Sternberg put_device(dev->dev); 300157dacad5SJay Sternberg free: 300257dacad5SJay Sternberg kfree(dev->queues); 300357dacad5SJay Sternberg kfree(dev); 300457dacad5SJay Sternberg return result; 300557dacad5SJay Sternberg } 300657dacad5SJay Sternberg 3007775755edSChristoph Hellwig static void nvme_reset_prepare(struct pci_dev *pdev) 300857dacad5SJay Sternberg { 300957dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 3010c1ac9a4bSKeith Busch 3011c1ac9a4bSKeith Busch /* 3012c1ac9a4bSKeith Busch * We don't need to check the return value from waiting for the reset 3013c1ac9a4bSKeith Busch * state as pci_dev device lock is held, making it impossible to race 3014c1ac9a4bSKeith Busch * with ->remove(). 3015c1ac9a4bSKeith Busch */ 3016c1ac9a4bSKeith Busch nvme_disable_prepare_reset(dev, false); 3017c1ac9a4bSKeith Busch nvme_sync_queues(&dev->ctrl); 3018775755edSChristoph Hellwig } 301957dacad5SJay Sternberg 3020775755edSChristoph Hellwig static void nvme_reset_done(struct pci_dev *pdev) 3021775755edSChristoph Hellwig { 3022f263fbb8SLinus Torvalds struct nvme_dev *dev = pci_get_drvdata(pdev); 3023c1ac9a4bSKeith Busch 3024c1ac9a4bSKeith Busch if (!nvme_try_sched_reset(&dev->ctrl)) 3025c1ac9a4bSKeith Busch flush_work(&dev->ctrl.reset_work); 302657dacad5SJay Sternberg } 302757dacad5SJay Sternberg 302857dacad5SJay Sternberg static void nvme_shutdown(struct pci_dev *pdev) 302957dacad5SJay Sternberg { 303057dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 30314e523547SBaolin Wang 3032c1ac9a4bSKeith Busch nvme_disable_prepare_reset(dev, true); 303357dacad5SJay Sternberg } 303457dacad5SJay Sternberg 30350521905eSKeith Busch static void nvme_remove_attrs(struct nvme_dev *dev) 30360521905eSKeith Busch { 30370521905eSKeith Busch if (dev->attrs_added) 30380521905eSKeith Busch sysfs_remove_group(&dev->ctrl.device->kobj, 30390521905eSKeith Busch &nvme_pci_attr_group); 30400521905eSKeith Busch } 30410521905eSKeith Busch 3042f58944e2SKeith Busch /* 3043f58944e2SKeith Busch * The driver's remove may be called on a device in a partially initialized 3044f58944e2SKeith Busch * state. This function must not have any dependencies on the device state in 3045f58944e2SKeith Busch * order to proceed. 3046f58944e2SKeith Busch */ 304757dacad5SJay Sternberg static void nvme_remove(struct pci_dev *pdev) 304857dacad5SJay Sternberg { 304957dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 305057dacad5SJay Sternberg 3051bb8d261eSChristoph Hellwig nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 305257dacad5SJay Sternberg pci_set_drvdata(pdev, NULL); 30530ff9d4e1SKeith Busch 30546db28edaSKeith Busch if (!pci_device_is_present(pdev)) { 30550ff9d4e1SKeith Busch nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); 30561d39e692SKeith Busch nvme_dev_disable(dev, true); 30576db28edaSKeith Busch } 30580ff9d4e1SKeith Busch 3059d86c4d8eSChristoph Hellwig flush_work(&dev->ctrl.reset_work); 3060d09f2b45SSagi Grimberg nvme_stop_ctrl(&dev->ctrl); 3061d09f2b45SSagi Grimberg nvme_remove_namespaces(&dev->ctrl); 3062a5cdb68cSKeith Busch nvme_dev_disable(dev, true); 30630521905eSKeith Busch nvme_remove_attrs(dev); 306487ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 306557dacad5SJay Sternberg nvme_dev_remove_admin(dev); 306657dacad5SJay Sternberg nvme_free_queues(dev, 0); 306757dacad5SJay Sternberg nvme_release_prp_pools(dev); 3068b00a726aSKeith Busch nvme_dev_unmap(dev); 3069726612b6SIsrael Rukshin nvme_uninit_ctrl(&dev->ctrl); 307057dacad5SJay Sternberg } 307157dacad5SJay Sternberg 307257dacad5SJay Sternberg #ifdef CONFIG_PM_SLEEP 3073d916b1beSKeith Busch static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps) 3074d916b1beSKeith Busch { 3075d916b1beSKeith Busch return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps); 3076d916b1beSKeith Busch } 3077d916b1beSKeith Busch 3078d916b1beSKeith Busch static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps) 3079d916b1beSKeith Busch { 3080d916b1beSKeith Busch return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL); 3081d916b1beSKeith Busch } 3082d916b1beSKeith Busch 3083d916b1beSKeith Busch static int nvme_resume(struct device *dev) 3084d916b1beSKeith Busch { 3085d916b1beSKeith Busch struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 3086d916b1beSKeith Busch struct nvme_ctrl *ctrl = &ndev->ctrl; 3087d916b1beSKeith Busch 30884eaefe8cSRafael J. Wysocki if (ndev->last_ps == U32_MAX || 3089d916b1beSKeith Busch nvme_set_power_state(ctrl, ndev->last_ps) != 0) 3090*e5ad96f3SKeith Busch goto reset; 3091*e5ad96f3SKeith Busch if (ctrl->hmpre && nvme_setup_host_mem(ndev)) 3092*e5ad96f3SKeith Busch goto reset; 3093*e5ad96f3SKeith Busch 3094d916b1beSKeith Busch return 0; 3095*e5ad96f3SKeith Busch reset: 3096*e5ad96f3SKeith Busch return nvme_try_sched_reset(ctrl); 3097d916b1beSKeith Busch } 3098d916b1beSKeith Busch 309957dacad5SJay Sternberg static int nvme_suspend(struct device *dev) 310057dacad5SJay Sternberg { 310157dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev); 310257dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev); 3103d916b1beSKeith Busch struct nvme_ctrl *ctrl = &ndev->ctrl; 3104d916b1beSKeith Busch int ret = -EBUSY; 3105d916b1beSKeith Busch 31064eaefe8cSRafael J. Wysocki ndev->last_ps = U32_MAX; 31074eaefe8cSRafael J. Wysocki 3108d916b1beSKeith Busch /* 3109d916b1beSKeith Busch * The platform does not remove power for a kernel managed suspend so 3110d916b1beSKeith Busch * use host managed nvme power settings for lowest idle power if 3111d916b1beSKeith Busch * possible. This should have quicker resume latency than a full device 3112d916b1beSKeith Busch * shutdown. But if the firmware is involved after the suspend or the 3113d916b1beSKeith Busch * device does not support any non-default power states, shut down the 3114d916b1beSKeith Busch * device fully. 31154eaefe8cSRafael J. Wysocki * 31164eaefe8cSRafael J. Wysocki * If ASPM is not enabled for the device, shut down the device and allow 31174eaefe8cSRafael J. Wysocki * the PCI bus layer to put it into D3 in order to take the PCIe link 31184eaefe8cSRafael J. Wysocki * down, so as to allow the platform to achieve its minimum low-power 31194eaefe8cSRafael J. Wysocki * state (which may not be possible if the link is up). 3120d916b1beSKeith Busch */ 31214eaefe8cSRafael J. Wysocki if (pm_suspend_via_firmware() || !ctrl->npss || 3122cb32de1bSMario Limonciello !pcie_aspm_enabled(pdev) || 3123c1ac9a4bSKeith Busch (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) 3124c1ac9a4bSKeith Busch return nvme_disable_prepare_reset(ndev, true); 3125d916b1beSKeith Busch 3126d916b1beSKeith Busch nvme_start_freeze(ctrl); 3127d916b1beSKeith Busch nvme_wait_freeze(ctrl); 3128d916b1beSKeith Busch nvme_sync_queues(ctrl); 3129d916b1beSKeith Busch 31305d02a5c1SKeith Busch if (ctrl->state != NVME_CTRL_LIVE) 3131d916b1beSKeith Busch goto unfreeze; 3132d916b1beSKeith Busch 3133*e5ad96f3SKeith Busch /* 3134*e5ad96f3SKeith Busch * Host memory access may not be successful in a system suspend state, 3135*e5ad96f3SKeith Busch * but the specification allows the controller to access memory in a 3136*e5ad96f3SKeith Busch * non-operational power state. 3137*e5ad96f3SKeith Busch */ 3138*e5ad96f3SKeith Busch if (ndev->hmb) { 3139*e5ad96f3SKeith Busch ret = nvme_set_host_mem(ndev, 0); 3140*e5ad96f3SKeith Busch if (ret < 0) 3141*e5ad96f3SKeith Busch goto unfreeze; 3142*e5ad96f3SKeith Busch } 3143*e5ad96f3SKeith Busch 3144d916b1beSKeith Busch ret = nvme_get_power_state(ctrl, &ndev->last_ps); 3145d916b1beSKeith Busch if (ret < 0) 3146d916b1beSKeith Busch goto unfreeze; 3147d916b1beSKeith Busch 31487cbb5c6fSMario Limonciello /* 31497cbb5c6fSMario Limonciello * A saved state prevents pci pm from generically controlling the 31507cbb5c6fSMario Limonciello * device's power. If we're using protocol specific settings, we don't 31517cbb5c6fSMario Limonciello * want pci interfering. 31527cbb5c6fSMario Limonciello */ 31537cbb5c6fSMario Limonciello pci_save_state(pdev); 31547cbb5c6fSMario Limonciello 3155d916b1beSKeith Busch ret = nvme_set_power_state(ctrl, ctrl->npss); 3156d916b1beSKeith Busch if (ret < 0) 3157d916b1beSKeith Busch goto unfreeze; 3158d916b1beSKeith Busch 3159d916b1beSKeith Busch if (ret) { 31607cbb5c6fSMario Limonciello /* discard the saved state */ 31617cbb5c6fSMario Limonciello pci_load_saved_state(pdev, NULL); 31627cbb5c6fSMario Limonciello 3163d916b1beSKeith Busch /* 3164d916b1beSKeith Busch * Clearing npss forces a controller reset on resume. The 316505d3046fSGeert Uytterhoeven * correct value will be rediscovered then. 3166d916b1beSKeith Busch */ 3167c1ac9a4bSKeith Busch ret = nvme_disable_prepare_reset(ndev, true); 3168d916b1beSKeith Busch ctrl->npss = 0; 3169d916b1beSKeith Busch } 3170d916b1beSKeith Busch unfreeze: 3171d916b1beSKeith Busch nvme_unfreeze(ctrl); 3172d916b1beSKeith Busch return ret; 3173d916b1beSKeith Busch } 3174d916b1beSKeith Busch 3175d916b1beSKeith Busch static int nvme_simple_suspend(struct device *dev) 3176d916b1beSKeith Busch { 3177d916b1beSKeith Busch struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 31784e523547SBaolin Wang 3179c1ac9a4bSKeith Busch return nvme_disable_prepare_reset(ndev, true); 318057dacad5SJay Sternberg } 318157dacad5SJay Sternberg 3182d916b1beSKeith Busch static int nvme_simple_resume(struct device *dev) 318357dacad5SJay Sternberg { 318457dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev); 318557dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev); 318657dacad5SJay Sternberg 3187c1ac9a4bSKeith Busch return nvme_try_sched_reset(&ndev->ctrl); 318857dacad5SJay Sternberg } 318957dacad5SJay Sternberg 319021774222SYueHaibing static const struct dev_pm_ops nvme_dev_pm_ops = { 3191d916b1beSKeith Busch .suspend = nvme_suspend, 3192d916b1beSKeith Busch .resume = nvme_resume, 3193d916b1beSKeith Busch .freeze = nvme_simple_suspend, 3194d916b1beSKeith Busch .thaw = nvme_simple_resume, 3195d916b1beSKeith Busch .poweroff = nvme_simple_suspend, 3196d916b1beSKeith Busch .restore = nvme_simple_resume, 3197d916b1beSKeith Busch }; 3198d916b1beSKeith Busch #endif /* CONFIG_PM_SLEEP */ 319957dacad5SJay Sternberg 3200a0a3408eSKeith Busch static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, 3201a0a3408eSKeith Busch pci_channel_state_t state) 3202a0a3408eSKeith Busch { 3203a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 3204a0a3408eSKeith Busch 3205a0a3408eSKeith Busch /* 3206a0a3408eSKeith Busch * A frozen channel requires a reset. When detected, this method will 3207a0a3408eSKeith Busch * shutdown the controller to quiesce. The controller will be restarted 3208a0a3408eSKeith Busch * after the slot reset through driver's slot_reset callback. 3209a0a3408eSKeith Busch */ 3210a0a3408eSKeith Busch switch (state) { 3211a0a3408eSKeith Busch case pci_channel_io_normal: 3212a0a3408eSKeith Busch return PCI_ERS_RESULT_CAN_RECOVER; 3213a0a3408eSKeith Busch case pci_channel_io_frozen: 3214d011fb31SKeith Busch dev_warn(dev->ctrl.device, 3215d011fb31SKeith Busch "frozen state error detected, reset controller\n"); 3216a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 3217a0a3408eSKeith Busch return PCI_ERS_RESULT_NEED_RESET; 3218a0a3408eSKeith Busch case pci_channel_io_perm_failure: 3219d011fb31SKeith Busch dev_warn(dev->ctrl.device, 3220d011fb31SKeith Busch "failure state error detected, request disconnect\n"); 3221a0a3408eSKeith Busch return PCI_ERS_RESULT_DISCONNECT; 3222a0a3408eSKeith Busch } 3223a0a3408eSKeith Busch return PCI_ERS_RESULT_NEED_RESET; 3224a0a3408eSKeith Busch } 3225a0a3408eSKeith Busch 3226a0a3408eSKeith Busch static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) 3227a0a3408eSKeith Busch { 3228a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 3229a0a3408eSKeith Busch 32301b3c47c1SSagi Grimberg dev_info(dev->ctrl.device, "restart after slot reset\n"); 3231a0a3408eSKeith Busch pci_restore_state(pdev); 3232d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 3233a0a3408eSKeith Busch return PCI_ERS_RESULT_RECOVERED; 3234a0a3408eSKeith Busch } 3235a0a3408eSKeith Busch 3236a0a3408eSKeith Busch static void nvme_error_resume(struct pci_dev *pdev) 3237a0a3408eSKeith Busch { 323872cd4cc2SKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 323972cd4cc2SKeith Busch 324072cd4cc2SKeith Busch flush_work(&dev->ctrl.reset_work); 3241a0a3408eSKeith Busch } 3242a0a3408eSKeith Busch 324357dacad5SJay Sternberg static const struct pci_error_handlers nvme_err_handler = { 324457dacad5SJay Sternberg .error_detected = nvme_error_detected, 324557dacad5SJay Sternberg .slot_reset = nvme_slot_reset, 324657dacad5SJay Sternberg .resume = nvme_error_resume, 3247775755edSChristoph Hellwig .reset_prepare = nvme_reset_prepare, 3248775755edSChristoph Hellwig .reset_done = nvme_reset_done, 324957dacad5SJay Sternberg }; 325057dacad5SJay Sternberg 325157dacad5SJay Sternberg static const struct pci_device_id nvme_id_table[] = { 3252972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */ 325308095e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 3254e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 3255972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */ 325699466e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 3257e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 3258972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */ 325999466e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 3260e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 3261972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */ 3262f99cb7afSDavid Wayne Fugate .driver_data = NVME_QUIRK_STRIPE_SIZE | 3263f99cb7afSDavid Wayne Fugate NVME_QUIRK_DEALLOCATE_ZEROES, }, 326450af47d0SAndy Lutomirski { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ 32659abd68efSJens Axboe .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 32666c6aa2f2SAkinobu Mita NVME_QUIRK_MEDIUM_PRIO_SQ | 3267ce4cc313SDavid Milburn NVME_QUIRK_NO_TEMP_THRESH_CHANGE | 3268ce4cc313SDavid Milburn NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 32696299358dSJames Dingwall { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ 32706299358dSJames Dingwall .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3271540c801cSKeith Busch { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ 32727b210e4eSChristoph Hellwig .driver_data = NVME_QUIRK_IDENTIFY_CNS | 32737b210e4eSChristoph Hellwig NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 32745bedd3afSChristoph Hellwig { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ 32755bedd3afSChristoph Hellwig .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, }, 32760302ae60SMicah Parrish { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ 32775e112d3fSJulian Einwag .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | 32785e112d3fSJulian Einwag NVME_QUIRK_NO_NS_DESC_LIST, }, 327954adc010SGuilherme G. Piccoli { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ 328054adc010SGuilherme G. Piccoli .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 32818c97eeccSJeff Lien { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */ 32828c97eeccSJeff Lien .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3283015282c9SWenbo Wang { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ 3284015282c9SWenbo Wang .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3285d554b5e1SMartin K. Petersen { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ 3286d554b5e1SMartin K. Petersen .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3287d554b5e1SMartin K. Petersen { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ 32887ee5c78cSGopal Tiwari .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | 3289abbb5f59SDmitry Monakhov NVME_QUIRK_DISABLE_WRITE_ZEROES| 32907ee5c78cSGopal Tiwari NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3291c9e95c39SClaus Stovgaard { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ 3292c9e95c39SClaus Stovgaard .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 32936e6a6828SPascal Terjan { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */ 32946e6a6828SPascal Terjan .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | 32956e6a6828SPascal Terjan NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 329608b903b5SMisha Nasledov { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */ 329708b903b5SMisha Nasledov .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3298f03e42c6SGabriel Craciunescu { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ 3299f03e42c6SGabriel Craciunescu .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 3300f03e42c6SGabriel Craciunescu NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 33015611ec2bSKai-Heng Feng { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ 33025611ec2bSKai-Heng Feng .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 330302ca079cSKai-Heng Feng { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */ 330402ca079cSKai-Heng Feng .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 330589919929SChaitanya Kulkarni { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */ 330689919929SChaitanya Kulkarni .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 3307dc22c1c0SZoltán Böszörményi { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */ 3308dc22c1c0SZoltán Böszörményi .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, 3309538e4a8cSThorsten Leemhuis { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ 3310538e4a8cSThorsten Leemhuis .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, 33114bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), 33124bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 33134bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), 33144bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 33154bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x8061), 33164bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 33174bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd00), 33184bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 33194bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd01), 33204bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 33214bdf2603SFilippo Sironi { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0xcd02), 33224bdf2603SFilippo Sironi .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, 332398f7b86aSAndy Shevchenko { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), 332498f7b86aSAndy Shevchenko .driver_data = NVME_QUIRK_SINGLE_VECTOR }, 3325124298bdSDaniel Roschka { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, 332666341331SBenjamin Herrenschmidt { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005), 332766341331SBenjamin Herrenschmidt .driver_data = NVME_QUIRK_SINGLE_VECTOR | 3328d38e9f04SBenjamin Herrenschmidt NVME_QUIRK_128_BYTES_SQES | 3329d38e9f04SBenjamin Herrenschmidt NVME_QUIRK_SHARED_TAGS }, 33300b85f59dSAndy Shevchenko 33310b85f59dSAndy Shevchenko { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 333257dacad5SJay Sternberg { 0, } 333357dacad5SJay Sternberg }; 333457dacad5SJay Sternberg MODULE_DEVICE_TABLE(pci, nvme_id_table); 333557dacad5SJay Sternberg 333657dacad5SJay Sternberg static struct pci_driver nvme_driver = { 333757dacad5SJay Sternberg .name = "nvme", 333857dacad5SJay Sternberg .id_table = nvme_id_table, 333957dacad5SJay Sternberg .probe = nvme_probe, 334057dacad5SJay Sternberg .remove = nvme_remove, 334157dacad5SJay Sternberg .shutdown = nvme_shutdown, 3342d916b1beSKeith Busch #ifdef CONFIG_PM_SLEEP 334357dacad5SJay Sternberg .driver = { 334457dacad5SJay Sternberg .pm = &nvme_dev_pm_ops, 334557dacad5SJay Sternberg }, 3346d916b1beSKeith Busch #endif 334774d986abSAlexander Duyck .sriov_configure = pci_sriov_configure_simple, 334857dacad5SJay Sternberg .err_handler = &nvme_err_handler, 334957dacad5SJay Sternberg }; 335057dacad5SJay Sternberg 335157dacad5SJay Sternberg static int __init nvme_init(void) 335257dacad5SJay Sternberg { 335381101540SChristoph Hellwig BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64); 335481101540SChristoph Hellwig BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); 335581101540SChristoph Hellwig BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); 3356612b7286SMing Lei BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2); 335717c33167SKeith Busch 33589a6327d2SSagi Grimberg return pci_register_driver(&nvme_driver); 335957dacad5SJay Sternberg } 336057dacad5SJay Sternberg 336157dacad5SJay Sternberg static void __exit nvme_exit(void) 336257dacad5SJay Sternberg { 336357dacad5SJay Sternberg pci_unregister_driver(&nvme_driver); 336403e0f3a6SMing Lei flush_workqueue(nvme_wq); 336557dacad5SJay Sternberg } 336657dacad5SJay Sternberg 336757dacad5SJay Sternberg MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); 336857dacad5SJay Sternberg MODULE_LICENSE("GPL"); 336957dacad5SJay Sternberg MODULE_VERSION("1.0"); 337057dacad5SJay Sternberg module_init(nvme_init); 337157dacad5SJay Sternberg module_exit(nvme_exit); 3372