15f37396dSChristoph Hellwig // SPDX-License-Identifier: GPL-2.0 257dacad5SJay Sternberg /* 357dacad5SJay Sternberg * NVM Express device driver 457dacad5SJay Sternberg * Copyright (c) 2011-2014, Intel Corporation. 557dacad5SJay Sternberg */ 657dacad5SJay Sternberg 7df4f9bc4SDavid E. Box #include <linux/acpi.h> 8a0a3408eSKeith Busch #include <linux/aer.h> 918119775SKeith Busch #include <linux/async.h> 1057dacad5SJay Sternberg #include <linux/blkdev.h> 1157dacad5SJay Sternberg #include <linux/blk-mq.h> 12dca51e78SChristoph Hellwig #include <linux/blk-mq-pci.h> 13ff5350a8SAndy Lutomirski #include <linux/dmi.h> 1457dacad5SJay Sternberg #include <linux/init.h> 1557dacad5SJay Sternberg #include <linux/interrupt.h> 1657dacad5SJay Sternberg #include <linux/io.h> 1757dacad5SJay Sternberg #include <linux/mm.h> 1857dacad5SJay Sternberg #include <linux/module.h> 1977bf25eaSKeith Busch #include <linux/mutex.h> 20d0877473SKeith Busch #include <linux/once.h> 2157dacad5SJay Sternberg #include <linux/pci.h> 22d916b1beSKeith Busch #include <linux/suspend.h> 2357dacad5SJay Sternberg #include <linux/t10-pi.h> 2457dacad5SJay Sternberg #include <linux/types.h> 259cf5c095SLinus Torvalds #include <linux/io-64-nonatomic-lo-hi.h> 26a98e58e5SScott Bauer #include <linux/sed-opal.h> 270f238ff5SLogan Gunthorpe #include <linux/pci-p2pdma.h> 2857dacad5SJay Sternberg 29604c01d5Syupeng #include "trace.h" 3057dacad5SJay Sternberg #include "nvme.h" 3157dacad5SJay Sternberg 32c1e0cc7eSBenjamin Herrenschmidt #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes) 338a1d09a6SBenjamin Herrenschmidt #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion)) 3457dacad5SJay Sternberg 35a7a7cbe3SChaitanya Kulkarni #define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc)) 36adf68f21SChristoph Hellwig 37943e942eSJens Axboe /* 38943e942eSJens Axboe * These can be higher, but we need to ensure that any command doesn't 39943e942eSJens Axboe * require an sg allocation that needs more than a page of data. 40943e942eSJens Axboe */ 41943e942eSJens Axboe #define NVME_MAX_KB_SZ 4096 42943e942eSJens Axboe #define NVME_MAX_SEGS 127 43943e942eSJens Axboe 4457dacad5SJay Sternberg static int use_threaded_interrupts; 4557dacad5SJay Sternberg module_param(use_threaded_interrupts, int, 0); 4657dacad5SJay Sternberg 4757dacad5SJay Sternberg static bool use_cmb_sqes = true; 4869f4eb9fSKeith Busch module_param(use_cmb_sqes, bool, 0444); 4957dacad5SJay Sternberg MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes"); 5057dacad5SJay Sternberg 5187ad72a5SChristoph Hellwig static unsigned int max_host_mem_size_mb = 128; 5287ad72a5SChristoph Hellwig module_param(max_host_mem_size_mb, uint, 0444); 5387ad72a5SChristoph Hellwig MODULE_PARM_DESC(max_host_mem_size_mb, 5487ad72a5SChristoph Hellwig "Maximum Host Memory Buffer (HMB) size per controller (in MiB)"); 5557dacad5SJay Sternberg 56a7a7cbe3SChaitanya Kulkarni static unsigned int sgl_threshold = SZ_32K; 57a7a7cbe3SChaitanya Kulkarni module_param(sgl_threshold, uint, 0644); 58a7a7cbe3SChaitanya Kulkarni MODULE_PARM_DESC(sgl_threshold, 59a7a7cbe3SChaitanya Kulkarni "Use SGLs when average request segment size is larger or equal to " 60a7a7cbe3SChaitanya Kulkarni "this size. 
Use 0 to disable SGLs."); 61a7a7cbe3SChaitanya Kulkarni 62b27c1e68Sweiping zhang static int io_queue_depth_set(const char *val, const struct kernel_param *kp); 63b27c1e68Sweiping zhang static const struct kernel_param_ops io_queue_depth_ops = { 64b27c1e68Sweiping zhang .set = io_queue_depth_set, 6561f3b896SChaitanya Kulkarni .get = param_get_uint, 66b27c1e68Sweiping zhang }; 67b27c1e68Sweiping zhang 6861f3b896SChaitanya Kulkarni static unsigned int io_queue_depth = 1024; 69b27c1e68Sweiping zhang module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644); 70b27c1e68Sweiping zhang MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2"); 71b27c1e68Sweiping zhang 729c9e76d5SWeiping Zhang static int io_queue_count_set(const char *val, const struct kernel_param *kp) 739c9e76d5SWeiping Zhang { 749c9e76d5SWeiping Zhang unsigned int n; 759c9e76d5SWeiping Zhang int ret; 769c9e76d5SWeiping Zhang 779c9e76d5SWeiping Zhang ret = kstrtouint(val, 10, &n); 789c9e76d5SWeiping Zhang if (ret != 0 || n > num_possible_cpus()) 799c9e76d5SWeiping Zhang return -EINVAL; 809c9e76d5SWeiping Zhang return param_set_uint(val, kp); 819c9e76d5SWeiping Zhang } 829c9e76d5SWeiping Zhang 839c9e76d5SWeiping Zhang static const struct kernel_param_ops io_queue_count_ops = { 849c9e76d5SWeiping Zhang .set = io_queue_count_set, 859c9e76d5SWeiping Zhang .get = param_get_uint, 869c9e76d5SWeiping Zhang }; 879c9e76d5SWeiping Zhang 883f68baf7SKeith Busch static unsigned int write_queues; 899c9e76d5SWeiping Zhang module_param_cb(write_queues, &io_queue_count_ops, &write_queues, 0644); 903b6592f7SJens Axboe MODULE_PARM_DESC(write_queues, 913b6592f7SJens Axboe "Number of queues to use for writes. If not set, reads and writes " 923b6592f7SJens Axboe "will share a queue set."); 933b6592f7SJens Axboe 943f68baf7SKeith Busch static unsigned int poll_queues; 959c9e76d5SWeiping Zhang module_param_cb(poll_queues, &io_queue_count_ops, &poll_queues, 0644); 964b04cc6aSJens Axboe MODULE_PARM_DESC(poll_queues, "Number of queues to use for polled IO."); 974b04cc6aSJens Axboe 98df4f9bc4SDavid E. Box static bool noacpi; 99df4f9bc4SDavid E. Box module_param(noacpi, bool, 0444); 100df4f9bc4SDavid E. Box MODULE_PARM_DESC(noacpi, "disable acpi bios quirks"); 101df4f9bc4SDavid E. Box 1021c63dc66SChristoph Hellwig struct nvme_dev; 1031c63dc66SChristoph Hellwig struct nvme_queue; 10457dacad5SJay Sternberg 105a5cdb68cSKeith Busch static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); 1068fae268bSKeith Busch static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode); 10757dacad5SJay Sternberg 10857dacad5SJay Sternberg /* 1091c63dc66SChristoph Hellwig * Represents an NVM Express device. Each nvme_dev is a PCI function. 
1101c63dc66SChristoph Hellwig */ 1111c63dc66SChristoph Hellwig struct nvme_dev { 112147b27e4SSagi Grimberg struct nvme_queue *queues; 1131c63dc66SChristoph Hellwig struct blk_mq_tag_set tagset; 1141c63dc66SChristoph Hellwig struct blk_mq_tag_set admin_tagset; 1151c63dc66SChristoph Hellwig u32 __iomem *dbs; 1161c63dc66SChristoph Hellwig struct device *dev; 1171c63dc66SChristoph Hellwig struct dma_pool *prp_page_pool; 1181c63dc66SChristoph Hellwig struct dma_pool *prp_small_pool; 1191c63dc66SChristoph Hellwig unsigned online_queues; 1201c63dc66SChristoph Hellwig unsigned max_qid; 121e20ba6e1SChristoph Hellwig unsigned io_queues[HCTX_MAX_TYPES]; 12222b55601SKeith Busch unsigned int num_vecs; 1237442ddceSJohn Garry u32 q_depth; 124c1e0cc7eSBenjamin Herrenschmidt int io_sqes; 1251c63dc66SChristoph Hellwig u32 db_stride; 1261c63dc66SChristoph Hellwig void __iomem *bar; 12797f6ef64SXu Yu unsigned long bar_mapped_size; 1285c8809e6SChristoph Hellwig struct work_struct remove_work; 12977bf25eaSKeith Busch struct mutex shutdown_lock; 1301c63dc66SChristoph Hellwig bool subsystem; 1311c63dc66SChristoph Hellwig u64 cmb_size; 1320f238ff5SLogan Gunthorpe bool cmb_use_sqes; 1331c63dc66SChristoph Hellwig u32 cmbsz; 134202021c1SStephen Bates u32 cmbloc; 1351c63dc66SChristoph Hellwig struct nvme_ctrl ctrl; 136d916b1beSKeith Busch u32 last_ps; 13787ad72a5SChristoph Hellwig 138943e942eSJens Axboe mempool_t *iod_mempool; 139943e942eSJens Axboe 14087ad72a5SChristoph Hellwig /* shadow doorbell buffer support: */ 141f9f38e33SHelen Koike u32 *dbbuf_dbs; 142f9f38e33SHelen Koike dma_addr_t dbbuf_dbs_dma_addr; 143f9f38e33SHelen Koike u32 *dbbuf_eis; 144f9f38e33SHelen Koike dma_addr_t dbbuf_eis_dma_addr; 14587ad72a5SChristoph Hellwig 14687ad72a5SChristoph Hellwig /* host memory buffer support: */ 14787ad72a5SChristoph Hellwig u64 host_mem_size; 14887ad72a5SChristoph Hellwig u32 nr_host_mem_descs; 1494033f35dSChristoph Hellwig dma_addr_t host_mem_descs_dma; 15087ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *host_mem_descs; 15187ad72a5SChristoph Hellwig void **host_mem_desc_bufs; 1522a5bcfddSWeiping Zhang unsigned int nr_allocated_queues; 1532a5bcfddSWeiping Zhang unsigned int nr_write_queues; 1542a5bcfddSWeiping Zhang unsigned int nr_poll_queues; 15557dacad5SJay Sternberg }; 15657dacad5SJay Sternberg 157b27c1e68Sweiping zhang static int io_queue_depth_set(const char *val, const struct kernel_param *kp) 158b27c1e68Sweiping zhang { 15961f3b896SChaitanya Kulkarni int ret; 1607442ddceSJohn Garry u32 n; 161b27c1e68Sweiping zhang 1627442ddceSJohn Garry ret = kstrtou32(val, 10, &n); 163b27c1e68Sweiping zhang if (ret != 0 || n < 2) 164b27c1e68Sweiping zhang return -EINVAL; 165b27c1e68Sweiping zhang 1667442ddceSJohn Garry return param_set_uint(val, kp); 167b27c1e68Sweiping zhang } 168b27c1e68Sweiping zhang 169f9f38e33SHelen Koike static inline unsigned int sq_idx(unsigned int qid, u32 stride) 170f9f38e33SHelen Koike { 171f9f38e33SHelen Koike return qid * 2 * stride; 172f9f38e33SHelen Koike } 173f9f38e33SHelen Koike 174f9f38e33SHelen Koike static inline unsigned int cq_idx(unsigned int qid, u32 stride) 175f9f38e33SHelen Koike { 176f9f38e33SHelen Koike return (qid * 2 + 1) * stride; 177f9f38e33SHelen Koike } 178f9f38e33SHelen Koike 1791c63dc66SChristoph Hellwig static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl) 1801c63dc66SChristoph Hellwig { 1811c63dc66SChristoph Hellwig return container_of(ctrl, struct nvme_dev, ctrl); 1821c63dc66SChristoph Hellwig } 1831c63dc66SChristoph Hellwig 18457dacad5SJay 
Sternberg /* 18557dacad5SJay Sternberg * An NVM Express queue. Each device has at least two (one for admin 18657dacad5SJay Sternberg * commands and one for I/O commands). 18757dacad5SJay Sternberg */ 18857dacad5SJay Sternberg struct nvme_queue { 18957dacad5SJay Sternberg struct nvme_dev *dev; 1901ab0cd69SJens Axboe spinlock_t sq_lock; 191c1e0cc7eSBenjamin Herrenschmidt void *sq_cmds; 1923a7afd8eSChristoph Hellwig /* only used for poll queues: */ 1933a7afd8eSChristoph Hellwig spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; 19474943d45SKeith Busch struct nvme_completion *cqes; 19557dacad5SJay Sternberg dma_addr_t sq_dma_addr; 19657dacad5SJay Sternberg dma_addr_t cq_dma_addr; 19757dacad5SJay Sternberg u32 __iomem *q_db; 1987442ddceSJohn Garry u32 q_depth; 1997c349ddeSKeith Busch u16 cq_vector; 20057dacad5SJay Sternberg u16 sq_tail; 20138210800SKeith Busch u16 last_sq_tail; 20257dacad5SJay Sternberg u16 cq_head; 20357dacad5SJay Sternberg u16 qid; 20457dacad5SJay Sternberg u8 cq_phase; 205c1e0cc7eSBenjamin Herrenschmidt u8 sqes; 2064e224106SChristoph Hellwig unsigned long flags; 2074e224106SChristoph Hellwig #define NVMEQ_ENABLED 0 20863223078SChristoph Hellwig #define NVMEQ_SQ_CMB 1 209d1ed6aa1SChristoph Hellwig #define NVMEQ_DELETE_ERROR 2 2107c349ddeSKeith Busch #define NVMEQ_POLLED 3 211f9f38e33SHelen Koike u32 *dbbuf_sq_db; 212f9f38e33SHelen Koike u32 *dbbuf_cq_db; 213f9f38e33SHelen Koike u32 *dbbuf_sq_ei; 214f9f38e33SHelen Koike u32 *dbbuf_cq_ei; 215d1ed6aa1SChristoph Hellwig struct completion delete_done; 21657dacad5SJay Sternberg }; 21757dacad5SJay Sternberg 21857dacad5SJay Sternberg /* 2199b048119SChristoph Hellwig * The nvme_iod describes the data in an I/O. 2209b048119SChristoph Hellwig * 2219b048119SChristoph Hellwig * The sg pointer contains the list of PRP/SGL chunk allocations in addition 2229b048119SChristoph Hellwig * to the actual struct scatterlist. 22371bd150cSChristoph Hellwig */ 22471bd150cSChristoph Hellwig struct nvme_iod { 225d49187e9SChristoph Hellwig struct nvme_request req; 226f4800d6dSChristoph Hellwig struct nvme_queue *nvmeq; 227a7a7cbe3SChaitanya Kulkarni bool use_sgl; 228f4800d6dSChristoph Hellwig int aborted; 22971bd150cSChristoph Hellwig int npages; /* In the PRP list. 
0 means small pool in use */ 23071bd150cSChristoph Hellwig int nents; /* Used in scatterlist */ 23171bd150cSChristoph Hellwig dma_addr_t first_dma; 232dff824b2SChristoph Hellwig unsigned int dma_len; /* length of single DMA segment mapping */ 233783b94bdSChristoph Hellwig dma_addr_t meta_dma; 234f4800d6dSChristoph Hellwig struct scatterlist *sg; 23557dacad5SJay Sternberg }; 23657dacad5SJay Sternberg 2372a5bcfddSWeiping Zhang static inline unsigned int nvme_dbbuf_size(struct nvme_dev *dev) 2383b6592f7SJens Axboe { 2392a5bcfddSWeiping Zhang return dev->nr_allocated_queues * 8 * dev->db_stride; 240f9f38e33SHelen Koike } 241f9f38e33SHelen Koike 242f9f38e33SHelen Koike static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev) 243f9f38e33SHelen Koike { 2442a5bcfddSWeiping Zhang unsigned int mem_size = nvme_dbbuf_size(dev); 245f9f38e33SHelen Koike 246f9f38e33SHelen Koike if (dev->dbbuf_dbs) 247f9f38e33SHelen Koike return 0; 248f9f38e33SHelen Koike 249f9f38e33SHelen Koike dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size, 250f9f38e33SHelen Koike &dev->dbbuf_dbs_dma_addr, 251f9f38e33SHelen Koike GFP_KERNEL); 252f9f38e33SHelen Koike if (!dev->dbbuf_dbs) 253f9f38e33SHelen Koike return -ENOMEM; 254f9f38e33SHelen Koike dev->dbbuf_eis = dma_alloc_coherent(dev->dev, mem_size, 255f9f38e33SHelen Koike &dev->dbbuf_eis_dma_addr, 256f9f38e33SHelen Koike GFP_KERNEL); 257f9f38e33SHelen Koike if (!dev->dbbuf_eis) { 258f9f38e33SHelen Koike dma_free_coherent(dev->dev, mem_size, 259f9f38e33SHelen Koike dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr); 260f9f38e33SHelen Koike dev->dbbuf_dbs = NULL; 261f9f38e33SHelen Koike return -ENOMEM; 262f9f38e33SHelen Koike } 263f9f38e33SHelen Koike 264f9f38e33SHelen Koike return 0; 265f9f38e33SHelen Koike } 266f9f38e33SHelen Koike 267f9f38e33SHelen Koike static void nvme_dbbuf_dma_free(struct nvme_dev *dev) 268f9f38e33SHelen Koike { 2692a5bcfddSWeiping Zhang unsigned int mem_size = nvme_dbbuf_size(dev); 270f9f38e33SHelen Koike 271f9f38e33SHelen Koike if (dev->dbbuf_dbs) { 272f9f38e33SHelen Koike dma_free_coherent(dev->dev, mem_size, 273f9f38e33SHelen Koike dev->dbbuf_dbs, dev->dbbuf_dbs_dma_addr); 274f9f38e33SHelen Koike dev->dbbuf_dbs = NULL; 275f9f38e33SHelen Koike } 276f9f38e33SHelen Koike if (dev->dbbuf_eis) { 277f9f38e33SHelen Koike dma_free_coherent(dev->dev, mem_size, 278f9f38e33SHelen Koike dev->dbbuf_eis, dev->dbbuf_eis_dma_addr); 279f9f38e33SHelen Koike dev->dbbuf_eis = NULL; 280f9f38e33SHelen Koike } 281f9f38e33SHelen Koike } 282f9f38e33SHelen Koike 283f9f38e33SHelen Koike static void nvme_dbbuf_init(struct nvme_dev *dev, 284f9f38e33SHelen Koike struct nvme_queue *nvmeq, int qid) 285f9f38e33SHelen Koike { 286f9f38e33SHelen Koike if (!dev->dbbuf_dbs || !qid) 287f9f38e33SHelen Koike return; 288f9f38e33SHelen Koike 289f9f38e33SHelen Koike nvmeq->dbbuf_sq_db = &dev->dbbuf_dbs[sq_idx(qid, dev->db_stride)]; 290f9f38e33SHelen Koike nvmeq->dbbuf_cq_db = &dev->dbbuf_dbs[cq_idx(qid, dev->db_stride)]; 291f9f38e33SHelen Koike nvmeq->dbbuf_sq_ei = &dev->dbbuf_eis[sq_idx(qid, dev->db_stride)]; 292f9f38e33SHelen Koike nvmeq->dbbuf_cq_ei = &dev->dbbuf_eis[cq_idx(qid, dev->db_stride)]; 293f9f38e33SHelen Koike } 294f9f38e33SHelen Koike 295f9f38e33SHelen Koike static void nvme_dbbuf_set(struct nvme_dev *dev) 296f9f38e33SHelen Koike { 297f9f38e33SHelen Koike struct nvme_command c; 298f9f38e33SHelen Koike 299f9f38e33SHelen Koike if (!dev->dbbuf_dbs) 300f9f38e33SHelen Koike return; 301f9f38e33SHelen Koike 302f9f38e33SHelen Koike memset(&c, 0, sizeof(c)); 303f9f38e33SHelen Koike c.dbbuf.opcode = 
nvme_admin_dbbuf; 304f9f38e33SHelen Koike c.dbbuf.prp1 = cpu_to_le64(dev->dbbuf_dbs_dma_addr); 305f9f38e33SHelen Koike c.dbbuf.prp2 = cpu_to_le64(dev->dbbuf_eis_dma_addr); 306f9f38e33SHelen Koike 307f9f38e33SHelen Koike if (nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0)) { 3089bdcfb10SChristoph Hellwig dev_warn(dev->ctrl.device, "unable to set dbbuf\n"); 309f9f38e33SHelen Koike /* Free memory and continue on */ 310f9f38e33SHelen Koike nvme_dbbuf_dma_free(dev); 311f9f38e33SHelen Koike } 312f9f38e33SHelen Koike } 313f9f38e33SHelen Koike 314f9f38e33SHelen Koike static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old) 315f9f38e33SHelen Koike { 316f9f38e33SHelen Koike return (u16)(new_idx - event_idx - 1) < (u16)(new_idx - old); 317f9f38e33SHelen Koike } 318f9f38e33SHelen Koike 319f9f38e33SHelen Koike /* Update dbbuf and return true if an MMIO is required */ 320f9f38e33SHelen Koike static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db, 321f9f38e33SHelen Koike volatile u32 *dbbuf_ei) 322f9f38e33SHelen Koike { 323f9f38e33SHelen Koike if (dbbuf_db) { 324f9f38e33SHelen Koike u16 old_value; 325f9f38e33SHelen Koike 326f9f38e33SHelen Koike /* 327f9f38e33SHelen Koike * Ensure that the queue is written before updating 328f9f38e33SHelen Koike * the doorbell in memory 329f9f38e33SHelen Koike */ 330f9f38e33SHelen Koike wmb(); 331f9f38e33SHelen Koike 332f9f38e33SHelen Koike old_value = *dbbuf_db; 333f9f38e33SHelen Koike *dbbuf_db = value; 334f9f38e33SHelen Koike 335f1ed3df2SMichal Wnukowski /* 336f1ed3df2SMichal Wnukowski * Ensure that the doorbell is updated before reading the event 337f1ed3df2SMichal Wnukowski * index from memory. The controller needs to provide similar 338f1ed3df2SMichal Wnukowski * ordering to ensure the event index is updated before reading 339f1ed3df2SMichal Wnukowski * the doorbell. 340f1ed3df2SMichal Wnukowski */ 341f1ed3df2SMichal Wnukowski mb(); 342f1ed3df2SMichal Wnukowski 343f9f38e33SHelen Koike if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value)) 344f9f38e33SHelen Koike return false; 345f9f38e33SHelen Koike } 346f9f38e33SHelen Koike 347f9f38e33SHelen Koike return true; 34857dacad5SJay Sternberg } 34957dacad5SJay Sternberg 35057dacad5SJay Sternberg /* 35157dacad5SJay Sternberg * Will slightly overestimate the number of pages needed. This is OK 35257dacad5SJay Sternberg * as it only leads to a small amount of wasted memory for the lifetime of 35357dacad5SJay Sternberg * the I/O. 35457dacad5SJay Sternberg */ 355b13c6393SChaitanya Kulkarni static int nvme_pci_npages_prp(void) 35657dacad5SJay Sternberg { 357b13c6393SChaitanya Kulkarni unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE, 3586c3c05b0SChaitanya Kulkarni NVME_CTRL_PAGE_SIZE); 35957dacad5SJay Sternberg return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8); 36057dacad5SJay Sternberg } 36157dacad5SJay Sternberg 362a7a7cbe3SChaitanya Kulkarni /* 363a7a7cbe3SChaitanya Kulkarni * Calculates the number of pages needed for the SGL segments. For example a 4k 364a7a7cbe3SChaitanya Kulkarni * page can accommodate 256 SGL descriptors. 
365a7a7cbe3SChaitanya Kulkarni */ 366b13c6393SChaitanya Kulkarni static int nvme_pci_npages_sgl(void) 367f4800d6dSChristoph Hellwig { 368b13c6393SChaitanya Kulkarni return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc), 369b13c6393SChaitanya Kulkarni PAGE_SIZE); 370f4800d6dSChristoph Hellwig } 371f4800d6dSChristoph Hellwig 372b13c6393SChaitanya Kulkarni static size_t nvme_pci_iod_alloc_size(void) 37357dacad5SJay Sternberg { 374b13c6393SChaitanya Kulkarni size_t npages = max(nvme_pci_npages_prp(), nvme_pci_npages_sgl()); 375a7a7cbe3SChaitanya Kulkarni 376b13c6393SChaitanya Kulkarni return sizeof(__le64 *) * npages + 377b13c6393SChaitanya Kulkarni sizeof(struct scatterlist) * NVME_MAX_SEGS; 378a7a7cbe3SChaitanya Kulkarni } 379a7a7cbe3SChaitanya Kulkarni 38057dacad5SJay Sternberg static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 38157dacad5SJay Sternberg unsigned int hctx_idx) 38257dacad5SJay Sternberg { 38357dacad5SJay Sternberg struct nvme_dev *dev = data; 384147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[0]; 38557dacad5SJay Sternberg 38657dacad5SJay Sternberg WARN_ON(hctx_idx != 0); 38757dacad5SJay Sternberg WARN_ON(dev->admin_tagset.tags[0] != hctx->tags); 38857dacad5SJay Sternberg 38957dacad5SJay Sternberg hctx->driver_data = nvmeq; 39057dacad5SJay Sternberg return 0; 39157dacad5SJay Sternberg } 39257dacad5SJay Sternberg 39357dacad5SJay Sternberg static int nvme_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 39457dacad5SJay Sternberg unsigned int hctx_idx) 39557dacad5SJay Sternberg { 39657dacad5SJay Sternberg struct nvme_dev *dev = data; 397147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; 39857dacad5SJay Sternberg 39957dacad5SJay Sternberg WARN_ON(dev->tagset.tags[hctx_idx] != hctx->tags); 40057dacad5SJay Sternberg hctx->driver_data = nvmeq; 40157dacad5SJay Sternberg return 0; 40257dacad5SJay Sternberg } 40357dacad5SJay Sternberg 404d6296d39SChristoph Hellwig static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req, 405d6296d39SChristoph Hellwig unsigned int hctx_idx, unsigned int numa_node) 40657dacad5SJay Sternberg { 407d6296d39SChristoph Hellwig struct nvme_dev *dev = set->driver_data; 408f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 4090350815aSChristoph Hellwig int queue_idx = (set == &dev->tagset) ? 
hctx_idx + 1 : 0; 410147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[queue_idx]; 41157dacad5SJay Sternberg 41257dacad5SJay Sternberg BUG_ON(!nvmeq); 413f4800d6dSChristoph Hellwig iod->nvmeq = nvmeq; 41459e29ce6SSagi Grimberg 41559e29ce6SSagi Grimberg nvme_req(req)->ctrl = &dev->ctrl; 41657dacad5SJay Sternberg return 0; 41757dacad5SJay Sternberg } 41857dacad5SJay Sternberg 4193b6592f7SJens Axboe static int queue_irq_offset(struct nvme_dev *dev) 4203b6592f7SJens Axboe { 4213b6592f7SJens Axboe /* if we have more than 1 vec, admin queue offsets us by 1 */ 4223b6592f7SJens Axboe if (dev->num_vecs > 1) 4233b6592f7SJens Axboe return 1; 4243b6592f7SJens Axboe 4253b6592f7SJens Axboe return 0; 4263b6592f7SJens Axboe } 4273b6592f7SJens Axboe 428dca51e78SChristoph Hellwig static int nvme_pci_map_queues(struct blk_mq_tag_set *set) 429dca51e78SChristoph Hellwig { 430dca51e78SChristoph Hellwig struct nvme_dev *dev = set->driver_data; 4313b6592f7SJens Axboe int i, qoff, offset; 432dca51e78SChristoph Hellwig 4333b6592f7SJens Axboe offset = queue_irq_offset(dev); 4343b6592f7SJens Axboe for (i = 0, qoff = 0; i < set->nr_maps; i++) { 4353b6592f7SJens Axboe struct blk_mq_queue_map *map = &set->map[i]; 4363b6592f7SJens Axboe 4373b6592f7SJens Axboe map->nr_queues = dev->io_queues[i]; 4383b6592f7SJens Axboe if (!map->nr_queues) { 439e20ba6e1SChristoph Hellwig BUG_ON(i == HCTX_TYPE_DEFAULT); 4407e849dd9SChristoph Hellwig continue; 4413b6592f7SJens Axboe } 4423b6592f7SJens Axboe 4434b04cc6aSJens Axboe /* 4444b04cc6aSJens Axboe * The poll queue(s) doesn't have an IRQ (and hence IRQ 4454b04cc6aSJens Axboe * affinity), so use the regular blk-mq cpu mapping 4464b04cc6aSJens Axboe */ 4473b6592f7SJens Axboe map->queue_offset = qoff; 448cb9e0e50SKeith Busch if (i != HCTX_TYPE_POLL && offset) 4493b6592f7SJens Axboe blk_mq_pci_map_queues(map, to_pci_dev(dev->dev), offset); 4504b04cc6aSJens Axboe else 4514b04cc6aSJens Axboe blk_mq_map_queues(map); 4523b6592f7SJens Axboe qoff += map->nr_queues; 4533b6592f7SJens Axboe offset += map->nr_queues; 4543b6592f7SJens Axboe } 4553b6592f7SJens Axboe 4563b6592f7SJens Axboe return 0; 457dca51e78SChristoph Hellwig } 458dca51e78SChristoph Hellwig 45938210800SKeith Busch /* 46038210800SKeith Busch * Write sq tail if we are asked to, or if the next command would wrap. 
46138210800SKeith Busch */ 46238210800SKeith Busch static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq) 46304f3eafdSJens Axboe { 46438210800SKeith Busch if (!write_sq) { 46538210800SKeith Busch u16 next_tail = nvmeq->sq_tail + 1; 46638210800SKeith Busch 46738210800SKeith Busch if (next_tail == nvmeq->q_depth) 46838210800SKeith Busch next_tail = 0; 46938210800SKeith Busch if (next_tail != nvmeq->last_sq_tail) 47038210800SKeith Busch return; 47138210800SKeith Busch } 47238210800SKeith Busch 47304f3eafdSJens Axboe if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail, 47404f3eafdSJens Axboe nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei)) 47504f3eafdSJens Axboe writel(nvmeq->sq_tail, nvmeq->q_db); 47638210800SKeith Busch nvmeq->last_sq_tail = nvmeq->sq_tail; 47704f3eafdSJens Axboe } 47804f3eafdSJens Axboe 47957dacad5SJay Sternberg /** 48090ea5ca4SChristoph Hellwig * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell 48157dacad5SJay Sternberg * @nvmeq: The queue to use 48257dacad5SJay Sternberg * @cmd: The command to send 48304f3eafdSJens Axboe * @write_sq: whether to write to the SQ doorbell 48457dacad5SJay Sternberg */ 48504f3eafdSJens Axboe static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd, 48604f3eafdSJens Axboe bool write_sq) 48757dacad5SJay Sternberg { 48890ea5ca4SChristoph Hellwig spin_lock(&nvmeq->sq_lock); 489c1e0cc7eSBenjamin Herrenschmidt memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes), 490c1e0cc7eSBenjamin Herrenschmidt cmd, sizeof(*cmd)); 49190ea5ca4SChristoph Hellwig if (++nvmeq->sq_tail == nvmeq->q_depth) 49290ea5ca4SChristoph Hellwig nvmeq->sq_tail = 0; 49338210800SKeith Busch nvme_write_sq_db(nvmeq, write_sq); 49404f3eafdSJens Axboe spin_unlock(&nvmeq->sq_lock); 49504f3eafdSJens Axboe } 49604f3eafdSJens Axboe 49704f3eafdSJens Axboe static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx) 49804f3eafdSJens Axboe { 49904f3eafdSJens Axboe struct nvme_queue *nvmeq = hctx->driver_data; 50004f3eafdSJens Axboe 50104f3eafdSJens Axboe spin_lock(&nvmeq->sq_lock); 50238210800SKeith Busch if (nvmeq->sq_tail != nvmeq->last_sq_tail) 50338210800SKeith Busch nvme_write_sq_db(nvmeq, true); 50490ea5ca4SChristoph Hellwig spin_unlock(&nvmeq->sq_lock); 50557dacad5SJay Sternberg } 50657dacad5SJay Sternberg 507a7a7cbe3SChaitanya Kulkarni static void **nvme_pci_iod_list(struct request *req) 50857dacad5SJay Sternberg { 509f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 510a7a7cbe3SChaitanya Kulkarni return (void **)(iod->sg + blk_rq_nr_phys_segments(req)); 51157dacad5SJay Sternberg } 51257dacad5SJay Sternberg 513955b1b5aSMinwoo Im static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req) 514955b1b5aSMinwoo Im { 515955b1b5aSMinwoo Im struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 51620469a37SKeith Busch int nseg = blk_rq_nr_phys_segments(req); 517955b1b5aSMinwoo Im unsigned int avg_seg_size; 518955b1b5aSMinwoo Im 51920469a37SKeith Busch avg_seg_size = DIV_ROUND_UP(blk_rq_payload_bytes(req), nseg); 520955b1b5aSMinwoo Im 521955b1b5aSMinwoo Im if (!(dev->ctrl.sgls & ((1 << 0) | (1 << 1)))) 522955b1b5aSMinwoo Im return false; 523955b1b5aSMinwoo Im if (!iod->nvmeq->qid) 524955b1b5aSMinwoo Im return false; 525955b1b5aSMinwoo Im if (!sgl_threshold || avg_seg_size < sgl_threshold) 526955b1b5aSMinwoo Im return false; 527955b1b5aSMinwoo Im return true; 528955b1b5aSMinwoo Im } 529955b1b5aSMinwoo Im 5307fe07d14SChristoph Hellwig static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) 
53157dacad5SJay Sternberg { 532f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 5336c3c05b0SChaitanya Kulkarni const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; 534a7a7cbe3SChaitanya Kulkarni dma_addr_t dma_addr = iod->first_dma, next_dma_addr; 53557dacad5SJay Sternberg int i; 53657dacad5SJay Sternberg 537dff824b2SChristoph Hellwig if (iod->dma_len) { 538f2fa006fSIsrael Rukshin dma_unmap_page(dev->dev, dma_addr, iod->dma_len, 539f2fa006fSIsrael Rukshin rq_dma_dir(req)); 540dff824b2SChristoph Hellwig return; 541dff824b2SChristoph Hellwig } 542dff824b2SChristoph Hellwig 543dff824b2SChristoph Hellwig WARN_ON_ONCE(!iod->nents); 544dff824b2SChristoph Hellwig 5457f73eac3SLogan Gunthorpe if (is_pci_p2pdma_page(sg_page(iod->sg))) 5467f73eac3SLogan Gunthorpe pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents, 5477f73eac3SLogan Gunthorpe rq_dma_dir(req)); 5487f73eac3SLogan Gunthorpe else 549dff824b2SChristoph Hellwig dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req)); 5507fe07d14SChristoph Hellwig 5517fe07d14SChristoph Hellwig 55257dacad5SJay Sternberg if (iod->npages == 0) 553a7a7cbe3SChaitanya Kulkarni dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0], 554a7a7cbe3SChaitanya Kulkarni dma_addr); 555a7a7cbe3SChaitanya Kulkarni 55657dacad5SJay Sternberg for (i = 0; i < iod->npages; i++) { 557a7a7cbe3SChaitanya Kulkarni void *addr = nvme_pci_iod_list(req)[i]; 558a7a7cbe3SChaitanya Kulkarni 559a7a7cbe3SChaitanya Kulkarni if (iod->use_sgl) { 560a7a7cbe3SChaitanya Kulkarni struct nvme_sgl_desc *sg_list = addr; 561a7a7cbe3SChaitanya Kulkarni 562a7a7cbe3SChaitanya Kulkarni next_dma_addr = 563a7a7cbe3SChaitanya Kulkarni le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr); 564a7a7cbe3SChaitanya Kulkarni } else { 565a7a7cbe3SChaitanya Kulkarni __le64 *prp_list = addr; 566a7a7cbe3SChaitanya Kulkarni 567a7a7cbe3SChaitanya Kulkarni next_dma_addr = le64_to_cpu(prp_list[last_prp]); 568a7a7cbe3SChaitanya Kulkarni } 569a7a7cbe3SChaitanya Kulkarni 570a7a7cbe3SChaitanya Kulkarni dma_pool_free(dev->prp_page_pool, addr, dma_addr); 571a7a7cbe3SChaitanya Kulkarni dma_addr = next_dma_addr; 57257dacad5SJay Sternberg } 57357dacad5SJay Sternberg 574943e942eSJens Axboe mempool_free(iod->sg, dev->iod_mempool); 57557dacad5SJay Sternberg } 57657dacad5SJay Sternberg 577d0877473SKeith Busch static void nvme_print_sgl(struct scatterlist *sgl, int nents) 578d0877473SKeith Busch { 579d0877473SKeith Busch int i; 580d0877473SKeith Busch struct scatterlist *sg; 581d0877473SKeith Busch 582d0877473SKeith Busch for_each_sg(sgl, sg, nents, i) { 583d0877473SKeith Busch dma_addr_t phys = sg_phys(sg); 584d0877473SKeith Busch pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d " 585d0877473SKeith Busch "dma_address:%pad dma_length:%d\n", 586d0877473SKeith Busch i, &phys, sg->offset, sg->length, &sg_dma_address(sg), 587d0877473SKeith Busch sg_dma_len(sg)); 588d0877473SKeith Busch } 589d0877473SKeith Busch } 590d0877473SKeith Busch 591a7a7cbe3SChaitanya Kulkarni static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, 592a7a7cbe3SChaitanya Kulkarni struct request *req, struct nvme_rw_command *cmnd) 59357dacad5SJay Sternberg { 594f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 59557dacad5SJay Sternberg struct dma_pool *pool; 596b131c61dSChristoph Hellwig int length = blk_rq_payload_bytes(req); 59757dacad5SJay Sternberg struct scatterlist *sg = iod->sg; 59857dacad5SJay Sternberg int dma_len = sg_dma_len(sg); 59957dacad5SJay Sternberg u64 dma_addr = sg_dma_address(sg); 
6006c3c05b0SChaitanya Kulkarni int offset = dma_addr & (NVME_CTRL_PAGE_SIZE - 1); 60157dacad5SJay Sternberg __le64 *prp_list; 602a7a7cbe3SChaitanya Kulkarni void **list = nvme_pci_iod_list(req); 60357dacad5SJay Sternberg dma_addr_t prp_dma; 60457dacad5SJay Sternberg int nprps, i; 60557dacad5SJay Sternberg 6066c3c05b0SChaitanya Kulkarni length -= (NVME_CTRL_PAGE_SIZE - offset); 6075228b328SJan H. Schönherr if (length <= 0) { 6085228b328SJan H. Schönherr iod->first_dma = 0; 609a7a7cbe3SChaitanya Kulkarni goto done; 6105228b328SJan H. Schönherr } 61157dacad5SJay Sternberg 6126c3c05b0SChaitanya Kulkarni dma_len -= (NVME_CTRL_PAGE_SIZE - offset); 61357dacad5SJay Sternberg if (dma_len) { 6146c3c05b0SChaitanya Kulkarni dma_addr += (NVME_CTRL_PAGE_SIZE - offset); 61557dacad5SJay Sternberg } else { 61657dacad5SJay Sternberg sg = sg_next(sg); 61757dacad5SJay Sternberg dma_addr = sg_dma_address(sg); 61857dacad5SJay Sternberg dma_len = sg_dma_len(sg); 61957dacad5SJay Sternberg } 62057dacad5SJay Sternberg 6216c3c05b0SChaitanya Kulkarni if (length <= NVME_CTRL_PAGE_SIZE) { 62257dacad5SJay Sternberg iod->first_dma = dma_addr; 623a7a7cbe3SChaitanya Kulkarni goto done; 62457dacad5SJay Sternberg } 62557dacad5SJay Sternberg 6266c3c05b0SChaitanya Kulkarni nprps = DIV_ROUND_UP(length, NVME_CTRL_PAGE_SIZE); 62757dacad5SJay Sternberg if (nprps <= (256 / 8)) { 62857dacad5SJay Sternberg pool = dev->prp_small_pool; 62957dacad5SJay Sternberg iod->npages = 0; 63057dacad5SJay Sternberg } else { 63157dacad5SJay Sternberg pool = dev->prp_page_pool; 63257dacad5SJay Sternberg iod->npages = 1; 63357dacad5SJay Sternberg } 63457dacad5SJay Sternberg 63569d2b571SChristoph Hellwig prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); 63657dacad5SJay Sternberg if (!prp_list) { 63757dacad5SJay Sternberg iod->first_dma = dma_addr; 63857dacad5SJay Sternberg iod->npages = -1; 63986eea289SKeith Busch return BLK_STS_RESOURCE; 64057dacad5SJay Sternberg } 64157dacad5SJay Sternberg list[0] = prp_list; 64257dacad5SJay Sternberg iod->first_dma = prp_dma; 64357dacad5SJay Sternberg i = 0; 64457dacad5SJay Sternberg for (;;) { 6456c3c05b0SChaitanya Kulkarni if (i == NVME_CTRL_PAGE_SIZE >> 3) { 64657dacad5SJay Sternberg __le64 *old_prp_list = prp_list; 64769d2b571SChristoph Hellwig prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); 64857dacad5SJay Sternberg if (!prp_list) 64986eea289SKeith Busch return BLK_STS_RESOURCE; 65057dacad5SJay Sternberg list[iod->npages++] = prp_list; 65157dacad5SJay Sternberg prp_list[0] = old_prp_list[i - 1]; 65257dacad5SJay Sternberg old_prp_list[i - 1] = cpu_to_le64(prp_dma); 65357dacad5SJay Sternberg i = 1; 65457dacad5SJay Sternberg } 65557dacad5SJay Sternberg prp_list[i++] = cpu_to_le64(dma_addr); 6566c3c05b0SChaitanya Kulkarni dma_len -= NVME_CTRL_PAGE_SIZE; 6576c3c05b0SChaitanya Kulkarni dma_addr += NVME_CTRL_PAGE_SIZE; 6586c3c05b0SChaitanya Kulkarni length -= NVME_CTRL_PAGE_SIZE; 65957dacad5SJay Sternberg if (length <= 0) 66057dacad5SJay Sternberg break; 66157dacad5SJay Sternberg if (dma_len > 0) 66257dacad5SJay Sternberg continue; 66386eea289SKeith Busch if (unlikely(dma_len < 0)) 66486eea289SKeith Busch goto bad_sgl; 66557dacad5SJay Sternberg sg = sg_next(sg); 66657dacad5SJay Sternberg dma_addr = sg_dma_address(sg); 66757dacad5SJay Sternberg dma_len = sg_dma_len(sg); 66857dacad5SJay Sternberg } 66957dacad5SJay Sternberg 670a7a7cbe3SChaitanya Kulkarni done: 671a7a7cbe3SChaitanya Kulkarni cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); 672a7a7cbe3SChaitanya Kulkarni cmnd->dptr.prp2 = 
cpu_to_le64(iod->first_dma); 673a7a7cbe3SChaitanya Kulkarni 67486eea289SKeith Busch return BLK_STS_OK; 67586eea289SKeith Busch 67686eea289SKeith Busch bad_sgl: 677d0877473SKeith Busch WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents), 678d0877473SKeith Busch "Invalid SGL for payload:%d nents:%d\n", 679d0877473SKeith Busch blk_rq_payload_bytes(req), iod->nents); 68086eea289SKeith Busch return BLK_STS_IOERR; 68157dacad5SJay Sternberg } 68257dacad5SJay Sternberg 683a7a7cbe3SChaitanya Kulkarni static void nvme_pci_sgl_set_data(struct nvme_sgl_desc *sge, 684a7a7cbe3SChaitanya Kulkarni struct scatterlist *sg) 685a7a7cbe3SChaitanya Kulkarni { 686a7a7cbe3SChaitanya Kulkarni sge->addr = cpu_to_le64(sg_dma_address(sg)); 687a7a7cbe3SChaitanya Kulkarni sge->length = cpu_to_le32(sg_dma_len(sg)); 688a7a7cbe3SChaitanya Kulkarni sge->type = NVME_SGL_FMT_DATA_DESC << 4; 689a7a7cbe3SChaitanya Kulkarni } 690a7a7cbe3SChaitanya Kulkarni 691a7a7cbe3SChaitanya Kulkarni static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge, 692a7a7cbe3SChaitanya Kulkarni dma_addr_t dma_addr, int entries) 693a7a7cbe3SChaitanya Kulkarni { 694a7a7cbe3SChaitanya Kulkarni sge->addr = cpu_to_le64(dma_addr); 695a7a7cbe3SChaitanya Kulkarni if (entries < SGES_PER_PAGE) { 696a7a7cbe3SChaitanya Kulkarni sge->length = cpu_to_le32(entries * sizeof(*sge)); 697a7a7cbe3SChaitanya Kulkarni sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4; 698a7a7cbe3SChaitanya Kulkarni } else { 699a7a7cbe3SChaitanya Kulkarni sge->length = cpu_to_le32(PAGE_SIZE); 700a7a7cbe3SChaitanya Kulkarni sge->type = NVME_SGL_FMT_SEG_DESC << 4; 701a7a7cbe3SChaitanya Kulkarni } 702a7a7cbe3SChaitanya Kulkarni } 703a7a7cbe3SChaitanya Kulkarni 704a7a7cbe3SChaitanya Kulkarni static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, 705b0f2853bSChristoph Hellwig struct request *req, struct nvme_rw_command *cmd, int entries) 706a7a7cbe3SChaitanya Kulkarni { 707a7a7cbe3SChaitanya Kulkarni struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 708a7a7cbe3SChaitanya Kulkarni struct dma_pool *pool; 709a7a7cbe3SChaitanya Kulkarni struct nvme_sgl_desc *sg_list; 710a7a7cbe3SChaitanya Kulkarni struct scatterlist *sg = iod->sg; 711a7a7cbe3SChaitanya Kulkarni dma_addr_t sgl_dma; 712b0f2853bSChristoph Hellwig int i = 0; 713a7a7cbe3SChaitanya Kulkarni 714a7a7cbe3SChaitanya Kulkarni /* setting the transfer type as SGL */ 715a7a7cbe3SChaitanya Kulkarni cmd->flags = NVME_CMD_SGL_METABUF; 716a7a7cbe3SChaitanya Kulkarni 717b0f2853bSChristoph Hellwig if (entries == 1) { 718a7a7cbe3SChaitanya Kulkarni nvme_pci_sgl_set_data(&cmd->dptr.sgl, sg); 719a7a7cbe3SChaitanya Kulkarni return BLK_STS_OK; 720a7a7cbe3SChaitanya Kulkarni } 721a7a7cbe3SChaitanya Kulkarni 722a7a7cbe3SChaitanya Kulkarni if (entries <= (256 / sizeof(struct nvme_sgl_desc))) { 723a7a7cbe3SChaitanya Kulkarni pool = dev->prp_small_pool; 724a7a7cbe3SChaitanya Kulkarni iod->npages = 0; 725a7a7cbe3SChaitanya Kulkarni } else { 726a7a7cbe3SChaitanya Kulkarni pool = dev->prp_page_pool; 727a7a7cbe3SChaitanya Kulkarni iod->npages = 1; 728a7a7cbe3SChaitanya Kulkarni } 729a7a7cbe3SChaitanya Kulkarni 730a7a7cbe3SChaitanya Kulkarni sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); 731a7a7cbe3SChaitanya Kulkarni if (!sg_list) { 732a7a7cbe3SChaitanya Kulkarni iod->npages = -1; 733a7a7cbe3SChaitanya Kulkarni return BLK_STS_RESOURCE; 734a7a7cbe3SChaitanya Kulkarni } 735a7a7cbe3SChaitanya Kulkarni 736a7a7cbe3SChaitanya Kulkarni nvme_pci_iod_list(req)[0] = sg_list; 737a7a7cbe3SChaitanya Kulkarni iod->first_dma = sgl_dma; 738a7a7cbe3SChaitanya Kulkarni 
739a7a7cbe3SChaitanya Kulkarni nvme_pci_sgl_set_seg(&cmd->dptr.sgl, sgl_dma, entries); 740a7a7cbe3SChaitanya Kulkarni 741a7a7cbe3SChaitanya Kulkarni do { 742a7a7cbe3SChaitanya Kulkarni if (i == SGES_PER_PAGE) { 743a7a7cbe3SChaitanya Kulkarni struct nvme_sgl_desc *old_sg_desc = sg_list; 744a7a7cbe3SChaitanya Kulkarni struct nvme_sgl_desc *link = &old_sg_desc[i - 1]; 745a7a7cbe3SChaitanya Kulkarni 746a7a7cbe3SChaitanya Kulkarni sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); 747a7a7cbe3SChaitanya Kulkarni if (!sg_list) 748a7a7cbe3SChaitanya Kulkarni return BLK_STS_RESOURCE; 749a7a7cbe3SChaitanya Kulkarni 750a7a7cbe3SChaitanya Kulkarni i = 0; 751a7a7cbe3SChaitanya Kulkarni nvme_pci_iod_list(req)[iod->npages++] = sg_list; 752a7a7cbe3SChaitanya Kulkarni sg_list[i++] = *link; 753a7a7cbe3SChaitanya Kulkarni nvme_pci_sgl_set_seg(link, sgl_dma, entries); 754a7a7cbe3SChaitanya Kulkarni } 755a7a7cbe3SChaitanya Kulkarni 756a7a7cbe3SChaitanya Kulkarni nvme_pci_sgl_set_data(&sg_list[i++], sg); 757a7a7cbe3SChaitanya Kulkarni sg = sg_next(sg); 758b0f2853bSChristoph Hellwig } while (--entries > 0); 759a7a7cbe3SChaitanya Kulkarni 760a7a7cbe3SChaitanya Kulkarni return BLK_STS_OK; 761a7a7cbe3SChaitanya Kulkarni } 762a7a7cbe3SChaitanya Kulkarni 763dff824b2SChristoph Hellwig static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev, 764dff824b2SChristoph Hellwig struct request *req, struct nvme_rw_command *cmnd, 765dff824b2SChristoph Hellwig struct bio_vec *bv) 766dff824b2SChristoph Hellwig { 767dff824b2SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 7686c3c05b0SChaitanya Kulkarni unsigned int offset = bv->bv_offset & (NVME_CTRL_PAGE_SIZE - 1); 7696c3c05b0SChaitanya Kulkarni unsigned int first_prp_len = NVME_CTRL_PAGE_SIZE - offset; 770dff824b2SChristoph Hellwig 771dff824b2SChristoph Hellwig iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); 772dff824b2SChristoph Hellwig if (dma_mapping_error(dev->dev, iod->first_dma)) 773dff824b2SChristoph Hellwig return BLK_STS_RESOURCE; 774dff824b2SChristoph Hellwig iod->dma_len = bv->bv_len; 775dff824b2SChristoph Hellwig 776dff824b2SChristoph Hellwig cmnd->dptr.prp1 = cpu_to_le64(iod->first_dma); 777dff824b2SChristoph Hellwig if (bv->bv_len > first_prp_len) 778dff824b2SChristoph Hellwig cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma + first_prp_len); 779359c1f88SBaolin Wang return BLK_STS_OK; 780dff824b2SChristoph Hellwig } 781dff824b2SChristoph Hellwig 78229791057SChristoph Hellwig static blk_status_t nvme_setup_sgl_simple(struct nvme_dev *dev, 78329791057SChristoph Hellwig struct request *req, struct nvme_rw_command *cmnd, 78429791057SChristoph Hellwig struct bio_vec *bv) 78529791057SChristoph Hellwig { 78629791057SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 78729791057SChristoph Hellwig 78829791057SChristoph Hellwig iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0); 78929791057SChristoph Hellwig if (dma_mapping_error(dev->dev, iod->first_dma)) 79029791057SChristoph Hellwig return BLK_STS_RESOURCE; 79129791057SChristoph Hellwig iod->dma_len = bv->bv_len; 79229791057SChristoph Hellwig 793049bf372SKlaus Birkelund Jensen cmnd->flags = NVME_CMD_SGL_METABUF; 79429791057SChristoph Hellwig cmnd->dptr.sgl.addr = cpu_to_le64(iod->first_dma); 79529791057SChristoph Hellwig cmnd->dptr.sgl.length = cpu_to_le32(iod->dma_len); 79629791057SChristoph Hellwig cmnd->dptr.sgl.type = NVME_SGL_FMT_DATA_DESC << 4; 797359c1f88SBaolin Wang return BLK_STS_OK; 79829791057SChristoph Hellwig } 79929791057SChristoph Hellwig 
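/*
 * Summary of the data-mapping paths in nvme_map_data() below: for a
 * single-segment, non-P2PDMA request the fast paths are tried first --
 * nvme_setup_prp_simple() when the bio_vec fits within two controller
 * pages (at most two PRP entries), or nvme_setup_sgl_simple() on an I/O
 * queue of an SGL-capable controller. Everything else is mapped through
 * a scatterlist and described with SGLs or PRPs based on
 * nvme_pci_use_sgls(): SGLs are used only on I/O queues, only when the
 * controller advertises SGL support, and only when the average segment
 * size (payload bytes / nr_phys_segments) is at least sgl_threshold
 * (32 KiB by default). For example, a 256 KiB request made up of
 * 64 x 4 KiB segments averages 4 KiB per segment and therefore falls
 * back to PRPs.
 */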
800fc17b653SChristoph Hellwig static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, 801b131c61dSChristoph Hellwig struct nvme_command *cmnd) 80257dacad5SJay Sternberg { 803f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 80470479b71SChristoph Hellwig blk_status_t ret = BLK_STS_RESOURCE; 805b0f2853bSChristoph Hellwig int nr_mapped; 80657dacad5SJay Sternberg 807dff824b2SChristoph Hellwig if (blk_rq_nr_phys_segments(req) == 1) { 808dff824b2SChristoph Hellwig struct bio_vec bv = req_bvec(req); 809dff824b2SChristoph Hellwig 810dff824b2SChristoph Hellwig if (!is_pci_p2pdma_page(bv.bv_page)) { 8116c3c05b0SChaitanya Kulkarni if (bv.bv_offset + bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2) 812dff824b2SChristoph Hellwig return nvme_setup_prp_simple(dev, req, 813dff824b2SChristoph Hellwig &cmnd->rw, &bv); 81429791057SChristoph Hellwig 81529791057SChristoph Hellwig if (iod->nvmeq->qid && 81629791057SChristoph Hellwig dev->ctrl.sgls & ((1 << 0) | (1 << 1))) 81729791057SChristoph Hellwig return nvme_setup_sgl_simple(dev, req, 81829791057SChristoph Hellwig &cmnd->rw, &bv); 819dff824b2SChristoph Hellwig } 820dff824b2SChristoph Hellwig } 821dff824b2SChristoph Hellwig 822dff824b2SChristoph Hellwig iod->dma_len = 0; 8239b048119SChristoph Hellwig iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC); 8249b048119SChristoph Hellwig if (!iod->sg) 8259b048119SChristoph Hellwig return BLK_STS_RESOURCE; 826f9d03f96SChristoph Hellwig sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); 82770479b71SChristoph Hellwig iod->nents = blk_rq_map_sg(req->q, req, iod->sg); 828ba1ca37eSChristoph Hellwig if (!iod->nents) 829ba1ca37eSChristoph Hellwig goto out; 830ba1ca37eSChristoph Hellwig 831e0596ab2SLogan Gunthorpe if (is_pci_p2pdma_page(sg_page(iod->sg))) 8322b9f4bb2SLogan Gunthorpe nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg, 8332b9f4bb2SLogan Gunthorpe iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN); 834e0596ab2SLogan Gunthorpe else 835e0596ab2SLogan Gunthorpe nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, 83670479b71SChristoph Hellwig rq_dma_dir(req), DMA_ATTR_NO_WARN); 837b0f2853bSChristoph Hellwig if (!nr_mapped) 838ba1ca37eSChristoph Hellwig goto out; 839ba1ca37eSChristoph Hellwig 84070479b71SChristoph Hellwig iod->use_sgl = nvme_pci_use_sgls(dev, req); 841955b1b5aSMinwoo Im if (iod->use_sgl) 842b0f2853bSChristoph Hellwig ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped); 843a7a7cbe3SChaitanya Kulkarni else 844a7a7cbe3SChaitanya Kulkarni ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); 845ba1ca37eSChristoph Hellwig out: 8464aedb705SChristoph Hellwig if (ret != BLK_STS_OK) 8477fe07d14SChristoph Hellwig nvme_unmap_data(dev, req); 848ba1ca37eSChristoph Hellwig return ret; 84957dacad5SJay Sternberg } 85057dacad5SJay Sternberg 8514aedb705SChristoph Hellwig static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req, 8524aedb705SChristoph Hellwig struct nvme_command *cmnd) 8534aedb705SChristoph Hellwig { 8544aedb705SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 8554aedb705SChristoph Hellwig 8564aedb705SChristoph Hellwig iod->meta_dma = dma_map_bvec(dev->dev, rq_integrity_vec(req), 8574aedb705SChristoph Hellwig rq_dma_dir(req), 0); 8584aedb705SChristoph Hellwig if (dma_mapping_error(dev->dev, iod->meta_dma)) 8594aedb705SChristoph Hellwig return BLK_STS_IOERR; 8604aedb705SChristoph Hellwig cmnd->rw.metadata = cpu_to_le64(iod->meta_dma); 861359c1f88SBaolin Wang return BLK_STS_OK; 8624aedb705SChristoph Hellwig } 
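/*
 * Request submission below: nvme_queue_rq() builds the command with
 * nvme_setup_cmd(), maps the data (nvme_map_data) and any integrity
 * metadata (nvme_map_metadata), starts the request, and hands the
 * command to nvme_submit_cmd(), which copies it into the SQ under
 * sq_lock and rings the doorbell via nvme_write_sq_db() when needed.
 * The error paths unwind in reverse order: out_unmap_data releases the
 * data mapping and out_free_cmd calls nvme_cleanup_cmd().
 */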
8634aedb705SChristoph Hellwig 86457dacad5SJay Sternberg /* 86557dacad5SJay Sternberg * NOTE: ns is NULL when called on the admin queue. 86657dacad5SJay Sternberg */ 867fc17b653SChristoph Hellwig static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx, 86857dacad5SJay Sternberg const struct blk_mq_queue_data *bd) 86957dacad5SJay Sternberg { 87057dacad5SJay Sternberg struct nvme_ns *ns = hctx->queue->queuedata; 87157dacad5SJay Sternberg struct nvme_queue *nvmeq = hctx->driver_data; 87257dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 87357dacad5SJay Sternberg struct request *req = bd->rq; 8749b048119SChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 875ba1ca37eSChristoph Hellwig struct nvme_command cmnd; 876ebe6d874SChristoph Hellwig blk_status_t ret; 87757dacad5SJay Sternberg 8789b048119SChristoph Hellwig iod->aborted = 0; 8799b048119SChristoph Hellwig iod->npages = -1; 8809b048119SChristoph Hellwig iod->nents = 0; 8819b048119SChristoph Hellwig 882d1f06f4aSJens Axboe /* 883d1f06f4aSJens Axboe * We should not need to do this, but we're still using this to 884d1f06f4aSJens Axboe * ensure we can drain requests on a dying queue. 885d1f06f4aSJens Axboe */ 8864e224106SChristoph Hellwig if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags))) 887d1f06f4aSJens Axboe return BLK_STS_IOERR; 888d1f06f4aSJens Axboe 889f9d03f96SChristoph Hellwig ret = nvme_setup_cmd(ns, req, &cmnd); 890fc17b653SChristoph Hellwig if (ret) 891f4800d6dSChristoph Hellwig return ret; 89257dacad5SJay Sternberg 893fc17b653SChristoph Hellwig if (blk_rq_nr_phys_segments(req)) { 894b131c61dSChristoph Hellwig ret = nvme_map_data(dev, req, &cmnd); 895fc17b653SChristoph Hellwig if (ret) 8969b048119SChristoph Hellwig goto out_free_cmd; 897fc17b653SChristoph Hellwig } 898ba1ca37eSChristoph Hellwig 8994aedb705SChristoph Hellwig if (blk_integrity_rq(req)) { 9004aedb705SChristoph Hellwig ret = nvme_map_metadata(dev, req, &cmnd); 9014aedb705SChristoph Hellwig if (ret) 9024aedb705SChristoph Hellwig goto out_unmap_data; 9034aedb705SChristoph Hellwig } 9044aedb705SChristoph Hellwig 905aae239e1SChristoph Hellwig blk_mq_start_request(req); 90604f3eafdSJens Axboe nvme_submit_cmd(nvmeq, &cmnd, bd->last); 907fc17b653SChristoph Hellwig return BLK_STS_OK; 9084aedb705SChristoph Hellwig out_unmap_data: 9094aedb705SChristoph Hellwig nvme_unmap_data(dev, req); 910f9d03f96SChristoph Hellwig out_free_cmd: 911f9d03f96SChristoph Hellwig nvme_cleanup_cmd(req); 912ba1ca37eSChristoph Hellwig return ret; 91357dacad5SJay Sternberg } 91457dacad5SJay Sternberg 91577f02a7aSChristoph Hellwig static void nvme_pci_complete_rq(struct request *req) 916eee417b0SChristoph Hellwig { 917f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 9184aedb705SChristoph Hellwig struct nvme_dev *dev = iod->nvmeq->dev; 919eee417b0SChristoph Hellwig 9204aedb705SChristoph Hellwig if (blk_integrity_rq(req)) 9214aedb705SChristoph Hellwig dma_unmap_page(dev->dev, iod->meta_dma, 9224aedb705SChristoph Hellwig rq_integrity_vec(req)->bv_len, rq_data_dir(req)); 923b15c592dSChristoph Hellwig if (blk_rq_nr_phys_segments(req)) 9244aedb705SChristoph Hellwig nvme_unmap_data(dev, req); 92577f02a7aSChristoph Hellwig nvme_complete_rq(req); 92657dacad5SJay Sternberg } 92757dacad5SJay Sternberg 928d783e0bdSMarta Rybczynska /* We read the CQE phase first to check if the rest of the entry is valid */ 929750dde44SChristoph Hellwig static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq) 930d783e0bdSMarta Rybczynska { 93174943d45SKeith Busch struct 
nvme_completion *hcqe = &nvmeq->cqes[nvmeq->cq_head]; 93274943d45SKeith Busch 93374943d45SKeith Busch return (le16_to_cpu(READ_ONCE(hcqe->status)) & 1) == nvmeq->cq_phase; 934d783e0bdSMarta Rybczynska } 935d783e0bdSMarta Rybczynska 936eb281c82SSagi Grimberg static inline void nvme_ring_cq_doorbell(struct nvme_queue *nvmeq) 93757dacad5SJay Sternberg { 938eb281c82SSagi Grimberg u16 head = nvmeq->cq_head; 93957dacad5SJay Sternberg 940eb281c82SSagi Grimberg if (nvme_dbbuf_update_and_check_event(head, nvmeq->dbbuf_cq_db, 941eb281c82SSagi Grimberg nvmeq->dbbuf_cq_ei)) 942eb281c82SSagi Grimberg writel(head, nvmeq->q_db + nvmeq->dev->db_stride); 943eb281c82SSagi Grimberg } 944adf68f21SChristoph Hellwig 945cfa27356SChristoph Hellwig static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq) 946cfa27356SChristoph Hellwig { 947cfa27356SChristoph Hellwig if (!nvmeq->qid) 948cfa27356SChristoph Hellwig return nvmeq->dev->admin_tagset.tags[0]; 949cfa27356SChristoph Hellwig return nvmeq->dev->tagset.tags[nvmeq->qid - 1]; 950cfa27356SChristoph Hellwig } 951cfa27356SChristoph Hellwig 9525cb525c8SJens Axboe static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) 95357dacad5SJay Sternberg { 95474943d45SKeith Busch struct nvme_completion *cqe = &nvmeq->cqes[idx]; 95557dacad5SJay Sternberg struct request *req; 956adf68f21SChristoph Hellwig 957adf68f21SChristoph Hellwig /* 958adf68f21SChristoph Hellwig * AEN requests are special as they don't time out and can 959adf68f21SChristoph Hellwig * survive any kind of queue freeze and often don't respond to 960adf68f21SChristoph Hellwig * aborts. We don't even bother to allocate a struct request 961adf68f21SChristoph Hellwig * for them but rather special case them here. 962adf68f21SChristoph Hellwig */ 96358a8df67SIsrael Rukshin if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) { 9647bf58533SChristoph Hellwig nvme_complete_async_event(&nvmeq->dev->ctrl, 96583a12fb7SSagi Grimberg cqe->status, &cqe->result); 966a0fa9647SJens Axboe return; 96757dacad5SJay Sternberg } 96857dacad5SJay Sternberg 969cfa27356SChristoph Hellwig req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id); 97050b7c243SXianting Tian if (unlikely(!req)) { 97150b7c243SXianting Tian dev_warn(nvmeq->dev->ctrl.device, 97250b7c243SXianting Tian "invalid id %d completed on queue %d\n", 97350b7c243SXianting Tian cqe->command_id, le16_to_cpu(cqe->sq_id)); 97450b7c243SXianting Tian return; 97550b7c243SXianting Tian } 97650b7c243SXianting Tian 977604c01d5Syupeng trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); 9782eb81a33SChristoph Hellwig if (!nvme_try_complete_req(req, cqe->status, cqe->result)) 979ff029451SChristoph Hellwig nvme_pci_complete_rq(req); 98083a12fb7SSagi Grimberg } 98157dacad5SJay Sternberg 9825cb525c8SJens Axboe static inline void nvme_update_cq_head(struct nvme_queue *nvmeq) 9835cb525c8SJens Axboe { 984a8de6639SAlexey Dobriyan u16 tmp = nvmeq->cq_head + 1; 985a8de6639SAlexey Dobriyan 986a8de6639SAlexey Dobriyan if (tmp == nvmeq->q_depth) { 987920d13a8SSagi Grimberg nvmeq->cq_head = 0; 988e2a366a4SAlexey Dobriyan nvmeq->cq_phase ^= 1; 989a8de6639SAlexey Dobriyan } else { 990a8de6639SAlexey Dobriyan nvmeq->cq_head = tmp; 991920d13a8SSagi Grimberg } 992a0fa9647SJens Axboe } 993a0fa9647SJens Axboe 994324b494cSKeith Busch static inline int nvme_process_cq(struct nvme_queue *nvmeq) 995a0fa9647SJens Axboe { 9961052b8acSJens Axboe int found = 0; 99783a12fb7SSagi Grimberg 9981052b8acSJens Axboe while (nvme_cqe_pending(nvmeq)) { 9991052b8acSJens Axboe 
found++; 1000b69e2ef2SKeith Busch /* 1001b69e2ef2SKeith Busch * load-load control dependency between phase and the rest of 1002b69e2ef2SKeith Busch * the cqe requires a full read memory barrier 1003b69e2ef2SKeith Busch */ 1004b69e2ef2SKeith Busch dma_rmb(); 1005324b494cSKeith Busch nvme_handle_cqe(nvmeq, nvmeq->cq_head); 10065cb525c8SJens Axboe nvme_update_cq_head(nvmeq); 100757dacad5SJay Sternberg } 100857dacad5SJay Sternberg 1009324b494cSKeith Busch if (found) 1010eb281c82SSagi Grimberg nvme_ring_cq_doorbell(nvmeq); 10115cb525c8SJens Axboe return found; 101257dacad5SJay Sternberg } 101357dacad5SJay Sternberg 101457dacad5SJay Sternberg static irqreturn_t nvme_irq(int irq, void *data) 101557dacad5SJay Sternberg { 101657dacad5SJay Sternberg struct nvme_queue *nvmeq = data; 101768fa9dbeSJens Axboe irqreturn_t ret = IRQ_NONE; 10185cb525c8SJens Axboe 10193a7afd8eSChristoph Hellwig /* 10203a7afd8eSChristoph Hellwig * The rmb/wmb pair ensures we see all updates from a previous run of 10213a7afd8eSChristoph Hellwig * the irq handler, even if that was on another CPU. 10223a7afd8eSChristoph Hellwig */ 10233a7afd8eSChristoph Hellwig rmb(); 1024324b494cSKeith Busch if (nvme_process_cq(nvmeq)) 1025324b494cSKeith Busch ret = IRQ_HANDLED; 10263a7afd8eSChristoph Hellwig wmb(); 10275cb525c8SJens Axboe 102868fa9dbeSJens Axboe return ret; 102957dacad5SJay Sternberg } 103057dacad5SJay Sternberg 103157dacad5SJay Sternberg static irqreturn_t nvme_irq_check(int irq, void *data) 103257dacad5SJay Sternberg { 103357dacad5SJay Sternberg struct nvme_queue *nvmeq = data; 10344e523547SBaolin Wang 1035750dde44SChristoph Hellwig if (nvme_cqe_pending(nvmeq)) 103657dacad5SJay Sternberg return IRQ_WAKE_THREAD; 1037d783e0bdSMarta Rybczynska return IRQ_NONE; 103857dacad5SJay Sternberg } 103957dacad5SJay Sternberg 10400b2a8a9fSChristoph Hellwig /* 1041fa059b85SKeith Busch * Poll for completions for any interrupt driven queue 10420b2a8a9fSChristoph Hellwig * Can be called from any context. 
10430b2a8a9fSChristoph Hellwig */ 1044fa059b85SKeith Busch static void nvme_poll_irqdisable(struct nvme_queue *nvmeq) 1045a0fa9647SJens Axboe { 10463a7afd8eSChristoph Hellwig struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 1047a0fa9647SJens Axboe 1048fa059b85SKeith Busch WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags)); 1049fa059b85SKeith Busch 10503a7afd8eSChristoph Hellwig disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); 1051fa059b85SKeith Busch nvme_process_cq(nvmeq); 10523a7afd8eSChristoph Hellwig enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector)); 105391a509f8SChristoph Hellwig } 1054442e19b7SSagi Grimberg 10559743139cSJens Axboe static int nvme_poll(struct blk_mq_hw_ctx *hctx) 10567776db1cSKeith Busch { 10577776db1cSKeith Busch struct nvme_queue *nvmeq = hctx->driver_data; 1058dabcefabSJens Axboe bool found; 1059dabcefabSJens Axboe 1060dabcefabSJens Axboe if (!nvme_cqe_pending(nvmeq)) 1061dabcefabSJens Axboe return 0; 1062dabcefabSJens Axboe 10633a7afd8eSChristoph Hellwig spin_lock(&nvmeq->cq_poll_lock); 1064324b494cSKeith Busch found = nvme_process_cq(nvmeq); 10653a7afd8eSChristoph Hellwig spin_unlock(&nvmeq->cq_poll_lock); 1066dabcefabSJens Axboe 1067dabcefabSJens Axboe return found; 1068dabcefabSJens Axboe } 1069dabcefabSJens Axboe 1070ad22c355SKeith Busch static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl) 107157dacad5SJay Sternberg { 1072f866fc42SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 1073147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[0]; 107457dacad5SJay Sternberg struct nvme_command c; 107557dacad5SJay Sternberg 107657dacad5SJay Sternberg memset(&c, 0, sizeof(c)); 107757dacad5SJay Sternberg c.common.opcode = nvme_admin_async_event; 1078ad22c355SKeith Busch c.common.command_id = NVME_AQ_BLK_MQ_DEPTH; 107904f3eafdSJens Axboe nvme_submit_cmd(nvmeq, &c, true); 108057dacad5SJay Sternberg } 108157dacad5SJay Sternberg 108257dacad5SJay Sternberg static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id) 108357dacad5SJay Sternberg { 108457dacad5SJay Sternberg struct nvme_command c; 108557dacad5SJay Sternberg 108657dacad5SJay Sternberg memset(&c, 0, sizeof(c)); 108757dacad5SJay Sternberg c.delete_queue.opcode = opcode; 108857dacad5SJay Sternberg c.delete_queue.qid = cpu_to_le16(id); 108957dacad5SJay Sternberg 10901c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 109157dacad5SJay Sternberg } 109257dacad5SJay Sternberg 109357dacad5SJay Sternberg static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid, 1094a8e3e0bbSJianchao Wang struct nvme_queue *nvmeq, s16 vector) 109557dacad5SJay Sternberg { 109657dacad5SJay Sternberg struct nvme_command c; 10974b04cc6aSJens Axboe int flags = NVME_QUEUE_PHYS_CONTIG; 10984b04cc6aSJens Axboe 10997c349ddeSKeith Busch if (!test_bit(NVMEQ_POLLED, &nvmeq->flags)) 11004b04cc6aSJens Axboe flags |= NVME_CQ_IRQ_ENABLED; 110157dacad5SJay Sternberg 110257dacad5SJay Sternberg /* 110316772ae6SMinwoo Im * Note: we (ab)use the fact that the prp fields survive if no data 110457dacad5SJay Sternberg * is attached to the request. 
110557dacad5SJay Sternberg */ 110657dacad5SJay Sternberg memset(&c, 0, sizeof(c)); 110757dacad5SJay Sternberg c.create_cq.opcode = nvme_admin_create_cq; 110857dacad5SJay Sternberg c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr); 110957dacad5SJay Sternberg c.create_cq.cqid = cpu_to_le16(qid); 111057dacad5SJay Sternberg c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 111157dacad5SJay Sternberg c.create_cq.cq_flags = cpu_to_le16(flags); 1112a8e3e0bbSJianchao Wang c.create_cq.irq_vector = cpu_to_le16(vector); 111357dacad5SJay Sternberg 11141c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 111557dacad5SJay Sternberg } 111657dacad5SJay Sternberg 111757dacad5SJay Sternberg static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid, 111857dacad5SJay Sternberg struct nvme_queue *nvmeq) 111957dacad5SJay Sternberg { 11209abd68efSJens Axboe struct nvme_ctrl *ctrl = &dev->ctrl; 112157dacad5SJay Sternberg struct nvme_command c; 112281c1cd98SKeith Busch int flags = NVME_QUEUE_PHYS_CONTIG; 112357dacad5SJay Sternberg 112457dacad5SJay Sternberg /* 11259abd68efSJens Axboe * Some drives have a bug that auto-enables WRRU if MEDIUM isn't 11269abd68efSJens Axboe * set. Since URGENT priority is zeroes, it makes all queues 11279abd68efSJens Axboe * URGENT. 11289abd68efSJens Axboe */ 11299abd68efSJens Axboe if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ) 11309abd68efSJens Axboe flags |= NVME_SQ_PRIO_MEDIUM; 11319abd68efSJens Axboe 11329abd68efSJens Axboe /* 113316772ae6SMinwoo Im * Note: we (ab)use the fact that the prp fields survive if no data 113457dacad5SJay Sternberg * is attached to the request. 113557dacad5SJay Sternberg */ 113657dacad5SJay Sternberg memset(&c, 0, sizeof(c)); 113757dacad5SJay Sternberg c.create_sq.opcode = nvme_admin_create_sq; 113857dacad5SJay Sternberg c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr); 113957dacad5SJay Sternberg c.create_sq.sqid = cpu_to_le16(qid); 114057dacad5SJay Sternberg c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1); 114157dacad5SJay Sternberg c.create_sq.sq_flags = cpu_to_le16(flags); 114257dacad5SJay Sternberg c.create_sq.cqid = cpu_to_le16(qid); 114357dacad5SJay Sternberg 11441c63dc66SChristoph Hellwig return nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 114557dacad5SJay Sternberg } 114657dacad5SJay Sternberg 114757dacad5SJay Sternberg static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid) 114857dacad5SJay Sternberg { 114957dacad5SJay Sternberg return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid); 115057dacad5SJay Sternberg } 115157dacad5SJay Sternberg 115257dacad5SJay Sternberg static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) 115357dacad5SJay Sternberg { 115457dacad5SJay Sternberg return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); 115557dacad5SJay Sternberg } 115657dacad5SJay Sternberg 11572a842acaSChristoph Hellwig static void abort_endio(struct request *req, blk_status_t error) 115857dacad5SJay Sternberg { 1159f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 1160f4800d6dSChristoph Hellwig struct nvme_queue *nvmeq = iod->nvmeq; 116157dacad5SJay Sternberg 116227fa9bc5SChristoph Hellwig dev_warn(nvmeq->dev->ctrl.device, 116327fa9bc5SChristoph Hellwig "Abort status: 0x%x", nvme_req(req)->status); 1164e7a2a87dSChristoph Hellwig atomic_inc(&nvmeq->dev->ctrl.abort_limit); 1165e7a2a87dSChristoph Hellwig blk_mq_free_request(req); 116657dacad5SJay Sternberg } 116757dacad5SJay Sternberg 1168b2a0eb1aSKeith Busch static bool nvme_should_reset(struct nvme_dev 
*dev, u32 csts) 1169b2a0eb1aSKeith Busch { 1170b2a0eb1aSKeith Busch /* If true, indicates loss of adapter communication, possibly by a 1171b2a0eb1aSKeith Busch * NVMe Subsystem reset. 1172b2a0eb1aSKeith Busch */ 1173b2a0eb1aSKeith Busch bool nssro = dev->subsystem && (csts & NVME_CSTS_NSSRO); 1174b2a0eb1aSKeith Busch 1175ad70062cSJianchao Wang /* If there is a reset/reinit ongoing, we shouldn't reset again. */ 1176ad70062cSJianchao Wang switch (dev->ctrl.state) { 1177ad70062cSJianchao Wang case NVME_CTRL_RESETTING: 1178ad6a0a52SMax Gurtovoy case NVME_CTRL_CONNECTING: 1179b2a0eb1aSKeith Busch return false; 1180ad70062cSJianchao Wang default: 1181ad70062cSJianchao Wang break; 1182ad70062cSJianchao Wang } 1183b2a0eb1aSKeith Busch 1184b2a0eb1aSKeith Busch /* We shouldn't reset unless the controller is on fatal error state 1185b2a0eb1aSKeith Busch * _or_ if we lost the communication with it. 1186b2a0eb1aSKeith Busch */ 1187b2a0eb1aSKeith Busch if (!(csts & NVME_CSTS_CFS) && !nssro) 1188b2a0eb1aSKeith Busch return false; 1189b2a0eb1aSKeith Busch 1190b2a0eb1aSKeith Busch return true; 1191b2a0eb1aSKeith Busch } 1192b2a0eb1aSKeith Busch 1193b2a0eb1aSKeith Busch static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) 1194b2a0eb1aSKeith Busch { 1195b2a0eb1aSKeith Busch /* Read a config register to help see what died. */ 1196b2a0eb1aSKeith Busch u16 pci_status; 1197b2a0eb1aSKeith Busch int result; 1198b2a0eb1aSKeith Busch 1199b2a0eb1aSKeith Busch result = pci_read_config_word(to_pci_dev(dev->dev), PCI_STATUS, 1200b2a0eb1aSKeith Busch &pci_status); 1201b2a0eb1aSKeith Busch if (result == PCIBIOS_SUCCESSFUL) 1202b2a0eb1aSKeith Busch dev_warn(dev->ctrl.device, 1203b2a0eb1aSKeith Busch "controller is down; will reset: CSTS=0x%x, PCI_STATUS=0x%hx\n", 1204b2a0eb1aSKeith Busch csts, pci_status); 1205b2a0eb1aSKeith Busch else 1206b2a0eb1aSKeith Busch dev_warn(dev->ctrl.device, 1207b2a0eb1aSKeith Busch "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", 1208b2a0eb1aSKeith Busch csts, result); 1209b2a0eb1aSKeith Busch } 1210b2a0eb1aSKeith Busch 121131c7c7d2SChristoph Hellwig static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) 121257dacad5SJay Sternberg { 1213f4800d6dSChristoph Hellwig struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 1214f4800d6dSChristoph Hellwig struct nvme_queue *nvmeq = iod->nvmeq; 121557dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 121657dacad5SJay Sternberg struct request *abort_req; 121757dacad5SJay Sternberg struct nvme_command cmd; 1218b2a0eb1aSKeith Busch u32 csts = readl(dev->bar + NVME_REG_CSTS); 1219b2a0eb1aSKeith Busch 1220651438bbSWen Xiong /* If PCI error recovery process is happening, we cannot reset or 1221651438bbSWen Xiong * the recovery mechanism will surely fail. 1222651438bbSWen Xiong */ 1223651438bbSWen Xiong mb(); 1224651438bbSWen Xiong if (pci_channel_offline(to_pci_dev(dev->dev))) 1225651438bbSWen Xiong return BLK_EH_RESET_TIMER; 1226651438bbSWen Xiong 1227b2a0eb1aSKeith Busch /* 1228b2a0eb1aSKeith Busch * Reset immediately if the controller is failed 1229b2a0eb1aSKeith Busch */ 1230b2a0eb1aSKeith Busch if (nvme_should_reset(dev, csts)) { 1231b2a0eb1aSKeith Busch nvme_warn_reset(dev, csts); 1232b2a0eb1aSKeith Busch nvme_dev_disable(dev, false); 1233d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 1234db8c48e4SChristoph Hellwig return BLK_EH_DONE; 1235b2a0eb1aSKeith Busch } 123657dacad5SJay Sternberg 123731c7c7d2SChristoph Hellwig /* 12387776db1cSKeith Busch * Did we miss an interrupt? 
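	 * If so, the direct completion-queue poll below will reap the
	 * command and the timeout resolves without an abort or reset.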
12397776db1cSKeith Busch */ 1240fa059b85SKeith Busch if (test_bit(NVMEQ_POLLED, &nvmeq->flags)) 1241fa059b85SKeith Busch nvme_poll(req->mq_hctx); 1242fa059b85SKeith Busch else 1243bf392a5dSKeith Busch nvme_poll_irqdisable(nvmeq); 1244fa059b85SKeith Busch 1245bf392a5dSKeith Busch if (blk_mq_request_completed(req)) { 12467776db1cSKeith Busch dev_warn(dev->ctrl.device, 12477776db1cSKeith Busch "I/O %d QID %d timeout, completion polled\n", 12487776db1cSKeith Busch req->tag, nvmeq->qid); 1249db8c48e4SChristoph Hellwig return BLK_EH_DONE; 12507776db1cSKeith Busch } 12517776db1cSKeith Busch 12527776db1cSKeith Busch /* 1253fd634f41SChristoph Hellwig * Shutdown immediately if controller times out while starting. The 1254fd634f41SChristoph Hellwig * reset work will see the pci device disabled when it gets the forced 1255fd634f41SChristoph Hellwig * cancellation error. All outstanding requests are completed on 1256db8c48e4SChristoph Hellwig * shutdown, so we return BLK_EH_DONE. 1257fd634f41SChristoph Hellwig */ 12584244140dSKeith Busch switch (dev->ctrl.state) { 12594244140dSKeith Busch case NVME_CTRL_CONNECTING: 12602036f726SKeith Busch nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 1261df561f66SGustavo A. R. Silva fallthrough; 12622036f726SKeith Busch case NVME_CTRL_DELETING: 1263b9cac43cSKeith Busch dev_warn_ratelimited(dev->ctrl.device, 1264fd634f41SChristoph Hellwig "I/O %d QID %d timeout, disable controller\n", 1265fd634f41SChristoph Hellwig req->tag, nvmeq->qid); 126627fa9bc5SChristoph Hellwig nvme_req(req)->flags |= NVME_REQ_CANCELLED; 12677ad92f65STong Zhang nvme_dev_disable(dev, true); 1268db8c48e4SChristoph Hellwig return BLK_EH_DONE; 126939a9dd81SKeith Busch case NVME_CTRL_RESETTING: 127039a9dd81SKeith Busch return BLK_EH_RESET_TIMER; 12714244140dSKeith Busch default: 12724244140dSKeith Busch break; 1273fd634f41SChristoph Hellwig } 1274fd634f41SChristoph Hellwig 1275fd634f41SChristoph Hellwig /* 1276e1569a16SKeith Busch * Shutdown the controller immediately and schedule a reset if the 1277e1569a16SKeith Busch * command was already aborted once before and still hasn't been 1278e1569a16SKeith Busch * returned to the driver, or if this is the admin queue. 
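	 * Either way the command is marked NVME_REQ_CANCELLED, the device
	 * is disabled and a full controller reset is scheduled.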
127931c7c7d2SChristoph Hellwig */ 1280f4800d6dSChristoph Hellwig if (!nvmeq->qid || iod->aborted) { 12811b3c47c1SSagi Grimberg dev_warn(dev->ctrl.device, 128257dacad5SJay Sternberg "I/O %d QID %d timeout, reset controller\n", 128357dacad5SJay Sternberg req->tag, nvmeq->qid); 12847ad92f65STong Zhang nvme_req(req)->flags |= NVME_REQ_CANCELLED; 1285a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 1286d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 1287e1569a16SKeith Busch 1288db8c48e4SChristoph Hellwig return BLK_EH_DONE; 128957dacad5SJay Sternberg } 129057dacad5SJay Sternberg 1291e7a2a87dSChristoph Hellwig if (atomic_dec_return(&dev->ctrl.abort_limit) < 0) { 1292e7a2a87dSChristoph Hellwig atomic_inc(&dev->ctrl.abort_limit); 1293e7a2a87dSChristoph Hellwig return BLK_EH_RESET_TIMER; 1294e7a2a87dSChristoph Hellwig } 12957bf7d778SKeith Busch iod->aborted = 1; 129657dacad5SJay Sternberg 129757dacad5SJay Sternberg memset(&cmd, 0, sizeof(cmd)); 129857dacad5SJay Sternberg cmd.abort.opcode = nvme_admin_abort_cmd; 129957dacad5SJay Sternberg cmd.abort.cid = req->tag; 130057dacad5SJay Sternberg cmd.abort.sqid = cpu_to_le16(nvmeq->qid); 130157dacad5SJay Sternberg 13021b3c47c1SSagi Grimberg dev_warn(nvmeq->dev->ctrl.device, 13031b3c47c1SSagi Grimberg "I/O %d QID %d timeout, aborting\n", 130457dacad5SJay Sternberg req->tag, nvmeq->qid); 1305e7a2a87dSChristoph Hellwig 1306e7a2a87dSChristoph Hellwig abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd, 1307eb71f435SChristoph Hellwig BLK_MQ_REQ_NOWAIT, NVME_QID_ANY); 13086bf25d16SChristoph Hellwig if (IS_ERR(abort_req)) { 13096bf25d16SChristoph Hellwig atomic_inc(&dev->ctrl.abort_limit); 131031c7c7d2SChristoph Hellwig return BLK_EH_RESET_TIMER; 131157dacad5SJay Sternberg } 131257dacad5SJay Sternberg 1313e7a2a87dSChristoph Hellwig abort_req->end_io_data = NULL; 1314e7a2a87dSChristoph Hellwig blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio); 131557dacad5SJay Sternberg 131657dacad5SJay Sternberg /* 131757dacad5SJay Sternberg * The aborted req will be completed on receiving the abort req. 131857dacad5SJay Sternberg * We enable the timer again. If hit twice, it'll cause a device reset, 131957dacad5SJay Sternberg * as the device then is in a faulty state. 
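	 * (abort_endio() above only logs the Abort status and returns the
	 * abort_limit credit; the timed-out request itself stays outstanding.)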
132057dacad5SJay Sternberg */ 132157dacad5SJay Sternberg return BLK_EH_RESET_TIMER; 132257dacad5SJay Sternberg } 132357dacad5SJay Sternberg 132457dacad5SJay Sternberg static void nvme_free_queue(struct nvme_queue *nvmeq) 132557dacad5SJay Sternberg { 13268a1d09a6SBenjamin Herrenschmidt dma_free_coherent(nvmeq->dev->dev, CQ_SIZE(nvmeq), 132757dacad5SJay Sternberg (void *)nvmeq->cqes, nvmeq->cq_dma_addr); 132863223078SChristoph Hellwig if (!nvmeq->sq_cmds) 132963223078SChristoph Hellwig return; 13300f238ff5SLogan Gunthorpe 133163223078SChristoph Hellwig if (test_and_clear_bit(NVMEQ_SQ_CMB, &nvmeq->flags)) { 133288a041f4SKeith Busch pci_free_p2pmem(to_pci_dev(nvmeq->dev->dev), 13338a1d09a6SBenjamin Herrenschmidt nvmeq->sq_cmds, SQ_SIZE(nvmeq)); 133463223078SChristoph Hellwig } else { 13358a1d09a6SBenjamin Herrenschmidt dma_free_coherent(nvmeq->dev->dev, SQ_SIZE(nvmeq), 133663223078SChristoph Hellwig nvmeq->sq_cmds, nvmeq->sq_dma_addr); 13370f238ff5SLogan Gunthorpe } 133857dacad5SJay Sternberg } 133957dacad5SJay Sternberg 134057dacad5SJay Sternberg static void nvme_free_queues(struct nvme_dev *dev, int lowest) 134157dacad5SJay Sternberg { 134257dacad5SJay Sternberg int i; 134357dacad5SJay Sternberg 1344d858e5f0SSagi Grimberg for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { 1345d858e5f0SSagi Grimberg dev->ctrl.queue_count--; 1346147b27e4SSagi Grimberg nvme_free_queue(&dev->queues[i]); 134757dacad5SJay Sternberg } 134857dacad5SJay Sternberg } 134957dacad5SJay Sternberg 135057dacad5SJay Sternberg /** 135157dacad5SJay Sternberg * nvme_suspend_queue - put queue into suspended state 135240581d1aSBart Van Assche * @nvmeq: queue to suspend 135357dacad5SJay Sternberg */ 135457dacad5SJay Sternberg static int nvme_suspend_queue(struct nvme_queue *nvmeq) 135557dacad5SJay Sternberg { 13564e224106SChristoph Hellwig if (!test_and_clear_bit(NVMEQ_ENABLED, &nvmeq->flags)) 135757dacad5SJay Sternberg return 1; 135857dacad5SJay Sternberg 13594e224106SChristoph Hellwig /* ensure that nvme_queue_rq() sees NVMEQ_ENABLED cleared */ 1360d1f06f4aSJens Axboe mb(); 136157dacad5SJay Sternberg 13624e224106SChristoph Hellwig nvmeq->dev->online_queues--; 13631c63dc66SChristoph Hellwig if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) 1364c81545f9SSagi Grimberg blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q); 13657c349ddeSKeith Busch if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags)) 13664e224106SChristoph Hellwig pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq); 136757dacad5SJay Sternberg return 0; 136857dacad5SJay Sternberg } 136957dacad5SJay Sternberg 13708fae268bSKeith Busch static void nvme_suspend_io_queues(struct nvme_dev *dev) 13718fae268bSKeith Busch { 13728fae268bSKeith Busch int i; 13738fae268bSKeith Busch 13748fae268bSKeith Busch for (i = dev->ctrl.queue_count - 1; i > 0; i--) 13758fae268bSKeith Busch nvme_suspend_queue(&dev->queues[i]); 13768fae268bSKeith Busch } 13778fae268bSKeith Busch 1378a5cdb68cSKeith Busch static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) 137957dacad5SJay Sternberg { 1380147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[0]; 138157dacad5SJay Sternberg 1382a5cdb68cSKeith Busch if (shutdown) 1383a5cdb68cSKeith Busch nvme_shutdown_ctrl(&dev->ctrl); 1384a5cdb68cSKeith Busch else 1385b5b05048SSagi Grimberg nvme_disable_ctrl(&dev->ctrl); 138657dacad5SJay Sternberg 1387bf392a5dSKeith Busch nvme_poll_irqdisable(nvmeq); 138857dacad5SJay Sternberg } 138957dacad5SJay Sternberg 1390fa46c6fbSKeith Busch /* 1391fa46c6fbSKeith Busch * Called only on 
a device that has been disabled and after all other threads 13929210c075SDongli Zhang * that can check this device's completion queues have synced, except 13939210c075SDongli Zhang * nvme_poll(). This is the last chance for the driver to see a natural 13949210c075SDongli Zhang * completion before nvme_cancel_request() terminates all incomplete requests. 1395fa46c6fbSKeith Busch */ 1396fa46c6fbSKeith Busch static void nvme_reap_pending_cqes(struct nvme_dev *dev) 1397fa46c6fbSKeith Busch { 1398fa46c6fbSKeith Busch int i; 1399fa46c6fbSKeith Busch 14009210c075SDongli Zhang for (i = dev->ctrl.queue_count - 1; i > 0; i--) { 14019210c075SDongli Zhang spin_lock(&dev->queues[i].cq_poll_lock); 1402324b494cSKeith Busch nvme_process_cq(&dev->queues[i]); 14039210c075SDongli Zhang spin_unlock(&dev->queues[i].cq_poll_lock); 14049210c075SDongli Zhang } 1405fa46c6fbSKeith Busch } 1406fa46c6fbSKeith Busch 140757dacad5SJay Sternberg static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues, 140857dacad5SJay Sternberg int entry_size) 140957dacad5SJay Sternberg { 141057dacad5SJay Sternberg int q_depth = dev->q_depth; 14115fd4ce1bSChristoph Hellwig unsigned q_size_aligned = roundup(q_depth * entry_size, 14126c3c05b0SChaitanya Kulkarni NVME_CTRL_PAGE_SIZE); 141357dacad5SJay Sternberg 141457dacad5SJay Sternberg if (q_size_aligned * nr_io_queues > dev->cmb_size) { 141557dacad5SJay Sternberg u64 mem_per_q = div_u64(dev->cmb_size, nr_io_queues); 14164e523547SBaolin Wang 14176c3c05b0SChaitanya Kulkarni mem_per_q = round_down(mem_per_q, NVME_CTRL_PAGE_SIZE); 141857dacad5SJay Sternberg q_depth = div_u64(mem_per_q, entry_size); 141957dacad5SJay Sternberg 142057dacad5SJay Sternberg /* 142157dacad5SJay Sternberg * Ensure the reduced q_depth is above some threshold where it 142257dacad5SJay Sternberg * would be better to map queues in system memory with the 142357dacad5SJay Sternberg * original depth 142457dacad5SJay Sternberg */ 142557dacad5SJay Sternberg if (q_depth < 64) 142657dacad5SJay Sternberg return -ENOMEM; 142757dacad5SJay Sternberg } 142857dacad5SJay Sternberg 142957dacad5SJay Sternberg return q_depth; 143057dacad5SJay Sternberg } 143157dacad5SJay Sternberg 143257dacad5SJay Sternberg static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, 14338a1d09a6SBenjamin Herrenschmidt int qid) 143457dacad5SJay Sternberg { 14350f238ff5SLogan Gunthorpe struct pci_dev *pdev = to_pci_dev(dev->dev); 1436815c6704SKeith Busch 14370f238ff5SLogan Gunthorpe if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) { 14388a1d09a6SBenjamin Herrenschmidt nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(nvmeq)); 1439bfac8e9fSAlan Mikhak if (nvmeq->sq_cmds) { 14400f238ff5SLogan Gunthorpe nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev, 14410f238ff5SLogan Gunthorpe nvmeq->sq_cmds); 144263223078SChristoph Hellwig if (nvmeq->sq_dma_addr) { 144363223078SChristoph Hellwig set_bit(NVMEQ_SQ_CMB, &nvmeq->flags); 144463223078SChristoph Hellwig return 0; 144563223078SChristoph Hellwig } 1446bfac8e9fSAlan Mikhak 14478a1d09a6SBenjamin Herrenschmidt pci_free_p2pmem(pdev, nvmeq->sq_cmds, SQ_SIZE(nvmeq)); 1448bfac8e9fSAlan Mikhak } 14490f238ff5SLogan Gunthorpe } 14500f238ff5SLogan Gunthorpe 14518a1d09a6SBenjamin Herrenschmidt nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(nvmeq), 145257dacad5SJay Sternberg &nvmeq->sq_dma_addr, GFP_KERNEL); 145357dacad5SJay Sternberg if (!nvmeq->sq_cmds) 145457dacad5SJay Sternberg return -ENOMEM; 145557dacad5SJay Sternberg return 0; 145657dacad5SJay Sternberg } 145757dacad5SJay 
Sternberg 1458a6ff7262SKeith Busch static int nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth) 145957dacad5SJay Sternberg { 1460147b27e4SSagi Grimberg struct nvme_queue *nvmeq = &dev->queues[qid]; 146157dacad5SJay Sternberg 146262314e40SKeith Busch if (dev->ctrl.queue_count > qid) 146362314e40SKeith Busch return 0; 146457dacad5SJay Sternberg 1465c1e0cc7eSBenjamin Herrenschmidt nvmeq->sqes = qid ? dev->io_sqes : NVME_ADM_SQES; 14668a1d09a6SBenjamin Herrenschmidt nvmeq->q_depth = depth; 14678a1d09a6SBenjamin Herrenschmidt nvmeq->cqes = dma_alloc_coherent(dev->dev, CQ_SIZE(nvmeq), 146857dacad5SJay Sternberg &nvmeq->cq_dma_addr, GFP_KERNEL); 146957dacad5SJay Sternberg if (!nvmeq->cqes) 147057dacad5SJay Sternberg goto free_nvmeq; 147157dacad5SJay Sternberg 14728a1d09a6SBenjamin Herrenschmidt if (nvme_alloc_sq_cmds(dev, nvmeq, qid)) 147357dacad5SJay Sternberg goto free_cqdma; 147457dacad5SJay Sternberg 147557dacad5SJay Sternberg nvmeq->dev = dev; 14761ab0cd69SJens Axboe spin_lock_init(&nvmeq->sq_lock); 14773a7afd8eSChristoph Hellwig spin_lock_init(&nvmeq->cq_poll_lock); 147857dacad5SJay Sternberg nvmeq->cq_head = 0; 147957dacad5SJay Sternberg nvmeq->cq_phase = 1; 148057dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 148157dacad5SJay Sternberg nvmeq->qid = qid; 1482d858e5f0SSagi Grimberg dev->ctrl.queue_count++; 148357dacad5SJay Sternberg 1484147b27e4SSagi Grimberg return 0; 148557dacad5SJay Sternberg 148657dacad5SJay Sternberg free_cqdma: 14878a1d09a6SBenjamin Herrenschmidt dma_free_coherent(dev->dev, CQ_SIZE(nvmeq), (void *)nvmeq->cqes, 148857dacad5SJay Sternberg nvmeq->cq_dma_addr); 148957dacad5SJay Sternberg free_nvmeq: 1490147b27e4SSagi Grimberg return -ENOMEM; 149157dacad5SJay Sternberg } 149257dacad5SJay Sternberg 1493dca51e78SChristoph Hellwig static int queue_request_irq(struct nvme_queue *nvmeq) 149457dacad5SJay Sternberg { 14950ff199cbSChristoph Hellwig struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev); 14960ff199cbSChristoph Hellwig int nr = nvmeq->dev->ctrl.instance; 14970ff199cbSChristoph Hellwig 14980ff199cbSChristoph Hellwig if (use_threaded_interrupts) { 14990ff199cbSChristoph Hellwig return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check, 15000ff199cbSChristoph Hellwig nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 15010ff199cbSChristoph Hellwig } else { 15020ff199cbSChristoph Hellwig return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq, 15030ff199cbSChristoph Hellwig NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid); 15040ff199cbSChristoph Hellwig } 150557dacad5SJay Sternberg } 150657dacad5SJay Sternberg 150757dacad5SJay Sternberg static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) 150857dacad5SJay Sternberg { 150957dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 151057dacad5SJay Sternberg 151157dacad5SJay Sternberg nvmeq->sq_tail = 0; 151238210800SKeith Busch nvmeq->last_sq_tail = 0; 151357dacad5SJay Sternberg nvmeq->cq_head = 0; 151457dacad5SJay Sternberg nvmeq->cq_phase = 1; 151557dacad5SJay Sternberg nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 15168a1d09a6SBenjamin Herrenschmidt memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq)); 1517f9f38e33SHelen Koike nvme_dbbuf_init(dev, nvmeq, qid); 151857dacad5SJay Sternberg dev->online_queues++; 15193a7afd8eSChristoph Hellwig wmb(); /* ensure the first interrupt sees the initialization */ 152057dacad5SJay Sternberg } 152157dacad5SJay Sternberg 15224b04cc6aSJens Axboe static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) 152357dacad5SJay Sternberg { 
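	/*
	 * Bring one I/O queue online: create the completion and submission
	 * queues on the controller via admin commands, attach an interrupt
	 * vector unless the queue is polled, and finally mark the queue
	 * NVMEQ_ENABLED so the submission path may use it.
	 */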
152457dacad5SJay Sternberg struct nvme_dev *dev = nvmeq->dev; 152557dacad5SJay Sternberg int result; 15267c349ddeSKeith Busch u16 vector = 0; 152757dacad5SJay Sternberg 1528d1ed6aa1SChristoph Hellwig clear_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 1529d1ed6aa1SChristoph Hellwig 153022b55601SKeith Busch /* 153122b55601SKeith Busch * A queue's vector matches the queue identifier unless the controller 153222b55601SKeith Busch * has only one vector available. 153322b55601SKeith Busch */ 15344b04cc6aSJens Axboe if (!polled) 1535a8e3e0bbSJianchao Wang vector = dev->num_vecs == 1 ? 0 : qid; 15364b04cc6aSJens Axboe else 15377c349ddeSKeith Busch set_bit(NVMEQ_POLLED, &nvmeq->flags); 15384b04cc6aSJens Axboe 1539a8e3e0bbSJianchao Wang result = adapter_alloc_cq(dev, qid, nvmeq, vector); 1540ded45505SKeith Busch if (result) 1541ded45505SKeith Busch return result; 154257dacad5SJay Sternberg 154357dacad5SJay Sternberg result = adapter_alloc_sq(dev, qid, nvmeq); 154457dacad5SJay Sternberg if (result < 0) 1545ded45505SKeith Busch return result; 1546c80b36cdSEdmund Nadolski if (result) 154757dacad5SJay Sternberg goto release_cq; 154857dacad5SJay Sternberg 1549a8e3e0bbSJianchao Wang nvmeq->cq_vector = vector; 1550161b8be2SKeith Busch nvme_init_queue(nvmeq, qid); 15514b04cc6aSJens Axboe 15527c349ddeSKeith Busch if (!polled) { 1553dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 155457dacad5SJay Sternberg if (result < 0) 155557dacad5SJay Sternberg goto release_sq; 15564b04cc6aSJens Axboe } 155757dacad5SJay Sternberg 15584e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &nvmeq->flags); 155957dacad5SJay Sternberg return result; 156057dacad5SJay Sternberg 156157dacad5SJay Sternberg release_sq: 1562f25a2dfcSJianchao Wang dev->online_queues--; 156357dacad5SJay Sternberg adapter_delete_sq(dev, qid); 156457dacad5SJay Sternberg release_cq: 156557dacad5SJay Sternberg adapter_delete_cq(dev, qid); 156657dacad5SJay Sternberg return result; 156757dacad5SJay Sternberg } 156857dacad5SJay Sternberg 1569f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_admin_ops = { 157057dacad5SJay Sternberg .queue_rq = nvme_queue_rq, 157177f02a7aSChristoph Hellwig .complete = nvme_pci_complete_rq, 157257dacad5SJay Sternberg .init_hctx = nvme_admin_init_hctx, 15730350815aSChristoph Hellwig .init_request = nvme_init_request, 157457dacad5SJay Sternberg .timeout = nvme_timeout, 157557dacad5SJay Sternberg }; 157657dacad5SJay Sternberg 1577f363b089SEric Biggers static const struct blk_mq_ops nvme_mq_ops = { 1578376f7ef8SChristoph Hellwig .queue_rq = nvme_queue_rq, 1579376f7ef8SChristoph Hellwig .complete = nvme_pci_complete_rq, 1580376f7ef8SChristoph Hellwig .commit_rqs = nvme_commit_rqs, 1581376f7ef8SChristoph Hellwig .init_hctx = nvme_init_hctx, 1582376f7ef8SChristoph Hellwig .init_request = nvme_init_request, 1583376f7ef8SChristoph Hellwig .map_queues = nvme_pci_map_queues, 1584376f7ef8SChristoph Hellwig .timeout = nvme_timeout, 1585c6d962aeSChristoph Hellwig .poll = nvme_poll, 1586dabcefabSJens Axboe }; 1587dabcefabSJens Axboe 158857dacad5SJay Sternberg static void nvme_dev_remove_admin(struct nvme_dev *dev) 158957dacad5SJay Sternberg { 15901c63dc66SChristoph Hellwig if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { 159169d9a99cSKeith Busch /* 159269d9a99cSKeith Busch * If the controller was reset during removal, it's possible 159369d9a99cSKeith Busch * user requests may be waiting on a stopped queue. Start the 159469d9a99cSKeith Busch * queue to flush these to completion. 
159569d9a99cSKeith Busch */ 1596c81545f9SSagi Grimberg blk_mq_unquiesce_queue(dev->ctrl.admin_q); 15971c63dc66SChristoph Hellwig blk_cleanup_queue(dev->ctrl.admin_q); 159857dacad5SJay Sternberg blk_mq_free_tag_set(&dev->admin_tagset); 159957dacad5SJay Sternberg } 160057dacad5SJay Sternberg } 160157dacad5SJay Sternberg 160257dacad5SJay Sternberg static int nvme_alloc_admin_tags(struct nvme_dev *dev) 160357dacad5SJay Sternberg { 16041c63dc66SChristoph Hellwig if (!dev->ctrl.admin_q) { 160557dacad5SJay Sternberg dev->admin_tagset.ops = &nvme_mq_admin_ops; 160657dacad5SJay Sternberg dev->admin_tagset.nr_hw_queues = 1; 1607e3e9d50cSKeith Busch 160838dabe21SKeith Busch dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH; 1609*dc96f938SChaitanya Kulkarni dev->admin_tagset.timeout = NVME_ADMIN_TIMEOUT; 1610d4ec47f1SMax Gurtovoy dev->admin_tagset.numa_node = dev->ctrl.numa_node; 1611d43f1ccfSChristoph Hellwig dev->admin_tagset.cmd_size = sizeof(struct nvme_iod); 1612d3484991SJens Axboe dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED; 161357dacad5SJay Sternberg dev->admin_tagset.driver_data = dev; 161457dacad5SJay Sternberg 161557dacad5SJay Sternberg if (blk_mq_alloc_tag_set(&dev->admin_tagset)) 161657dacad5SJay Sternberg return -ENOMEM; 161734b6c231SSagi Grimberg dev->ctrl.admin_tagset = &dev->admin_tagset; 161857dacad5SJay Sternberg 16191c63dc66SChristoph Hellwig dev->ctrl.admin_q = blk_mq_init_queue(&dev->admin_tagset); 16201c63dc66SChristoph Hellwig if (IS_ERR(dev->ctrl.admin_q)) { 162157dacad5SJay Sternberg blk_mq_free_tag_set(&dev->admin_tagset); 162257dacad5SJay Sternberg return -ENOMEM; 162357dacad5SJay Sternberg } 16241c63dc66SChristoph Hellwig if (!blk_get_queue(dev->ctrl.admin_q)) { 162557dacad5SJay Sternberg nvme_dev_remove_admin(dev); 16261c63dc66SChristoph Hellwig dev->ctrl.admin_q = NULL; 162757dacad5SJay Sternberg return -ENODEV; 162857dacad5SJay Sternberg } 162957dacad5SJay Sternberg } else 1630c81545f9SSagi Grimberg blk_mq_unquiesce_queue(dev->ctrl.admin_q); 163157dacad5SJay Sternberg 163257dacad5SJay Sternberg return 0; 163357dacad5SJay Sternberg } 163457dacad5SJay Sternberg 163597f6ef64SXu Yu static unsigned long db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues) 163697f6ef64SXu Yu { 163797f6ef64SXu Yu return NVME_REG_DBS + ((nr_io_queues + 1) * 8 * dev->db_stride); 163897f6ef64SXu Yu } 163997f6ef64SXu Yu 164097f6ef64SXu Yu static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size) 164197f6ef64SXu Yu { 164297f6ef64SXu Yu struct pci_dev *pdev = to_pci_dev(dev->dev); 164397f6ef64SXu Yu 164497f6ef64SXu Yu if (size <= dev->bar_mapped_size) 164597f6ef64SXu Yu return 0; 164697f6ef64SXu Yu if (size > pci_resource_len(pdev, 0)) 164797f6ef64SXu Yu return -ENOMEM; 164897f6ef64SXu Yu if (dev->bar) 164997f6ef64SXu Yu iounmap(dev->bar); 165097f6ef64SXu Yu dev->bar = ioremap(pci_resource_start(pdev, 0), size); 165197f6ef64SXu Yu if (!dev->bar) { 165297f6ef64SXu Yu dev->bar_mapped_size = 0; 165397f6ef64SXu Yu return -ENOMEM; 165497f6ef64SXu Yu } 165597f6ef64SXu Yu dev->bar_mapped_size = size; 165697f6ef64SXu Yu dev->dbs = dev->bar + NVME_REG_DBS; 165797f6ef64SXu Yu 165897f6ef64SXu Yu return 0; 165997f6ef64SXu Yu } 166097f6ef64SXu Yu 166101ad0990SSagi Grimberg static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) 166257dacad5SJay Sternberg { 166357dacad5SJay Sternberg int result; 166457dacad5SJay Sternberg u32 aqa; 166557dacad5SJay Sternberg struct nvme_queue *nvmeq; 166657dacad5SJay Sternberg 166797f6ef64SXu Yu result = nvme_remap_bar(dev, db_bar_size(dev, 0)); 
166897f6ef64SXu Yu if (result < 0) 166997f6ef64SXu Yu return result; 167097f6ef64SXu Yu 16718ef2074dSGabriel Krisman Bertazi dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? 167220d0dfe6SSagi Grimberg NVME_CAP_NSSRC(dev->ctrl.cap) : 0; 167357dacad5SJay Sternberg 16747a67cbeaSChristoph Hellwig if (dev->subsystem && 16757a67cbeaSChristoph Hellwig (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) 16767a67cbeaSChristoph Hellwig writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); 167757dacad5SJay Sternberg 1678b5b05048SSagi Grimberg result = nvme_disable_ctrl(&dev->ctrl); 167957dacad5SJay Sternberg if (result < 0) 168057dacad5SJay Sternberg return result; 168157dacad5SJay Sternberg 1682a6ff7262SKeith Busch result = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); 1683147b27e4SSagi Grimberg if (result) 1684147b27e4SSagi Grimberg return result; 168557dacad5SJay Sternberg 1686635333e4SMax Gurtovoy dev->ctrl.numa_node = dev_to_node(dev->dev); 1687635333e4SMax Gurtovoy 1688147b27e4SSagi Grimberg nvmeq = &dev->queues[0]; 168957dacad5SJay Sternberg aqa = nvmeq->q_depth - 1; 169057dacad5SJay Sternberg aqa |= aqa << 16; 169157dacad5SJay Sternberg 16927a67cbeaSChristoph Hellwig writel(aqa, dev->bar + NVME_REG_AQA); 16937a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); 16947a67cbeaSChristoph Hellwig lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); 169557dacad5SJay Sternberg 1696c0f2f45bSSagi Grimberg result = nvme_enable_ctrl(&dev->ctrl); 169757dacad5SJay Sternberg if (result) 1698d4875622SKeith Busch return result; 169957dacad5SJay Sternberg 170057dacad5SJay Sternberg nvmeq->cq_vector = 0; 1701161b8be2SKeith Busch nvme_init_queue(nvmeq, 0); 1702dca51e78SChristoph Hellwig result = queue_request_irq(nvmeq); 170357dacad5SJay Sternberg if (result) { 17047c349ddeSKeith Busch dev->online_queues--; 1705d4875622SKeith Busch return result; 170657dacad5SJay Sternberg } 170757dacad5SJay Sternberg 17084e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &nvmeq->flags); 170957dacad5SJay Sternberg return result; 171057dacad5SJay Sternberg } 171157dacad5SJay Sternberg 1712749941f2SChristoph Hellwig static int nvme_create_io_queues(struct nvme_dev *dev) 171357dacad5SJay Sternberg { 17144b04cc6aSJens Axboe unsigned i, max, rw_queues; 1715749941f2SChristoph Hellwig int ret = 0; 171657dacad5SJay Sternberg 1717d858e5f0SSagi Grimberg for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { 1718a6ff7262SKeith Busch if (nvme_alloc_queue(dev, i, dev->q_depth)) { 1719749941f2SChristoph Hellwig ret = -ENOMEM; 172057dacad5SJay Sternberg break; 1721749941f2SChristoph Hellwig } 1722749941f2SChristoph Hellwig } 172357dacad5SJay Sternberg 1724d858e5f0SSagi Grimberg max = min(dev->max_qid, dev->ctrl.queue_count - 1); 1725e20ba6e1SChristoph Hellwig if (max != 1 && dev->io_queues[HCTX_TYPE_POLL]) { 1726e20ba6e1SChristoph Hellwig rw_queues = dev->io_queues[HCTX_TYPE_DEFAULT] + 1727e20ba6e1SChristoph Hellwig dev->io_queues[HCTX_TYPE_READ]; 17284b04cc6aSJens Axboe } else { 17294b04cc6aSJens Axboe rw_queues = max; 17304b04cc6aSJens Axboe } 17314b04cc6aSJens Axboe 1732949928c1SKeith Busch for (i = dev->online_queues; i <= max; i++) { 17334b04cc6aSJens Axboe bool polled = i > rw_queues; 17344b04cc6aSJens Axboe 17354b04cc6aSJens Axboe ret = nvme_create_queue(&dev->queues[i], i, polled); 1736d4875622SKeith Busch if (ret) 173757dacad5SJay Sternberg break; 173857dacad5SJay Sternberg } 173957dacad5SJay Sternberg 1740749941f2SChristoph Hellwig /* 1741749941f2SChristoph Hellwig * Ignore 
failing Create SQ/CQ commands, we can continue with less 17428adb8c14SMinwoo Im * than the desired amount of queues, and even a controller without 17438adb8c14SMinwoo Im * I/O queues can still be used to issue admin commands. This might 1744749941f2SChristoph Hellwig * be useful to upgrade a buggy firmware for example. 1745749941f2SChristoph Hellwig */ 1746749941f2SChristoph Hellwig return ret >= 0 ? 0 : ret; 174757dacad5SJay Sternberg } 174857dacad5SJay Sternberg 1749202021c1SStephen Bates static ssize_t nvme_cmb_show(struct device *dev, 1750202021c1SStephen Bates struct device_attribute *attr, 1751202021c1SStephen Bates char *buf) 1752202021c1SStephen Bates { 1753202021c1SStephen Bates struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); 1754202021c1SStephen Bates 1755c965809cSStephen Bates return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n", 1756202021c1SStephen Bates ndev->cmbloc, ndev->cmbsz); 1757202021c1SStephen Bates } 1758202021c1SStephen Bates static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL); 1759202021c1SStephen Bates 176088de4598SChristoph Hellwig static u64 nvme_cmb_size_unit(struct nvme_dev *dev) 176157dacad5SJay Sternberg { 176288de4598SChristoph Hellwig u8 szu = (dev->cmbsz >> NVME_CMBSZ_SZU_SHIFT) & NVME_CMBSZ_SZU_MASK; 176388de4598SChristoph Hellwig 176488de4598SChristoph Hellwig return 1ULL << (12 + 4 * szu); 176588de4598SChristoph Hellwig } 176688de4598SChristoph Hellwig 176788de4598SChristoph Hellwig static u32 nvme_cmb_size(struct nvme_dev *dev) 176888de4598SChristoph Hellwig { 176988de4598SChristoph Hellwig return (dev->cmbsz >> NVME_CMBSZ_SZ_SHIFT) & NVME_CMBSZ_SZ_MASK; 177088de4598SChristoph Hellwig } 177188de4598SChristoph Hellwig 1772f65efd6dSChristoph Hellwig static void nvme_map_cmb(struct nvme_dev *dev) 177357dacad5SJay Sternberg { 177488de4598SChristoph Hellwig u64 size, offset; 177557dacad5SJay Sternberg resource_size_t bar_size; 177657dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 17778969f1f8SChristoph Hellwig int bar; 177857dacad5SJay Sternberg 17799fe5c59fSKeith Busch if (dev->cmb_size) 17809fe5c59fSKeith Busch return; 17819fe5c59fSKeith Busch 17827a67cbeaSChristoph Hellwig dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); 1783f65efd6dSChristoph Hellwig if (!dev->cmbsz) 1784f65efd6dSChristoph Hellwig return; 1785202021c1SStephen Bates dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC); 178657dacad5SJay Sternberg 178788de4598SChristoph Hellwig size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev); 178888de4598SChristoph Hellwig offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc); 17898969f1f8SChristoph Hellwig bar = NVME_CMB_BIR(dev->cmbloc); 17908969f1f8SChristoph Hellwig bar_size = pci_resource_len(pdev, bar); 179157dacad5SJay Sternberg 179257dacad5SJay Sternberg if (offset > bar_size) 1793f65efd6dSChristoph Hellwig return; 179457dacad5SJay Sternberg 179557dacad5SJay Sternberg /* 179657dacad5SJay Sternberg * Controllers may support a CMB size larger than their BAR, 179757dacad5SJay Sternberg * for example, due to being behind a bridge. 
Reduce the CMB to 179857dacad5SJay Sternberg * the reported size of the BAR 179957dacad5SJay Sternberg */ 180057dacad5SJay Sternberg if (size > bar_size - offset) 180157dacad5SJay Sternberg size = bar_size - offset; 180257dacad5SJay Sternberg 18030f238ff5SLogan Gunthorpe if (pci_p2pdma_add_resource(pdev, bar, size, offset)) { 18040f238ff5SLogan Gunthorpe dev_warn(dev->ctrl.device, 18050f238ff5SLogan Gunthorpe "failed to register the CMB\n"); 1806f65efd6dSChristoph Hellwig return; 18070f238ff5SLogan Gunthorpe } 18080f238ff5SLogan Gunthorpe 180957dacad5SJay Sternberg dev->cmb_size = size; 18100f238ff5SLogan Gunthorpe dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS); 18110f238ff5SLogan Gunthorpe 18120f238ff5SLogan Gunthorpe if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) == 18130f238ff5SLogan Gunthorpe (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) 18140f238ff5SLogan Gunthorpe pci_p2pmem_publish(pdev, true); 1815f65efd6dSChristoph Hellwig 1816f65efd6dSChristoph Hellwig if (sysfs_add_file_to_group(&dev->ctrl.device->kobj, 1817f65efd6dSChristoph Hellwig &dev_attr_cmb.attr, NULL)) 1818f65efd6dSChristoph Hellwig dev_warn(dev->ctrl.device, 1819f65efd6dSChristoph Hellwig "failed to add sysfs attribute for CMB\n"); 182057dacad5SJay Sternberg } 182157dacad5SJay Sternberg 182257dacad5SJay Sternberg static inline void nvme_release_cmb(struct nvme_dev *dev) 182357dacad5SJay Sternberg { 18240f238ff5SLogan Gunthorpe if (dev->cmb_size) { 1825f63572dfSJon Derrick sysfs_remove_file_from_group(&dev->ctrl.device->kobj, 1826f63572dfSJon Derrick &dev_attr_cmb.attr, NULL); 18270f238ff5SLogan Gunthorpe dev->cmb_size = 0; 1828f63572dfSJon Derrick } 182957dacad5SJay Sternberg } 183057dacad5SJay Sternberg 183187ad72a5SChristoph Hellwig static int nvme_set_host_mem(struct nvme_dev *dev, u32 bits) 183257dacad5SJay Sternberg { 18336c3c05b0SChaitanya Kulkarni u32 host_mem_size = dev->host_mem_size >> NVME_CTRL_PAGE_SHIFT; 18344033f35dSChristoph Hellwig u64 dma_addr = dev->host_mem_descs_dma; 183587ad72a5SChristoph Hellwig struct nvme_command c; 183687ad72a5SChristoph Hellwig int ret; 183787ad72a5SChristoph Hellwig 183887ad72a5SChristoph Hellwig memset(&c, 0, sizeof(c)); 183987ad72a5SChristoph Hellwig c.features.opcode = nvme_admin_set_features; 184087ad72a5SChristoph Hellwig c.features.fid = cpu_to_le32(NVME_FEAT_HOST_MEM_BUF); 184187ad72a5SChristoph Hellwig c.features.dword11 = cpu_to_le32(bits); 18426c3c05b0SChaitanya Kulkarni c.features.dword12 = cpu_to_le32(host_mem_size); 184387ad72a5SChristoph Hellwig c.features.dword13 = cpu_to_le32(lower_32_bits(dma_addr)); 184487ad72a5SChristoph Hellwig c.features.dword14 = cpu_to_le32(upper_32_bits(dma_addr)); 184587ad72a5SChristoph Hellwig c.features.dword15 = cpu_to_le32(dev->nr_host_mem_descs); 184687ad72a5SChristoph Hellwig 184787ad72a5SChristoph Hellwig ret = nvme_submit_sync_cmd(dev->ctrl.admin_q, &c, NULL, 0); 184887ad72a5SChristoph Hellwig if (ret) { 184987ad72a5SChristoph Hellwig dev_warn(dev->ctrl.device, 185087ad72a5SChristoph Hellwig "failed to set host mem (err %d, flags %#x).\n", 185187ad72a5SChristoph Hellwig ret, bits); 185287ad72a5SChristoph Hellwig } 185387ad72a5SChristoph Hellwig return ret; 185487ad72a5SChristoph Hellwig } 185587ad72a5SChristoph Hellwig 185687ad72a5SChristoph Hellwig static void nvme_free_host_mem(struct nvme_dev *dev) 185787ad72a5SChristoph Hellwig { 185887ad72a5SChristoph Hellwig int i; 185987ad72a5SChristoph Hellwig 186087ad72a5SChristoph Hellwig for (i = 0; i < dev->nr_host_mem_descs; i++) { 186187ad72a5SChristoph Hellwig 
struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i]; 18626c3c05b0SChaitanya Kulkarni size_t size = le32_to_cpu(desc->size) * NVME_CTRL_PAGE_SIZE; 186387ad72a5SChristoph Hellwig 1864cc667f6dSLiviu Dudau dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i], 1865cc667f6dSLiviu Dudau le64_to_cpu(desc->addr), 1866cc667f6dSLiviu Dudau DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 186787ad72a5SChristoph Hellwig } 186887ad72a5SChristoph Hellwig 186987ad72a5SChristoph Hellwig kfree(dev->host_mem_desc_bufs); 187087ad72a5SChristoph Hellwig dev->host_mem_desc_bufs = NULL; 18714033f35dSChristoph Hellwig dma_free_coherent(dev->dev, 18724033f35dSChristoph Hellwig dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs), 18734033f35dSChristoph Hellwig dev->host_mem_descs, dev->host_mem_descs_dma); 187487ad72a5SChristoph Hellwig dev->host_mem_descs = NULL; 18757e5dd57eSMinwoo Im dev->nr_host_mem_descs = 0; 187687ad72a5SChristoph Hellwig } 187787ad72a5SChristoph Hellwig 187892dc6895SChristoph Hellwig static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred, 187992dc6895SChristoph Hellwig u32 chunk_size) 188087ad72a5SChristoph Hellwig { 188187ad72a5SChristoph Hellwig struct nvme_host_mem_buf_desc *descs; 188292dc6895SChristoph Hellwig u32 max_entries, len; 18834033f35dSChristoph Hellwig dma_addr_t descs_dma; 18842ee0e4edSDan Carpenter int i = 0; 188587ad72a5SChristoph Hellwig void **bufs; 18866fbcde66SMinwoo Im u64 size, tmp; 188787ad72a5SChristoph Hellwig 188887ad72a5SChristoph Hellwig tmp = (preferred + chunk_size - 1); 188987ad72a5SChristoph Hellwig do_div(tmp, chunk_size); 189087ad72a5SChristoph Hellwig max_entries = tmp; 1891044a9df1SChristoph Hellwig 1892044a9df1SChristoph Hellwig if (dev->ctrl.hmmaxd && dev->ctrl.hmmaxd < max_entries) 1893044a9df1SChristoph Hellwig max_entries = dev->ctrl.hmmaxd; 1894044a9df1SChristoph Hellwig 1895750afb08SLuis Chamberlain descs = dma_alloc_coherent(dev->dev, max_entries * sizeof(*descs), 18964033f35dSChristoph Hellwig &descs_dma, GFP_KERNEL); 189787ad72a5SChristoph Hellwig if (!descs) 189887ad72a5SChristoph Hellwig goto out; 189987ad72a5SChristoph Hellwig 190087ad72a5SChristoph Hellwig bufs = kcalloc(max_entries, sizeof(*bufs), GFP_KERNEL); 190187ad72a5SChristoph Hellwig if (!bufs) 190287ad72a5SChristoph Hellwig goto out_free_descs; 190387ad72a5SChristoph Hellwig 1904244a8fe4SMinwoo Im for (size = 0; size < preferred && i < max_entries; size += len) { 190587ad72a5SChristoph Hellwig dma_addr_t dma_addr; 190687ad72a5SChristoph Hellwig 190750cdb7c6SChristoph Hellwig len = min_t(u64, chunk_size, preferred - size); 190887ad72a5SChristoph Hellwig bufs[i] = dma_alloc_attrs(dev->dev, len, &dma_addr, GFP_KERNEL, 190987ad72a5SChristoph Hellwig DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 191087ad72a5SChristoph Hellwig if (!bufs[i]) 191187ad72a5SChristoph Hellwig break; 191287ad72a5SChristoph Hellwig 191387ad72a5SChristoph Hellwig descs[i].addr = cpu_to_le64(dma_addr); 19146c3c05b0SChaitanya Kulkarni descs[i].size = cpu_to_le32(len / NVME_CTRL_PAGE_SIZE); 191587ad72a5SChristoph Hellwig i++; 191687ad72a5SChristoph Hellwig } 191787ad72a5SChristoph Hellwig 191892dc6895SChristoph Hellwig if (!size) 191987ad72a5SChristoph Hellwig goto out_free_bufs; 192087ad72a5SChristoph Hellwig 192187ad72a5SChristoph Hellwig dev->nr_host_mem_descs = i; 192287ad72a5SChristoph Hellwig dev->host_mem_size = size; 192387ad72a5SChristoph Hellwig dev->host_mem_descs = descs; 19244033f35dSChristoph Hellwig dev->host_mem_descs_dma = descs_dma; 192587ad72a5SChristoph Hellwig 
dev->host_mem_desc_bufs = bufs; 192687ad72a5SChristoph Hellwig return 0; 192787ad72a5SChristoph Hellwig 192887ad72a5SChristoph Hellwig out_free_bufs: 192987ad72a5SChristoph Hellwig while (--i >= 0) { 19306c3c05b0SChaitanya Kulkarni size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE; 193187ad72a5SChristoph Hellwig 1932cc667f6dSLiviu Dudau dma_free_attrs(dev->dev, size, bufs[i], 1933cc667f6dSLiviu Dudau le64_to_cpu(descs[i].addr), 1934cc667f6dSLiviu Dudau DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN); 193587ad72a5SChristoph Hellwig } 193687ad72a5SChristoph Hellwig 193787ad72a5SChristoph Hellwig kfree(bufs); 193887ad72a5SChristoph Hellwig out_free_descs: 19394033f35dSChristoph Hellwig dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs, 19404033f35dSChristoph Hellwig descs_dma); 194187ad72a5SChristoph Hellwig out: 194287ad72a5SChristoph Hellwig dev->host_mem_descs = NULL; 194387ad72a5SChristoph Hellwig return -ENOMEM; 194487ad72a5SChristoph Hellwig } 194587ad72a5SChristoph Hellwig 194692dc6895SChristoph Hellwig static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) 194792dc6895SChristoph Hellwig { 19489dc54a0dSChaitanya Kulkarni u64 min_chunk = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES); 19499dc54a0dSChaitanya Kulkarni u64 hmminds = max_t(u32, dev->ctrl.hmminds * 4096, PAGE_SIZE * 2); 19509dc54a0dSChaitanya Kulkarni u64 chunk_size; 195192dc6895SChristoph Hellwig 195292dc6895SChristoph Hellwig /* start big and work our way down */ 19539dc54a0dSChaitanya Kulkarni for (chunk_size = min_chunk; chunk_size >= hmminds; chunk_size /= 2) { 195492dc6895SChristoph Hellwig if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) { 195592dc6895SChristoph Hellwig if (!min || dev->host_mem_size >= min) 195692dc6895SChristoph Hellwig return 0; 195792dc6895SChristoph Hellwig nvme_free_host_mem(dev); 195892dc6895SChristoph Hellwig } 195992dc6895SChristoph Hellwig } 196092dc6895SChristoph Hellwig 196192dc6895SChristoph Hellwig return -ENOMEM; 196292dc6895SChristoph Hellwig } 196392dc6895SChristoph Hellwig 19649620cfbaSChristoph Hellwig static int nvme_setup_host_mem(struct nvme_dev *dev) 196587ad72a5SChristoph Hellwig { 196687ad72a5SChristoph Hellwig u64 max = (u64)max_host_mem_size_mb * SZ_1M; 196787ad72a5SChristoph Hellwig u64 preferred = (u64)dev->ctrl.hmpre * 4096; 196887ad72a5SChristoph Hellwig u64 min = (u64)dev->ctrl.hmmin * 4096; 196987ad72a5SChristoph Hellwig u32 enable_bits = NVME_HOST_MEM_ENABLE; 19706fbcde66SMinwoo Im int ret; 197187ad72a5SChristoph Hellwig 197287ad72a5SChristoph Hellwig preferred = min(preferred, max); 197387ad72a5SChristoph Hellwig if (min > max) { 197487ad72a5SChristoph Hellwig dev_warn(dev->ctrl.device, 197587ad72a5SChristoph Hellwig "min host memory (%lld MiB) above limit (%d MiB).\n", 197687ad72a5SChristoph Hellwig min >> ilog2(SZ_1M), max_host_mem_size_mb); 197787ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 19789620cfbaSChristoph Hellwig return 0; 197987ad72a5SChristoph Hellwig } 198087ad72a5SChristoph Hellwig 198187ad72a5SChristoph Hellwig /* 198287ad72a5SChristoph Hellwig * If we already have a buffer allocated check if we can reuse it. 
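	 * A buffer that still meets the controller's minimum size is handed
	 * back with NVME_HOST_MEM_RETURN; otherwise it is freed and a new
	 * one is allocated below.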
198387ad72a5SChristoph Hellwig */ 198487ad72a5SChristoph Hellwig if (dev->host_mem_descs) { 198587ad72a5SChristoph Hellwig if (dev->host_mem_size >= min) 198687ad72a5SChristoph Hellwig enable_bits |= NVME_HOST_MEM_RETURN; 198787ad72a5SChristoph Hellwig else 198887ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 198987ad72a5SChristoph Hellwig } 199087ad72a5SChristoph Hellwig 199187ad72a5SChristoph Hellwig if (!dev->host_mem_descs) { 199292dc6895SChristoph Hellwig if (nvme_alloc_host_mem(dev, min, preferred)) { 199392dc6895SChristoph Hellwig dev_warn(dev->ctrl.device, 199492dc6895SChristoph Hellwig "failed to allocate host memory buffer.\n"); 19959620cfbaSChristoph Hellwig return 0; /* controller must work without HMB */ 199687ad72a5SChristoph Hellwig } 199787ad72a5SChristoph Hellwig 199892dc6895SChristoph Hellwig dev_info(dev->ctrl.device, 199992dc6895SChristoph Hellwig "allocated %lld MiB host memory buffer.\n", 200092dc6895SChristoph Hellwig dev->host_mem_size >> ilog2(SZ_1M)); 200192dc6895SChristoph Hellwig } 200292dc6895SChristoph Hellwig 20039620cfbaSChristoph Hellwig ret = nvme_set_host_mem(dev, enable_bits); 20049620cfbaSChristoph Hellwig if (ret) 200587ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 20069620cfbaSChristoph Hellwig return ret; 200757dacad5SJay Sternberg } 200857dacad5SJay Sternberg 2009612b7286SMing Lei /* 2010612b7286SMing Lei * nirqs is the number of interrupts available for write and read 2011612b7286SMing Lei * queues. The core already reserved an interrupt for the admin queue. 2012612b7286SMing Lei */ 2013612b7286SMing Lei static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs) 20143b6592f7SJens Axboe { 2015612b7286SMing Lei struct nvme_dev *dev = affd->priv; 20162a5bcfddSWeiping Zhang unsigned int nr_read_queues, nr_write_queues = dev->nr_write_queues; 2017c45b1fa2SMing Lei 20183b6592f7SJens Axboe /* 2019ee0d96d3SBaolin Wang * If there is no interrupt available for queues, ensure that 2020612b7286SMing Lei * the default queue is set to 1. The affinity set size is 2021612b7286SMing Lei * also set to one, but the irq core ignores it for this case. 2022612b7286SMing Lei * 2023612b7286SMing Lei * If only one interrupt is available or 'write_queue' == 0, combine 2024612b7286SMing Lei * write and read queues. 2025612b7286SMing Lei * 2026612b7286SMing Lei * If 'write_queues' > 0, ensure it leaves room for at least one read 2027612b7286SMing Lei * queue. 20283b6592f7SJens Axboe */ 2029612b7286SMing Lei if (!nrirqs) { 2030612b7286SMing Lei nrirqs = 1; 2031612b7286SMing Lei nr_read_queues = 0; 20322a5bcfddSWeiping Zhang } else if (nrirqs == 1 || !nr_write_queues) { 2033612b7286SMing Lei nr_read_queues = 0; 20342a5bcfddSWeiping Zhang } else if (nr_write_queues >= nrirqs) { 2035612b7286SMing Lei nr_read_queues = 1; 20363b6592f7SJens Axboe } else { 20372a5bcfddSWeiping Zhang nr_read_queues = nrirqs - nr_write_queues; 20383b6592f7SJens Axboe } 2039612b7286SMing Lei 2040612b7286SMing Lei dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; 2041612b7286SMing Lei affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues; 2042612b7286SMing Lei dev->io_queues[HCTX_TYPE_READ] = nr_read_queues; 2043612b7286SMing Lei affd->set_size[HCTX_TYPE_READ] = nr_read_queues; 2044612b7286SMing Lei affd->nr_sets = nr_read_queues ? 
2 : 1; 20453b6592f7SJens Axboe } 20463b6592f7SJens Axboe 20476451fe73SJens Axboe static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues) 20483b6592f7SJens Axboe { 20493b6592f7SJens Axboe struct pci_dev *pdev = to_pci_dev(dev->dev); 20503b6592f7SJens Axboe struct irq_affinity affd = { 20513b6592f7SJens Axboe .pre_vectors = 1, 2052612b7286SMing Lei .calc_sets = nvme_calc_irq_sets, 2053612b7286SMing Lei .priv = dev, 20543b6592f7SJens Axboe }; 205521cc2f3fSJeffle Xu unsigned int irq_queues, poll_queues; 20566451fe73SJens Axboe 20576451fe73SJens Axboe /* 205821cc2f3fSJeffle Xu * Poll queues don't need interrupts, but we need at least one I/O queue 205921cc2f3fSJeffle Xu * left over for non-polled I/O. 20606451fe73SJens Axboe */ 206121cc2f3fSJeffle Xu poll_queues = min(dev->nr_poll_queues, nr_io_queues - 1); 206221cc2f3fSJeffle Xu dev->io_queues[HCTX_TYPE_POLL] = poll_queues; 20633b6592f7SJens Axboe 206421cc2f3fSJeffle Xu /* 206521cc2f3fSJeffle Xu * Initialize for the single interrupt case, will be updated in 206621cc2f3fSJeffle Xu * nvme_calc_irq_sets(). 206721cc2f3fSJeffle Xu */ 2068612b7286SMing Lei dev->io_queues[HCTX_TYPE_DEFAULT] = 1; 2069612b7286SMing Lei dev->io_queues[HCTX_TYPE_READ] = 0; 20703b6592f7SJens Axboe 207166341331SBenjamin Herrenschmidt /* 207221cc2f3fSJeffle Xu * We need interrupts for the admin queue and each non-polled I/O queue, 207321cc2f3fSJeffle Xu * but some Apple controllers require all queues to use the first 207421cc2f3fSJeffle Xu * vector. 207566341331SBenjamin Herrenschmidt */ 207666341331SBenjamin Herrenschmidt irq_queues = 1; 207721cc2f3fSJeffle Xu if (!(dev->ctrl.quirks & NVME_QUIRK_SINGLE_VECTOR)) 207821cc2f3fSJeffle Xu irq_queues += (nr_io_queues - poll_queues); 2079612b7286SMing Lei return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues, 20803b6592f7SJens Axboe PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd); 20813b6592f7SJens Axboe } 20823b6592f7SJens Axboe 20838fae268bSKeith Busch static void nvme_disable_io_queues(struct nvme_dev *dev) 20848fae268bSKeith Busch { 20858fae268bSKeith Busch if (__nvme_disable_io_queues(dev, nvme_admin_delete_sq)) 20868fae268bSKeith Busch __nvme_disable_io_queues(dev, nvme_admin_delete_cq); 20878fae268bSKeith Busch } 20888fae268bSKeith Busch 20892a5bcfddSWeiping Zhang static unsigned int nvme_max_io_queues(struct nvme_dev *dev) 20902a5bcfddSWeiping Zhang { 20912a5bcfddSWeiping Zhang return num_possible_cpus() + dev->nr_write_queues + dev->nr_poll_queues; 20922a5bcfddSWeiping Zhang } 20932a5bcfddSWeiping Zhang 209457dacad5SJay Sternberg static int nvme_setup_io_queues(struct nvme_dev *dev) 209557dacad5SJay Sternberg { 2096147b27e4SSagi Grimberg struct nvme_queue *adminq = &dev->queues[0]; 209757dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 20982a5bcfddSWeiping Zhang unsigned int nr_io_queues; 209997f6ef64SXu Yu unsigned long size; 21002a5bcfddSWeiping Zhang int result; 210157dacad5SJay Sternberg 21022a5bcfddSWeiping Zhang /* 21032a5bcfddSWeiping Zhang * Sample the module parameters once at reset time so that we have 21042a5bcfddSWeiping Zhang * stable values to work with. 21052a5bcfddSWeiping Zhang */ 21062a5bcfddSWeiping Zhang dev->nr_write_queues = write_queues; 21072a5bcfddSWeiping Zhang dev->nr_poll_queues = poll_queues; 2108d38e9f04SBenjamin Herrenschmidt 2109d38e9f04SBenjamin Herrenschmidt /* 2110d38e9f04SBenjamin Herrenschmidt * If tags are shared with admin queue (Apple bug), then 2111d38e9f04SBenjamin Herrenschmidt * make sure we only use one IO queue. 
2112d38e9f04SBenjamin Herrenschmidt */ 2113d38e9f04SBenjamin Herrenschmidt if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) 2114d38e9f04SBenjamin Herrenschmidt nr_io_queues = 1; 21152a5bcfddSWeiping Zhang else 21162a5bcfddSWeiping Zhang nr_io_queues = min(nvme_max_io_queues(dev), 21172a5bcfddSWeiping Zhang dev->nr_allocated_queues - 1); 2118d38e9f04SBenjamin Herrenschmidt 21199a0be7abSChristoph Hellwig result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues); 21209a0be7abSChristoph Hellwig if (result < 0) 212157dacad5SJay Sternberg return result; 21229a0be7abSChristoph Hellwig 2123f5fa90dcSChristoph Hellwig if (nr_io_queues == 0) 2124a5229050SKeith Busch return 0; 212557dacad5SJay Sternberg 21264e224106SChristoph Hellwig clear_bit(NVMEQ_ENABLED, &adminq->flags); 21274e224106SChristoph Hellwig 21280f238ff5SLogan Gunthorpe if (dev->cmb_use_sqes) { 212957dacad5SJay Sternberg result = nvme_cmb_qdepth(dev, nr_io_queues, 213057dacad5SJay Sternberg sizeof(struct nvme_command)); 213157dacad5SJay Sternberg if (result > 0) 213257dacad5SJay Sternberg dev->q_depth = result; 213357dacad5SJay Sternberg else 21340f238ff5SLogan Gunthorpe dev->cmb_use_sqes = false; 213557dacad5SJay Sternberg } 213657dacad5SJay Sternberg 213757dacad5SJay Sternberg do { 213897f6ef64SXu Yu size = db_bar_size(dev, nr_io_queues); 213997f6ef64SXu Yu result = nvme_remap_bar(dev, size); 214097f6ef64SXu Yu if (!result) 214157dacad5SJay Sternberg break; 214257dacad5SJay Sternberg if (!--nr_io_queues) 214357dacad5SJay Sternberg return -ENOMEM; 214457dacad5SJay Sternberg } while (1); 214557dacad5SJay Sternberg adminq->q_db = dev->dbs; 214657dacad5SJay Sternberg 21478fae268bSKeith Busch retry: 214857dacad5SJay Sternberg /* Deregister the admin queue's interrupt */ 21490ff199cbSChristoph Hellwig pci_free_irq(pdev, 0, adminq); 215057dacad5SJay Sternberg 215157dacad5SJay Sternberg /* 215257dacad5SJay Sternberg * If we enable msix early due to not intx, disable it again before 215357dacad5SJay Sternberg * setting up the full range we need. 215457dacad5SJay Sternberg */ 2155dca51e78SChristoph Hellwig pci_free_irq_vectors(pdev); 21563b6592f7SJens Axboe 21573b6592f7SJens Axboe result = nvme_setup_irqs(dev, nr_io_queues); 215822b55601SKeith Busch if (result <= 0) 2159dca51e78SChristoph Hellwig return -EIO; 21603b6592f7SJens Axboe 216122b55601SKeith Busch dev->num_vecs = result; 21624b04cc6aSJens Axboe result = max(result - 1, 1); 2163e20ba6e1SChristoph Hellwig dev->max_qid = result + dev->io_queues[HCTX_TYPE_POLL]; 216457dacad5SJay Sternberg 216557dacad5SJay Sternberg /* 216657dacad5SJay Sternberg * Should investigate if there's a performance win from allocating 216757dacad5SJay Sternberg * more queues than interrupt vectors; it might allow the submission 216857dacad5SJay Sternberg * path to scale better, even if the receive path is limited by the 216957dacad5SJay Sternberg * number of interrupts. 
217057dacad5SJay Sternberg */ 2171dca51e78SChristoph Hellwig result = queue_request_irq(adminq); 21727c349ddeSKeith Busch if (result) 2173d4875622SKeith Busch return result; 21744e224106SChristoph Hellwig set_bit(NVMEQ_ENABLED, &adminq->flags); 21758fae268bSKeith Busch 21768fae268bSKeith Busch result = nvme_create_io_queues(dev); 21778fae268bSKeith Busch if (result || dev->online_queues < 2) 21788fae268bSKeith Busch return result; 21798fae268bSKeith Busch 21808fae268bSKeith Busch if (dev->online_queues - 1 < dev->max_qid) { 21818fae268bSKeith Busch nr_io_queues = dev->online_queues - 1; 21828fae268bSKeith Busch nvme_disable_io_queues(dev); 21838fae268bSKeith Busch nvme_suspend_io_queues(dev); 21848fae268bSKeith Busch goto retry; 21858fae268bSKeith Busch } 21868fae268bSKeith Busch dev_info(dev->ctrl.device, "%d/%d/%d default/read/poll queues\n", 21878fae268bSKeith Busch dev->io_queues[HCTX_TYPE_DEFAULT], 21888fae268bSKeith Busch dev->io_queues[HCTX_TYPE_READ], 21898fae268bSKeith Busch dev->io_queues[HCTX_TYPE_POLL]); 21908fae268bSKeith Busch return 0; 219157dacad5SJay Sternberg } 219257dacad5SJay Sternberg 21932a842acaSChristoph Hellwig static void nvme_del_queue_end(struct request *req, blk_status_t error) 2194db3cbfffSKeith Busch { 2195db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 2196db3cbfffSKeith Busch 2197db3cbfffSKeith Busch blk_mq_free_request(req); 2198d1ed6aa1SChristoph Hellwig complete(&nvmeq->delete_done); 2199db3cbfffSKeith Busch } 2200db3cbfffSKeith Busch 22012a842acaSChristoph Hellwig static void nvme_del_cq_end(struct request *req, blk_status_t error) 2202db3cbfffSKeith Busch { 2203db3cbfffSKeith Busch struct nvme_queue *nvmeq = req->end_io_data; 2204db3cbfffSKeith Busch 2205d1ed6aa1SChristoph Hellwig if (error) 2206d1ed6aa1SChristoph Hellwig set_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags); 2207db3cbfffSKeith Busch 2208db3cbfffSKeith Busch nvme_del_queue_end(req, error); 2209db3cbfffSKeith Busch } 2210db3cbfffSKeith Busch 2211db3cbfffSKeith Busch static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) 2212db3cbfffSKeith Busch { 2213db3cbfffSKeith Busch struct request_queue *q = nvmeq->dev->ctrl.admin_q; 2214db3cbfffSKeith Busch struct request *req; 2215db3cbfffSKeith Busch struct nvme_command cmd; 2216db3cbfffSKeith Busch 2217db3cbfffSKeith Busch memset(&cmd, 0, sizeof(cmd)); 2218db3cbfffSKeith Busch cmd.delete_queue.opcode = opcode; 2219db3cbfffSKeith Busch cmd.delete_queue.qid = cpu_to_le16(nvmeq->qid); 2220db3cbfffSKeith Busch 2221eb71f435SChristoph Hellwig req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY); 2222db3cbfffSKeith Busch if (IS_ERR(req)) 2223db3cbfffSKeith Busch return PTR_ERR(req); 2224db3cbfffSKeith Busch 2225db3cbfffSKeith Busch req->end_io_data = nvmeq; 2226db3cbfffSKeith Busch 2227d1ed6aa1SChristoph Hellwig init_completion(&nvmeq->delete_done); 2228db3cbfffSKeith Busch blk_execute_rq_nowait(q, NULL, req, false, 2229db3cbfffSKeith Busch opcode == nvme_admin_delete_cq ? 
2230db3cbfffSKeith Busch nvme_del_cq_end : nvme_del_queue_end); 2231db3cbfffSKeith Busch return 0; 2232db3cbfffSKeith Busch } 2233db3cbfffSKeith Busch 22348fae268bSKeith Busch static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode) 2235db3cbfffSKeith Busch { 22365271edd4SChristoph Hellwig int nr_queues = dev->online_queues - 1, sent = 0; 2237db3cbfffSKeith Busch unsigned long timeout; 2238db3cbfffSKeith Busch 2239db3cbfffSKeith Busch retry: 2240*dc96f938SChaitanya Kulkarni timeout = NVME_ADMIN_TIMEOUT; 22415271edd4SChristoph Hellwig while (nr_queues > 0) { 22425271edd4SChristoph Hellwig if (nvme_delete_queue(&dev->queues[nr_queues], opcode)) 2243db3cbfffSKeith Busch break; 22445271edd4SChristoph Hellwig nr_queues--; 22455271edd4SChristoph Hellwig sent++; 22465271edd4SChristoph Hellwig } 2247d1ed6aa1SChristoph Hellwig while (sent) { 2248d1ed6aa1SChristoph Hellwig struct nvme_queue *nvmeq = &dev->queues[nr_queues + sent]; 2249d1ed6aa1SChristoph Hellwig 2250d1ed6aa1SChristoph Hellwig timeout = wait_for_completion_io_timeout(&nvmeq->delete_done, 22515271edd4SChristoph Hellwig timeout); 2252db3cbfffSKeith Busch if (timeout == 0) 22535271edd4SChristoph Hellwig return false; 2254d1ed6aa1SChristoph Hellwig 2255d1ed6aa1SChristoph Hellwig sent--; 22565271edd4SChristoph Hellwig if (nr_queues) 2257db3cbfffSKeith Busch goto retry; 2258db3cbfffSKeith Busch } 22595271edd4SChristoph Hellwig return true; 2260db3cbfffSKeith Busch } 2261db3cbfffSKeith Busch 22625d02a5c1SKeith Busch static void nvme_dev_add(struct nvme_dev *dev) 226357dacad5SJay Sternberg { 22642b1b7e78SJianchao Wang int ret; 22652b1b7e78SJianchao Wang 22665bae7f73SChristoph Hellwig if (!dev->ctrl.tagset) { 2267c6d962aeSChristoph Hellwig dev->tagset.ops = &nvme_mq_ops; 226857dacad5SJay Sternberg dev->tagset.nr_hw_queues = dev->online_queues - 1; 22698fe34be1Syangerkun dev->tagset.nr_maps = 2; /* default + read */ 2270ed92ad37SChristoph Hellwig if (dev->io_queues[HCTX_TYPE_POLL]) 2271ed92ad37SChristoph Hellwig dev->tagset.nr_maps++; 227257dacad5SJay Sternberg dev->tagset.timeout = NVME_IO_TIMEOUT; 2273d4ec47f1SMax Gurtovoy dev->tagset.numa_node = dev->ctrl.numa_node; 227461f3b896SChaitanya Kulkarni dev->tagset.queue_depth = min_t(unsigned int, dev->q_depth, 227561f3b896SChaitanya Kulkarni BLK_MQ_MAX_DEPTH) - 1; 2276d43f1ccfSChristoph Hellwig dev->tagset.cmd_size = sizeof(struct nvme_iod); 227757dacad5SJay Sternberg dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE; 227857dacad5SJay Sternberg dev->tagset.driver_data = dev; 227957dacad5SJay Sternberg 2280d38e9f04SBenjamin Herrenschmidt /* 2281d38e9f04SBenjamin Herrenschmidt * Some Apple controllers requires tags to be unique 2282d38e9f04SBenjamin Herrenschmidt * across admin and IO queue, so reserve the first 32 2283d38e9f04SBenjamin Herrenschmidt * tags of the IO queue. 
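		 * (done just below by setting reserved_tags to NVME_AQ_DEPTH)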
2284d38e9f04SBenjamin Herrenschmidt */ 2285d38e9f04SBenjamin Herrenschmidt if (dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) 2286d38e9f04SBenjamin Herrenschmidt dev->tagset.reserved_tags = NVME_AQ_DEPTH; 2287d38e9f04SBenjamin Herrenschmidt 22882b1b7e78SJianchao Wang ret = blk_mq_alloc_tag_set(&dev->tagset); 22892b1b7e78SJianchao Wang if (ret) { 22902b1b7e78SJianchao Wang dev_warn(dev->ctrl.device, 22912b1b7e78SJianchao Wang "IO queues tagset allocation failed %d\n", ret); 22925d02a5c1SKeith Busch return; 22932b1b7e78SJianchao Wang } 22945bae7f73SChristoph Hellwig dev->ctrl.tagset = &dev->tagset; 2295949928c1SKeith Busch } else { 2296949928c1SKeith Busch blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1); 2297949928c1SKeith Busch 2298949928c1SKeith Busch /* Free previously allocated queues that are no longer usable */ 2299949928c1SKeith Busch nvme_free_queues(dev, dev->online_queues); 230057dacad5SJay Sternberg } 2301949928c1SKeith Busch 2302e8fd41bbSMaxim Levitsky nvme_dbbuf_set(dev); 230357dacad5SJay Sternberg } 230457dacad5SJay Sternberg 2305b00a726aSKeith Busch static int nvme_pci_enable(struct nvme_dev *dev) 230657dacad5SJay Sternberg { 2307b00a726aSKeith Busch int result = -ENOMEM; 230857dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 230957dacad5SJay Sternberg 231057dacad5SJay Sternberg if (pci_enable_device_mem(pdev)) 231157dacad5SJay Sternberg return result; 231257dacad5SJay Sternberg 231357dacad5SJay Sternberg pci_set_master(pdev); 231457dacad5SJay Sternberg 23154fe06923SChristoph Hellwig if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64))) 231657dacad5SJay Sternberg goto disable; 231757dacad5SJay Sternberg 23187a67cbeaSChristoph Hellwig if (readl(dev->bar + NVME_REG_CSTS) == -1) { 231957dacad5SJay Sternberg result = -ENODEV; 2320b00a726aSKeith Busch goto disable; 232157dacad5SJay Sternberg } 232257dacad5SJay Sternberg 232357dacad5SJay Sternberg /* 2324a5229050SKeith Busch * Some devices and/or platforms don't advertise or work with INTx 2325a5229050SKeith Busch * interrupts. Pre-enable a single MSIX or MSI vec for setup. We'll 2326a5229050SKeith Busch * adjust this later. 232757dacad5SJay Sternberg */ 2328dca51e78SChristoph Hellwig result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); 2329dca51e78SChristoph Hellwig if (result < 0) 2330dca51e78SChristoph Hellwig return result; 233157dacad5SJay Sternberg 233220d0dfe6SSagi Grimberg dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 23337a67cbeaSChristoph Hellwig 23347442ddceSJohn Garry dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1, 2335b27c1e68Sweiping zhang io_queue_depth); 2336aa22c8e6SSagi Grimberg dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */ 233720d0dfe6SSagi Grimberg dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); 23387a67cbeaSChristoph Hellwig dev->dbs = dev->bar + 4096; 23391f390c1fSStephan Günther 23401f390c1fSStephan Günther /* 234166341331SBenjamin Herrenschmidt * Some Apple controllers require a non-standard SQE size. 234266341331SBenjamin Herrenschmidt * Interestingly they also seem to ignore the CC:IOSQES register 234366341331SBenjamin Herrenschmidt * so we don't bother updating it here. 
234466341331SBenjamin Herrenschmidt */ 234566341331SBenjamin Herrenschmidt if (dev->ctrl.quirks & NVME_QUIRK_128_BYTES_SQES) 234666341331SBenjamin Herrenschmidt dev->io_sqes = 7; 234766341331SBenjamin Herrenschmidt else 2348c1e0cc7eSBenjamin Herrenschmidt dev->io_sqes = NVME_NVM_IOSQES; 23491f390c1fSStephan Günther 23501f390c1fSStephan Günther /* 23511f390c1fSStephan Günther * Temporary fix for the Apple controller found in the MacBook8,1 and 23521f390c1fSStephan Günther * some MacBook7,1 to avoid controller resets and data loss. 23531f390c1fSStephan Günther */ 23541f390c1fSStephan Günther if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) { 23551f390c1fSStephan Günther dev->q_depth = 2; 23569bdcfb10SChristoph Hellwig dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " 23579bdcfb10SChristoph Hellwig "set queue depth=%u to work around controller resets\n", 23581f390c1fSStephan Günther dev->q_depth); 2359d554b5e1SMartin K. Petersen } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && 2360d554b5e1SMartin K. Petersen (pdev->device == 0xa821 || pdev->device == 0xa822) && 236120d0dfe6SSagi Grimberg NVME_CAP_MQES(dev->ctrl.cap) == 0) { 2362d554b5e1SMartin K. Petersen dev->q_depth = 64; 2363d554b5e1SMartin K. Petersen dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " 2364d554b5e1SMartin K. Petersen "set queue depth=%u\n", dev->q_depth); 23651f390c1fSStephan Günther } 23661f390c1fSStephan Günther 2367d38e9f04SBenjamin Herrenschmidt /* 2368d38e9f04SBenjamin Herrenschmidt * Controllers with the shared tags quirk need the IO queue to be 2369d38e9f04SBenjamin Herrenschmidt * big enough so that we get 32 tags for the admin queue 2370d38e9f04SBenjamin Herrenschmidt */ 2371d38e9f04SBenjamin Herrenschmidt if ((dev->ctrl.quirks & NVME_QUIRK_SHARED_TAGS) && 2372d38e9f04SBenjamin Herrenschmidt (dev->q_depth < (NVME_AQ_DEPTH + 2))) { 2373d38e9f04SBenjamin Herrenschmidt dev->q_depth = NVME_AQ_DEPTH + 2; 2374d38e9f04SBenjamin Herrenschmidt dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n", 2375d38e9f04SBenjamin Herrenschmidt dev->q_depth); 2376d38e9f04SBenjamin Herrenschmidt } 2377d38e9f04SBenjamin Herrenschmidt 2378d38e9f04SBenjamin Herrenschmidt 2379f65efd6dSChristoph Hellwig nvme_map_cmb(dev); 2380202021c1SStephen Bates 2381a0a3408eSKeith Busch pci_enable_pcie_error_reporting(pdev); 2382a0a3408eSKeith Busch pci_save_state(pdev); 238357dacad5SJay Sternberg return 0; 238457dacad5SJay Sternberg 238557dacad5SJay Sternberg disable: 238657dacad5SJay Sternberg pci_disable_device(pdev); 238757dacad5SJay Sternberg return result; 238857dacad5SJay Sternberg } 238957dacad5SJay Sternberg 239057dacad5SJay Sternberg static void nvme_dev_unmap(struct nvme_dev *dev) 239157dacad5SJay Sternberg { 2392b00a726aSKeith Busch if (dev->bar) 2393b00a726aSKeith Busch iounmap(dev->bar); 2394a1f447b3SJohannes Thumshirn pci_release_mem_regions(to_pci_dev(dev->dev)); 2395b00a726aSKeith Busch } 2396b00a726aSKeith Busch 2397b00a726aSKeith Busch static void nvme_pci_disable(struct nvme_dev *dev) 2398b00a726aSKeith Busch { 239957dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 240057dacad5SJay Sternberg 2401dca51e78SChristoph Hellwig pci_free_irq_vectors(pdev); 240257dacad5SJay Sternberg 2403a0a3408eSKeith Busch if (pci_is_enabled(pdev)) { 2404a0a3408eSKeith Busch pci_disable_pcie_error_reporting(pdev); 240557dacad5SJay Sternberg pci_disable_device(pdev); 240657dacad5SJay Sternberg } 2407a0a3408eSKeith Busch } 240857dacad5SJay Sternberg 2409a5cdb68cSKeith Busch static void 
nvme_dev_disable(struct nvme_dev *dev, bool shutdown) 241057dacad5SJay Sternberg { 2411e43269e6SKeith Busch bool dead = true, freeze = false; 2412302ad8ccSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 241357dacad5SJay Sternberg 241477bf25eaSKeith Busch mutex_lock(&dev->shutdown_lock); 2415302ad8ccSKeith Busch if (pci_is_enabled(pdev)) { 2416302ad8ccSKeith Busch u32 csts = readl(dev->bar + NVME_REG_CSTS); 2417302ad8ccSKeith Busch 2418ebef7368SKeith Busch if (dev->ctrl.state == NVME_CTRL_LIVE || 2419e43269e6SKeith Busch dev->ctrl.state == NVME_CTRL_RESETTING) { 2420e43269e6SKeith Busch freeze = true; 2421302ad8ccSKeith Busch nvme_start_freeze(&dev->ctrl); 2422e43269e6SKeith Busch } 2423302ad8ccSKeith Busch dead = !!((csts & NVME_CSTS_CFS) || !(csts & NVME_CSTS_RDY) || 2424302ad8ccSKeith Busch pdev->error_state != pci_channel_io_normal); 242557dacad5SJay Sternberg } 2426c21377f8SGabriel Krisman Bertazi 2427302ad8ccSKeith Busch /* 2428302ad8ccSKeith Busch * Give the controller a chance to complete all entered requests if 2429302ad8ccSKeith Busch * doing a safe shutdown. 2430302ad8ccSKeith Busch */ 2431e43269e6SKeith Busch if (!dead && shutdown && freeze) 2432302ad8ccSKeith Busch nvme_wait_freeze_timeout(&dev->ctrl, NVME_IO_TIMEOUT); 243387ad72a5SChristoph Hellwig 24349a915a5bSJianchao Wang nvme_stop_queues(&dev->ctrl); 24359a915a5bSJianchao Wang 243664ee0ac0SKeith Busch if (!dead && dev->ctrl.queue_count > 0) { 24378fae268bSKeith Busch nvme_disable_io_queues(dev); 2438a5cdb68cSKeith Busch nvme_disable_admin_queue(dev, shutdown); 243957dacad5SJay Sternberg } 24408fae268bSKeith Busch nvme_suspend_io_queues(dev); 24418fae268bSKeith Busch nvme_suspend_queue(&dev->queues[0]); 2442b00a726aSKeith Busch nvme_pci_disable(dev); 2443fa46c6fbSKeith Busch nvme_reap_pending_cqes(dev); 244457dacad5SJay Sternberg 2445e1958e65SMing Lin blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl); 2446e1958e65SMing Lin blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl); 2447622b8b68SMing Lei blk_mq_tagset_wait_completed_request(&dev->tagset); 2448622b8b68SMing Lei blk_mq_tagset_wait_completed_request(&dev->admin_tagset); 2449302ad8ccSKeith Busch 2450302ad8ccSKeith Busch /* 2451302ad8ccSKeith Busch * The driver will not be starting up queues again if shutting down so 2452302ad8ccSKeith Busch * must flush all entered requests to their failed completion to avoid 2453302ad8ccSKeith Busch * deadlocking blk-mq hot-cpu notifier. 
2454302ad8ccSKeith Busch */ 2455c8e9e9b7SKeith Busch if (shutdown) { 2456302ad8ccSKeith Busch nvme_start_queues(&dev->ctrl); 2457c8e9e9b7SKeith Busch if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) 2458c8e9e9b7SKeith Busch blk_mq_unquiesce_queue(dev->ctrl.admin_q); 2459c8e9e9b7SKeith Busch } 246077bf25eaSKeith Busch mutex_unlock(&dev->shutdown_lock); 246157dacad5SJay Sternberg } 246257dacad5SJay Sternberg 2463c1ac9a4bSKeith Busch static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown) 2464c1ac9a4bSKeith Busch { 2465c1ac9a4bSKeith Busch if (!nvme_wait_reset(&dev->ctrl)) 2466c1ac9a4bSKeith Busch return -EBUSY; 2467c1ac9a4bSKeith Busch nvme_dev_disable(dev, shutdown); 2468c1ac9a4bSKeith Busch return 0; 2469c1ac9a4bSKeith Busch } 2470c1ac9a4bSKeith Busch 247157dacad5SJay Sternberg static int nvme_setup_prp_pools(struct nvme_dev *dev) 247257dacad5SJay Sternberg { 247357dacad5SJay Sternberg dev->prp_page_pool = dma_pool_create("prp list page", dev->dev, 2474c61b82c7SChristoph Hellwig NVME_CTRL_PAGE_SIZE, 2475c61b82c7SChristoph Hellwig NVME_CTRL_PAGE_SIZE, 0); 247657dacad5SJay Sternberg if (!dev->prp_page_pool) 247757dacad5SJay Sternberg return -ENOMEM; 247857dacad5SJay Sternberg 247957dacad5SJay Sternberg /* Optimisation for I/Os between 4k and 128k */ 248057dacad5SJay Sternberg dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev, 248157dacad5SJay Sternberg 256, 256, 0); 248257dacad5SJay Sternberg if (!dev->prp_small_pool) { 248357dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 248457dacad5SJay Sternberg return -ENOMEM; 248557dacad5SJay Sternberg } 248657dacad5SJay Sternberg return 0; 248757dacad5SJay Sternberg } 248857dacad5SJay Sternberg 248957dacad5SJay Sternberg static void nvme_release_prp_pools(struct nvme_dev *dev) 249057dacad5SJay Sternberg { 249157dacad5SJay Sternberg dma_pool_destroy(dev->prp_page_pool); 249257dacad5SJay Sternberg dma_pool_destroy(dev->prp_small_pool); 249357dacad5SJay Sternberg } 249457dacad5SJay Sternberg 2495770597ecSKeith Busch static void nvme_free_tagset(struct nvme_dev *dev) 2496770597ecSKeith Busch { 2497770597ecSKeith Busch if (dev->tagset.tags) 2498770597ecSKeith Busch blk_mq_free_tag_set(&dev->tagset); 2499770597ecSKeith Busch dev->ctrl.tagset = NULL; 2500770597ecSKeith Busch } 2501770597ecSKeith Busch 25021673f1f0SChristoph Hellwig static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl) 250357dacad5SJay Sternberg { 25041673f1f0SChristoph Hellwig struct nvme_dev *dev = to_nvme_dev(ctrl); 250557dacad5SJay Sternberg 2506f9f38e33SHelen Koike nvme_dbbuf_dma_free(dev); 2507770597ecSKeith Busch nvme_free_tagset(dev); 25081c63dc66SChristoph Hellwig if (dev->ctrl.admin_q) 25091c63dc66SChristoph Hellwig blk_put_queue(dev->ctrl.admin_q); 2510e286bcfcSScott Bauer free_opal_dev(dev->ctrl.opal_dev); 2511943e942eSJens Axboe mempool_destroy(dev->iod_mempool); 2512253fd4acSIsrael Rukshin put_device(dev->dev); 2513253fd4acSIsrael Rukshin kfree(dev->queues); 251457dacad5SJay Sternberg kfree(dev); 251557dacad5SJay Sternberg } 251657dacad5SJay Sternberg 25177c1ce408SChaitanya Kulkarni static void nvme_remove_dead_ctrl(struct nvme_dev *dev) 2518f58944e2SKeith Busch { 2519c1ac9a4bSKeith Busch /* 2520c1ac9a4bSKeith Busch * Set state to deleting now to avoid blocking nvme_wait_reset(), which 2521c1ac9a4bSKeith Busch * may be holding this pci_dev's device lock. 
2522c1ac9a4bSKeith Busch */ 2523c1ac9a4bSKeith Busch nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 2524d22524a4SChristoph Hellwig nvme_get_ctrl(&dev->ctrl); 252569d9a99cSKeith Busch nvme_dev_disable(dev, false); 25269f9cafc1SJianchao Wang nvme_kill_queues(&dev->ctrl); 252703e0f3a6SMing Lei if (!queue_work(nvme_wq, &dev->remove_work)) 2528f58944e2SKeith Busch nvme_put_ctrl(&dev->ctrl); 2529f58944e2SKeith Busch } 2530f58944e2SKeith Busch 2531fd634f41SChristoph Hellwig static void nvme_reset_work(struct work_struct *work) 253257dacad5SJay Sternberg { 2533d86c4d8eSChristoph Hellwig struct nvme_dev *dev = 2534d86c4d8eSChristoph Hellwig container_of(work, struct nvme_dev, ctrl.reset_work); 2535a98e58e5SScott Bauer bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL); 2536e71afda4SChaitanya Kulkarni int result; 253757dacad5SJay Sternberg 2538e71afda4SChaitanya Kulkarni if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) { 2539e71afda4SChaitanya Kulkarni result = -ENODEV; 2540fd634f41SChristoph Hellwig goto out; 2541e71afda4SChaitanya Kulkarni } 2542fd634f41SChristoph Hellwig 2543fd634f41SChristoph Hellwig /* 2544fd634f41SChristoph Hellwig * If we're called to reset a live controller first shut it down before 2545fd634f41SChristoph Hellwig * moving on. 2546fd634f41SChristoph Hellwig */ 2547b00a726aSKeith Busch if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) 2548a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 2549d6135c3aSKeith Busch nvme_sync_queues(&dev->ctrl); 2550fd634f41SChristoph Hellwig 25515c959d73SKeith Busch mutex_lock(&dev->shutdown_lock); 2552b00a726aSKeith Busch result = nvme_pci_enable(dev); 255357dacad5SJay Sternberg if (result) 25544726bcf3SKeith Busch goto out_unlock; 255557dacad5SJay Sternberg 255601ad0990SSagi Grimberg result = nvme_pci_configure_admin_queue(dev); 255757dacad5SJay Sternberg if (result) 25584726bcf3SKeith Busch goto out_unlock; 255957dacad5SJay Sternberg 256057dacad5SJay Sternberg result = nvme_alloc_admin_tags(dev); 256157dacad5SJay Sternberg if (result) 25624726bcf3SKeith Busch goto out_unlock; 256357dacad5SJay Sternberg 2564943e942eSJens Axboe /* 2565943e942eSJens Axboe * Limit the max command size to prevent iod->sg allocations going 2566943e942eSJens Axboe * over a single page. 2567943e942eSJens Axboe */ 25687637de31SChristoph Hellwig dev->ctrl.max_hw_sectors = min_t(u32, 25697637de31SChristoph Hellwig NVME_MAX_KB_SZ << 1, dma_max_mapping_size(dev->dev) >> 9); 2570943e942eSJens Axboe dev->ctrl.max_segments = NVME_MAX_SEGS; 2571a48bc520SChristoph Hellwig 2572a48bc520SChristoph Hellwig /* 2573a48bc520SChristoph Hellwig * Don't limit the IOMMU merged segment size. 2574a48bc520SChristoph Hellwig */ 2575a48bc520SChristoph Hellwig dma_set_max_seg_size(dev->dev, 0xffffffff); 2576a48bc520SChristoph Hellwig 25775c959d73SKeith Busch mutex_unlock(&dev->shutdown_lock); 25785c959d73SKeith Busch 25795c959d73SKeith Busch /* 25805c959d73SKeith Busch * Introduce CONNECTING state from nvme-fc/rdma transports to mark the 25815c959d73SKeith Busch * initializing procedure here. 
25825c959d73SKeith Busch */ 25835c959d73SKeith Busch if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { 25845c959d73SKeith Busch dev_warn(dev->ctrl.device, 25855c959d73SKeith Busch "failed to mark controller CONNECTING\n"); 2586cee6c269SMinwoo Im result = -EBUSY; 25875c959d73SKeith Busch goto out; 25885c959d73SKeith Busch } 2589943e942eSJens Axboe 259095093350SMax Gurtovoy /* 259195093350SMax Gurtovoy * We do not support an SGL for metadata (yet), so we are limited to a 259295093350SMax Gurtovoy * single integrity segment for the separate metadata pointer. 259395093350SMax Gurtovoy */ 259495093350SMax Gurtovoy dev->ctrl.max_integrity_segments = 1; 259595093350SMax Gurtovoy 2596ce4541f4SChristoph Hellwig result = nvme_init_identify(&dev->ctrl); 2597ce4541f4SChristoph Hellwig if (result) 2598f58944e2SKeith Busch goto out; 2599ce4541f4SChristoph Hellwig 2600e286bcfcSScott Bauer if (dev->ctrl.oacs & NVME_CTRL_OACS_SEC_SUPP) { 2601e286bcfcSScott Bauer if (!dev->ctrl.opal_dev) 26024f1244c8SChristoph Hellwig dev->ctrl.opal_dev = 26034f1244c8SChristoph Hellwig init_opal_dev(&dev->ctrl, &nvme_sec_submit); 2604e286bcfcSScott Bauer else if (was_suspend) 26054f1244c8SChristoph Hellwig opal_unlock_from_suspend(dev->ctrl.opal_dev); 2606e286bcfcSScott Bauer } else { 2607e286bcfcSScott Bauer free_opal_dev(dev->ctrl.opal_dev); 2608e286bcfcSScott Bauer dev->ctrl.opal_dev = NULL; 2609e286bcfcSScott Bauer } 2610a98e58e5SScott Bauer 2611f9f38e33SHelen Koike if (dev->ctrl.oacs & NVME_CTRL_OACS_DBBUF_SUPP) { 2612f9f38e33SHelen Koike result = nvme_dbbuf_dma_alloc(dev); 2613f9f38e33SHelen Koike if (result) 2614f9f38e33SHelen Koike dev_warn(dev->dev, 2615f9f38e33SHelen Koike "unable to allocate dma for dbbuf\n"); 2616f9f38e33SHelen Koike } 2617f9f38e33SHelen Koike 26189620cfbaSChristoph Hellwig if (dev->ctrl.hmpre) { 26199620cfbaSChristoph Hellwig result = nvme_setup_host_mem(dev); 26209620cfbaSChristoph Hellwig if (result < 0) 26219620cfbaSChristoph Hellwig goto out; 26229620cfbaSChristoph Hellwig } 262387ad72a5SChristoph Hellwig 262457dacad5SJay Sternberg result = nvme_setup_io_queues(dev); 262557dacad5SJay Sternberg if (result) 2626f58944e2SKeith Busch goto out; 262757dacad5SJay Sternberg 262821f033f7SKeith Busch /* 262957dacad5SJay Sternberg * Keep the controller around but remove all namespaces if we don't have 263057dacad5SJay Sternberg * any working I/O queue. 263157dacad5SJay Sternberg */ 263257dacad5SJay Sternberg if (dev->online_queues < 2) { 26331b3c47c1SSagi Grimberg dev_warn(dev->ctrl.device, "IO queues not created\n"); 26343b24774eSKeith Busch nvme_kill_queues(&dev->ctrl); 26355bae7f73SChristoph Hellwig nvme_remove_namespaces(&dev->ctrl); 2636770597ecSKeith Busch nvme_free_tagset(dev); 263757dacad5SJay Sternberg } else { 263825646264SKeith Busch nvme_start_queues(&dev->ctrl); 2639302ad8ccSKeith Busch nvme_wait_freeze(&dev->ctrl); 26405d02a5c1SKeith Busch nvme_dev_add(dev); 2641302ad8ccSKeith Busch nvme_unfreeze(&dev->ctrl); 264257dacad5SJay Sternberg } 264357dacad5SJay Sternberg 26442b1b7e78SJianchao Wang /* 26452b1b7e78SJianchao Wang * If only admin queue live, keep it to do further investigation or 26462b1b7e78SJianchao Wang * recovery. 
26472b1b7e78SJianchao Wang */ 26485d02a5c1SKeith Busch if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) { 26492b1b7e78SJianchao Wang dev_warn(dev->ctrl.device, 26505d02a5c1SKeith Busch "failed to mark controller live state\n"); 2651e71afda4SChaitanya Kulkarni result = -ENODEV; 2652bb8d261eSChristoph Hellwig goto out; 2653bb8d261eSChristoph Hellwig } 265492911a55SChristoph Hellwig 2655d09f2b45SSagi Grimberg nvme_start_ctrl(&dev->ctrl); 265657dacad5SJay Sternberg return; 265757dacad5SJay Sternberg 26584726bcf3SKeith Busch out_unlock: 26594726bcf3SKeith Busch mutex_unlock(&dev->shutdown_lock); 266057dacad5SJay Sternberg out: 26617c1ce408SChaitanya Kulkarni if (result) 26627c1ce408SChaitanya Kulkarni dev_warn(dev->ctrl.device, 26637c1ce408SChaitanya Kulkarni "Removing after probe failure status: %d\n", result); 26647c1ce408SChaitanya Kulkarni nvme_remove_dead_ctrl(dev); 266557dacad5SJay Sternberg } 266657dacad5SJay Sternberg 26675c8809e6SChristoph Hellwig static void nvme_remove_dead_ctrl_work(struct work_struct *work) 266857dacad5SJay Sternberg { 26695c8809e6SChristoph Hellwig struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); 267057dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev->dev); 267157dacad5SJay Sternberg 267257dacad5SJay Sternberg if (pci_get_drvdata(pdev)) 2673921920abSKeith Busch device_release_driver(&pdev->dev); 26741673f1f0SChristoph Hellwig nvme_put_ctrl(&dev->ctrl); 267557dacad5SJay Sternberg } 267657dacad5SJay Sternberg 26771c63dc66SChristoph Hellwig static int nvme_pci_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val) 267857dacad5SJay Sternberg { 26791c63dc66SChristoph Hellwig *val = readl(to_nvme_dev(ctrl)->bar + off); 26801c63dc66SChristoph Hellwig return 0; 268157dacad5SJay Sternberg } 26821c63dc66SChristoph Hellwig 26835fd4ce1bSChristoph Hellwig static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val) 26845fd4ce1bSChristoph Hellwig { 26855fd4ce1bSChristoph Hellwig writel(val, to_nvme_dev(ctrl)->bar + off); 26865fd4ce1bSChristoph Hellwig return 0; 26875fd4ce1bSChristoph Hellwig } 26885fd4ce1bSChristoph Hellwig 26897fd8930fSChristoph Hellwig static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val) 26907fd8930fSChristoph Hellwig { 26913a8ecc93SArd Biesheuvel *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off); 26927fd8930fSChristoph Hellwig return 0; 26937fd8930fSChristoph Hellwig } 26947fd8930fSChristoph Hellwig 269597c12223SKeith Busch static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) 269697c12223SKeith Busch { 269797c12223SKeith Busch struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); 269897c12223SKeith Busch 26992db24e4aSMax Gurtovoy return snprintf(buf, size, "%s\n", dev_name(&pdev->dev)); 270097c12223SKeith Busch } 270197c12223SKeith Busch 27021c63dc66SChristoph Hellwig static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { 27031a353d85SMing Lin .name = "pcie", 2704e439bb12SSagi Grimberg .module = THIS_MODULE, 2705e0596ab2SLogan Gunthorpe .flags = NVME_F_METADATA_SUPPORTED | 2706e0596ab2SLogan Gunthorpe NVME_F_PCI_P2PDMA, 27071c63dc66SChristoph Hellwig .reg_read32 = nvme_pci_reg_read32, 27085fd4ce1bSChristoph Hellwig .reg_write32 = nvme_pci_reg_write32, 27097fd8930fSChristoph Hellwig .reg_read64 = nvme_pci_reg_read64, 27101673f1f0SChristoph Hellwig .free_ctrl = nvme_pci_free_ctrl, 2711f866fc42SChristoph Hellwig .submit_async_event = nvme_pci_submit_async_event, 271297c12223SKeith Busch .get_address = nvme_pci_get_address, 27131c63dc66SChristoph Hellwig }; 
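/*
 * Editor's illustrative sketch, not part of the upstream driver: how the
 * register accessors collected in nvme_pci_ctrl_ops above are reached by
 * transport-independent core code.  It assumes the generic struct nvme_ctrl
 * carries an ->ops pointer that nvme_init_ctrl() wires to
 * &nvme_pci_ctrl_ops; the helper name below is hypothetical.
 */
#if 0	/* example only, never compiled */
static int nvme_example_read_csts(struct nvme_ctrl *ctrl, u32 *csts)
{
	/* For PCIe controllers this dispatches to nvme_pci_reg_read32(). */
	return ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, csts);
}
#endif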
271457dacad5SJay Sternberg 2715b00a726aSKeith Busch static int nvme_dev_map(struct nvme_dev *dev) 2716b00a726aSKeith Busch { 2717b00a726aSKeith Busch struct pci_dev *pdev = to_pci_dev(dev->dev); 2718b00a726aSKeith Busch 2719a1f447b3SJohannes Thumshirn if (pci_request_mem_regions(pdev, "nvme")) 2720b00a726aSKeith Busch return -ENODEV; 2721b00a726aSKeith Busch 272297f6ef64SXu Yu if (nvme_remap_bar(dev, NVME_REG_DBS + 4096)) 2723b00a726aSKeith Busch goto release; 2724b00a726aSKeith Busch 2725b00a726aSKeith Busch return 0; 2726b00a726aSKeith Busch release: 2727a1f447b3SJohannes Thumshirn pci_release_mem_regions(pdev); 2728b00a726aSKeith Busch return -ENODEV; 2729b00a726aSKeith Busch } 2730b00a726aSKeith Busch 27318427bbc2SKai-Heng Feng static unsigned long check_vendor_combination_bug(struct pci_dev *pdev) 2732ff5350a8SAndy Lutomirski { 2733ff5350a8SAndy Lutomirski if (pdev->vendor == 0x144d && pdev->device == 0xa802) { 2734ff5350a8SAndy Lutomirski /* 2735ff5350a8SAndy Lutomirski * Several Samsung devices seem to drop off the PCIe bus 2736ff5350a8SAndy Lutomirski * randomly when APST is on and uses the deepest sleep state. 2737ff5350a8SAndy Lutomirski * This has been observed on a Samsung "SM951 NVMe SAMSUNG 2738ff5350a8SAndy Lutomirski * 256GB", a "PM951 NVMe SAMSUNG 512GB", and a "Samsung SSD 2739ff5350a8SAndy Lutomirski * 950 PRO 256GB", but it seems to be restricted to two Dell 2740ff5350a8SAndy Lutomirski * laptops. 2741ff5350a8SAndy Lutomirski */ 2742ff5350a8SAndy Lutomirski if (dmi_match(DMI_SYS_VENDOR, "Dell Inc.") && 2743ff5350a8SAndy Lutomirski (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") || 2744ff5350a8SAndy Lutomirski dmi_match(DMI_PRODUCT_NAME, "Precision 5510"))) 2745ff5350a8SAndy Lutomirski return NVME_QUIRK_NO_DEEPEST_PS; 27468427bbc2SKai-Heng Feng } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) { 27478427bbc2SKai-Heng Feng /* 27488427bbc2SKai-Heng Feng * Samsung SSD 960 EVO drops off the PCIe bus after system 2749467c77d4SJarosław Janik * suspend on a Ryzen board, ASUS PRIME B350M-A, as well as 2750467c77d4SJarosław Janik * within few minutes after bootup on a Coffee Lake board - 2751467c77d4SJarosław Janik * ASUS PRIME Z370-A 27528427bbc2SKai-Heng Feng */ 27538427bbc2SKai-Heng Feng if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") && 2754467c77d4SJarosław Janik (dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") || 2755467c77d4SJarosław Janik dmi_match(DMI_BOARD_NAME, "PRIME Z370-A"))) 27568427bbc2SKai-Heng Feng return NVME_QUIRK_NO_APST; 27571fae37acSShyjumon N } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 || 27581fae37acSShyjumon N pdev->device == 0xa808 || pdev->device == 0xa809)) || 27591fae37acSShyjumon N (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) { 27601fae37acSShyjumon N /* 27611fae37acSShyjumon N * Forcing to use host managed nvme power settings for 27621fae37acSShyjumon N * lowest idle power with quick resume latency on 27631fae37acSShyjumon N * Samsung and Toshiba SSDs based on suspend behavior 27641fae37acSShyjumon N * on Coffee Lake board for LENOVO C640 27651fae37acSShyjumon N */ 27661fae37acSShyjumon N if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) && 27671fae37acSShyjumon N dmi_match(DMI_BOARD_NAME, "LNVNB161216")) 27681fae37acSShyjumon N return NVME_QUIRK_SIMPLE_SUSPEND; 2769ff5350a8SAndy Lutomirski } 2770ff5350a8SAndy Lutomirski 2771ff5350a8SAndy Lutomirski return 0; 2772ff5350a8SAndy Lutomirski } 2773ff5350a8SAndy Lutomirski 2774df4f9bc4SDavid E. Box #ifdef CONFIG_ACPI 2775df4f9bc4SDavid E. 
Box static bool nvme_acpi_storage_d3(struct pci_dev *dev) 2776df4f9bc4SDavid E. Box { 2777df4f9bc4SDavid E. Box struct acpi_device *adev; 2778df4f9bc4SDavid E. Box struct pci_dev *root; 2779df4f9bc4SDavid E. Box acpi_handle handle; 2780df4f9bc4SDavid E. Box acpi_status status; 2781df4f9bc4SDavid E. Box u8 val; 2782df4f9bc4SDavid E. Box 2783df4f9bc4SDavid E. Box /* 2784df4f9bc4SDavid E. Box * Look for _DSD property specifying that the storage device on the port 2785df4f9bc4SDavid E. Box * must use D3 to support deep platform power savings during 2786df4f9bc4SDavid E. Box * suspend-to-idle. 2787df4f9bc4SDavid E. Box */ 2788df4f9bc4SDavid E. Box root = pcie_find_root_port(dev); 2789df4f9bc4SDavid E. Box if (!root) 2790df4f9bc4SDavid E. Box return false; 2791df4f9bc4SDavid E. Box 2792df4f9bc4SDavid E. Box adev = ACPI_COMPANION(&root->dev); 2793df4f9bc4SDavid E. Box if (!adev) 2794df4f9bc4SDavid E. Box return false; 2795df4f9bc4SDavid E. Box 2796df4f9bc4SDavid E. Box /* 2797df4f9bc4SDavid E. Box * The property is defined in the PXSX device for South complex ports 2798df4f9bc4SDavid E. Box * and in the PEGP device for North complex ports. 2799df4f9bc4SDavid E. Box */ 2800df4f9bc4SDavid E. Box status = acpi_get_handle(adev->handle, "PXSX", &handle); 2801df4f9bc4SDavid E. Box if (ACPI_FAILURE(status)) { 2802df4f9bc4SDavid E. Box status = acpi_get_handle(adev->handle, "PEGP", &handle); 2803df4f9bc4SDavid E. Box if (ACPI_FAILURE(status)) 2804df4f9bc4SDavid E. Box return false; 2805df4f9bc4SDavid E. Box } 2806df4f9bc4SDavid E. Box 2807df4f9bc4SDavid E. Box if (acpi_bus_get_device(handle, &adev)) 2808df4f9bc4SDavid E. Box return false; 2809df4f9bc4SDavid E. Box 2810df4f9bc4SDavid E. Box if (fwnode_property_read_u8(acpi_fwnode_handle(adev), "StorageD3Enable", 2811df4f9bc4SDavid E. Box &val)) 2812df4f9bc4SDavid E. Box return false; 2813df4f9bc4SDavid E. Box return val == 1; 2814df4f9bc4SDavid E. Box } 2815df4f9bc4SDavid E. Box #else 2816df4f9bc4SDavid E. Box static inline bool nvme_acpi_storage_d3(struct pci_dev *dev) 2817df4f9bc4SDavid E. Box { 2818df4f9bc4SDavid E. Box return false; 2819df4f9bc4SDavid E. Box } 2820df4f9bc4SDavid E. Box #endif /* CONFIG_ACPI */ 2821df4f9bc4SDavid E. 
Box 282218119775SKeith Busch static void nvme_async_probe(void *data, async_cookie_t cookie) 282318119775SKeith Busch { 282418119775SKeith Busch struct nvme_dev *dev = data; 282580f513b5SKeith Busch 2826bd46a906SKeith Busch flush_work(&dev->ctrl.reset_work); 282718119775SKeith Busch flush_work(&dev->ctrl.scan_work); 282880f513b5SKeith Busch nvme_put_ctrl(&dev->ctrl); 282918119775SKeith Busch } 283018119775SKeith Busch 283157dacad5SJay Sternberg static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 283257dacad5SJay Sternberg { 283357dacad5SJay Sternberg int node, result = -ENOMEM; 283457dacad5SJay Sternberg struct nvme_dev *dev; 2835ff5350a8SAndy Lutomirski unsigned long quirks = id->driver_data; 2836943e942eSJens Axboe size_t alloc_size; 283757dacad5SJay Sternberg 283857dacad5SJay Sternberg node = dev_to_node(&pdev->dev); 283957dacad5SJay Sternberg if (node == NUMA_NO_NODE) 28402fa84351SMasayoshi Mizuma set_dev_node(&pdev->dev, first_memory_node); 284157dacad5SJay Sternberg 284257dacad5SJay Sternberg dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); 284357dacad5SJay Sternberg if (!dev) 284457dacad5SJay Sternberg return -ENOMEM; 2845147b27e4SSagi Grimberg 28462a5bcfddSWeiping Zhang dev->nr_write_queues = write_queues; 28472a5bcfddSWeiping Zhang dev->nr_poll_queues = poll_queues; 28482a5bcfddSWeiping Zhang dev->nr_allocated_queues = nvme_max_io_queues(dev) + 1; 28492a5bcfddSWeiping Zhang dev->queues = kcalloc_node(dev->nr_allocated_queues, 28502a5bcfddSWeiping Zhang sizeof(struct nvme_queue), GFP_KERNEL, node); 285157dacad5SJay Sternberg if (!dev->queues) 285257dacad5SJay Sternberg goto free; 285357dacad5SJay Sternberg 285457dacad5SJay Sternberg dev->dev = get_device(&pdev->dev); 285557dacad5SJay Sternberg pci_set_drvdata(pdev, dev); 285657dacad5SJay Sternberg 2857b00a726aSKeith Busch result = nvme_dev_map(dev); 2858b00a726aSKeith Busch if (result) 2859b00c9b7aSChristophe JAILLET goto put_pci; 2860b00a726aSKeith Busch 2861d86c4d8eSChristoph Hellwig INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); 28625c8809e6SChristoph Hellwig INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work); 286377bf25eaSKeith Busch mutex_init(&dev->shutdown_lock); 2864f3ca80fcSChristoph Hellwig 2865f3ca80fcSChristoph Hellwig result = nvme_setup_prp_pools(dev); 2866f3ca80fcSChristoph Hellwig if (result) 2867b00c9b7aSChristophe JAILLET goto unmap; 2868f3ca80fcSChristoph Hellwig 28698427bbc2SKai-Heng Feng quirks |= check_vendor_combination_bug(pdev); 2870ff5350a8SAndy Lutomirski 2871df4f9bc4SDavid E. Box if (!noacpi && nvme_acpi_storage_d3(pdev)) { 2872df4f9bc4SDavid E. Box /* 2873df4f9bc4SDavid E. Box * Some systems use a bios work around to ask for D3 on 2874df4f9bc4SDavid E. Box * platforms that support kernel managed suspend. 2875df4f9bc4SDavid E. Box */ 2876df4f9bc4SDavid E. Box dev_info(&pdev->dev, 2877df4f9bc4SDavid E. Box "platform quirk: setting simple suspend\n"); 2878df4f9bc4SDavid E. Box quirks |= NVME_QUIRK_SIMPLE_SUSPEND; 2879df4f9bc4SDavid E. Box } 2880df4f9bc4SDavid E. Box 2881943e942eSJens Axboe /* 2882943e942eSJens Axboe * Double check that our mempool alloc size will cover the biggest 2883943e942eSJens Axboe * command we support. 
2884943e942eSJens Axboe */ 2885b13c6393SChaitanya Kulkarni alloc_size = nvme_pci_iod_alloc_size(); 2886943e942eSJens Axboe WARN_ON_ONCE(alloc_size > PAGE_SIZE); 2887943e942eSJens Axboe 2888943e942eSJens Axboe dev->iod_mempool = mempool_create_node(1, mempool_kmalloc, 2889943e942eSJens Axboe mempool_kfree, 2890943e942eSJens Axboe (void *) alloc_size, 2891943e942eSJens Axboe GFP_KERNEL, node); 2892943e942eSJens Axboe if (!dev->iod_mempool) { 2893943e942eSJens Axboe result = -ENOMEM; 2894943e942eSJens Axboe goto release_pools; 2895943e942eSJens Axboe } 2896943e942eSJens Axboe 2897b6e44b4cSKeith Busch result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops, 2898b6e44b4cSKeith Busch quirks); 2899b6e44b4cSKeith Busch if (result) 2900b6e44b4cSKeith Busch goto release_mempool; 2901b6e44b4cSKeith Busch 29021b3c47c1SSagi Grimberg dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev)); 29031b3c47c1SSagi Grimberg 2904bd46a906SKeith Busch nvme_reset_ctrl(&dev->ctrl); 290518119775SKeith Busch async_schedule(nvme_async_probe, dev); 29064caff8fcSSagi Grimberg 290757dacad5SJay Sternberg return 0; 290857dacad5SJay Sternberg 2909b6e44b4cSKeith Busch release_mempool: 2910b6e44b4cSKeith Busch mempool_destroy(dev->iod_mempool); 291157dacad5SJay Sternberg release_pools: 291257dacad5SJay Sternberg nvme_release_prp_pools(dev); 2913b00c9b7aSChristophe JAILLET unmap: 2914b00c9b7aSChristophe JAILLET nvme_dev_unmap(dev); 291557dacad5SJay Sternberg put_pci: 291657dacad5SJay Sternberg put_device(dev->dev); 291757dacad5SJay Sternberg free: 291857dacad5SJay Sternberg kfree(dev->queues); 291957dacad5SJay Sternberg kfree(dev); 292057dacad5SJay Sternberg return result; 292157dacad5SJay Sternberg } 292257dacad5SJay Sternberg 2923775755edSChristoph Hellwig static void nvme_reset_prepare(struct pci_dev *pdev) 292457dacad5SJay Sternberg { 292557dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 2926c1ac9a4bSKeith Busch 2927c1ac9a4bSKeith Busch /* 2928c1ac9a4bSKeith Busch * We don't need to check the return value from waiting for the reset 2929c1ac9a4bSKeith Busch * state as pci_dev device lock is held, making it impossible to race 2930c1ac9a4bSKeith Busch * with ->remove(). 2931c1ac9a4bSKeith Busch */ 2932c1ac9a4bSKeith Busch nvme_disable_prepare_reset(dev, false); 2933c1ac9a4bSKeith Busch nvme_sync_queues(&dev->ctrl); 2934775755edSChristoph Hellwig } 293557dacad5SJay Sternberg 2936775755edSChristoph Hellwig static void nvme_reset_done(struct pci_dev *pdev) 2937775755edSChristoph Hellwig { 2938f263fbb8SLinus Torvalds struct nvme_dev *dev = pci_get_drvdata(pdev); 2939c1ac9a4bSKeith Busch 2940c1ac9a4bSKeith Busch if (!nvme_try_sched_reset(&dev->ctrl)) 2941c1ac9a4bSKeith Busch flush_work(&dev->ctrl.reset_work); 294257dacad5SJay Sternberg } 294357dacad5SJay Sternberg 294457dacad5SJay Sternberg static void nvme_shutdown(struct pci_dev *pdev) 294557dacad5SJay Sternberg { 294657dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 29474e523547SBaolin Wang 2948c1ac9a4bSKeith Busch nvme_disable_prepare_reset(dev, true); 294957dacad5SJay Sternberg } 295057dacad5SJay Sternberg 2951f58944e2SKeith Busch /* 2952f58944e2SKeith Busch * The driver's remove may be called on a device in a partially initialized 2953f58944e2SKeith Busch * state. This function must not have any dependencies on the device state in 2954f58944e2SKeith Busch * order to proceed. 
2955f58944e2SKeith Busch */ 295657dacad5SJay Sternberg static void nvme_remove(struct pci_dev *pdev) 295757dacad5SJay Sternberg { 295857dacad5SJay Sternberg struct nvme_dev *dev = pci_get_drvdata(pdev); 295957dacad5SJay Sternberg 2960bb8d261eSChristoph Hellwig nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING); 296157dacad5SJay Sternberg pci_set_drvdata(pdev, NULL); 29620ff9d4e1SKeith Busch 29636db28edaSKeith Busch if (!pci_device_is_present(pdev)) { 29640ff9d4e1SKeith Busch nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); 29651d39e692SKeith Busch nvme_dev_disable(dev, true); 2966cb4bfda6SKeith Busch nvme_dev_remove_admin(dev); 29676db28edaSKeith Busch } 29680ff9d4e1SKeith Busch 2969d86c4d8eSChristoph Hellwig flush_work(&dev->ctrl.reset_work); 2970d09f2b45SSagi Grimberg nvme_stop_ctrl(&dev->ctrl); 2971d09f2b45SSagi Grimberg nvme_remove_namespaces(&dev->ctrl); 2972a5cdb68cSKeith Busch nvme_dev_disable(dev, true); 29739fe5c59fSKeith Busch nvme_release_cmb(dev); 297487ad72a5SChristoph Hellwig nvme_free_host_mem(dev); 297557dacad5SJay Sternberg nvme_dev_remove_admin(dev); 297657dacad5SJay Sternberg nvme_free_queues(dev, 0); 297757dacad5SJay Sternberg nvme_release_prp_pools(dev); 2978b00a726aSKeith Busch nvme_dev_unmap(dev); 2979726612b6SIsrael Rukshin nvme_uninit_ctrl(&dev->ctrl); 298057dacad5SJay Sternberg } 298157dacad5SJay Sternberg 298257dacad5SJay Sternberg #ifdef CONFIG_PM_SLEEP 2983d916b1beSKeith Busch static int nvme_get_power_state(struct nvme_ctrl *ctrl, u32 *ps) 2984d916b1beSKeith Busch { 2985d916b1beSKeith Busch return nvme_get_features(ctrl, NVME_FEAT_POWER_MGMT, 0, NULL, 0, ps); 2986d916b1beSKeith Busch } 2987d916b1beSKeith Busch 2988d916b1beSKeith Busch static int nvme_set_power_state(struct nvme_ctrl *ctrl, u32 ps) 2989d916b1beSKeith Busch { 2990d916b1beSKeith Busch return nvme_set_features(ctrl, NVME_FEAT_POWER_MGMT, ps, NULL, 0, NULL); 2991d916b1beSKeith Busch } 2992d916b1beSKeith Busch 2993d916b1beSKeith Busch static int nvme_resume(struct device *dev) 2994d916b1beSKeith Busch { 2995d916b1beSKeith Busch struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 2996d916b1beSKeith Busch struct nvme_ctrl *ctrl = &ndev->ctrl; 2997d916b1beSKeith Busch 29984eaefe8cSRafael J. Wysocki if (ndev->last_ps == U32_MAX || 2999d916b1beSKeith Busch nvme_set_power_state(ctrl, ndev->last_ps) != 0) 3000c1ac9a4bSKeith Busch return nvme_try_sched_reset(&ndev->ctrl); 3001d916b1beSKeith Busch return 0; 3002d916b1beSKeith Busch } 3003d916b1beSKeith Busch 300457dacad5SJay Sternberg static int nvme_suspend(struct device *dev) 300557dacad5SJay Sternberg { 300657dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev); 300757dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev); 3008d916b1beSKeith Busch struct nvme_ctrl *ctrl = &ndev->ctrl; 3009d916b1beSKeith Busch int ret = -EBUSY; 3010d916b1beSKeith Busch 30114eaefe8cSRafael J. Wysocki ndev->last_ps = U32_MAX; 30124eaefe8cSRafael J. Wysocki 3013d916b1beSKeith Busch /* 3014d916b1beSKeith Busch * The platform does not remove power for a kernel managed suspend so 3015d916b1beSKeith Busch * use host managed nvme power settings for lowest idle power if 3016d916b1beSKeith Busch * possible. This should have quicker resume latency than a full device 3017d916b1beSKeith Busch * shutdown. But if the firmware is involved after the suspend or the 3018d916b1beSKeith Busch * device does not support any non-default power states, shut down the 3019d916b1beSKeith Busch * device fully. 30204eaefe8cSRafael J. Wysocki * 30214eaefe8cSRafael J. 
Wysocki * If ASPM is not enabled for the device, shut down the device and allow 30224eaefe8cSRafael J. Wysocki * the PCI bus layer to put it into D3 in order to take the PCIe link 30234eaefe8cSRafael J. Wysocki * down, so as to allow the platform to achieve its minimum low-power 30244eaefe8cSRafael J. Wysocki * state (which may not be possible if the link is up). 3025b97120b1SChristoph Hellwig * 3026b97120b1SChristoph Hellwig * If a host memory buffer is enabled, shut down the device as the NVMe 3027b97120b1SChristoph Hellwig * specification allows the device to access the host memory buffer in 3028b97120b1SChristoph Hellwig * host DRAM from all power states, but hosts will fail access to DRAM 3029b97120b1SChristoph Hellwig * during S3. 3030d916b1beSKeith Busch */ 30314eaefe8cSRafael J. Wysocki if (pm_suspend_via_firmware() || !ctrl->npss || 3032cb32de1bSMario Limonciello !pcie_aspm_enabled(pdev) || 3033b97120b1SChristoph Hellwig ndev->nr_host_mem_descs || 3034c1ac9a4bSKeith Busch (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) 3035c1ac9a4bSKeith Busch return nvme_disable_prepare_reset(ndev, true); 3036d916b1beSKeith Busch 3037d916b1beSKeith Busch nvme_start_freeze(ctrl); 3038d916b1beSKeith Busch nvme_wait_freeze(ctrl); 3039d916b1beSKeith Busch nvme_sync_queues(ctrl); 3040d916b1beSKeith Busch 30415d02a5c1SKeith Busch if (ctrl->state != NVME_CTRL_LIVE) 3042d916b1beSKeith Busch goto unfreeze; 3043d916b1beSKeith Busch 3044d916b1beSKeith Busch ret = nvme_get_power_state(ctrl, &ndev->last_ps); 3045d916b1beSKeith Busch if (ret < 0) 3046d916b1beSKeith Busch goto unfreeze; 3047d916b1beSKeith Busch 30487cbb5c6fSMario Limonciello /* 30497cbb5c6fSMario Limonciello * A saved state prevents pci pm from generically controlling the 30507cbb5c6fSMario Limonciello * device's power. If we're using protocol specific settings, we don't 30517cbb5c6fSMario Limonciello * want pci interfering. 30527cbb5c6fSMario Limonciello */ 30537cbb5c6fSMario Limonciello pci_save_state(pdev); 30547cbb5c6fSMario Limonciello 3055d916b1beSKeith Busch ret = nvme_set_power_state(ctrl, ctrl->npss); 3056d916b1beSKeith Busch if (ret < 0) 3057d916b1beSKeith Busch goto unfreeze; 3058d916b1beSKeith Busch 3059d916b1beSKeith Busch if (ret) { 30607cbb5c6fSMario Limonciello /* discard the saved state */ 30617cbb5c6fSMario Limonciello pci_load_saved_state(pdev, NULL); 30627cbb5c6fSMario Limonciello 3063d916b1beSKeith Busch /* 3064d916b1beSKeith Busch * Clearing npss forces a controller reset on resume. The 306505d3046fSGeert Uytterhoeven * correct value will be rediscovered then. 
3066d916b1beSKeith Busch */ 3067c1ac9a4bSKeith Busch ret = nvme_disable_prepare_reset(ndev, true); 3068d916b1beSKeith Busch ctrl->npss = 0; 3069d916b1beSKeith Busch } 3070d916b1beSKeith Busch unfreeze: 3071d916b1beSKeith Busch nvme_unfreeze(ctrl); 3072d916b1beSKeith Busch return ret; 3073d916b1beSKeith Busch } 3074d916b1beSKeith Busch 3075d916b1beSKeith Busch static int nvme_simple_suspend(struct device *dev) 3076d916b1beSKeith Busch { 3077d916b1beSKeith Busch struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev)); 30784e523547SBaolin Wang 3079c1ac9a4bSKeith Busch return nvme_disable_prepare_reset(ndev, true); 308057dacad5SJay Sternberg } 308157dacad5SJay Sternberg 3082d916b1beSKeith Busch static int nvme_simple_resume(struct device *dev) 308357dacad5SJay Sternberg { 308457dacad5SJay Sternberg struct pci_dev *pdev = to_pci_dev(dev); 308557dacad5SJay Sternberg struct nvme_dev *ndev = pci_get_drvdata(pdev); 308657dacad5SJay Sternberg 3087c1ac9a4bSKeith Busch return nvme_try_sched_reset(&ndev->ctrl); 308857dacad5SJay Sternberg } 308957dacad5SJay Sternberg 309021774222SYueHaibing static const struct dev_pm_ops nvme_dev_pm_ops = { 3091d916b1beSKeith Busch .suspend = nvme_suspend, 3092d916b1beSKeith Busch .resume = nvme_resume, 3093d916b1beSKeith Busch .freeze = nvme_simple_suspend, 3094d916b1beSKeith Busch .thaw = nvme_simple_resume, 3095d916b1beSKeith Busch .poweroff = nvme_simple_suspend, 3096d916b1beSKeith Busch .restore = nvme_simple_resume, 3097d916b1beSKeith Busch }; 3098d916b1beSKeith Busch #endif /* CONFIG_PM_SLEEP */ 309957dacad5SJay Sternberg 3100a0a3408eSKeith Busch static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev, 3101a0a3408eSKeith Busch pci_channel_state_t state) 3102a0a3408eSKeith Busch { 3103a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 3104a0a3408eSKeith Busch 3105a0a3408eSKeith Busch /* 3106a0a3408eSKeith Busch * A frozen channel requires a reset. When detected, this method will 3107a0a3408eSKeith Busch * shutdown the controller to quiesce. The controller will be restarted 3108a0a3408eSKeith Busch * after the slot reset through driver's slot_reset callback. 
3109a0a3408eSKeith Busch */ 3110a0a3408eSKeith Busch switch (state) { 3111a0a3408eSKeith Busch case pci_channel_io_normal: 3112a0a3408eSKeith Busch return PCI_ERS_RESULT_CAN_RECOVER; 3113a0a3408eSKeith Busch case pci_channel_io_frozen: 3114d011fb31SKeith Busch dev_warn(dev->ctrl.device, 3115d011fb31SKeith Busch "frozen state error detected, reset controller\n"); 3116a5cdb68cSKeith Busch nvme_dev_disable(dev, false); 3117a0a3408eSKeith Busch return PCI_ERS_RESULT_NEED_RESET; 3118a0a3408eSKeith Busch case pci_channel_io_perm_failure: 3119d011fb31SKeith Busch dev_warn(dev->ctrl.device, 3120d011fb31SKeith Busch "failure state error detected, request disconnect\n"); 3121a0a3408eSKeith Busch return PCI_ERS_RESULT_DISCONNECT; 3122a0a3408eSKeith Busch } 3123a0a3408eSKeith Busch return PCI_ERS_RESULT_NEED_RESET; 3124a0a3408eSKeith Busch } 3125a0a3408eSKeith Busch 3126a0a3408eSKeith Busch static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) 3127a0a3408eSKeith Busch { 3128a0a3408eSKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 3129a0a3408eSKeith Busch 31301b3c47c1SSagi Grimberg dev_info(dev->ctrl.device, "restart after slot reset\n"); 3131a0a3408eSKeith Busch pci_restore_state(pdev); 3132d86c4d8eSChristoph Hellwig nvme_reset_ctrl(&dev->ctrl); 3133a0a3408eSKeith Busch return PCI_ERS_RESULT_RECOVERED; 3134a0a3408eSKeith Busch } 3135a0a3408eSKeith Busch 3136a0a3408eSKeith Busch static void nvme_error_resume(struct pci_dev *pdev) 3137a0a3408eSKeith Busch { 313872cd4cc2SKeith Busch struct nvme_dev *dev = pci_get_drvdata(pdev); 313972cd4cc2SKeith Busch 314072cd4cc2SKeith Busch flush_work(&dev->ctrl.reset_work); 3141a0a3408eSKeith Busch } 3142a0a3408eSKeith Busch 314357dacad5SJay Sternberg static const struct pci_error_handlers nvme_err_handler = { 314457dacad5SJay Sternberg .error_detected = nvme_error_detected, 314557dacad5SJay Sternberg .slot_reset = nvme_slot_reset, 314657dacad5SJay Sternberg .resume = nvme_error_resume, 3147775755edSChristoph Hellwig .reset_prepare = nvme_reset_prepare, 3148775755edSChristoph Hellwig .reset_done = nvme_reset_done, 314957dacad5SJay Sternberg }; 315057dacad5SJay Sternberg 315157dacad5SJay Sternberg static const struct pci_device_id nvme_id_table[] = { 3152972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0953), /* Intel 750/P3500/P3600/P3700 */ 315308095e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 3154e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 3155972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0a53), /* Intel P3520 */ 315699466e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 3157e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 3158972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */ 315999466e70SKeith Busch .driver_data = NVME_QUIRK_STRIPE_SIZE | 3160e850fd16SChristoph Hellwig NVME_QUIRK_DEALLOCATE_ZEROES, }, 3161972b13e2SDavid Fugate { PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */ 3162f99cb7afSDavid Wayne Fugate .driver_data = NVME_QUIRK_STRIPE_SIZE | 3163f99cb7afSDavid Wayne Fugate NVME_QUIRK_DEALLOCATE_ZEROES, }, 316450af47d0SAndy Lutomirski { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ 31659abd68efSJens Axboe .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 31666c6aa2f2SAkinobu Mita NVME_QUIRK_MEDIUM_PRIO_SQ | 3167ce4cc313SDavid Milburn NVME_QUIRK_NO_TEMP_THRESH_CHANGE | 3168ce4cc313SDavid Milburn NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 31696299358dSJames Dingwall { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ 31706299358dSJames Dingwall .driver_data = 
NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3171540c801cSKeith Busch { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ 31727b210e4eSChristoph Hellwig .driver_data = NVME_QUIRK_IDENTIFY_CNS | 31737b210e4eSChristoph Hellwig NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 31745bedd3afSChristoph Hellwig { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ 31755bedd3afSChristoph Hellwig .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, }, 31760302ae60SMicah Parrish { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ 31770302ae60SMicah Parrish .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 317854adc010SGuilherme G. Piccoli { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ 317954adc010SGuilherme G. Piccoli .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 31808c97eeccSJeff Lien { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */ 31818c97eeccSJeff Lien .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3182015282c9SWenbo Wang { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ 3183015282c9SWenbo Wang .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3184d554b5e1SMartin K. Petersen { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ 3185d554b5e1SMartin K. Petersen .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3186d554b5e1SMartin K. Petersen { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ 3187d554b5e1SMartin K. Petersen .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 3188608cc4b1SChristoph Hellwig { PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */ 3189608cc4b1SChristoph Hellwig .driver_data = NVME_QUIRK_LIGHTNVM, }, 3190608cc4b1SChristoph Hellwig { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */ 3191608cc4b1SChristoph Hellwig .driver_data = NVME_QUIRK_LIGHTNVM, }, 3192ea48e877SWei Xu { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */ 3193ea48e877SWei Xu .driver_data = NVME_QUIRK_LIGHTNVM, }, 319408b903b5SMisha Nasledov { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */ 319508b903b5SMisha Nasledov .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 3196f03e42c6SGabriel Craciunescu { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ 3197f03e42c6SGabriel Craciunescu .driver_data = NVME_QUIRK_NO_DEEPEST_PS | 3198f03e42c6SGabriel Craciunescu NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 31995611ec2bSKai-Heng Feng { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ 32005611ec2bSKai-Heng Feng .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 320102ca079cSKai-Heng Feng { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */ 320202ca079cSKai-Heng Feng .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, 320398f7b86aSAndy Shevchenko { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), 320498f7b86aSAndy Shevchenko .driver_data = NVME_QUIRK_SINGLE_VECTOR }, 3205124298bdSDaniel Roschka { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, 320666341331SBenjamin Herrenschmidt { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005), 320766341331SBenjamin Herrenschmidt .driver_data = NVME_QUIRK_SINGLE_VECTOR | 3208d38e9f04SBenjamin Herrenschmidt NVME_QUIRK_128_BYTES_SQES | 3209d38e9f04SBenjamin Herrenschmidt NVME_QUIRK_SHARED_TAGS }, 32100b85f59dSAndy Shevchenko 32110b85f59dSAndy Shevchenko { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, 321257dacad5SJay Sternberg { 0, } 321357dacad5SJay Sternberg }; 321457dacad5SJay Sternberg MODULE_DEVICE_TABLE(pci, nvme_id_table); 321557dacad5SJay Sternberg 321657dacad5SJay Sternberg static struct pci_driver nvme_driver = { 321757dacad5SJay Sternberg .name = "nvme", 321857dacad5SJay Sternberg .id_table = nvme_id_table, 321957dacad5SJay Sternberg .probe = nvme_probe, 322057dacad5SJay 
Sternberg .remove = nvme_remove, 322157dacad5SJay Sternberg .shutdown = nvme_shutdown, 3222d916b1beSKeith Busch #ifdef CONFIG_PM_SLEEP 322357dacad5SJay Sternberg .driver = { 322457dacad5SJay Sternberg .pm = &nvme_dev_pm_ops, 322557dacad5SJay Sternberg }, 3226d916b1beSKeith Busch #endif 322774d986abSAlexander Duyck .sriov_configure = pci_sriov_configure_simple, 322857dacad5SJay Sternberg .err_handler = &nvme_err_handler, 322957dacad5SJay Sternberg }; 323057dacad5SJay Sternberg 323157dacad5SJay Sternberg static int __init nvme_init(void) 323257dacad5SJay Sternberg { 323381101540SChristoph Hellwig BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64); 323481101540SChristoph Hellwig BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); 323581101540SChristoph Hellwig BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); 3236612b7286SMing Lei BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2); 323717c33167SKeith Busch 32389a6327d2SSagi Grimberg return pci_register_driver(&nvme_driver); 323957dacad5SJay Sternberg } 324057dacad5SJay Sternberg 324157dacad5SJay Sternberg static void __exit nvme_exit(void) 324257dacad5SJay Sternberg { 324357dacad5SJay Sternberg pci_unregister_driver(&nvme_driver); 324403e0f3a6SMing Lei flush_workqueue(nvme_wq); 324557dacad5SJay Sternberg } 324657dacad5SJay Sternberg 324757dacad5SJay Sternberg MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>"); 324857dacad5SJay Sternberg MODULE_LICENSE("GPL"); 324957dacad5SJay Sternberg MODULE_VERSION("1.0"); 325057dacad5SJay Sternberg module_init(nvme_init); 325157dacad5SJay Sternberg module_exit(nvme_exit); 3252
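/*
 * Editor's illustrative sketch, not part of the upstream driver: the shape
 * of an nvme_id_table[] quirk entry.  The vendor/device IDs below are made
 * up; a real entry would sit inside nvme_id_table[] above, before the
 * PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, ...) catch-all, so that the
 * flags in .driver_data reach nvme_probe() via id->driver_data and are then
 * OR-ed with the result of check_vendor_combination_bug().
 */
#if 0	/* example only, never compiled */
static const struct pci_device_id nvme_example_id_table[] = {
	{ PCI_DEVICE(0x1234, 0xabcd),	/* hypothetical controller */
		.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
	{ 0, }
};
#endif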